/*
 * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
 * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this list of
 *    conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice, this list
 *    of conditions and the following disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors may be used
 *    to endorse or promote products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LOS_ARCH_ATOMIC_H
#define _LOS_ARCH_ATOMIC_H

#include "los_compiler.h"

#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */

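/*
 * Implementation note: each 32-bit operation below pairs a RISC-V AMO or
 * load-reserved instruction with LOS_IntLock()/LOS_IntRestore(), so the
 * whole sequence is also atomic with respect to interrupts on the local core.
 */

/* Atomically read a 32-bit variable and return its value. */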
STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
{
    INT32 val;
    UINT32 intSave;

    intSave = LOS_IntLock();

    __asm__ __volatile__("lr.w %0, (%1)\n"
                         "fence rw, rw\n"
                         : "=&r"(val)
                         : "r"(v)
                         : "memory");
    LOS_IntRestore(intSave);

    return val;
}

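/* Atomically set a 32-bit variable to setVal; the previous value is discarded. */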
STATIC INLINE VOID ArchAtomicSet(Atomic *v, INT32 setVal)
{
    UINT32 prevVal;
    UINT32 intSave;

    intSave = LOS_IntLock();

    __asm__ __volatile__("amoswap.w %0, %2, (%1)\n"
                         : "=r"(prevVal)
                         : "r"(v), "r"(setVal)
                         : "memory");
    LOS_IntRestore(intSave);
}

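/*
 * Atomically add addVal to a 32-bit variable and return the new value.
 * The reload after amoadd.w is safe here: with interrupts locked, nothing
 * on this core can modify *v between the add and the reload.
 */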
STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
{
    INT32 val;
    UINT32 intSave;

    intSave = LOS_IntLock();

    __asm__ __volatile__("amoadd.w %0, %2, (%1)\n"
                         "lw %0, (%1)\n"
                         "fence rw, rw\n"
                         : "=&r"(val)
                         : "r"(v), "r"(addVal)
                         : "memory");
    LOS_IntRestore(intSave);

    return val;
}

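/* Atomically subtract subVal from a 32-bit variable and return the new value. */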
STATIC INLINE INT32 ArchAtomicSub(Atomic *v, INT32 subVal)
{
    INT32 val;
    UINT32 intSave;

    intSave = LOS_IntLock();

    __asm__ __volatile__("amoadd.w %0, %2, (%1)\n"
                         "lw %0, (%1)\n"
                         "fence rw, rw\n"
                         : "=&r"(val)
                         : "r"(v), "r"(-subVal)
                         : "memory");
    LOS_IntRestore(intSave);

    return val;
}

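/*
 * Convenience wrappers: Inc/Dec discard the result, while IncRet/DecRet
 * return the new value.
 */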
STATIC INLINE VOID ArchAtomicInc(Atomic *v)
{
    (VOID)ArchAtomicAdd(v, 1);
}

STATIC INLINE VOID ArchAtomicDec(Atomic *v)
{
    (VOID)ArchAtomicSub(v, 1);
}

STATIC INLINE INT32 ArchAtomicIncRet(Atomic *v)
{
    return ArchAtomicAdd(v, 1);
}

STATIC INLINE INT32 ArchAtomicDecRet(Atomic *v)
{
    return ArchAtomicSub(v, 1);
}

/**
 * @ingroup  los_arch_atomic
 * @brief Atomic exchange for a 32-bit variable.
 *
 * @par Description:
 * This API is used to implement an atomic exchange for a 32-bit variable
 * and return the previous value of the atomic variable.
 * @attention
 * <ul><li>The pointer v must not be NULL.</li></ul>
 *
 * @param  v       [IN] The variable pointer.
 * @param  val     [IN] The exchange value.
 *
 * @retval #INT32       The previous value of the atomic variable.
 * @par Dependency:
 * <ul><li>los_arch_atomic.h: the header file that contains the API declaration.</li></ul>
 * @see
 */
STATIC INLINE INT32 ArchAtomicXchg32bits(volatile INT32 *v, INT32 val)
{
    INT32 prevVal = 0;
    UINT32 intSave;

    intSave = LOS_IntLock();

    __asm__ __volatile__("lw %0, 0(%1)\n"
                         "amoswap.w %0, %2, (%1)\n"
                         : "=&r"(prevVal)
                         : "r"(v), "r"(val)
                         : "memory");
    LOS_IntRestore(intSave);

    return prevVal;
}

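/*
 * Usage sketch (hypothetical caller, not part of this header):
 *
 *     volatile INT32 flag = 0;
 *     INT32 old = ArchAtomicXchg32bits(&flag, 1); // old == 0 on the first call
 */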
/**
 * @ingroup  los_arch_atomic
 * @brief Atomic compare-and-exchange for a 32-bit variable.
 *
 * @par Description:
 * This API is used to atomically exchange the value of a 32-bit variable
 * with val if its current value equals oldVal.
 * @attention
 * <ul><li>The pointer v must not be NULL.</li></ul>
 *
 * @param  v       [IN] The variable pointer.
 * @param  val     [IN] The new value.
 * @param  oldVal  [IN] The old value.
 *
 * @retval TRUE  The previous value of the atomic variable is not equal to oldVal (no exchange performed).
 * @retval FALSE The previous value of the atomic variable is equal to oldVal (exchange performed).
 * @par Dependency:
 * <ul><li>los_arch_atomic.h: the header file that contains the API declaration.</li></ul>
 * @see
 */
STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 oldVal)
{
    INT32 prevVal = 0;
    UINT32 intSave;

    intSave = LOS_IntLock();
    __asm__ __volatile__("lw %0, 0(%1)\n"
                         "bne %0, %2, 1f\n"
                         "amoswap.w %0, %3, (%1)\n"
                         "1:"
                         : "=&r"(prevVal)
                         : "r"(v), "r"(oldVal), "r"(val)
                         : "memory");
    LOS_IntRestore(intSave);

    return prevVal != oldVal;
}

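/*
 * Usage sketch (hypothetical caller, not part of this header). Note the
 * inverted return value: FALSE means the compare matched and the exchange
 * was performed.
 *
 *     volatile INT32 lock = 0;
 *     if (!ArchAtomicCmpXchg32bits(&lock, 1, 0)) {
 *         // lock was 0 and is now 1: acquired
 *     }
 */

/*
 * The 64-bit operations below fall back to plain loads and stores under the
 * interrupt lock, since RV32 provides no 64-bit AMO instructions.
 */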
STATIC INLINE INT64 ArchAtomic64Read(const Atomic64 *v)
{
    INT64 val;
    UINT32 intSave;

    intSave = LOS_IntLock();
    val = *v;
    LOS_IntRestore(intSave);

    return val;
}

STATIC INLINE VOID ArchAtomic64Set(Atomic64 *v, INT64 setVal)
{
    UINT32 intSave;

    intSave = LOS_IntLock();
    *v = setVal;
    LOS_IntRestore(intSave);
}

STATIC INLINE INT64 ArchAtomic64Add(Atomic64 *v, INT64 addVal)
{
    INT64 val;
    UINT32 intSave;

    intSave = LOS_IntLock();
    *v += addVal;
    val = *v;
    LOS_IntRestore(intSave);

    return val;
}

STATIC INLINE INT64 ArchAtomic64Sub(Atomic64 *v, INT64 subVal)
{
    INT64 val;
    UINT32 intSave;

    intSave = LOS_IntLock();
    *v -= subVal;
    val = *v;
    LOS_IntRestore(intSave);

    return val;
}

STATIC INLINE VOID ArchAtomic64Inc(Atomic64 *v)
{
    (VOID)ArchAtomic64Add(v, 1);
}

STATIC INLINE INT64 ArchAtomic64IncRet(Atomic64 *v)
{
    return ArchAtomic64Add(v, 1);
}

STATIC INLINE VOID ArchAtomic64Dec(Atomic64 *v)
{
    (VOID)ArchAtomic64Sub(v, 1);
}

STATIC INLINE INT64 ArchAtomic64DecRet(Atomic64 *v)
{
    return ArchAtomic64Sub(v, 1);
}

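/**
 * @ingroup  los_arch_atomic
 * @brief Atomic exchange for a 64-bit variable.
 *
 * @par Description:
 * This API is used to implement an atomic exchange for a 64-bit variable
 * and return the previous value of the atomic variable.
 * @attention
 * <ul><li>The pointer v must not be NULL.</li></ul>
 *
 * @param  v       [IN] The variable pointer.
 * @param  val     [IN] The exchange value.
 *
 * @retval #INT64       The previous value of the atomic variable.
 */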
STATIC INLINE INT64 ArchAtomicXchg64bits(Atomic64 *v, INT64 val)
{
    INT64 prevVal;
    UINT32 intSave;

    intSave = LOS_IntLock();
    prevVal = *v;
    *v = val;
    LOS_IntRestore(intSave);

    return prevVal;
}

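/**
 * @ingroup  los_arch_atomic
 * @brief Atomic compare-and-exchange for a 64-bit variable.
 *
 * @par Description:
 * This API atomically exchanges the value of a 64-bit variable with val
 * if its current value equals oldVal.
 * @attention
 * <ul><li>The pointer v must not be NULL.</li></ul>
 *
 * @param  v       [IN] The variable pointer.
 * @param  val     [IN] The new value.
 * @param  oldVal  [IN] The old value.
 *
 * @retval TRUE  No exchange performed (previous value was not equal to oldVal).
 * @retval FALSE Exchange performed (previous value was equal to oldVal).
 */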
STATIC INLINE BOOL ArchAtomicCmpXchg64bits(Atomic64 *v, INT64 val, INT64 oldVal)
{
    INT64 prevVal;
    UINT32 intSave;

    intSave = LOS_IntLock();
    prevVal = *v;
    if (prevVal == oldVal) {
        *v = val;
    }
    LOS_IntRestore(intSave);

    return prevVal != oldVal;
}

#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */

#endif /* _LOS_ARCH_ATOMIC_H */