/*
 * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
 * Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this list of
 *    conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice, this list
 *    of conditions and the following disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors may be used
 *    to endorse or promote products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LOS_ARCH_ATOMIC_H
#define _LOS_ARCH_ATOMIC_H

#include "los_compiler.h"
#include "los_interrupt.h"

#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */

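/*
 * The 32-bit operations below are lock-free. Each one loads the current
 * value with l32ai (32-bit load with acquire semantics), places the expected
 * value in the SCOMPARE1 special register, and attempts a conditional store
 * with s32c1i, which writes its operand to memory only if the memory word
 * still equals SCOMPARE1 and, in either case, returns the previous memory
 * value in the operand register. Read-modify-write operations retry until
 * the conditional store succeeds.
 */
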
STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
{
    INT32 val;

    __asm__ __volatile__("l32ai %0, %1, 0\n"
                         : "=&a"(val)
                         : "a"(v)
                         : "memory");

    return val;
}

STATIC INLINE VOID ArchAtomicSet(Atomic *v, INT32 setVal)
{
    INT32 val;
    INT32 tmp;

    /* Store from a scratch copy of setVal: s32c1i overwrites its source
     * register with the previous memory value, so storing from the setVal
     * input operand directly would corrupt it on retry. */
    __asm__ __volatile__("1:l32ai %0, %3, 0\n"
                         "  wsr %0, SCOMPARE1\n"
                         "  mov %1, %4\n"
                         "  s32c1i %1, %2\n"
                         "  bne %1, %0, 1b"
                         : "=&a"(val), "=&a"(tmp), "+m"(*v)
                         : "a"(v), "a"(setVal)
                         : "memory");
}

STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
{
    INT32 val;
    INT32 tmp;

    __asm__ __volatile__("1:l32ai %0, %3, 0\n"
                         "  wsr %0, SCOMPARE1\n"
                         "  mov %1, %0\n"
                         "  add %0, %0, %4\n"
                         "  s32c1i %0, %2\n"
                         "  bne %0, %1, 1b"
                         : "=&a"(val), "=&a"(tmp), "+m"(*v)
                         : "a"(v), "a"(addVal)
                         : "memory");

    /* After a successful s32c1i, val holds the previous value; derive the
     * new value from it rather than re-reading *v, which could observe a
     * later concurrent update. */
    return val + addVal;
}

STATIC INLINE INT32 ArchAtomicSub(Atomic *v, INT32 subVal)
{
    INT32 val;
    INT32 tmp;

    __asm__ __volatile__("1:l32ai %0, %3, 0\n"
                         "  wsr %0, SCOMPARE1\n"
                         "  mov %1, %0\n"
                         "  sub %0, %0, %4\n"
                         "  s32c1i %0, %2\n"
                         "  bne %0, %1, 1b"
                         : "=&a"(val), "=&a"(tmp), "+m"(*v)
                         : "a"(v), "a"(subVal)
                         : "memory");

    /* As in ArchAtomicAdd, compute the result from the previous value
     * instead of re-reading *v. */
    return val - subVal;
}

STATIC INLINE VOID ArchAtomicInc(Atomic *v)
{
    (VOID)ArchAtomicAdd(v, 1);
}

STATIC INLINE VOID ArchAtomicDec(Atomic *v)
{
    (VOID)ArchAtomicSub(v, 1);
}

STATIC INLINE INT32 ArchAtomicIncRet(Atomic *v)
{
    return ArchAtomicAdd(v, 1);
}

STATIC INLINE INT32 ArchAtomicDecRet(Atomic *v)
{
    return ArchAtomicSub(v, 1);
}

/**
 * @ingroup  los_arch_atomic
 * @brief Atomic exchange for a 32-bit variable.
 *
 * @par Description:
 * This API is used to implement an atomic exchange for a 32-bit variable
 * and return the previous value of the atomic variable.
 * @attention
 * <ul><li>The pointer v must not be NULL.</li></ul>
 *
 * @param  v       [IN] The variable pointer.
 * @param  val     [IN] The exchange value.
 *
 * @retval #INT32       The previous value of the atomic variable.
 * @par Dependency:
 * <ul><li>los_arch_atomic.h: the header file that contains the API declaration.</li></ul>
 * @see
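 * @par Example:
 * A minimal usage sketch (the flag variable and the surrounding logic are
 * hypothetical, not part of this API):
 * @code
 * Atomic flag = 0;
 * if (ArchAtomicXchg32bits(&flag, 1) == 0) {
 *     // this caller performed the 0 -> 1 transition
 * }
 * @endcode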
 */
STATIC INLINE INT32 ArchAtomicXchg32bits(volatile INT32 *v, INT32 val)
{
    INT32 prevVal = 0;
    INT32 tmp;

    /* Store from a scratch copy of val: s32c1i overwrites its source
     * register with the previous memory value, so the copy must be
     * refreshed on every retry. */
    __asm__ __volatile__("1:l32ai %0, %3, 0\n"
                         "  wsr %0, SCOMPARE1\n"
                         "  mov %1, %4\n"
                         "  s32c1i %1, %2\n"
                         "  bne %1, %0, 1b"
                         : "=&a"(prevVal), "=&a"(tmp), "+m"(*v)
                         : "a"(v), "a"(val)
                         : "memory");

    return prevVal;
}

/**
 * @ingroup  los_arch_atomic
 * @brief Atomic exchange for a 32-bit variable with compare.
 *
 * @par Description:
 * This API is used to implement an atomic compare-and-exchange for a 32-bit variable:
 * the value is exchanged with val only if the current value is equal to oldVal.
 * @attention
 * <ul><li>The pointer v must not be NULL.</li></ul>
 *
 * @param  v       [IN] The variable pointer.
 * @param  val     [IN] The new value.
 * @param  oldVal  [IN] The old value.
 *
 * @retval TRUE  The previous value of the atomic variable is not equal to oldVal.
 * @retval FALSE The previous value of the atomic variable is equal to oldVal.
 * @par Dependency:
 * <ul><li>los_arch_atomic.h: the header file that contains the API declaration.</li></ul>
 * @see
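 * @par Example:
 * A minimal lock-acquire sketch (the lock variable and the spin-retry policy
 * are hypothetical, not part of this API):
 * @code
 * volatile INT32 lock = 0;
 * while (ArchAtomicCmpXchg32bits(&lock, 1, 0)) {
 *     // swap failed: lock was not 0, spin and retry
 * }
 * @endcode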
 */
STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 oldVal)
{
    INT32 prevVal = 0;
    INT32 tmp;

    /* s32c1i returns the previous memory value in its source register, so
     * store from a scratch copy of val and refresh prevVal from it after
     * the attempt; this also reports failure when *v changes between the
     * initial load and the conditional store. */
    __asm__ __volatile__("l32ai %0, %3, 0\n"
                         "wsr %0, SCOMPARE1\n"
                         "bne %0, %4, 1f\n"
                         "mov %1, %5\n"
                         "s32c1i %1, %2\n"
                         "mov %0, %1\n"
                         "1:"
                         : "=&a"(prevVal), "=&a"(tmp), "+m"(*v)
                         : "a"(v), "a"(oldVal), "a"(val)
                         : "memory");

    return prevVal != oldVal;
}

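/*
 * S32C1I only operates on 32-bit words, so the 64-bit operations below fall
 * back to briefly masking interrupts around a plain read-modify-write.
 */
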
STATIC INLINE INT64 ArchAtomic64Read(const Atomic64 *v)
{
    INT64 val;
    UINT32 intSave;

    intSave = LOS_IntLock();
    val = *v;
    LOS_IntRestore(intSave);

    return val;
}

STATIC INLINE VOID ArchAtomic64Set(Atomic64 *v, INT64 setVal)
{
    UINT32 intSave;

    intSave = LOS_IntLock();
    *v = setVal;
    LOS_IntRestore(intSave);
}

STATIC INLINE INT64 ArchAtomic64Add(Atomic64 *v, INT64 addVal)
{
    INT64 val;
    UINT32 intSave;

    intSave = LOS_IntLock();
    *v += addVal;
    val = *v;
    LOS_IntRestore(intSave);

    return val;
}

STATIC INLINE INT64 ArchAtomic64Sub(Atomic64 *v, INT64 subVal)
{
    INT64 val;
    UINT32 intSave;

    intSave = LOS_IntLock();
    *v -= subVal;
    val = *v;
    LOS_IntRestore(intSave);

    return val;
}

STATIC INLINE VOID ArchAtomic64Inc(Atomic64 *v)
{
    (VOID)ArchAtomic64Add(v, 1);
}

STATIC INLINE INT64 ArchAtomic64IncRet(Atomic64 *v)
{
    return ArchAtomic64Add(v, 1);
}

STATIC INLINE VOID ArchAtomic64Dec(Atomic64 *v)
{
    (VOID)ArchAtomic64Sub(v, 1);
}

STATIC INLINE INT64 ArchAtomic64DecRet(Atomic64 *v)
{
    return ArchAtomic64Sub(v, 1);
}

STATIC INLINE INT64 ArchAtomicXchg64bits(Atomic64 *v, INT64 val)
{
    INT64 prevVal;
    UINT32 intSave;

    intSave = LOS_IntLock();
    prevVal = *v;
    *v = val;
    LOS_IntRestore(intSave);

    return prevVal;
}

STATIC INLINE BOOL ArchAtomicCmpXchg64bits(Atomic64 *v, INT64 val, INT64 oldVal)
{
    INT64 prevVal;
    UINT32 intSave;

    intSave = LOS_IntLock();
    prevVal = *v;
    if (prevVal == oldVal) {
        *v = val;
    }
    LOS_IntRestore(intSave);

    return prevVal != oldVal;
}
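
/*
 * A minimal usage sketch for the 64-bit compare-and-exchange (the counter
 * variable and the surrounding logic are hypothetical):
 *
 *     Atomic64 cnt = 0;
 *     // set cnt to 100 only if nothing has modified it yet
 *     BOOL failed = ArchAtomicCmpXchg64bits(&cnt, 100, 0);
 */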

#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */

#endif /* _LOS_ARCH_ATOMIC_H */