/*
 * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
 * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this list of
 *    conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice, this list
 *    of conditions and the following disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors may be used
 *    to endorse or promote products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LOS_ARCH_ATOMIC_H
#define _LOS_ARCH_ATOMIC_H

#include "los_compiler.h"
#include "los_interrupt.h" /* LOS_IntLock()/LOS_IntRestore(), used by the 64-bit operations below */

#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */

STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
{
    INT32 val;

    __asm__ __volatile__("l32ai %0, %1, 0\n"
                         : "=&a"(val)
                         : "a"(v)
                         : "memory");

    return val;
}

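/*
 * Implementation note: the read-modify-write operations below follow the
 * canonical Xtensa S32C1I pattern. S32C1I ("store 32-bit compare conditional")
 * writes its register operand to memory only when the current memory word
 * equals the SCOMPARE1 special register, and in either case loads the word it
 * observed back into that register. Roughly, in illustrative C (not the
 * generated code):
 *
 *     do {
 *         old = *v;                        // l32ai: 32-bit load, acquire
 *         SCOMPARE1 = old;                 // wsr
 *         observed = s32c1i(v, newValue);  // conditional store; returns *v
 *     } while (observed != old);           // lost a race with another writer
 */
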
STATIC INLINE VOID ArchAtomicSet(Atomic *v, INT32 setVal)
{
    INT32 val;
    INT32 tmp;

    __asm__ __volatile__("1:l32ai %0, %3, 0\n"  /* val = *v */
                         "  wsr %0, SCOMPARE1\n"
                         "  mov %1, %4\n"        /* fresh copy of setVal each attempt; s32c1i overwrites its register */
                         "  s32c1i %1, %2\n"     /* tmp = value of *v observed by the store */
                         "  bne %1, %0, 1b"      /* another writer intervened; retry */
                         : "=&a"(val), "=&a"(tmp), "+m"(*v)
                         : "a"(v), "a"(setVal)
                         : "memory");
}

STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
{
    INT32 val;
    INT32 tmp;

    __asm__ __volatile__("1:l32ai %0, %3, 0\n"  /* val = *v */
                         "  wsr %0, SCOMPARE1\n"
                         "  mov %1, %0\n"        /* tmp = old value */
                         "  add %0, %0, %4\n"    /* val = old + addVal */
                         "  s32c1i %0, %2\n"     /* val = value of *v observed by the store */
                         "  bne %0, %1, 1b"      /* retry until the conditional store succeeds */
                         : "=&a"(val), "=&a"(tmp), "+m"(*v)
                         : "a"(v), "a"(addVal)
                         : "memory");

    /* On success val holds the pre-update value; derive the result from it
     * rather than re-reading *v, which another writer may already have changed. */
    return val + addVal;
}

STATIC INLINE INT32 ArchAtomicSub(Atomic *v, INT32 subVal)
{
    INT32 val;
    INT32 tmp;

    __asm__ __volatile__("1:l32ai %0, %3, 0\n"  /* val = *v */
                         "  wsr %0, SCOMPARE1\n"
                         "  mov %1, %0\n"        /* tmp = old value */
                         "  sub %0, %0, %4\n"    /* val = old - subVal */
                         "  s32c1i %0, %2\n"     /* val = value of *v observed by the store */
                         "  bne %0, %1, 1b"      /* retry until the conditional store succeeds */
                         : "=&a"(val), "=&a"(tmp), "+m"(*v)
                         : "a"(v), "a"(subVal)
                         : "memory");

    /* On success val holds the pre-update value; derive the result from it
     * rather than re-reading *v, which is not atomic. */
    return val - subVal;
}

STATIC INLINE VOID ArchAtomicInc(Atomic *v)
{
    (VOID)ArchAtomicAdd(v, 1);
}

STATIC INLINE VOID ArchAtomicDec(Atomic *v)
{
    (VOID)ArchAtomicSub(v, 1);
}

STATIC INLINE INT32 ArchAtomicIncRet(Atomic *v)
{
    return ArchAtomicAdd(v, 1);
}

STATIC INLINE INT32 ArchAtomicDecRet(Atomic *v)
{
    return ArchAtomicSub(v, 1);
}

/**
 * @ingroup  los_arch_atomic
 * @brief Atomic exchange for a 32-bit variable.
 *
 * @par Description:
 * This API is used to implement an atomic exchange for a 32-bit variable
 * and return the previous value of the atomic variable.
 * @attention
 * <ul><li>The pointer v must not be NULL.</li></ul>
 *
 * @param  v       [IN] The variable pointer.
 * @param  val     [IN] The exchange value.
 *
 * @retval #INT32  The previous value of the atomic variable.
 * @par Dependency:
 * <ul><li>los_arch_atomic.h: the header file that contains the API declaration.</li></ul>
 * @see
 */
STATIC INLINE INT32 ArchAtomicXchg32bits(volatile INT32 *v, INT32 val)
{
    INT32 prevVal = 0;
    INT32 tmp;

    __asm__ __volatile__("1:l32ai %0, %3, 0\n"  /* prevVal = *v */
                         "  wsr %0, SCOMPARE1\n"
                         "  mov %1, %4\n"        /* fresh copy of val each attempt; s32c1i overwrites its register */
                         "  s32c1i %1, %2\n"     /* tmp = value of *v observed by the store */
                         "  bne %1, %0, 1b"      /* lost a race; retry */
                         : "=&a"(prevVal), "=&a"(tmp), "+m"(*v)
                         : "a"(v), "a"(val)
                         : "memory");

    return prevVal;
}

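/*
 * Usage sketch (hypothetical names, not part of this API): the return value
 * is the word held before the exchange, so observing 0 can mean "this caller
 * set the flag first":
 *
 *     STATIC volatile INT32 g_initFlag = 0;
 *
 *     if (ArchAtomicXchg32bits(&g_initFlag, 1) == 0) {
 *         // we atomically changed 0 -> 1; run one-time initialization here
 *     }
 */
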
/**
 * @ingroup  los_arch_atomic
 * @brief Atomic exchange for a 32-bit variable with compare.
 *
 * @par Description:
 * This API is used to implement an atomic exchange for a 32-bit variable:
 * the new value is written only if the current value of the variable equals oldVal.
 * @attention
 * <ul><li>The pointer v must not be NULL.</li></ul>
 *
 * @param  v       [IN] The variable pointer.
 * @param  val     [IN] The new value.
 * @param  oldVal  [IN] The expected old value.
 *
 * @retval TRUE  The exchange was not performed: the previous value of the atomic variable is not equal to oldVal.
 * @retval FALSE The exchange was performed: the previous value of the atomic variable is equal to oldVal.
 * @par Dependency:
 * <ul><li>los_arch_atomic.h: the header file that contains the API declaration.</li></ul>
 * @see
 */
STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 oldVal)
{
    INT32 prevVal = 0;

    /* Let s32c1i itself perform the comparison: a separate pre-load would
     * leave a window in which *v could change after the check, making the
     * return value disagree with whether the store actually happened. */
    __asm__ __volatile__("wsr %2, SCOMPARE1\n"  /* compare against oldVal */
                         "mov %0, %3\n"          /* value to store on a match */
                         "s32c1i %0, %1\n"       /* single-shot CAS; %0 becomes the previous value */
                         : "=&a"(prevVal), "+m"(*v)
                         : "a"(oldVal), "a"(val)
                         : "memory");

    return prevVal != oldVal;
}
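
/*
 * Usage sketch (hypothetical helper, assuming the FALSE-on-success convention
 * above): a typical retry loop builds a bounded increment out of
 * ArchAtomicRead and ArchAtomicCmpXchg32bits:
 *
 *     STATIC BOOL BoundedInc(Atomic *counter, INT32 max)
 *     {
 *         INT32 old;
 *
 *         do {
 *             old = ArchAtomicRead(counter);
 *             if (old >= max) {
 *                 return FALSE;  // would exceed the bound; give up
 *             }
 *         } while (ArchAtomicCmpXchg32bits(counter, old + 1, old));
 *         return TRUE;           // counter went from old to old + 1 atomically
 *     }
 */
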
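/*
 * The 64-bit operations below are not lock-free: the core provides no 64-bit
 * compare-and-swap, so atomicity is emulated by masking local interrupts
 * around a plain read-modify-write, which serves as the atomicity guarantee
 * on a single-core system.
 */
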
STATIC INLINE INT64 ArchAtomic64Read(const Atomic64 *v)
{
    INT64 val;
    UINT32 intSave;

    intSave = LOS_IntLock();
    val = *v;
    LOS_IntRestore(intSave);

    return val;
}

STATIC INLINE VOID ArchAtomic64Set(Atomic64 *v, INT64 setVal)
{
    UINT32 intSave;

    intSave = LOS_IntLock();
    *v = setVal;
    LOS_IntRestore(intSave);
}

STATIC INLINE INT64 ArchAtomic64Add(Atomic64 *v, INT64 addVal)
{
    INT64 val;
    UINT32 intSave;

    intSave = LOS_IntLock();
    *v += addVal;
    val = *v;
    LOS_IntRestore(intSave);

    return val;
}

STATIC INLINE INT64 ArchAtomic64Sub(Atomic64 *v, INT64 subVal)
{
    INT64 val;
    UINT32 intSave;

    intSave = LOS_IntLock();
    *v -= subVal;
    val = *v;
    LOS_IntRestore(intSave);

    return val;
}

STATIC INLINE VOID ArchAtomic64Inc(Atomic64 *v)
{
    (VOID)ArchAtomic64Add(v, 1);
}

STATIC INLINE INT64 ArchAtomic64IncRet(Atomic64 *v)
{
    return ArchAtomic64Add(v, 1);
}

STATIC INLINE VOID ArchAtomic64Dec(Atomic64 *v)
{
    (VOID)ArchAtomic64Sub(v, 1);
}

STATIC INLINE INT64 ArchAtomic64DecRet(Atomic64 *v)
{
    return ArchAtomic64Sub(v, 1);
}

STATIC INLINE INT64 ArchAtomicXchg64bits(Atomic64 *v, INT64 val)
{
    INT64 prevVal;
    UINT32 intSave;

    intSave = LOS_IntLock();
    prevVal = *v;
    *v = val;
    LOS_IntRestore(intSave);

    return prevVal;
}

STATIC INLINE BOOL ArchAtomicCmpXchg64bits(Atomic64 *v, INT64 val, INT64 oldVal)
{
    INT64 prevVal;
    UINT32 intSave;

    intSave = LOS_IntLock();
    prevVal = *v;
    if (prevVal == oldVal) {
        *v = val;
    }
    LOS_IntRestore(intSave);

    return prevVal != oldVal;
}

#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */

#endif /* _LOS_ARCH_ATOMIC_H */