// SPDX-License-Identifier: Apache-2.0 OR MIT

/*
Atomic load/store implementation on MSP430.

Adapted from https://github.com/pftbest/msp430-atomic.

Operations not supported here are provided by disabling interrupts.
See also src/imp/interrupt/msp430.rs.

Note: Ordering is always SeqCst.

Refs: https://www.ti.com/lit/ug/slau208q/slau208q.pdf
*/
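
// A minimal sketch (not part of this module) of how the interrupt-based
// fallback mentioned above implements an operation such as `swap`:
// disable interrupts, perform the plain read-modify-write, then restore the
// previous interrupt state. `with_interrupts_disabled` is a hypothetical
// helper standing in for the machinery in src/imp/interrupt/msp430.rs.
//
//     pub(crate) fn swap(&self, val: u16, _order: Ordering) -> u16 {
//         with_interrupts_disabled(|| {
//             // SAFETY: MSP430 is single-core and interrupts are disabled,
//             // so nothing can observe or modify `v` between these accesses.
//             let prev = unsafe { self.v.get().read() };
//             unsafe { self.v.get().write(val) };
//             prev
//         })
//     }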

#[cfg(not(portable_atomic_no_asm))]
use core::arch::asm;
#[cfg(any(test, not(feature = "critical-section")))]
use core::cell::UnsafeCell;
use core::sync::atomic::Ordering;

/// An atomic fence.
///
/// # Panics
///
/// Panics if `order` is [`Relaxed`](Ordering::Relaxed).
#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub fn fence(order: Ordering) {
    match order {
        Ordering::Relaxed => panic!("there is no such thing as a relaxed fence"),
        // MSP430 is single-core and a compiler fence works as an atomic fence.
        _ => compiler_fence(order),
    }
}

/// A compiler memory fence.
///
/// # Panics
///
/// Panics if `order` is [`Relaxed`](Ordering::Relaxed).
#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub fn compiler_fence(order: Ordering) {
    match order {
        Ordering::Relaxed => panic!("there is no such thing as a relaxed compiler fence"),
        _ => {}
    }
    // SAFETY: using an empty asm is safe.
    unsafe {
        // Do not use `nomem` and `readonly` because the asm must prevent preceding and
        // subsequent memory accesses from being reordered across it.
        #[cfg(not(portable_atomic_no_asm))]
        asm!("", options(nostack, preserves_flags));
        #[cfg(portable_atomic_no_asm)]
        llvm_asm!("" ::: "memory" : "volatile");
    }
}
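
// A hedged usage sketch: publishing data to an interrupt handler with a
// store followed by a fence. `DATA`, `READY`, and `publish` are hypothetical
// and not part of this module (the constructors below are test-only here).
//
//     static DATA: AtomicU16 = AtomicU16::new(0);
//     static READY: AtomicU8 = AtomicU8::new(0);
//
//     fn publish(v: u16) {
//         DATA.store(v, Ordering::SeqCst);
//         // On this single-core target the fence compiles to no instructions,
//         // but it still forbids the compiler from reordering the two stores.
//         fence(Ordering::SeqCst);
//         READY.store(1, Ordering::SeqCst);
//     }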

macro_rules! atomic {
    (load_store, $([$($generics:tt)*])? $atomic_type:ident, $value_type:ty, $asm_suffix:tt) => {
        #[cfg(any(test, not(feature = "critical-section")))]
        #[repr(transparent)]
        pub(crate) struct $atomic_type $(<$($generics)*>)? {
            v: UnsafeCell<$value_type>,
        }

        #[cfg(any(test, not(feature = "critical-section")))]
        // Send is implicitly implemented for the integer atomics, but not for the
        // atomic pointer, because `*mut T` is `!Send`.
        // SAFETY: any data races are prevented by atomic operations.
        unsafe impl $(<$($generics)*>)? Send for $atomic_type $(<$($generics)*>)? {}
        #[cfg(any(test, not(feature = "critical-section")))]
        // SAFETY: any data races are prevented by atomic operations.
        unsafe impl $(<$($generics)*>)? Sync for $atomic_type $(<$($generics)*>)? {}

        #[cfg(any(test, not(feature = "critical-section")))]
        impl $(<$($generics)*>)? $atomic_type $(<$($generics)*>)? {
            #[cfg(test)]
            #[inline]
            pub(crate) const fn new(v: $value_type) -> Self {
                Self { v: UnsafeCell::new(v) }
            }

            #[cfg(test)]
            #[inline]
            pub(crate) fn is_lock_free() -> bool {
                Self::IS_ALWAYS_LOCK_FREE
            }
            #[cfg(test)]
            pub(crate) const IS_ALWAYS_LOCK_FREE: bool = true;

            #[cfg(test)]
            #[inline]
            pub(crate) fn get_mut(&mut self) -> &mut $value_type {
                // SAFETY: the mutable reference guarantees unique ownership.
                // (UnsafeCell::get_mut requires Rust 1.50)
                unsafe { &mut *self.v.get() }
            }

            #[inline]
            #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
            pub(crate) fn load(&self, order: Ordering) -> $value_type {
                crate::utils::assert_load_ordering(order);
                let src = self.v.get();
                // SAFETY: any data races are prevented by atomic intrinsics and the raw
                // pointer passed in is valid because we got it from a reference.
                unsafe {
                    let out;
                    #[cfg(not(portable_atomic_no_asm))]
                    asm!(
                        concat!("mov", $asm_suffix, " @{src}, {out}"),
                        src = in(reg) src,
                        out = lateout(reg) out,
                        options(nostack, preserves_flags),
                    );
                    #[cfg(portable_atomic_no_asm)]
                    llvm_asm!(
                        concat!("mov", $asm_suffix, " $1, $0")
                        : "=r"(out) : "*m"(src) : "memory" : "volatile"
                    );
                    out
                }
            }

            #[inline]
            #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
            pub(crate) fn store(&self, val: $value_type, order: Ordering) {
                crate::utils::assert_store_ordering(order);
                let dst = self.v.get();
                // SAFETY: any data races are prevented by atomic intrinsics and the raw
                // pointer passed in is valid because we got it from a reference.
                unsafe {
                    #[cfg(not(portable_atomic_no_asm))]
                    asm!(
                        concat!("mov", $asm_suffix, " {val}, 0({dst})"),
                        dst = in(reg) dst,
                        val = in(reg) val,
                        options(nostack, preserves_flags),
                    );
                    #[cfg(portable_atomic_no_asm)]
                    llvm_asm!(
                        concat!("mov", $asm_suffix, " $1, $0")
                        :: "*m"(dst), "ir"(val) : "memory" : "volatile"
                    );
                }
            }
        }
    };
    ($([$($generics:tt)*])? $atomic_type:ident, $value_type:ty, $asm_suffix:tt) => {
        atomic!(load_store, $([$($generics)*])? $atomic_type, $value_type, $asm_suffix);
        #[cfg(any(test, not(feature = "critical-section")))]
        impl $(<$($generics)*>)? $atomic_type $(<$($generics)*>)? {
            #[inline]
            pub(crate) fn add(&self, val: $value_type, _order: Ordering) {
                let dst = self.v.get();
                // SAFETY: any data races are prevented by atomic intrinsics and the raw
                // pointer passed in is valid because we got it from a reference.
                unsafe {
                    #[cfg(not(portable_atomic_no_asm))]
                    asm!(
                        concat!("add", $asm_suffix, " {val}, 0({dst})"),
                        dst = in(reg) dst,
                        val = in(reg) val,
                        // Do not use `preserves_flags` because ADD modifies the V, N, Z, and C bits of the status register.
                        options(nostack),
                    );
                    #[cfg(portable_atomic_no_asm)]
                    llvm_asm!(
                        concat!("add", $asm_suffix, " $1, $0")
                        :: "*m"(dst), "ir"(val) : "memory" : "volatile"
                    );
                }
            }

            #[inline]
            pub(crate) fn sub(&self, val: $value_type, _order: Ordering) {
                let dst = self.v.get();
                // SAFETY: any data races are prevented by atomic intrinsics and the raw
                // pointer passed in is valid because we got it from a reference.
                unsafe {
                    #[cfg(not(portable_atomic_no_asm))]
                    asm!(
                        concat!("sub", $asm_suffix, " {val}, 0({dst})"),
                        dst = in(reg) dst,
                        val = in(reg) val,
                        // Do not use `preserves_flags` because SUB modifies the V, N, Z, and C bits of the status register.
                        options(nostack),
                    );
                    #[cfg(portable_atomic_no_asm)]
                    llvm_asm!(
                        concat!("sub", $asm_suffix, " $1, $0")
                        :: "*m"(dst), "ir"(val) : "memory" : "volatile"
                    );
                }
            }

            #[inline]
            pub(crate) fn and(&self, val: $value_type, _order: Ordering) {
                let dst = self.v.get();
                // SAFETY: any data races are prevented by atomic intrinsics and the raw
                // pointer passed in is valid because we got it from a reference.
                unsafe {
                    #[cfg(not(portable_atomic_no_asm))]
                    asm!(
                        concat!("and", $asm_suffix, " {val}, 0({dst})"),
                        dst = in(reg) dst,
                        val = in(reg) val,
                        // Do not use `preserves_flags` because AND modifies the V, N, Z, and C bits of the status register.
                        options(nostack),
                    );
                    #[cfg(portable_atomic_no_asm)]
                    llvm_asm!(
                        concat!("and", $asm_suffix, " $1, $0")
                        :: "*m"(dst), "ir"(val) : "memory" : "volatile"
                    );
                }
            }

            #[inline]
            pub(crate) fn or(&self, val: $value_type, _order: Ordering) {
                let dst = self.v.get();
                // SAFETY: any data races are prevented by atomic intrinsics and the raw
                // pointer passed in is valid because we got it from a reference.
                unsafe {
                    #[cfg(not(portable_atomic_no_asm))]
                    asm!(
                        concat!("bis", $asm_suffix, " {val}, 0({dst})"),
                        dst = in(reg) dst,
                        val = in(reg) val,
                        // `preserves_flags` is accurate here: unlike ADD/SUB/AND/XOR,
                        // BIS does not modify the status register bits.
                        options(nostack, preserves_flags),
                    );
                    #[cfg(portable_atomic_no_asm)]
                    llvm_asm!(
                        concat!("bis", $asm_suffix, " $1, $0")
                        :: "*m"(dst), "ir"(val) : "memory" : "volatile"
                    );
                }
            }

            #[inline]
            pub(crate) fn xor(&self, val: $value_type, _order: Ordering) {
                let dst = self.v.get();
                // SAFETY: any data races are prevented by atomic intrinsics and the raw
                // pointer passed in is valid because we got it from a reference.
                unsafe {
                    #[cfg(not(portable_atomic_no_asm))]
                    asm!(
                        concat!("xor", $asm_suffix, " {val}, 0({dst})"),
                        dst = in(reg) dst,
                        val = in(reg) val,
                        // Do not use `preserves_flags` because XOR modifies the V, N, Z, and C bits of the status register.
                        options(nostack),
                    );
                    #[cfg(portable_atomic_no_asm)]
                    llvm_asm!(
                        concat!("xor", $asm_suffix, " $1, $0")
                        :: "*m"(dst), "ir"(val) : "memory" : "volatile"
                    );
                }
            }

            #[inline]
            pub(crate) fn not(&self, _order: Ordering) {
                let dst = self.v.get();
                // SAFETY: any data races are prevented by atomic intrinsics and the raw
                // pointer passed in is valid because we got it from a reference.
                unsafe {
                    #[cfg(not(portable_atomic_no_asm))]
                    asm!(
                        concat!("inv", $asm_suffix, " 0({dst})"),
                        dst = in(reg) dst,
                        // Do not use `preserves_flags` because INV modifies the V, N, Z, and C bits of the status register.
                        options(nostack),
                    );
                    #[cfg(portable_atomic_no_asm)]
                    llvm_asm!(
                        concat!("inv", $asm_suffix, " $0")
                        :: "*m"(dst) : "memory" : "volatile"
                    );
                }
            }
        }
    }
}
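
// For reference, `atomic!(AtomicU8, u8, ".b")` expands `load` to roughly
// `mov.b @src, out` and `add` to `add.b val, 0(dst)`. Each operation is a
// single instruction, which is what makes it atomic with respect to
// interrupts on this single-core target.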

atomic!(AtomicI8, i8, ".b");
atomic!(AtomicU8, u8, ".b");
atomic!(AtomicI16, i16, ".w");
atomic!(AtomicU16, u16, ".w");
atomic!(AtomicIsize, isize, ".w");
atomic!(AtomicUsize, usize, ".w");
atomic!(load_store, [T] AtomicPtr, *mut T, ".w");
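
// A minimal usage sketch of the generated types, assuming a test run on an
// MSP430 target (`new` and the other constructors are `#[cfg(test)]`-only in
// this module). The test name and module are illustrative additions.
#[cfg(test)]
mod usage_sketch {
    use super::*;

    #[test]
    fn add_sub_roundtrip() {
        let a = AtomicU16::new(5);
        // `add`/`sub` return nothing here; read the result back with `load`.
        a.add(3, Ordering::SeqCst);
        assert_eq!(a.load(Ordering::SeqCst), 8);
        a.sub(2, Ordering::SeqCst);
        assert_eq!(a.load(Ordering::SeqCst), 6);
        a.store(0, Ordering::SeqCst);
        assert_eq!(a.load(Ordering::SeqCst), 0);
    }
}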