/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copied from the kernel sources:
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _TOOLS_LINUX_ASM_POWERPC_BARRIER_H
#define _TOOLS_LINUX_ASM_POWERPC_BARRIER_H

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 *
 * *mb() variants without smp_ prefix must order all types of memory
 * operations with one another. sync is the only instruction sufficient
 * to do this.
 */
#define mb()   __asm__ __volatile__ ("sync" : : : "memory")
#define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
#define wmb()  __asm__ __volatile__ ("sync" : : : "memory")

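/*
 * Illustrative usage sketch, not part of the upstream header; the
 * names `data' and `ready' are hypothetical.  A producer's wmb()
 * pairs with a consumer's rmb() so that a reader which observes
 * ready == 1 is guaranteed to also observe data == 42:
 *
 *	producer:
 *		data = 42;
 *		wmb();			// order payload store before flag store
 *		WRITE_ONCE(ready, 1);
 *
 *	reader:
 *		while (!READ_ONCE(ready))
 *			;
 *		rmb();			// order flag load before payload load
 *		assert(data == 42);
 */
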
#if defined(__powerpc64__)
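/*
 * lwsync (lightweight sync) orders load-load, load-store and
 * store-store pairs but not store-load, making it cheaper than a
 * full sync while still sufficient for acquire/release ordering on
 * cacheable memory.
 */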
#define smp_lwsync()	__asm__ __volatile__ ("lwsync" : : : "memory")

#define smp_store_release(p, v)			\
do {						\
	smp_lwsync();				\
	WRITE_ONCE(*p, v);			\
} while (0)

#define smp_load_acquire(p)			\
({						\
	typeof(*p) ___p1 = READ_ONCE(*p);	\
	smp_lwsync();				\
	___p1;					\
})
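
/*
 * Illustrative pairing sketch, not part of the upstream header; the
 * ring-buffer names (buf, shared->head) are hypothetical.  A writer
 * publishing with smp_store_release() guarantees that a reader which
 * sees the new head via smp_load_acquire() also sees every store the
 * writer made before the release:
 *
 *	writer:
 *		buf[head] = record;
 *		smp_store_release(&shared->head, head + 1);
 *
 *	reader:
 *		unsigned int head = smp_load_acquire(&shared->head);
 *		// buf[0 .. head - 1] is now safe to read
 */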
#endif /* defined(__powerpc64__) */
#endif /* _TOOLS_LINUX_ASM_POWERPC_BARRIER_H */