| /kernel/linux/linux-4.19/tools/include/asm/ |
| D | barrier.h |
|       3   #include "../../arch/x86/include/asm/barrier.h"
|       5   #include "../../arch/arm/include/asm/barrier.h"
|       7   #include "../../arch/arm64/include/asm/barrier.h"
|       9   #include "../../arch/powerpc/include/asm/barrier.h"
|       11  #include "../../arch/s390/include/asm/barrier.h"
|       13  #include "../../arch/sh/include/asm/barrier.h"
|       15  #include "../../arch/sparc/include/asm/barrier.h"
|       17  #include "../../arch/tile/include/asm/barrier.h"
|       19  #include "../../arch/alpha/include/asm/barrier.h"
|       21  #include "../../arch/mips/include/asm/barrier.h"
|       [all …]
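
The include lines above are selected by preprocessor checks on compiler-predefined architecture macros. A minimal sketch of that dispatch pattern, with most arch branches elided and the generic fallback marked as an assumption:

    /* Sketch only: choose a per-architecture barrier.h based on the
     * compiler's predefined macros; the real header covers more
     * architectures, and the fallback below is an assumption. */
    #if defined(__i386__) || defined(__x86_64__)
    #include "../../arch/x86/include/asm/barrier.h"
    #elif defined(__aarch64__)
    #include "../../arch/arm64/include/asm/barrier.h"
    #else
    #include <asm-generic/barrier.h>
    #endif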
|
| /kernel/linux/linux-5.10/tools/include/asm/ |
| D | barrier.h |
|       4   #include "../../arch/x86/include/asm/barrier.h"
|       6   #include "../../arch/arm/include/asm/barrier.h"
|       8   #include "../../arch/arm64/include/asm/barrier.h"
|       10  #include "../../arch/powerpc/include/asm/barrier.h"
|       12  #include "../../arch/s390/include/asm/barrier.h"
|       14  #include "../../arch/sh/include/asm/barrier.h"
|       16  #include "../../arch/sparc/include/asm/barrier.h"
|       18  #include "../../arch/tile/include/asm/barrier.h"
|       20  #include "../../arch/alpha/include/asm/barrier.h"
|       22  #include "../../arch/mips/include/asm/barrier.h"
|       [all …]
|
| /kernel/linux/linux-4.19/include/linux/ |
| D | spinlock_up.h |
|       9   #include <asm/barrier.h>
|       32  barrier();    in arch_spin_lock()
|       40  barrier();    in arch_spin_trylock()
|       47  barrier();    in arch_spin_unlock()
|       54  #define arch_read_lock(lock) do { barrier(); (void)(lock); } while (0)
|       55  #define arch_write_lock(lock) do { barrier(); (void)(lock); } while (0)
|       56  #define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; })
|       57  #define arch_write_trylock(lock) ({ barrier(); (void)(lock); 1; })
|       58  #define arch_read_unlock(lock) do { barrier(); (void)(lock); } while (0)
|       59  #define arch_write_unlock(lock) do { barrier(); (void)(lock); } while (0)
|       [all …]
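
On a uniprocessor kernel there is no other CPU to exclude, so the rwlock operations above reduce to a compiler barrier plus an evaluation of the lock argument. A self-contained sketch of that idea, assuming GCC-style inline assembly; the lock and counter names are illustrative, not from the header:

    /* UP fallbacks copied from the excerpt above: the "lock" only
     * prevents the compiler from moving the access across it. */
    #define barrier() asm volatile("" ::: "memory")
    #define arch_write_lock(lock)   do { barrier(); (void)(lock); } while (0)
    #define arch_write_unlock(lock) do { barrier(); (void)(lock); } while (0)

    static int dummy_lock;          /* stands in for the real lock type */
    static unsigned long counter;   /* illustrative shared data         */

    static void counter_inc(void)
    {
            arch_write_lock(&dummy_lock);   /* compile-time ordering only */
            counter++;
            arch_write_unlock(&dummy_lock);
    }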
|
| D | preempt.h |
|       175  barrier(); \
|       180  barrier(); \
|       191  barrier(); \
|       198  barrier(); \
|       212  barrier(); \
|       218  barrier(); \
|       228  barrier(); \
|       233  barrier(); \
|       245  #define preempt_disable() barrier()
|       246  #define sched_preempt_enable_no_resched() barrier()
|       [all …]
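
The last two matches show the !CONFIG_PREEMPT_COUNT case, where disabling preemption is nothing more than a compiler barrier. A hedged usage sketch; the per-CPU counter name is hypothetical, not from the header:

    #include <linux/percpu.h>
    #include <linux/preempt.h>

    /* Hypothetical per-CPU counter, for illustration only. */
    static DEFINE_PER_CPU(unsigned long, my_event_count);

    static void count_event(void)
    {
            preempt_disable();              /* at minimum a barrier(), per the lines above */
            __this_cpu_inc(my_event_count); /* the task cannot migrate mid-update          */
            preempt_enable();
    }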
|
| /kernel/linux/linux-5.10/include/linux/ |
| D | spinlock_up.h |
|       9   #include <asm/barrier.h>
|       32  barrier();    in arch_spin_lock()
|       40  barrier();    in arch_spin_trylock()
|       47  barrier();    in arch_spin_unlock()
|       54  #define arch_read_lock(lock) do { barrier(); (void)(lock); } while (0)
|       55  #define arch_write_lock(lock) do { barrier(); (void)(lock); } while (0)
|       56  #define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; })
|       57  #define arch_write_trylock(lock) ({ barrier(); (void)(lock); 1; })
|       58  #define arch_read_unlock(lock) do { barrier(); (void)(lock); } while (0)
|       59  #define arch_write_unlock(lock) do { barrier(); (void)(lock); } while (0)
|       [all …]
|
| D | preempt.h |
|       172  barrier(); \
|       177  barrier(); \
|       188  barrier(); \
|       195  barrier(); \
|       209  barrier(); \
|       215  barrier(); \
|       225  barrier(); \
|       230  barrier(); \
|       242  #define preempt_disable() barrier()
|       243  #define sched_preempt_enable_no_resched() barrier()
|       [all …]
|
| /kernel/linux/linux-4.19/arch/mips/include/asm/ |
| D | barrier.h |
|       19  * As compared to the completion barrier, the ordering barrier is a
|       25  * This potentially reduces how many cycles the barrier instruction must stall
|       40  * - The barrier does not guarantee the order in which instruction fetches are
|       45  * stype zero always does a completion barrier that affects both loads and
|       52  * act the same as stype zero completion barrier. This allows software written
|       53  * for an implementation with a lighter-weight barrier to work on another
|       54  * implementation which only implements the stype zero completion barrier.
|       56  * - A completion barrier is required, potentially in conjunction with SSNOP (in
|       59  * mode changes. For example, a completion barrier is required on some
|       65  * stype 0 - A completion barrier that affects preceding loads and stores and
|       [all …]
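
The excerpt describes the SYNC instruction's stype operand; stype 0 is the full completion barrier that every implementation must honour. A minimal sketch of emitting it from C, assuming GCC-style inline assembly (the wrapper name is illustrative):

    /* "sync" with no operand encodes stype 0, the completion barrier
     * described above; the "memory" clobber doubles as a compiler barrier. */
    static inline void mips_full_sync(void)
    {
            __asm__ __volatile__("sync" : : : "memory");
    }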
|
| /kernel/linux/linux-5.10/Documentation/ |
| D | memory-barriers.txt |
|       29  particular barrier, and
|       34  for any particular barrier, but if the architecture provides less than
|       37  Note also that it is possible that a barrier may be a no-op for an
|       38  architecture because the way that arch works renders an explicit barrier
|       53  - Varieties of memory barrier.
|       57  - SMP barrier pairing.
|       58  - Examples of memory barrier sequences.
|       64  - Compiler barrier.
|       74  (*) Inter-CPU acquiring barrier effects.
|       85  (*) Kernel I/O barrier effects.
|       [all …]
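
The "SMP barrier pairing" entry in that contents list refers to the rule that a writer-side barrier only helps if the reader issues a matching one. A minimal sketch of the classic pairing, assuming the kernel's WRITE_ONCE()/READ_ONCE() and smp_wmb()/smp_rmb() primitives; the data/flag names are illustrative:

    #include <linux/compiler.h>
    #include <asm/barrier.h>

    static int data;
    static int flag;

    static void producer(void)
    {
            data = 42;              /* store the payload ...                  */
            smp_wmb();              /* ... and order it before the flag store */
            WRITE_ONCE(flag, 1);
    }

    static void consumer(void)
    {
            if (READ_ONCE(flag)) {
                    smp_rmb();      /* pairs with the producer's smp_wmb()    */
                    /* data is guaranteed to be observed as 42 here */
            }
    }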
|
| /kernel/linux/linux-4.19/Documentation/ |
| D | memory-barriers.txt |
|       29  particular barrier, and
|       34  for any particular barrier, but if the architecture provides less than
|       37  Note also that it is possible that a barrier may be a no-op for an
|       38  architecture because the way that arch works renders an explicit barrier
|       53  - Varieties of memory barrier.
|       57  - SMP barrier pairing.
|       58  - Examples of memory barrier sequences.
|       64  - Compiler barrier.
|       66  - MMIO write barrier.
|       75  (*) Inter-CPU acquiring barrier effects.
|       [all …]
|
| /kernel/linux/linux-5.10/arch/sparc/include/asm/ |
| D | barrier_64.h |
|       6   * #51. Essentially, if a memory barrier occurs soon after a mispredicted
|       10  * It used to be believed that the memory barrier had to be right in the
|       11  * delay slot, but a case has been traced recently wherein the memory barrier
|       23  * the memory barrier explicitly into a "branch always, predicted taken"
|       44  barrier(); \
|       52  barrier(); \
|       56  #define __smp_mb__before_atomic() barrier()
|       57  #define __smp_mb__after_atomic() barrier()
|       59  #include <asm-generic/barrier.h>
|
| /kernel/linux/linux-4.19/arch/sparc/include/asm/ |
| D | barrier_64.h |
|       6   * #51. Essentially, if a memory barrier occurs soon after a mispredicted
|       10  * It used to be believed that the memory barrier had to be right in the
|       11  * delay slot, but a case has been traced recently wherein the memory barrier
|       23  * the memory barrier explicitly into a "branch always, predicted taken"
|       44  barrier(); \
|       52  barrier(); \
|       56  #define __smp_mb__before_atomic() barrier()
|       57  #define __smp_mb__after_atomic() barrier()
|       59  #include <asm-generic/barrier.h>
|
| /kernel/linux/linux-5.10/arch/mips/include/asm/ |
| D | sync.h |
|       11   * Two types of barrier are provided:
|       18   * restrictions imposed by the barrier.
|       31   * b) Multiple variants of ordering barrier are provided which allow the
|       34   * than a barrier are observed prior to stores that are younger than a
|       35   * barrier & don't care about the ordering of loads then the 'wmb'
|       36   * ordering barrier can be used. Limiting the barrier's effects to stores
|       49   * A full completion barrier; all memory accesses appearing prior to this sync
|       56   * For now we use a full completion barrier to implement all sync types, until
|       66   * barrier since 2010 & omit 'rmb' barriers because the CPUs don't perform
|       104  * don't implicitly provide a memory barrier. In general this is most MIPS
|       [all …]
|
| /kernel/linux/linux-5.10/arch/csky/include/asm/ |
| D | barrier.h |
|       12  * sync: completion barrier, all sync.xx instructions
|       19  * bar.brwarw: ordering barrier for all load/store instructions before it
|       20  * bar.brwarws: ordering barrier for all load/store instructions before it
|       22  * bar.brar: ordering barrier for all load instructions before it
|       23  * bar.brars: ordering barrier for all load instructions before it
|       25  * bar.bwaw: ordering barrier for all store instructions before it
|       26  * bar.bwaws: ordering barrier for all store instructions before it
|       45  #include <asm-generic/barrier.h>
|
| /kernel/linux/linux-5.10/tools/virtio/ringtest/ |
| D | main.h |
|       90   /* Compiler barrier - similar to what Linux uses */
|       91   #define barrier() asm volatile("" ::: "memory")    macro
|       97   #define cpu_relax() barrier()
|       110  barrier();    in busy_wait()
|       125  * adds a compiler barrier.
|       128  barrier(); \
|       134  barrier(); \
|       138  #define smp_wmb() barrier()
|       158  barrier(); \    in __read_once_size()
|       160  barrier(); \    in __read_once_size()
|       [all …]
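
The barrier() on line 91 is a compiler-only barrier: the empty asm with a "memory" clobber emits no instruction, but stops the compiler from caching memory values across it, which is what busy_wait() relies on. A self-contained sketch of that pattern; the ready flag is illustrative:

    /* Compiler-only barrier, as defined in main.h above: no CPU
     * ordering, just a compile-time reload point. */
    #define barrier() asm volatile("" ::: "memory")

    static int ready;   /* illustrative flag, set by another thread */

    static void wait_for_ready(void)
    {
            while (!ready)
                    barrier();  /* forces 'ready' to be re-read each iteration */
    }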
|
| /kernel/linux/linux-4.19/tools/virtio/ringtest/ |
| D | main.h |
|       90   /* Compiler barrier - similar to what Linux uses */
|       91   #define barrier() asm volatile("" ::: "memory")    macro
|       97   #define cpu_relax() barrier()
|       110  barrier();    in busy_wait()
|       125  * adds a compiler barrier.
|       128  barrier(); \
|       134  barrier(); \
|       138  #define smp_wmb() barrier()
|       158  barrier(); \    in __read_once_size()
|       160  barrier(); \    in __read_once_size()
|       [all …]
|
| /kernel/linux/linux-4.19/include/asm-generic/ |
| D | barrier.h |
|       2    * Generic barrier definitions.
|       34   #define mb() barrier()
|       94   #define smp_mb() barrier()
|       98   #define smp_rmb() barrier()
|       102  #define smp_wmb() barrier()
|       167  #define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
|       171  #define smp_mb__before_atomic() barrier()
|       175  #define smp_mb__after_atomic() barrier()
|       182  barrier(); \
|       192  barrier(); \
|       [all …]
|
| /kernel/linux/linux-5.10/include/asm-generic/ |
| D | barrier.h |
|       3    * Generic barrier definitions.
|       31   #define mb() barrier()
|       79   #define smp_mb() barrier()
|       83   #define smp_rmb() barrier()
|       87   #define smp_wmb() barrier()
|       148  #define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
|       152  #define smp_mb__before_atomic() barrier()
|       156  #define smp_mb__after_atomic() barrier()
|       163  barrier(); \
|       173  barrier(); \
|       [all …]
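
smp_store_mb(), shown in the excerpt, is a store followed by a full barrier. A hedged sketch of the Dekker-style handshake such a barrier is typically used for; every name below is hypothetical:

    #include <linux/compiler.h>
    #include <linux/types.h>
    #include <asm/barrier.h>

    static int flag_a, flag_b;

    /* Each side publishes its own flag with a full barrier before reading
     * the peer's flag, so both sides cannot read 0 and proceed at once. */
    static bool side_a_may_enter(void)
    {
            smp_store_mb(flag_a, 1);        /* WRITE_ONCE(flag_a, 1); then full barrier */
            return READ_ONCE(flag_b) == 0;
    }

    static bool side_b_may_enter(void)
    {
            smp_store_mb(flag_b, 1);
            return READ_ONCE(flag_a) == 0;
    }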
|
| /kernel/linux/linux-4.19/arch/s390/include/asm/ |
| D | barrier.h |
|       26  #define rmb() barrier()
|       27  #define wmb() barrier()
|       37  barrier(); \
|       45  barrier(); \
|       49  #define __smp_mb__before_atomic() barrier()
|       50  #define __smp_mb__after_atomic() barrier()
|       76  #include <asm-generic/barrier.h>
|
| /kernel/linux/linux-5.10/arch/s390/include/asm/ |
| D | barrier.h |
|       26  #define rmb() barrier()
|       27  #define wmb() barrier()
|       37  barrier(); \
|       45  barrier(); \
|       49  #define __smp_mb__before_atomic() barrier()
|       50  #define __smp_mb__after_atomic() barrier()
|       76  #include <asm-generic/barrier.h>
|
| /kernel/linux/linux-5.10/arch/arc/include/asm/ |
| D | barrier.h |
|       15  * Explicit barrier provided by DMB instruction
|       19  * - DMB guarantees SMP as well as local barrier semantics
|       20  * (asm-generic/barrier.h ensures sane smp_*mb if not defined here, i.e.
|       21  * UP: barrier(), SMP: smp_*mb == *mb)
|       23  * in the general case. Plus it only provides full barrier.
|       42  #include <asm-generic/barrier.h>
|
| /kernel/linux/linux-5.10/arch/x86/include/asm/ |
| D | barrier.h |
|       51  /* Prevent speculative execution past this barrier. */
|       54  #define dma_rmb() barrier()
|       55  #define dma_wmb() barrier()
|       63  #define __smp_wmb() barrier()
|       69  barrier(); \
|       77  barrier(); \
|       85  #include <asm-generic/barrier.h>
|       97  * do not require this barrier. This is only required for the
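
On x86, dma_rmb()/dma_wmb() can collapse to barrier() because the CPU already keeps ordinary cacheable loads and stores in order; the macros still document, and portably enforce, ordering between CPU writes to DMA memory and the write that hands a descriptor to the device. A hedged sketch of that pattern; the descriptor layout and names are illustrative:

    #include <linux/types.h>
    #include <asm/barrier.h>

    /* Hypothetical descriptor in coherent DMA memory, shared with a device. */
    struct my_desc {
            u64 addr;
            u32 len;
            u32 flags;                      /* DESC_HW_OWNED hands it to the HW */
    };
    #define DESC_HW_OWNED   0x1

    static void post_descriptor(struct my_desc *desc, u64 dma_addr, u32 len)
    {
            desc->addr = dma_addr;
            desc->len  = len;
            dma_wmb();                      /* payload visible before ownership flips */
            desc->flags = DESC_HW_OWNED;    /* device may now consume the descriptor  */
    }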
|
| /kernel/linux/linux-5.10/arch/ia64/include/asm/ |
| D | barrier.h |
|       3   * Memory barrier definitions. This is based on information published
|       48  #define __smp_mb__before_atomic() barrier()
|       49  #define __smp_mb__after_atomic() barrier()
|       59  barrier(); \
|       67  barrier(); \
|       72  * The group barrier in front of the rsm & ssm are necessary to ensure
|       77  #include <asm-generic/barrier.h>
|
| /kernel/linux/linux-4.19/arch/ia64/include/asm/ |
| D | barrier.h |
|       3   * Memory barrier definitions. This is based on information published
|       48  #define __smp_mb__before_atomic() barrier()
|       49  #define __smp_mb__after_atomic() barrier()
|       59  barrier(); \
|       67  barrier(); \
|       72  * The group barrier in front of the rsm & ssm are necessary to ensure
|       77  #include <asm-generic/barrier.h>
|
| /kernel/linux/linux-4.19/arch/mips/mm/ |
| D | tlb-r3k.c |
|       33   #define BARRIER \    macro
|       52   entry++; /* BARRIER */    in local_flush_tlb_from()
|       109  start += PAGE_SIZE; /* BARRIER */    in local_flush_tlb_range()
|       114  if (idx < 0) /* BARRIER */    in local_flush_tlb_range()
|       146  start += PAGE_SIZE; /* BARRIER */    in local_flush_tlb_kernel_range()
|       151  if (idx < 0) /* BARRIER */    in local_flush_tlb_kernel_range()
|       179  BARRIER;    in local_flush_tlb_page()
|       184  if (idx < 0) /* BARRIER */    in local_flush_tlb_page()
|       218  BARRIER;    in __update_tlb()
|       223  if (idx < 0) { /* BARRIER */    in __update_tlb()
|       [all …]
|
| /kernel/linux/linux-5.10/arch/mips/mm/ |
| D | tlb-r3k.c |
|       32   #define BARRIER \    macro
|       51   entry++; /* BARRIER */    in local_flush_tlb_from()
|       96   start += PAGE_SIZE; /* BARRIER */    in local_flush_tlb_range()
|       101  if (idx < 0) /* BARRIER */    in local_flush_tlb_range()
|       133  start += PAGE_SIZE; /* BARRIER */    in local_flush_tlb_kernel_range()
|       138  if (idx < 0) /* BARRIER */    in local_flush_tlb_kernel_range()
|       166  BARRIER;    in local_flush_tlb_page()
|       171  if (idx < 0) /* BARRIER */    in local_flush_tlb_page()
|       205  BARRIER;    in __update_tlb()
|       210  if (idx < 0) { /* BARRIER */    in __update_tlb()
|       [all …]
|