/arch/arm64/include/asm/ |
D | atomic_lse.h   |  30 #define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) \      in ATOMIC_OP() argument
  |                |  57 #define ATOMIC_OP_ADD_RETURN(name, mb, cl...) \              argument
  |                |  90 #define ATOMIC_FETCH_OP_AND(name, mb, cl...) \               argument
  |                | 121 #define ATOMIC_OP_SUB_RETURN(name, mb, cl...) \              argument
  |                | 145 #define ATOMIC_FETCH_OP_SUB(name, mb, cl...) \               argument
  |                | 183 #define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) \     in ATOMIC64_OP() argument
  |                | 210 #define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) \            argument
  |                | 243 #define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \             argument
  |                | 274 #define ATOMIC64_OP_SUB_RETURN(name, mb, cl...) \            argument
  |                | 298 #define ATOMIC64_FETCH_OP_SUB(name, mb, cl...) \             argument
  |                | [all …]
|
D | atomic_ll_sc.h |  42 #define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\    argument
  |                |  63 #define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \    argument
  |                | 138 #define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\  argument
  |                | 159 #define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\   argument
  |                | 239 #define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl, constraint) \     argument
  |                | 297 #define __CMPXCHG_DBL(name, mb, rel, cl) \                                   argument
|
D | barrier.h | 41 #define mb() dsb(sy) macro
|
D | cmpxchg.h | 21 #define __XCHG_CASE(w, sfx, name, sz, mb, nop_lse, acq, acq_lse, rel, cl) \ argument
|
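In both atomic_lse.h and atomic_ll_sc.h above, mb is not the barrier
macro but a parameter of the atomic-op generators: each macro body is
stamped out once per ordering variant (_relaxed, _acquire, _release,
fully ordered), with name selecting the suffix and mb/acq/rel supplying
the trailing barrier and the acquire/release annotations for that
variant. A minimal userspace sketch of the same generator pattern, with
a single C11 memory order standing in for the mb/acq/rel trio and all
demo_* names hypothetical:

    #include <stdatomic.h>

    /* Stamp out one fetch_add variant per ordering, the way
     * ATOMIC_FETCH_OP() does for the LL/SC and LSE asm bodies. */
    #define DEMO_FETCH_ADD(name, order)                               \
    static inline int demo_atomic_fetch_add##name(int i,              \
                                                  atomic_int *v)      \
    {                                                                 \
            return atomic_fetch_add_explicit(v, i, order);            \
    }

    DEMO_FETCH_ADD(_relaxed, memory_order_relaxed)
    DEMO_FETCH_ADD(_acquire, memory_order_acquire)
    DEMO_FETCH_ADD(_release, memory_order_release)
    DEMO_FETCH_ADD(,         memory_order_seq_cst)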
/arch/arc/include/asm/ |
D | barrier.h | 26 #define mb() asm volatile("dmb 3\n" : : : "memory")   macro
  |           | 38 #define mb() asm volatile("sync\n" : : : "memory")    macro
|
/arch/x86/um/asm/ |
D | barrier.h | 14 #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)   macro
  |           | 20 #define mb() asm volatile("mfence" : : : "memory")                                macro
|
/arch/parisc/include/asm/ |
D | barrier.h | 16 #define mb() do { synchronize_caches(); } while (0)   macro
  |           | 22 #define mb() barrier()                                 macro
|
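In the parisc entries above, mb() is either a real cache
synchronization (synchronize_caches()) or, in the non-SMP
configuration, plain barrier(): a compiler-only fence that emits no
instruction, which several architectures below (arm, for one) also
fall back to. The kernel's barrier() is essentially an empty asm with
a memory clobber; a sketch under that assumption (demo_barrier is a
hypothetical name):

    /* Compiler-only barrier: the empty asm with a "memory" clobber
     * stops the compiler from reordering or caching memory accesses
     * across it, but constrains nothing at the CPU level. */
    #define demo_barrier() __asm__ __volatile__("" : : : "memory")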
/arch/openrisc/include/asm/ |
D | barrier.h | 5 #define mb() asm volatile ("l.msync" ::: "memory") macro
|
/arch/microblaze/include/asm/ |
D | barrier.h | 9 #define mb() __asm__ __volatile__ ("mbar 1" : : : "memory") macro
|
/arch/nds32/include/asm/ |
D | barrier.h | 8 #define mb() asm volatile("msync all":::"memory") macro
|
/arch/xtensa/include/asm/ |
D | barrier.h | 14 #define mb() ({ __asm__ __volatile__("memw" : : : "memory"); }) macro
|
/arch/x86/include/asm/ |
D | barrier.h | 15 #define mb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "mfence", \   macro
  |           | 22 #define mb() asm volatile("mfence":::"memory")                                 macro
|
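The two x86 definitions differ by CPU feature: MFENCE only exists from
SSE2 (X86_FEATURE_XMM2) onward, so the ALTERNATIVE()-patched variant
falls back to a LOCK'ed add to the stack, which is architecturally a
full barrier on every x86. A userspace sketch making the same choice
at compile time rather than by runtime patching (demo_mb is a
hypothetical name):

    static inline void demo_mb(void)
    {
    #ifdef __SSE2__
            __asm__ __volatile__("mfence" : : : "memory");
    #else
            /* Pre-SSE2 fallback: any LOCK'ed read-modify-write is a
             * full barrier; adding 0 leaves the stack value intact. */
            __asm__ __volatile__("lock; addl $0,-4(%%esp)" : : : "memory");
    #endif
    }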
/arch/alpha/include/asm/ |
D | barrier.h | 5 #define mb() __asm__ __volatile__("mb": : :"memory") macro
|
/arch/mips/include/asm/ |
D | barrier.h | 71 #define mb() wbflush()   macro
  |           | 76 #define mb() fast_mb()   macro
|
/arch/sh/include/asm/ |
D | barrier.h | 28 #define mb() __asm__ __volatile__ ("synco": : :"memory") macro
|
/arch/sparc/include/asm/ |
D | barrier_64.h | 37 #define mb() membar_safe("#StoreLoad") macro
|
/arch/ia64/include/asm/ |
D | barrier.h | 39 #define mb() ia64_mf() macro
|
/arch/riscv/include/asm/ |
D | barrier.h | 21 #define mb() RISCV_FENCE(iorw,iorw) macro
|
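RISCV_FENCE(iorw,iorw) emits a single fence instruction whose
predecessor and successor sets each cover all I/O and memory accesses,
i.e. a full barrier. A sketch of what such a macro plausibly expands
to (the body below is an assumption, not quoted from the tree):

    /* Stringize the two access sets into one fence instruction:
     * DEMO_RISCV_FENCE(iorw, iorw) emits "fence iorw,iorw". */
    #define DEMO_RISCV_FENCE(p, s) \
            __asm__ __volatile__("fence " #p "," #s : : : "memory")

    #define demo_mb() DEMO_RISCV_FENCE(iorw, iorw)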
/arch/arm/include/asm/ |
D | barrier.h | 64 #define mb() __arm_heavy_mb()   macro
  |           | 70 #define mb() barrier()           macro
|
/arch/s390/include/asm/ |
D | barrier.h | 24 #define mb() do { asm volatile(__ASM_BARRIER : : : "memory"); } while (0) macro
|
/arch/csky/include/asm/ |
D | barrier.h | 73 #define mb() asm volatile ("sync\n":::"memory") macro
|
/arch/powerpc/include/asm/ |
D | barrier.h | 38 #define mb() __asm__ __volatile__ ("sync" : : : "memory") macro
|
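Across all of these barrier.h definitions the contract is the same:
mb() orders this CPU's earlier loads and stores against its later
ones, classically to publish a payload before a ready flag. A
userspace sketch of that usage, with GCC's __sync_synchronize()
builtin standing in for the kernel's mb() (all names hypothetical):

    static int payload;
    static volatile int ready;

    void demo_publish(int v)
    {
            payload = v;
            __sync_synchronize();   /* payload store before flag store */
            ready = 1;
    }

    int demo_consume(void)
    {
            while (!ready)
                    ;               /* spin until the flag is visible */
            __sync_synchronize();   /* flag load before payload load */
            return payload;
    }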
/arch/x86/mm/ |
D | numa.c | 506 struct numa_memblk *mb = numa_meminfo.blk + i;   in numa_clear_kernel_node_hotplug() local
  |        | 538 struct numa_memblk *mb = numa_meminfo.blk + i;   in numa_clear_kernel_node_hotplug() local
  |        | 558 struct numa_memblk *mb = &mi->blk[i];            in numa_register_memblks() local
|
/arch/arc/plat-axs10x/ |
D | axs10x.c | 95 char mb[32]; in axs10x_early_init() local
|
/arch/x86/kernel/fpu/ |
D | regset.c | 333 struct membuf mb = { .p = &fxsave, .left = sizeof(fxsave) }; in fpregs_get() local
|
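Like the numa.c and axs10x.c hits above, the fpregs_get() hit is not
the barrier macro but a local variable: a struct membuf write cursor
(position pointer plus bytes left) that the regset code threads
through its writers. A simplified stand-in for that pattern (demo_*
names hypothetical, not the kernel's membuf API):

    #include <stddef.h>
    #include <string.h>

    struct demo_membuf {
            void *p;        /* next write position */
            size_t left;    /* bytes remaining in the buffer */
    };

    /* Copy n bytes (truncated to the space left), advance the
     * cursor, and report how much room remains. */
    static size_t demo_membuf_write(struct demo_membuf *mb,
                                    const void *src, size_t n)
    {
            if (n > mb->left)
                    n = mb->left;
            memcpy(mb->p, src, n);
            mb->p = (char *)mb->p + n;
            mb->left -= n;
            return mb->left;
    }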