/kernel/linux/linux-5.10/net/ipv6/ |
D | protocol.c
      30  return !cmpxchg((const struct inet6_protocol **)&inet6_protos[protocol],  in inet6_add_protocol()
      39  ret = (cmpxchg((const struct inet6_protocol **)&inet6_protos[protocol],  in inet6_del_protocol()
      54  return !cmpxchg((const struct net_offload **)&inet6_offloads[protocol],  in inet6_add_offload()
      63  ret = (cmpxchg((const struct net_offload **)&inet6_offloads[protocol],  in inet6_del_offload()
|
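Judging by the snippets above, inet6_add_protocol() and inet6_del_protocol() claim and release a slot in the inet6_protos[] handler table with a single cmpxchg(): add succeeds only if the slot was NULL, delete only if it still holds the handler being removed. Below is a minimal userspace sketch of that claim-a-slot pattern, using C11 atomics in place of the kernel's cmpxchg(); demo_protos, demo_add_protocol() and demo_del_protocol() are illustrative names, and the bool return values are a simplification of the kernel helpers' error-code convention.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct demo_protocol { int (*handler)(void); };

    #define MAX_PROTOS 256
    static _Atomic(const struct demo_protocol *) demo_protos[MAX_PROTOS];

    /* Succeeds only if the slot is currently empty (NULL -> prot). */
    static bool demo_add_protocol(const struct demo_protocol *prot, unsigned char num)
    {
        const struct demo_protocol *expected = NULL;

        return atomic_compare_exchange_strong(&demo_protos[num], &expected, prot);
    }

    /* Succeeds only if the slot still holds exactly this handler (prot -> NULL). */
    static bool demo_del_protocol(const struct demo_protocol *prot, unsigned char num)
    {
        const struct demo_protocol *expected = prot;

        return atomic_compare_exchange_strong(&demo_protos[num], &expected, NULL);
    }

    static int dummy_handler(void) { return 0; }

    int main(void)
    {
        static const struct demo_protocol p = { .handler = dummy_handler };

        printf("add: %d\n", demo_add_protocol(&p, 17));        /* 1: slot was free   */
        printf("add again: %d\n", demo_add_protocol(&p, 17));  /* 0: already claimed */
        printf("del: %d\n", demo_del_protocol(&p, 17));        /* 1: we owned it     */
        return 0;
    }

The same first-writer-wins shape appears again below in ip6_icmp.c, the IPv4 protocol tables, gre_demux.c, the NewIP protocol table, speakup's tty claim, and dma-fence-array's pending-error field.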
D | ip6_icmp.c
      18  return (cmpxchg((ip6_icmp_send_t **)&ip6_icmp_send, NULL, fn) == NULL) ?  in inet6_register_icmp_sender()
      27  ret = (cmpxchg((ip6_icmp_send_t **)&ip6_icmp_send, fn, NULL) == fn) ?  in inet6_unregister_icmp_sender()
|
/kernel/linux/linux-5.10/net/ipv4/ |
D | protocol.c
      40  return !cmpxchg((const struct net_protocol **)&inet_protos[protocol],  in inet_add_protocol()
      47  return !cmpxchg((const struct net_offload **)&inet_offloads[protocol],  in inet_add_offload()
      56  ret = (cmpxchg((const struct net_protocol **)&inet_protos[protocol],  in inet_del_protocol()
      69  ret = (cmpxchg((const struct net_offload **)&inet_offloads[protocol],  in inet_del_offload()
|
D | gre_demux.c
      36  return (cmpxchg((const struct gre_protocol **)&gre_proto[version], NULL, proto) == NULL) ?  in gre_add_protocol()
      48  ret = (cmpxchg((const struct gre_protocol **)&gre_proto[version], proto, NULL) == proto) ?  in gre_del_protocol()
|
/kernel/linux/linux-5.10/arch/s390/include/asm/ |
D | cmpxchg.h
      15  #define cmpxchg(ptr, o, n) \  macro
      22  #define cmpxchg64 cmpxchg
      23  #define cmpxchg_local cmpxchg
      24  #define cmpxchg64_local cmpxchg
|
/kernel/linux/linux-5.10/include/asm-generic/ |
D | atomic.h
      42  while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
      52  while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
      64  while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
     194  #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
|
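The asm-generic fallback above builds atomic arithmetic out of a cmpxchg() retry loop: read the counter, compute the new value, and if another CPU changed the counter in between, take the value cmpxchg() handed back and try again. A userspace sketch of that loop using GCC's __sync_val_compare_and_swap(), which, like cmpxchg(), returns the value actually found in memory; atomic_demo_t and atomic_fetch_add_demo() are illustrative names, not the kernel API.

    #include <stdio.h>

    typedef struct { volatile int counter; } atomic_demo_t;

    /* Retry loop: if the compare-and-swap reports a different old value,
     * another thread updated the counter first, so recompute and retry. */
    static int atomic_fetch_add_demo(atomic_demo_t *v, int i)
    {
        int c, old;

        c = v->counter;
        while ((old = __sync_val_compare_and_swap(&v->counter, c, c + i)) != c)
            c = old;
        return c;               /* value before the addition, like fetch_add */
    }

    int main(void)
    {
        atomic_demo_t v = { .counter = 40 };

        printf("old=%d new=%d\n", atomic_fetch_add_demo(&v, 2), v.counter);
        return 0;
    }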
/kernel/linux/linux-5.10/kernel/ |
D | task_work.c
      42  } while (cmpxchg(&task->task_works, head, work) != head);  in task_work_add()
      90  else if (cmpxchg(pprev, work, work->next) == work)  in task_work_cancel_match()
     147  } while (cmpxchg(&task->task_works, work, head) != work);  in task_work_run()
|
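task_work_add() above (and lib/llist.c further down) prepends to a singly linked list without taking a lock: point the new node at the head that was just observed, then cmpxchg() the head pointer, retrying if it moved in the meantime. A self-contained sketch of that push loop with C11 atomics; struct node, list_head and push() are illustrative names.

    #include <stdatomic.h>
    #include <stdio.h>

    struct node {
        struct node *next;
        int val;
    };

    static _Atomic(struct node *) list_head;

    /* Lock-free prepend: link the node to the observed head, then try to
     * swing the head pointer; on failure, 'head' is refreshed by the CAS. */
    static void push(struct node *n)
    {
        struct node *head = atomic_load(&list_head);

        do {
            n->next = head;
        } while (!atomic_compare_exchange_weak(&list_head, &head, n));
    }

    int main(void)
    {
        static struct node a = { .val = 1 }, b = { .val = 2 };

        push(&a);
        push(&b);
        for (struct node *n = atomic_load(&list_head); n; n = n->next)
            printf("%d\n", n->val);     /* prints 2 then 1 (LIFO order) */
        return 0;
    }

llist_add_batch() below follows the same shape, just linking a whole batch of nodes before the single cmpxchg() on the head.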
/kernel/linux/common_modules/newip/third_party/linux-5.10/net/newip/ |
D | protocol.c
      31  return !cmpxchg((const struct ninet_protocol **)&ninet_protos[protocol],  in ninet_add_protocol()
      40  ret = (cmpxchg((const struct ninet_protocol **)&ninet_protos[protocol],  in ninet_del_protocol()
|
/kernel/linux/linux-5.10/tools/include/asm-generic/ |
D | atomic-gcc.h
      64  #define cmpxchg(ptr, oldval, newval) \  macro
      69  return cmpxchg(&(v)->counter, oldval, newval);  in atomic_cmpxchg()
|
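The tools/ copy of the atomics shows cmpxchg() being provided as a thin macro in userspace. A sketch of the same idea on top of GCC's __sync_val_compare_and_swap() builtin, which already has cmpxchg()'s return-the-old-value contract; this is an approximation of the header above, not a verbatim copy.

    #include <stdio.h>

    /* __sync_val_compare_and_swap() returns the prior contents of *ptr,
     * which is exactly the contract cmpxchg() callers rely on. */
    #define cmpxchg(ptr, oldval, newval) \
        __sync_val_compare_and_swap(ptr, oldval, newval)

    int main(void)
    {
        int x = 5;

        /* Succeeds: x was 5, becomes 6, and the old value 5 is returned. */
        printf("%d\n", cmpxchg(&x, 5, 6));
        /* Fails: x is now 6, not 5, so it is left untouched. */
        printf("%d %d\n", cmpxchg(&x, 5, 7), x);
        return 0;
    }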
/kernel/linux/linux-5.10/lib/ |
D | llist.c
      33  } while (cmpxchg(&head->first, first, new_first) != first);  in llist_add_batch()
      63  entry = cmpxchg(&head->first, old_entry, next);  in llist_del_first()
|
D | errseq.c
      94  cur = cmpxchg(eseq, old, new);  in errseq_set()
     200  cmpxchg(eseq, old, new);  in errseq_check_and_advance()
|
/kernel/linux/linux-5.10/arch/arc/include/asm/ |
D | cmpxchg.h
      66  #define cmpxchg(ptr, o, n) ({ \  macro
      78  #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
|
/kernel/linux/linux-5.10/drivers/gpu/drm/ |
D | drm_lock.c
      75  prev = cmpxchg(lock, old, new);  in drm_lock_take()
     118  prev = cmpxchg(lock, old, new);  in drm_lock_transfer()
     141  prev = cmpxchg(lock, old, new);  in drm_legacy_lock_free()
     319  prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);  in drm_legacy_idlelock_release()
|
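drm_lock_take() and its relatives update a lock word by re-reading it, computing the desired new contents, and retrying the cmpxchg() until no other context raced in between. A stripped-down sketch of that read/modify/cmpxchg loop with C11 atomics; the LOCK_HELD bit and context encoding here are illustrative, not the DRM hardware-lock format.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define LOCK_HELD 0x80000000u          /* illustrative "held" flag */

    static _Atomic unsigned int lock_word;

    /* Try to mark the lock held for 'context'; returns false if someone
     * else already holds it. Retries only when the word changed under us. */
    static bool lock_take(unsigned int context)
    {
        unsigned int old, new;

        do {
            old = atomic_load(&lock_word);
            if (old & LOCK_HELD)
                return false;              /* already held by someone else */
            new = LOCK_HELD | context;
        } while (!atomic_compare_exchange_weak(&lock_word, &old, new));

        return true;
    }

    int main(void)
    {
        printf("take #1: %d\n", lock_take(3));   /* 1 */
        printf("take #2: %d\n", lock_take(4));   /* 0, still held by context 3 */
        return 0;
    }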
/kernel/linux/linux-5.10/arch/mips/kernel/ |
D | cmpxchg.c
      44  load32 = cmpxchg(ptr32, old32, new32);  in __xchg_small()
     100  load32 = cmpxchg(ptr32, old32, new32);  in __cmpxchg_small()
|
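arch/mips/kernel/cmpxchg.c emulates 8- and 16-bit cmpxchg() on top of the native 32-bit one: round the address down to an aligned word, build a shift and mask for the byte or halfword inside it, and loop on a full-word cmpxchg() that replaces only those bits. A hedged userspace sketch of the technique for the 1-byte case, little-endian only; the real code also handles big-endian layouts and the barrier semantics.

    #include <stdint.h>
    #include <stdio.h>

    /* Emulate a 1-byte compare-and-swap with a 4-byte one: operate on the
     * aligned 32-bit word containing the byte and replace only its bits. */
    static uint8_t cmpxchg_u8(uint8_t *ptr, uint8_t old, uint8_t new)
    {
        uintptr_t addr = (uintptr_t)ptr;
        uint32_t *ptr32 = (uint32_t *)(addr & ~(uintptr_t)3);
        unsigned int shift = (addr & 3) * 8;      /* little-endian layout */
        uint32_t mask = 0xffu << shift;
        uint32_t load32 = *ptr32;

        while (((load32 & mask) >> shift) == old) {
            uint32_t new32 = (load32 & ~mask) | ((uint32_t)new << shift);
            uint32_t prev = __sync_val_compare_and_swap(ptr32, load32, new32);

            if (prev == load32)
                return old;                /* word unchanged: swap happened */
            load32 = prev;                 /* word raced: re-check the byte */
        }
        return (load32 & mask) >> shift;   /* byte no longer matches 'old' */
    }

    int main(void)
    {
        static _Alignas(4) uint8_t buf[4] = { 0x11, 0x22, 0x33, 0x44 };

        printf("%#x\n", cmpxchg_u8(&buf[1], 0x22, 0xaa));  /* 0x22: swapped    */
        printf("%#x\n", cmpxchg_u8(&buf[1], 0x22, 0xbb));  /* 0xaa: left as is */
        return 0;
    }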
/kernel/linux/linux-5.10/arch/alpha/include/asm/ |
D | cmpxchg.h
      56  #define cmpxchg(ptr, o, n) \  macro
      71  cmpxchg((ptr), (o), (n)); \
|
/kernel/linux/linux-5.10/arch/sparc/include/asm/ |
D | atomic_64.h
      52  #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))  in ATOMIC_OPS()
      60  ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
|
/kernel/linux/linux-5.10/arch/ia64/include/uapi/asm/ |
D | cmpxchg.h
     130  #define cmpxchg(ptr, o, n) cmpxchg_acq((ptr), (o), (n))  macro
     133  #define cmpxchg_local cmpxchg
|
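On ia64 the plain cmpxchg() is simply the acquire-ordered variant, cmpxchg_acq(). Expressed in portable C11, that corresponds to a compare-exchange with an explicit memory_order_acquire ordering; a small sketch, with the guard flag being purely illustrative.

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int guard;        /* 0 = free, 1 = claimed */

    /* Claim the guard with acquire semantics: on success, later loads in
     * this thread cannot be reordered before the successful exchange. */
    static bool claim(void)
    {
        int expected = 0;

        return atomic_compare_exchange_strong_explicit(&guard, &expected, 1,
                                                       memory_order_acquire,
                                                       memory_order_acquire);
    }

    int main(void)
    {
        return claim() ? 0 : 1;
    }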
/kernel/linux/linux-5.10/net/rxrpc/ |
D | call_event.c
     341  cmpxchg(&call->ack_at, t, now + MAX_JIFFY_OFFSET);  in rxrpc_process_call()
     348  cmpxchg(&call->ack_lost_at, t, now + MAX_JIFFY_OFFSET);  in rxrpc_process_call()
     355  cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET);  in rxrpc_process_call()
     364  cmpxchg(&call->ping_at, t, now + MAX_JIFFY_OFFSET);  in rxrpc_process_call()
     371  cmpxchg(&call->resend_at, t, now + MAX_JIFFY_OFFSET);  in rxrpc_process_call()
|
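The rxrpc lines show cmpxchg() used without any retry loop: the timer field is pushed far into the future only if it still holds the value that was just read and handled; if another CPU rearmed it first, the failed cmpxchg() is deliberately ignored because the newer deadline should stand. A tiny sketch of that update-only-if-unchanged idiom; ack_at, FAR_FUTURE and disarm_if_unchanged() are illustrative names, and MAX_JIFFY_OFFSET's real value is not reproduced here.

    #include <stdio.h>

    #define FAR_FUTURE 0x7fffffffUL

    static unsigned long ack_at = 1000;

    /* Disarm the deadline we just handled, but only if nobody rearmed it.
     * On failure the newer value stays in place; no retry is wanted. */
    static void disarm_if_unchanged(unsigned long seen)
    {
        __sync_val_compare_and_swap(&ack_at, seen, FAR_FUTURE);
    }

    int main(void)
    {
        unsigned long seen = ack_at;

        ack_at = 500;                 /* simulate a concurrent re-arm      */
        disarm_if_unchanged(seen);    /* loses the race, 500 is kept       */
        printf("%lu\n", ack_at);
        return 0;
    }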
/kernel/linux/linux-5.10/drivers/accessibility/speakup/ |
D | selection.c
      71  if (cmpxchg(&speakup_sel_work.tty, NULL, tty)) {  in speakup_set_selection()
     124  if (cmpxchg(&speakup_paste_work.tty, NULL, tty)) {  in speakup_paste_selection()
|
/kernel/linux/linux-5.10/arch/sh/kernel/cpu/sh2/ |
D | smp-j2.c
      28  while (cmpxchg(pmsg, messages, 0) != messages);  in j2_ipi_interrupt_handler()
     118  while (cmpxchg(pmsg, old, old|(1U<<message)) != old);  in j2_send_ipi()
|
/kernel/linux/linux-5.10/arch/riscv/include/asm/ |
D | cmpxchg.h
     340  #define cmpxchg(ptr, o, n) \  macro
     354  cmpxchg((ptr), (o), (n)); \
     366  cmpxchg((ptr), (o), (n)); \
|
/kernel/linux/linux-5.10/drivers/dma-buf/ |
D | dma-fence-array.c
      36  cmpxchg(&array->base.error, PENDING_ERROR, error);  in dma_fence_array_set_pending_error()
      42  cmpxchg(&array->base.error, PENDING_ERROR, 0);  in dma_fence_array_clear_pending_error()
|
/kernel/linux/linux-5.10/Documentation/locking/ |
D | rt-mutex.rst
      42  without waiters. The optimized fastpath operations require cmpxchg
      66  with ->wait_lock is held. To prevent any fast path cmpxchg to the lock,
      72  To prevent a cmpxchg of the owner releasing the lock, we need to
|
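The rt-mutex document excerpted above describes a fastpath in which, with no waiters, lock and unlock are each a single cmpxchg() of the owner field between NULL and the current task, falling back to a slowpath when that fails. A hedged sketch of that fastpath shape with C11 atomics; the slowpath here is a stub, and none of the waiter queuing or priority-inheritance handling of the real rt_mutex is modelled.

    #include <stdatomic.h>
    #include <stdio.h>

    struct task { const char *name; };

    struct demo_mutex {
        _Atomic(struct task *) owner;   /* NULL: unlocked and no waiters */
    };

    /* Stand-in for the slowpath: a real implementation would enqueue the
     * caller as a waiter, handle priority inheritance and block. */
    static void slowpath_lock(struct demo_mutex *m, struct task *me)
    {
        struct task *expected = NULL;

        while (!atomic_compare_exchange_weak(&m->owner, &expected, me))
            expected = NULL;            /* just spin in this sketch */
    }

    /* Fastpath lock: a single cmpxchg() of owner from NULL to ourselves. */
    static void demo_lock(struct demo_mutex *m, struct task *me)
    {
        struct task *expected = NULL;

        if (!atomic_compare_exchange_strong(&m->owner, &expected, me))
            slowpath_lock(m, me);
    }

    /* Fastpath unlock: a single cmpxchg() of owner back to NULL; if that
     * fails, waiters showed up and a slowpath would hand the lock over. */
    static void demo_unlock(struct demo_mutex *m, struct task *me)
    {
        struct task *expected = me;

        atomic_compare_exchange_strong(&m->owner, &expected, NULL);
    }

    int main(void)
    {
        static struct demo_mutex m;
        struct task me = { .name = "main" };

        demo_lock(&m, &me);
        printf("owner: %s\n", atomic_load(&m.owner)->name);
        demo_unlock(&m, &me);
        printf("owner: %p\n", (void *)atomic_load(&m.owner));
        return 0;
    }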
/kernel/linux/linux-5.10/arch/nios2/include/asm/ |
D | Kbuild | 2 generic-y += cmpxchg.h
|
/kernel/linux/linux-5.10/arch/nds32/include/asm/ |
D | Kbuild | 3 generic-y += cmpxchg.h
|