/kernel/locking/
rtmutex.c
    94  rt_mutex_owner_encode(struct rt_mutex_base *lock, struct task_struct *owner)   in rt_mutex_owner_encode() argument
    96  unsigned long val = (unsigned long)owner;   in rt_mutex_owner_encode()
    105  rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)   in rt_mutex_set_owner() argument
    111  xchg_acquire(&lock->owner, rt_mutex_owner_encode(lock, owner));   in rt_mutex_set_owner()
    117  WRITE_ONCE(lock->owner, rt_mutex_owner_encode(lock, NULL));   in rt_mutex_clear_owner()
    122  lock->owner = (struct task_struct *)   in clear_rt_mutex_waiters()
    123  ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);   in clear_rt_mutex_waiters()
    129  unsigned long owner, *p = (unsigned long *) &lock->owner;   in fixup_rt_mutex_waiters() local
    192  owner = READ_ONCE(*p);   in fixup_rt_mutex_waiters()
    193  if (owner & RT_MUTEX_HAS_WAITERS) {   in fixup_rt_mutex_waiters()
    [all …]
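The common thread in these rtmutex.c hits is pointer tagging: a task_struct pointer is word-aligned, so its low bit can double as RT_MUTEX_HAS_WAITERS and is masked off whenever the real owner pointer is needed. A minimal userspace sketch of that idea follows; the struct and helper names are invented for illustration, and only the flag's role mirrors RT_MUTEX_HAS_WAITERS.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define HAS_WAITERS 1UL   /* plays the role of RT_MUTEX_HAS_WAITERS (bit 0) */

struct fake_task { char comm[16]; };
struct fake_lock { uintptr_t owner; };   /* tagged pointer, like lock->owner */

/* encode: owner pointer with the waiters bit ORed into bit 0 */
static void fake_set_owner(struct fake_lock *lock, struct fake_task *task, int waiters)
{
    lock->owner = (uintptr_t)task | (waiters ? HAS_WAITERS : 0);
}

/* decode: mask the flag bit away before dereferencing, as rt_mutex_owner() does */
static struct fake_task *fake_owner(const struct fake_lock *lock)
{
    return (struct fake_task *)(lock->owner & ~HAS_WAITERS);
}

int main(void)
{
    static struct fake_task task = { "demo" };
    struct fake_lock lock = { 0 };

    fake_set_owner(&lock, &task, 1);
    assert(fake_owner(&lock) == &task);   /* pointer survives the tag */
    assert(lock.owner & HAS_WAITERS);     /* flag is readable on its own */
    printf("owner=%s, waiters=%lu\n", fake_owner(&lock)->comm,
           (unsigned long)(lock.owner & HAS_WAITERS));
    return 0;
}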
mutex.c
    51  atomic_long_set(&lock->owner, 0);   in __mutex_init()
    85  return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);   in __mutex_owner()
    88  static inline struct task_struct *__owner_task(unsigned long owner)   in __owner_task() argument
    90  return (struct task_struct *)(owner & ~MUTEX_FLAGS);   in __owner_task()
    99  static inline unsigned long __owner_flags(unsigned long owner)   in __owner_flags() argument
    101  return owner & MUTEX_FLAGS;   in __owner_flags()
    109  unsigned long owner, curr = (unsigned long)current;   in __mutex_trylock_common() local
    111  owner = atomic_long_read(&lock->owner);   in __mutex_trylock_common()
    113  unsigned long flags = __owner_flags(owner);   in __mutex_trylock_common()
    114  unsigned long task = owner & ~MUTEX_FLAGS;   in __mutex_trylock_common()
    [all …]
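These mutex.c hits show the same single-word owner encoding (task pointer plus MUTEX_FLAGS in the low bits) being used for acquisition: __mutex_trylock_common() reads the word and, if no task is recorded, installs current while preserving the flag bits. A rough C11-atomics sketch of that trylock shape, with invented names and a made-up FLAG_MASK standing in for MUTEX_FLAGS:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG_MASK 0x07UL                      /* stand-in for MUTEX_FLAGS */

struct fake_task { char comm[16]; };
struct fake_mutex { atomic_uintptr_t owner; };

static bool fake_mutex_trylock(struct fake_mutex *lock, struct fake_task *curr)
{
    uintptr_t owner = atomic_load(&lock->owner);

    for (;;) {
        uintptr_t flags = owner & FLAG_MASK;
        uintptr_t task  = owner & ~FLAG_MASK;

        if (task)                             /* already owned: give up */
            return false;

        /* try to install curr as owner, preserving the flag bits */
        if (atomic_compare_exchange_weak(&lock->owner, &owner,
                                         (uintptr_t)curr | flags))
            return true;
        /* the failed CAS reloaded the current value into 'owner'; retry */
    }
}

int main(void)
{
    static struct fake_task a = { "task-a" }, b = { "task-b" };
    struct fake_mutex lock = { 0 };

    printf("a trylock: %d\n", fake_mutex_trylock(&lock, &a));  /* 1 */
    printf("b trylock: %d\n", fake_mutex_trylock(&lock, &b));  /* 0 */
    return 0;
}

Packing the owner and its flags into one word is what lets the lock and unlock fast paths be a single atomic operation on lock->owner.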
spinlock_debug.c
    28  lock->owner = SPINLOCK_OWNER_INIT;   in __raw_spin_lock_init()
    47  lock->owner = SPINLOCK_OWNER_INIT;   in __rwlock_init()
    56  struct task_struct *owner = READ_ONCE(lock->owner);   in spin_dump() local
    58  if (owner == SPINLOCK_OWNER_INIT)   in spin_dump()
    59  owner = NULL;   in spin_dump()
    66  owner ? owner->comm : "<none>",   in spin_dump()
    67  owner ? task_pid_nr(owner) : -1,   in spin_dump()
    86  SPIN_BUG_ON(READ_ONCE(lock->owner) == current, lock, "recursion");   in debug_spin_lock_before()
    94  WRITE_ONCE(lock->owner, current);   in debug_spin_lock_after()
    101  SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");   in debug_spin_unlock()
    [all …]
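The spinlock_debug.c hits are consistency checks rather than the locking itself: record the acquiring task in lock->owner, warn if a task tries to take a lock it already owns, and warn if the unlocking task is not the recorded owner. A small sketch of those three checks, with asserts standing in for SPIN_BUG_ON() and a plain pointer argument standing in for current:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct fake_task { char comm[16]; };
struct fake_spinlock { struct fake_task *owner; };  /* NULL plays the SPINLOCK_OWNER_INIT role here */

static void debug_lock(struct fake_spinlock *lock, struct fake_task *current_task)
{
    assert(lock->owner != current_task && "recursion");    /* ~ debug_spin_lock_before() */
    /* ... the real acquisition would happen here ... */
    lock->owner = current_task;                            /* ~ debug_spin_lock_after() */
}

static void debug_unlock(struct fake_spinlock *lock, struct fake_task *current_task)
{
    assert(lock->owner == current_task && "wrong owner");  /* ~ debug_spin_unlock() */
    lock->owner = NULL;
    /* ... the real release would happen here ... */
}

int main(void)
{
    static struct fake_task t = { "demo" };
    struct fake_spinlock lock = { NULL };

    debug_lock(&lock, &t);
    debug_unlock(&lock, &t);
    printf("debug checks passed\n");
    return 0;
}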
rwsem.c
    75  atomic_long_read(&(sem)->owner), (long)current, \
    145  atomic_long_set(&sem->owner, (long)current);   in rwsem_set_owner()
    151  atomic_long_set(&sem->owner, 0);   in rwsem_clear_owner()
    159  return atomic_long_read(&sem->owner) & flags;   in rwsem_test_oflags()
    173  struct task_struct *owner)   in __rwsem_set_reader_owned() argument
    175  unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED |   in __rwsem_set_reader_owned()
    176  (atomic_long_read(&sem->owner) & RWSEM_NONSPINNABLE);   in __rwsem_set_reader_owned()
    178  atomic_long_set(&sem->owner, val);   in __rwsem_set_reader_owned()
    212  unsigned long val = atomic_long_read(&sem->owner);   in rwsem_clear_reader_owned()
    215  if (atomic_long_try_cmpxchg(&sem->owner, &val,   in rwsem_clear_reader_owned()
    [all …]
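In these rwsem.c hits the owner word again mixes a task pointer with low-bit state: a writer stores itself directly, while a reader stores itself tagged with RWSEM_READER_OWNED and carries the RWSEM_NONSPINNABLE bit over from the previous value. A sketch of that encoding under invented flag values; only the two flag roles echo the kernel names.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define READER_OWNED  (1UL << 0)   /* ~ RWSEM_READER_OWNED */
#define NONSPINNABLE  (1UL << 1)   /* ~ RWSEM_NONSPINNABLE */
#define OWNER_FLAGS   (READER_OWNED | NONSPINNABLE)

struct fake_task { char comm[16]; };
struct fake_rwsem { uintptr_t owner; };

static void fake_set_writer_owner(struct fake_rwsem *sem, struct fake_task *t)
{
    sem->owner = (uintptr_t)t;                 /* like rwsem_set_owner() */
}

static void fake_set_reader_owned(struct fake_rwsem *sem, struct fake_task *t)
{
    /* like __rwsem_set_reader_owned(): keep NONSPINNABLE, mark reader-owned */
    sem->owner = (uintptr_t)t | READER_OWNED | (sem->owner & NONSPINNABLE);
}

static struct fake_task *fake_owner(const struct fake_rwsem *sem)
{
    return (struct fake_task *)(sem->owner & ~OWNER_FLAGS);
}

int main(void)
{
    static struct fake_task r = { "reader" }, w = { "writer" };
    struct fake_rwsem sem = { NONSPINNABLE };  /* pretend spinning was disabled earlier */

    fake_set_reader_owned(&sem, &r);
    assert(fake_owner(&sem) == &r);
    assert(sem.owner & READER_OWNED);
    assert(sem.owner & NONSPINNABLE);          /* survived the reader update */

    fake_set_writer_owner(&sem, &w);
    printf("writer owner: %s\n", fake_owner(&sem)->comm);
    return 0;
}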
rtmutex_common.h
    138  unsigned long owner = (unsigned long) READ_ONCE(lock->owner);   in rt_mutex_owner() local
    140  return (struct task_struct *) (owner & ~RT_MUTEX_HAS_WAITERS);   in rt_mutex_owner()
    162  lock->owner = NULL;   in __rt_mutex_base_init()
rwbase_rt.c
    153  struct task_struct *owner;   in __rwbase_read_unlock() local
    163  owner = rt_mutex_owner(rtm);   in __rwbase_read_unlock()
    164  if (owner)   in __rwbase_read_unlock()
    165  rt_mutex_wake_q_add_task(&wqh, owner, state);   in __rwbase_read_unlock()
ww_mutex.h
    70  return atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS;   in __ww_mutex_has_waiters()
    304  struct task_struct *owner = __ww_mutex_owner(lock);   in __ww_mutex_wound() local
    321  if (!owner)   in __ww_mutex_wound()
    333  if (owner != current)   in __ww_mutex_wound()
    334  wake_up_process(owner);   in __ww_mutex_wound()
/kernel/bpf/
bpf_local_storage.c
    27  static int mem_charge(struct bpf_local_storage_map *smap, void *owner, u32 size)   in mem_charge() argument
    34  return map->ops->map_local_storage_charge(smap, owner, size);   in mem_charge()
    37  static void mem_uncharge(struct bpf_local_storage_map *smap, void *owner,   in mem_uncharge() argument
    43  map->ops->map_local_storage_uncharge(smap, owner, size);   in mem_uncharge()
    47  owner_storage(struct bpf_local_storage_map *smap, void *owner)   in owner_storage() argument
    51  return map->ops->map_owner_storage_ptr(owner);   in owner_storage()
    75  bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,   in bpf_selem_alloc() argument
    80  if (charge_mem && mem_charge(smap, owner, smap->elem_size))   in bpf_selem_alloc()
    92  mem_uncharge(smap, owner, smap->elem_size);   in bpf_selem_alloc()
    123  void *owner;   in bpf_selem_unlink_storage_nolock() local
    [all …]
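Here owner is deliberately a void *: the generic local-storage code charges memory against it and looks up its storage pointer only through the map's ops, and the two map-type files listed next cast it back to a struct inode or struct task_struct. A sketch of that ops-table indirection with invented types and callbacks:

#include <stdio.h>

struct fake_inode { long nr_bytes; };
struct fake_task  { long nr_bytes; };

struct fake_storage_ops {
    int (*charge)(void *owner, unsigned int size);   /* ~ map_local_storage_charge */
};

/* inode backend: recover the concrete type from void *owner */
static int inode_charge(void *owner, unsigned int size)
{
    struct fake_inode *inode = owner;
    inode->nr_bytes += size;
    return 0;
}

/* task backend: same shape, different concrete owner type */
static int task_charge(void *owner, unsigned int size)
{
    struct fake_task *task = owner;
    task->nr_bytes += size;
    return 0;
}

/* generic layer: knows nothing about what the owner actually is */
static int mem_charge(const struct fake_storage_ops *ops, void *owner, unsigned int size)
{
    return ops->charge(owner, size);
}

int main(void)
{
    struct fake_storage_ops inode_ops = { .charge = inode_charge };
    struct fake_storage_ops task_ops  = { .charge = task_charge };
    struct fake_inode inode = { 0 };
    struct fake_task  task  = { 0 };

    mem_charge(&inode_ops, &inode, 64);
    mem_charge(&task_ops,  &task,  128);
    printf("inode charged %ld, task charged %ld\n", inode.nr_bytes, task.nr_bytes);
    return 0;
}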
bpf_inode_storage.c
    25  inode_storage_ptr(void *owner)   in inode_storage_ptr() argument
    27  struct inode *inode = owner;   in inode_storage_ptr()
bpf_task_storage.c
    49  static struct bpf_local_storage __rcu **task_storage_ptr(void *owner)   in task_storage_ptr() argument
    51  struct task_struct *task = owner;   in task_storage_ptr()
inode.c
    694  if (!try_module_get(bpf_preload_ops->owner)) {   in bpf_preload_mod_get()
    705  module_put(bpf_preload_ops->owner);   in bpf_preload_mod_put()
    799  .owner = THIS_MODULE,
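This entry (and every ".owner = THIS_MODULE" hit in the listing) is the module-pinning idiom: an ops structure records which module provides it, and callers bracket any use of the ops with try_module_get()/module_put() on that owner so the provider cannot be unloaded mid-call. A sketch of the idea with a plain reference counter standing in for the module refcount; all names are illustrative.

#include <stdbool.h>
#include <stdio.h>

struct fake_module { int refcnt; bool live; };

struct fake_ops {
    int (*run)(void);
    struct fake_module *owner;            /* ~ .owner = THIS_MODULE */
};

static bool fake_try_module_get(struct fake_module *mod)   /* ~ try_module_get() */
{
    if (!mod->live)
        return false;                     /* module is going away: refuse new users */
    mod->refcnt++;
    return true;
}

static void fake_module_put(struct fake_module *mod)       /* ~ module_put() */
{
    mod->refcnt--;
}

static int do_work(void) { return 42; }

static int call_ops(struct fake_ops *ops)
{
    int ret;

    if (!fake_try_module_get(ops->owner))  /* pin the provider before calling in */
        return -1;
    ret = ops->run();
    fake_module_put(ops->owner);           /* drop the pin when done */
    return ret;
}

int main(void)
{
    struct fake_module provider = { .refcnt = 0, .live = true };
    struct fake_ops ops = { .run = do_work, .owner = &provider };
    int ret = call_ops(&ops);

    printf("ops returned %d, refcnt back to %d\n", ret, provider.refcnt);
    provider.live = false;
    printf("after unload starts, call_ops() = %d\n", call_ops(&ops));
    return 0;
}

The owner field is the handle through which the core pins the providing module; the bare ".owner = THIS_MODULE" initializers elsewhere in this listing exist for the same reason.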
/kernel/
audit_tree.c
    35  struct audit_tree *owner;   member
    133  if (chunk->owners[i].owner)   in free_chunk()
    134  put_tree(chunk->owners[i].owner);   in free_chunk()
    265  if (chunk->owners[n].owner == tree)   in audit_tree_match()
    295  struct audit_tree *owner;   in replace_chunk() local
    300  list_for_each_entry(owner, &new->trees, same_root)   in replace_chunk()
    301  owner->root = new;   in replace_chunk()
    303  if (!old->owners[j].owner) {   in replace_chunk()
    307  owner = old->owners[j].owner;   in replace_chunk()
    308  new->owners[i].owner = owner;   in replace_chunk()
    [all …]
user_namespace.c
    85  kuid_t owner = new->euid;   in create_user_ns() local
    94  ucounts = inc_user_namespaces(parent_ns, owner);   in create_user_ns()
    113  if (!kuid_has_mapping(parent_ns, owner) ||   in create_user_ns()
    136  ns->owner = owner;   in create_user_ns()
    1173  uid_eq(ns->owner, cred->euid)) {   in new_idmap_permitted()
    1373  struct user_namespace *owner, *p;   in ns_get_owner() local
    1376  owner = p = ns->ops->owner(ns);   in ns_get_owner()
    1385  return &get_user_ns(owner)->ns;   in ns_get_owner()
    1399  .owner = userns_owner,
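In user_namespace.c the owner is not a lock holder but a kuid_t: create_user_ns() records the creator's effective uid, and later checks such as new_idmap_permitted() compare the caller's euid against it. A toy sketch of that rule, using plain uid_t instead of kuid_t and invented helper names:

#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>

struct fake_user_ns {
    uid_t owner;                  /* euid of the task that created the namespace */
};

static void fake_create_user_ns(struct fake_user_ns *ns, uid_t creator_euid)
{
    ns->owner = creator_euid;     /* mirrors ns->owner = owner in create_user_ns() */
}

/* mirrors the uid_eq(ns->owner, cred->euid) test in new_idmap_permitted() */
static bool fake_ns_owner_may_map(const struct fake_user_ns *ns, uid_t caller_euid)
{
    return ns->owner == caller_euid;
}

int main(void)
{
    struct fake_user_ns ns;

    fake_create_user_ns(&ns, 1000);
    printf("uid 1000 allowed: %d\n", fake_ns_owner_may_map(&ns, 1000));
    printf("uid 1001 allowed: %d\n", fake_ns_owner_may_map(&ns, 1001));
    return 0;
}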
gen_kheaders.sh
    88  --owner=0 --group=0 --numeric-owner --no-recursion \
pid_namespace.c
    453  .owner = pidns_owner,
    464  .owner = pidns_owner,
/kernel/futex/
pi.c
    26  pi_state->owner = NULL;   in refill_pi_state_cache()
    48  struct task_struct *old_owner = pi_state->owner;   in pi_state_update_owner()
    63  pi_state->owner = new_owner;   in pi_state_update_owner()
    89  if (pi_state->owner) {   in put_pi_state()
    106  pi_state->owner = NULL;   in put_pi_state()
    257  if (!pi_state->owner) {   in attach_to_pi_state()
    285  if (!pi_state->owner)   in attach_to_pi_state()
    294  if (pid != task_pid_vnr(pi_state->owner))   in attach_to_pi_state()
    402  pi_state->owner = p;   in __attach_to_pi_owner()
    691  oldowner = pi_state->owner;   in __fixup_pi_state_owner()
    [all …]
core.c
    655  pid_t owner;   in handle_futex_death() local
    702  owner = uval & FUTEX_TID_MASK;   in handle_futex_death()
    704  if (pending_op && !pi && !owner) {   in handle_futex_death()
    709  if (owner != task_pid_vnr(curr))   in handle_futex_death()
    1009  WARN_ON(pi_state->owner != curr);   in exit_pi_state_list()
    1012  pi_state->owner = NULL;   in exit_pi_state_list()
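handle_futex_death() above is decoding the user-space futex word: the owner's TID sits in FUTEX_TID_MASK, with FUTEX_WAITERS and FUTEX_OWNER_DIED as the top two bits. A small decoder sketch; the three constants mirror the UAPI definitions in <linux/futex.h>, while the helper itself is only for illustration.

#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>

#define FUTEX_WAITERS     0x80000000u   /* somebody is blocked on the futex */
#define FUTEX_OWNER_DIED  0x40000000u   /* robust-futex owner exited */
#define FUTEX_TID_MASK    0x3fffffffu   /* owner TID, as in: uval & FUTEX_TID_MASK */

static void decode_futex_word(uint32_t uval, pid_t current_tid)
{
    pid_t owner = (pid_t)(uval & FUTEX_TID_MASK);

    printf("owner tid %d%s%s%s\n", (int)owner,
           (uval & FUTEX_WAITERS)    ? ", has waiters" : "",
           (uval & FUTEX_OWNER_DIED) ? ", owner died"  : "",
           (owner == current_tid)    ? ", owned by us" : "");
}

int main(void)
{
    decode_futex_word(1234, 1234);                  /* uncontended, owned by caller */
    decode_futex_word(FUTEX_WAITERS | 5678, 1234);  /* contended, owned by someone else */
    return 0;
}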
/kernel/irq/
irqdesc.c
    104  const struct cpumask *affinity, struct module *owner)   in desc_set_defaults() argument
    124  desc->owner = owner;   in desc_set_defaults()
    392  struct module *owner)   in alloc_desc() argument
    413  desc_set_defaults(irq, desc, node, affinity, owner);   in alloc_desc()
    472  struct module *owner)   in alloc_descs() argument
    499  desc = alloc_desc(start + i, node, flags, mask, owner);   in alloc_descs()
    604  struct module *owner)   in alloc_descs() argument
    611  desc->owner = owner;   in alloc_descs()
    789  struct module *owner, const struct irq_affinity_desc *affinity)   in __irq_alloc_descs() argument
    822  ret = alloc_descs(start, cnt, node, affinity, owner);   in __irq_alloc_descs()
devres.c
    179  unsigned int cnt, int node, struct module *owner,   in __devm_irq_alloc_descs() argument
    189  base = __irq_alloc_descs(irq, from, cnt, node, owner, affinity);   in __devm_irq_alloc_descs()
/kernel/module/
main.c
    276  struct module *owner,   in find_exported_symbol_in_section() argument
    289  fsa->owner = owner;   in find_exported_symbol_in_section()
    787  module_put(fsa.owner);   in __symbol_put()
    1042  static bool inherit_taint(struct module *mod, struct module *owner, const char *name)   in inherit_taint() argument
    1044  if (!owner || !test_bit(TAINT_PROPRIETARY_MODULE, &owner->taints))   in inherit_taint()
    1049  mod->name, name, owner->name);   in inherit_taint()
    1055  mod->name, name, owner->name);   in inherit_taint()
    1087  if (!inherit_taint(mod, fsa.owner, name)) {   in resolve_symbol()
    1113  fsa.owner && fsa.owner->sig_ok) {   in resolve_symbol()
    1118  err = ref_module(mod, fsa.owner);   in resolve_symbol()
    [all …]
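In module/main.c the owner is the module that exports a symbol: resolve_symbol() records it via fsa.owner, takes a reference with ref_module() so the exporter stays loaded, and inherit_taint() propagates a proprietary-module taint to the importer. A sketch of that ownership bookkeeping with invented structures and fields:

#include <stdbool.h>
#include <stdio.h>

struct fake_module {
    const char *name;
    int refcnt;
    bool proprietary;     /* ~ TAINT_PROPRIETARY_MODULE set in the owner's taints */
    bool tainted;
};

struct fake_symbol {
    const char *name;
    struct fake_module *owner;    /* ~ fsa.owner: the module exporting this symbol */
};

static void fake_resolve_symbol(struct fake_module *importer, struct fake_symbol *sym)
{
    if (sym->owner) {
        /* ~ inherit_taint(): importing from a proprietary module taints us too */
        if (sym->owner->proprietary) {
            importer->tainted = true;
            printf("%s: symbol %s comes from proprietary module %s\n",
                   importer->name, sym->name, sym->owner->name);
        }
        sym->owner->refcnt++;     /* ~ ref_module(mod, fsa.owner): keep the exporter loaded */
    }
}

int main(void)
{
    struct fake_module exporter = { .name = "exporter", .proprietary = true };
    struct fake_module importer = { .name = "importer" };
    struct fake_symbol sym = { .name = "do_thing", .owner = &exporter };

    fake_resolve_symbol(&importer, &sym);
    printf("exporter refcnt=%d, importer tainted=%d\n",
           exporter.refcnt, importer.tainted);
    return 0;
}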
/kernel/time/
posix-clock.c
    150  .owner = THIS_MODULE,
    175  clk->cdev.owner = clk->ops.owner;   in posix_clock_register()
clockevents.c
    369  if (!try_module_get(dev->owner))   in clockevents_replace()
    373  module_put(newdev->owner);   in clockevents_replace()
    576  module_put(old->owner);   in clockevents_exchange_device()
namespace.c
    448  .owner = timens_owner,
    458  .owner = timens_owner,
/kernel/bpf/preload/
bpf_preload.h
    12  struct module *owner;   member
bpf_preload_kern.c
    31  .owner = THIS_MODULE,