/kernel/
cred.c
  cred_alloc_blank():
    218  struct cred *new;    (local)
    220  new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
    221  if (!new)
    224  atomic_set(&new->usage, 1);
    226  new->magic = CRED_MAGIC;
    229  if (security_cred_alloc_blank(new, GFP_KERNEL_ACCOUNT) < 0)
    232  return new;
    235  abort_creds(new);
  prepare_creds():
    257  struct cred *new;    (local)
    261  new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
  [all …]
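These hits follow the kernel's copy-on-write credential discipline: build a
private copy with prepare_creds(), mutate it, then install it with
commit_creds() or discard it with abort_creds(). A minimal sketch of that
pattern, assuming a hypothetical helper name and a caller-supplied fsuid:

        #include <linux/cred.h>

        /* Hypothetical helper: callers never edit current->cred in place. */
        static int example_set_fsuid(kuid_t fsuid)
        {
                struct cred *new;

                new = prepare_creds();  /* writable copy of current's creds */
                if (!new)
                        return -ENOMEM;

                new->fsuid = fsuid;     /* mutate only the private copy */

                /*
                 * commit_creds() installs the copy and consumes the
                 * reference; error paths call abort_creds(new) instead.
                 */
                return commit_creds(new);
        }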
resource.c
  __request_resource():
    185  static struct resource * __request_resource(struct resource *root, struct resource *new)    (argument)
    187  resource_size_t start = new->start;
    188  resource_size_t end = new->end;
    201  new->sibling = tmp;
    202  *p = new;
    203  new->parent = root;
  request_resource_conflict():
    279  struct resource *request_resource_conflict(struct resource *root, struct resource *new)    (argument)
    284  conflict = __request_resource(root, new);
  request_resource():
    296  int request_resource(struct resource *root, struct resource *new)    (argument)
    300  conflict = request_resource_conflict(root, new);
  [all …]
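__request_resource() walks the root's sorted sibling list, links the new
resource in place, and returns any conflicting entry. A sketch of how a
caller typically claims a window through the public wrapper; the address
range and names are hypothetical:

        #include <linux/ioport.h>

        static struct resource example_window = {   /* hypothetical range */
                .name  = "example-mmio",
                .start = 0xfed40000,
                .end   = 0xfed40fff,
                .flags = IORESOURCE_MEM,
        };

        static int example_claim(void)
        {
                /* Returns -EBUSY if the range overlaps an existing one. */
                return request_resource(&iomem_resource, &example_window);
        }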
sys.c
  __sys_setregid():
    365  struct cred *new;    (local)
    377  new = prepare_creds();
    378  if (!new)
    387  new->gid = krgid;
    396  new->egid = kegid;
    403  new->sgid = new->egid;
    404  new->fsgid = new->egid;
    406  retval = security_task_fix_setgid(new, old, LSM_SETID_RE);
    410  return commit_creds(new);
    413  abort_creds(new);
  [all …]
user.c
  alloc_uid():
    180  struct user_struct *up, *new;    (local)
    187  new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
    188  if (!new)
    191  new->uid = uid;
    192  refcount_set(&new->__count, 1);
    193  ratelimit_state_init(&new->ratelimit, HZ, 100);
    194  ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE);
    203  kmem_cache_free(uid_cachep, new);
    205  uid_hash_insert(new, hashent);
    206  up = new;
ucount.c
  get_ucounts():
    131  struct ucounts *ucounts, *new;    (local)
    138  new = kzalloc(sizeof(*new), GFP_KERNEL);
    139  if (!new)
    142  new->ns = ns;
    143  new->uid = uid;
    144  new->count = 0;
    149  kfree(new);
    151  hlist_add_head(&new->node, hashent);
    152  ucounts = new;
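alloc_uid() above and get_ucounts() here share one shape: allocate with
GFP_KERNEL while no lock is held, re-search the hash under the lock, and
free the fresh object if another CPU inserted first. A generic sketch of the
pattern; the type, lock, and lookup helpers are all hypothetical:

        #include <linux/slab.h>
        #include <linux/spinlock.h>

        struct example_obj {
                int key;
                /* ... */
        };

        static DEFINE_SPINLOCK(example_lock);

        /* Hypothetical hash helpers, assumed to exist elsewhere. */
        struct example_obj *example_find(int key);
        void example_insert(struct example_obj *obj);

        static struct example_obj *example_get(int key)
        {
                struct example_obj *obj, *new;

                spin_lock(&example_lock);
                obj = example_find(key);
                spin_unlock(&example_lock);
                if (obj)
                        return obj;

                /* May sleep, so allocate with the lock dropped. */
                new = kzalloc(sizeof(*new), GFP_KERNEL);
                if (!new)
                        return NULL;
                new->key = key;

                spin_lock(&example_lock);
                obj = example_find(key);        /* lost the race? */
                if (obj) {
                        kfree(new);             /* loser frees its copy */
                } else {
                        example_insert(new);
                        obj = new;
                }
                spin_unlock(&example_lock);
                return obj;
        }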
umh.c
  call_usermodehelper_exec_async():
    68   struct cred *new;    (local)
    90   new = prepare_kernel_cred(current);
    91   if (!new)
    95   new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);
    96   new->cap_inheritable = cap_intersect(usermodehelper_inheritable,
    97           new->cap_inheritable);
    101  retval = sub_info->init(sub_info, new);
    103  abort_creds(new);
    108  commit_creds(new);
  call_usermodehelper_setup():
    358  int (*init)(struct subprocess_info *info, struct cred *new),    (argument)
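The init() callback whose prototype shows up at line 358 is passed in via
call_usermodehelper_setup() and runs against the not-yet-committed creds,
which is where the capability clamping above takes effect. A sketch of a
typical caller; the helper path and names are hypothetical:

        #include <linux/umh.h>

        static int example_run_helper(void)
        {
                char *argv[] = { "/sbin/example-helper", NULL };
                char *envp[] = { "HOME=/", "PATH=/sbin:/bin", NULL };
                struct subprocess_info *info;

                info = call_usermodehelper_setup(argv[0], argv, envp,
                                                 GFP_KERNEL, NULL /* init */,
                                                 NULL /* cleanup */, NULL);
                if (!info)
                        return -ENOMEM;

                /* Blocks until the helper process exits. */
                return call_usermodehelper_exec(info, UMH_WAIT_PROC);
        }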
groups.c
  set_groups():
    123  void set_groups(struct cred *new, struct group_info *group_info)    (argument)
    125  put_group_info(new->group_info);
    127  new->group_info = group_info;
  set_current_groups():
    141  struct cred *new;    (local)
    143  new = prepare_creds();
    144  if (!new)
    147  set_groups(new, group_info);
    148  return commit_creds(new);
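set_current_groups() expects a populated, sorted group_info, and set_groups()
takes its own reference to it. A minimal sketch of a caller, with
hypothetical GID values:

        #include <linux/cred.h>

        static int example_set_two_groups(void)
        {
                struct group_info *gi;
                int err;

                gi = groups_alloc(2);
                if (!gi)
                        return -ENOMEM;

                gi->gid[0] = KGIDT_INIT(100);   /* hypothetical GIDs */
                gi->gid[1] = KGIDT_INIT(101);
                groups_sort(gi);        /* must be sorted before use */

                err = set_current_groups(gi);
                put_group_info(gi);     /* drop our allocation reference */
                return err;
        }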
tracepoint.c
  func_add():
    183  struct tracepoint_func *old, *new;    (local)
    207  new = allocate_probes(nr_probes + 2 - stub_funcs);
    208  if (new == NULL)
    221  new[probes++] = old[nr_probes];
    231  memcpy(new, old, nr_probes * sizeof(struct tracepoint_func));
    234  memcpy(new, old, pos * sizeof(struct tracepoint_func));
    236  memcpy(new + pos + 1, old + pos,
    241  new[pos] = *tp_func;
    242  new[nr_probes + 1].func = NULL;
    243  *funcs = new;
  [all …]
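func_add() builds a replacement array instead of editing in place, so RCU
readers always see either the old or the new probe array whole. The
copy-and-splice step, isolated as a sketch with a stand-in element type:

        #include <linux/slab.h>
        #include <linux/string.h>

        struct example_func {   /* stand-in for struct tracepoint_func */
                void *func;
                void *data;
        };

        static struct example_func *
        example_insert_at(const struct example_func *old, int nr_probes,
                          int pos, struct example_func item)
        {
                /* nr_probes entries + the new one + a NULL terminator. */
                struct example_func *new =
                        kcalloc(nr_probes + 2, sizeof(*new), GFP_KERNEL);

                if (!new)
                        return NULL;
                memcpy(new, old, pos * sizeof(*new));
                memcpy(new + pos + 1, old + pos,
                       (nr_probes - pos) * sizeof(*new));
                new[pos] = item;
                new[nr_probes + 1].func = NULL; /* as in func_add() */
                return new;
        }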
capability.c
  SYSCALL_DEFINE2():
    227  struct cred *new;    (local)
    265  new = prepare_creds();
    266  if (!new)
    269  ret = security_capset(new, current_cred(),
    274  audit_log_capset(new, current_cred());
    276  return commit_creds(new);
    279  abort_creds(new);
audit_watch.c
  audit_dupe_watch():
    205  struct audit_watch *new;    (local)
    211  new = audit_init_watch(path);
    212  if (IS_ERR(new)) {
    217  new->dev = old->dev;
    218  new->ino = old->ino;
    220  new->parent = old->parent;
    223  return new;
  audit_dupe_exe():
    505  int audit_dupe_exe(struct audit_krule *new, struct audit_krule *old)    (argument)
    514  audit_mark = audit_alloc_mark(new, pathname, strlen(pathname));
    519  new->exe = audit_mark;
audit_tree.c
  replace_chunk():
    293  static void replace_chunk(struct audit_chunk *new, struct audit_chunk *old)    (argument)
    298  new->key = old->key;
    299  list_splice_init(&old->trees, &new->trees);
    300  list_for_each_entry(owner, &new->trees, same_root)
    301  owner->root = new;
    308  new->owners[i].owner = owner;
    309  new->owners[i].index = old->owners[j].index - j + i;
    313  list_replace_init(&old->owners[j].list, &new->owners[i].list);
    315  replace_mark_chunk(old->mark, new);
    322  list_replace_rcu(&old->hash, &new->hash);
  [all …]
auditfilter.c
  audit_dupe_rule():
    815  struct audit_krule *new;    (local)
    823  new = &entry->rule;
    824  new->flags = old->flags;
    825  new->pflags = old->pflags;
    826  new->listnr = old->listnr;
    827  new->action = old->action;
    829  new->mask[i] = old->mask[i];
    830  new->prio = old->prio;
    831  new->buflen = old->buflen;
    832  new->inode_f = old->inode_f;
  [all …]
pid_namespace.c
  pidns_install():
    403  struct pid_namespace *ancestor, *new = to_pid_ns(ns);    (local)
    405  if (!ns_capable(new->user_ns, CAP_SYS_ADMIN) ||
    417  if (new->level < active->level)
    420  ancestor = new;
    427  nsproxy->pid_ns_for_children = get_pid_ns(new);
pid.c
  __change_pid():
    338  struct pid *new)    (argument)
    347  *pid_ptr = new;
  transfer_pid():
    388  void transfer_pid(struct task_struct *old, struct task_struct *new,    (argument)
    392  new->thread_pid = old->thread_pid;
    393  hlist_replace_rcu(&old->pid_links[type], &new->pid_links[type]);
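hlist_replace_rcu() here, like list_replace_rcu() in audit_tree.c above,
guarantees that readers traversing under rcu_read_lock() see either the old
or the new node, never a gap. A minimal sketch with a hypothetical entry
type; the write side is assumed to hold the list's update-side lock:

        #include <linux/rculist.h>
        #include <linux/slab.h>

        struct example_entry {
                struct hlist_node link;
                struct rcu_head rcu;
                int data;
        };

        /* Caller holds the write-side lock protecting the list. */
        static void example_replace(struct example_entry *old,
                                    struct example_entry *new)
        {
                new->data = old->data;
                hlist_replace_rcu(&old->link, &new->link);
                /* Readers may still hold 'old': defer the free. */
                kfree_rcu(old, rcu);
        }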
audit.c
  audit_log_config_change():
    386  static int audit_log_config_change(char *function_name, u32 new, u32 old,    (argument)
    395  audit_log_format(ab, "op=set %s=%u old=%u ", function_name, new, old);
  audit_do_config_change():
    405  static int audit_do_config_change(char *function_name, u32 *to_change, u32 new)    (argument)
    417  rc = audit_log_config_change(function_name, new, old, allow_changes);
    424  *to_change = new;
  audit_receive_msg():
    1443  char *old, *new;    (local)
    1456  new = audit_unpack_string(&bufp, &msglen, sizes[1]);
    1457  if (IS_ERR(new)) {
    1458  err = PTR_ERR(new);
    1463  err = audit_tag_tree(old, new);
  [all …]
/kernel/irq/
manage.c
  irq_setup_forced_threading():
    1265  static int irq_setup_forced_threading(struct irqaction *new)    (argument)
    1269  if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
    1276  if (new->handler == irq_default_primary_handler)
    1279  new->flags |= IRQF_ONESHOT;
    1286  if (new->handler && new->thread_fn) {
    1288  new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
    1289  if (!new->secondary)
    1291  new->secondary->handler = irq_forced_secondary_handler;
    1292  new->secondary->thread_fn = new->thread_fn;
    1293  new->secondary->dev_id = new->dev_id;
  [all …]
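The handler/thread_fn pair that irq_setup_forced_threading() splits arrives
through request_threaded_irq(). A sketch of a typical registration; the
device name and handlers are hypothetical:

        #include <linux/interrupt.h>

        static irqreturn_t example_hardirq(int irq, void *dev_id)
        {
                /* Quick hard-irq work only; defer the rest. */
                return IRQ_WAKE_THREAD;
        }

        static irqreturn_t example_thread_fn(int irq, void *dev_id)
        {
                /* Runs in the irq thread; sleeping is allowed here. */
                return IRQ_HANDLED;
        }

        static int example_setup_irq(unsigned int irq, void *dev)
        {
                return request_threaded_irq(irq, example_hardirq,
                                            example_thread_fn, IRQF_ONESHOT,
                                            "example-dev", dev);
        }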
/kernel/bpf/
local_storage.c
  cgroup_storage_insert():
    107  struct rb_node **new = &(root->rb_node), *parent = NULL;    (local)
    109  while (*new) {
    112  this = container_of(*new, struct bpf_cgroup_storage, node);
    114  parent = *new;
    117  new = &((*new)->rb_left);
    120  new = &((*new)->rb_right);
    127  rb_link_node(&storage->node, parent, new);
  cgroup_storage_update_elem():
    149  struct bpf_storage_buffer *new;    (local)
    168  new = kmalloc_node(sizeof(struct bpf_storage_buffer) +
    172  if (!new)
  [all …]
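cgroup_storage_insert() is the canonical <linux/rbtree.h> insertion idiom,
which recurs in trace_stat.c and swap.c below: walk to the leaf slot,
rb_link_node(), then rb_insert_color() to rebalance. A generic sketch with a
hypothetical node type:

        #include <linux/rbtree.h>

        struct example_node {           /* hypothetical */
                struct rb_node node;
                unsigned long key;
        };

        static bool example_insert(struct rb_root *root,
                                   struct example_node *data)
        {
                struct rb_node **new = &root->rb_node, *parent = NULL;

                /* Walk down to the leaf slot where the key belongs. */
                while (*new) {
                        struct example_node *this =
                                rb_entry(*new, struct example_node, node);

                        parent = *new;
                        if (data->key < this->key)
                                new = &(*new)->rb_left;
                        else if (data->key > this->key)
                                new = &(*new)->rb_right;
                        else
                                return false;   /* duplicate key */
                }

                /* Link the node, then rebalance the tree. */
                rb_link_node(&data->node, parent, new);
                rb_insert_color(&data->node, root);
                return true;
        }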
dispatcher.c
  bpf_dispatcher_update():
    107  void *old, *new;    (local)
    119  new = d->num_progs ? d->image + noff : NULL;
    120  if (new) {
    121  if (bpf_dispatcher_prepare(d, new))
    125  err = bpf_arch_text_poke(d->func, BPF_MOD_JUMP, old, new);
    126  if (err || !new)
/kernel/trace/
trace_stat.c
  insert_stat():
    77   struct rb_node **new = &(root->rb_node), *parent = NULL;    (local)
    89   while (*new) {
    93   this = container_of(*new, struct stat_node, node);
    96   parent = *new;
    98   new = &((*new)->rb_left);
    100  new = &((*new)->rb_right);
    103  rb_link_node(&data->node, parent, new);
/kernel/rcu/
rcu.h
  rcu_seq_completed_gp():
    123  static inline bool rcu_seq_completed_gp(unsigned long old, unsigned long new)    (argument)
    125  return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);
  rcu_seq_new_gp():
    131  static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new)    (argument)
    134  new);
  rcu_seq_diff():
    141  static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old)    (argument)
    145  if (old == new)
    151  rnd_diff = (new & ~RCU_SEQ_STATE_MASK) -
    153  ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK));
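rcu_seq_completed_gp() masks off the low-order grace-period state bits and
compares with ULONG_CMP_LT(), which stays correct across counter wraparound
by testing the sign of the unsigned difference. A sketch of the idea in
isolation:

        #include <linux/kernel.h>

        /* True iff a precedes b modulo 2^BITS_PER_LONG. */
        static inline bool example_seq_lt(unsigned long a, unsigned long b)
        {
                return ULONG_MAX / 2 < a - b;
        }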
/kernel/locking/
rwsem.c
  rwsem_try_write_lock():
    563  long count, new;    (local)
    574  new = count;
    580  new |= RWSEM_FLAG_HANDOFF;
    582  new |= RWSEM_WRITER_LOCKED;
    583  new &= ~RWSEM_FLAG_HANDOFF;
    586  new &= ~RWSEM_FLAG_WAITERS;
    588  } while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));
    594  if (new & RWSEM_FLAG_HANDOFF)
  rwsem_spin_on_owner():
    716  struct task_struct *new, *owner;    (local)
    738  new = rwsem_owner_flags(sem, &new_flags);
  [all …]
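rwsem_try_write_lock() recomputes 'new' from the freshest 'count' on every
pass: atomic_long_try_cmpxchg_acquire() updates 'count' in place when it
fails, so nothing is re-read by hand. The same loop shape drives xchg_tail()
and trylock_clear_pending() below. A generic sketch with a hypothetical
flag-setting helper:

        #include <linux/atomic.h>

        /* Atomically set 'flag' unless it is already set. */
        static bool example_set_flag(atomic_long_t *v, long flag)
        {
                long old = atomic_long_read(v);
                long new;

                do {
                        if (old & flag)
                                return false;   /* already set */
                        new = old | flag;
                        /* On failure, 'old' is refreshed in place. */
                } while (!atomic_long_try_cmpxchg_acquire(v, &old, new));

                return true;
        }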
qspinlock.c
  xchg_tail():
    222  u32 old, new, val = atomic_read(&lock->val);    (local)
    225  new = (val & _Q_LOCKED_PENDING_MASK) | tail;
    231  old = atomic_cmpxchg_relaxed(&lock->val, val, new);
qspinlock_paravirt.h
  trylock_clear_pending():
    136  int old, new;    (local)
    145  new = (val & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
    146  val = atomic_cmpxchg_acquire(&lock->val, old, new);
/kernel/cgroup/
rdma.c
  rdmacg_try_charge():
    265  s64 new;    (local)
    284  new = rpool->resources[index].usage + 1;
    285  if (new > rpool->resources[index].max) {
    289  rpool->resources[index].usage = new;
/kernel/power/
swap.c
  swsusp_extents_insert():
    132  struct rb_node **new = &(swsusp_extents.rb_node);    (local)
    137  while (*new) {
    138  ext = rb_entry(*new, struct swsusp_extent, node);
    139  parent = *new;
    146  new = &((*new)->rb_left);
    153  new = &((*new)->rb_right);
    166  rb_link_node(&ext->node, parent, new);