/kernel/ |
D | cred.c |
    221  struct cred *new;   in cred_alloc_blank() local
    223  new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);   in cred_alloc_blank()
    224  if (!new)   in cred_alloc_blank()
    227  atomic_set(&new->usage, 1);   in cred_alloc_blank()
    229  new->magic = CRED_MAGIC;   in cred_alloc_blank()
    231  if (security_cred_alloc_blank(new, GFP_KERNEL_ACCOUNT) < 0)   in cred_alloc_blank()
    234  return new;   in cred_alloc_blank()
    237  abort_creds(new);   in cred_alloc_blank()
    259  struct cred *new;   in prepare_creds() local
    263  new = kmem_cache_alloc(cred_jar, GFP_KERNEL);   in prepare_creds()
    [all …]
|
D | ucount.c |
    167  struct ucounts *ucounts, *new;   in alloc_ucounts() local
    175  new = kzalloc(sizeof(*new), GFP_KERNEL);   in alloc_ucounts()
    176  if (!new)   in alloc_ucounts()
    179  new->ns = ns;   in alloc_ucounts()
    180  new->uid = uid;   in alloc_ucounts()
    181  atomic_set(&new->count, 1);   in alloc_ucounts()
    186  kfree(new);   in alloc_ucounts()
    188  hlist_add_head(&new->node, hashent);   in alloc_ucounts()
    189  get_user_ns(new->ns);   in alloc_ucounts()
    191  return new;   in alloc_ucounts()
    [all …]
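The alloc_ucounts() hits above show a common kernel idiom: allocate the candidate object with no lock held, then repeat the lookup under the lock and free the copy if another thread inserted an entry for the same key first. A minimal userspace sketch of that idiom using pthreads and a plain linked list; every name below is illustrative, not kernel API:

    #include <pthread.h>
    #include <stdlib.h>

    struct ucount { unsigned int uid; struct ucount *next; };

    static struct ucount *table;
    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

    static struct ucount *lookup_locked(unsigned int uid)
    {
        for (struct ucount *u = table; u; u = u->next)
            if (u->uid == uid)
                return u;
        return NULL;
    }

    static struct ucount *get_ucount(unsigned int uid)
    {
        /* Allocate before taking the lock, as alloc_ucounts() does. */
        struct ucount *u, *new = calloc(1, sizeof(*new));

        if (!new)
            return NULL;
        new->uid = uid;

        pthread_mutex_lock(&table_lock);
        u = lookup_locked(uid);     /* re-check under the lock */
        if (u) {
            free(new);              /* lost the race: discard our copy */
        } else {
            new->next = table;      /* won the race: publish the entry */
            table = new;
            u = new;
        }
        pthread_mutex_unlock(&table_lock);
        return u;
    }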
|
D | sys.c |
    371  struct cred *new;   in __sys_setregid() local
    383  new = prepare_creds();   in __sys_setregid()
    384  if (!new)   in __sys_setregid()
    393  new->gid = krgid;   in __sys_setregid()
    402  new->egid = kegid;   in __sys_setregid()
    409  new->sgid = new->egid;   in __sys_setregid()
    410  new->fsgid = new->egid;   in __sys_setregid()
    412  retval = security_task_fix_setgid(new, old, LSM_SETID_RE);   in __sys_setregid()
    416  return commit_creds(new);   in __sys_setregid()
    419  abort_creds(new);   in __sys_setregid()
    [all …]
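The cred.c, sys.c, groups.c, umh.c, and capability.c hits all follow the same copy-on-write credential lifecycle: prepare_creds() duplicates the task's credentials, the copy is edited, and commit_creds() publishes it, with abort_creds() discarding it on every error path; live credentials are never modified in place. A minimal userspace model of that lifecycle; the fake_* names are hypothetical stand-ins, not the kernel functions:

    #include <stdlib.h>

    struct fake_cred { unsigned int gid, egid, sgid, fsgid; };

    static struct fake_cred task_cred;      /* the "current" credentials */

    static struct fake_cred *fake_prepare_creds(void)
    {
        struct fake_cred *new = malloc(sizeof(*new));

        if (new)
            *new = task_cred;   /* always edit a copy, never in place */
        return new;
    }

    static int fake_commit_creds(struct fake_cred *new)
    {
        task_cred = *new;       /* publish the edited copy */
        free(new);
        return 0;
    }

    static void fake_abort_creds(struct fake_cred *new)
    {
        free(new);              /* error path: drop the unpublished copy */
    }

    static int fake_setregid(unsigned int rgid, unsigned int egid)
    {
        struct fake_cred *new = fake_prepare_creds();

        if (!new)
            return -1;
        new->gid = rgid;
        new->egid = egid;
        new->sgid = new->fsgid = new->egid;  /* mirrors __sys_setregid() */
        return fake_commit_creds(new);       /* fake_abort_creds(new) on failure */
    }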
|
D | user.c |
    197  struct user_struct *up, *new;   in alloc_uid() local
    204  new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);   in alloc_uid()
    205  if (!new)   in alloc_uid()
    208  new->uid = uid;   in alloc_uid()
    209  refcount_set(&new->__count, 1);   in alloc_uid()
    210  trace_android_vh_alloc_uid(new);   in alloc_uid()
    211  if (user_epoll_alloc(new)) {   in alloc_uid()
    212  kmem_cache_free(uid_cachep, new);   in alloc_uid()
    215  ratelimit_state_init(&new->ratelimit, HZ, 100);   in alloc_uid()
    216  ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE);   in alloc_uid()
    [all …]
|
D | resource.c |
    171  static struct resource * __request_resource(struct resource *root, struct resource *new)   in __request_resource() argument
    173  resource_size_t start = new->start;   in __request_resource()
    174  resource_size_t end = new->end;   in __request_resource()
    187  new->sibling = tmp;   in __request_resource()
    188  *p = new;   in __request_resource()
    189  new->parent = root;   in __request_resource()
    265  struct resource *request_resource_conflict(struct resource *root, struct resource *new)   in request_resource_conflict()
    270  conflict = __request_resource(root, new);   in request_resource_conflict()
    282  int request_resource(struct resource *root, struct resource *new)   in request_resource() argument
    286  conflict = request_resource_conflict(root, new);   in request_resource()
    [all …]
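__request_resource() keeps a resource's children on a singly linked sibling list sorted by start address; insertion walks the list with a pointer-to-pointer cursor and returns the first overlapping entry as the conflict. A self-contained sketch of that walk; the struct and function names are illustrative, not the kernel's:

    #include <stddef.h>

    struct res { unsigned long start, end; struct res *sibling; };

    /* Insert 'new' into the start-sorted list at *head.  Returns NULL on
     * success, or the first overlapping entry on conflict, the same
     * contract as __request_resource(). */
    static struct res *insert_res(struct res **head, struct res *new)
    {
        struct res **p = head, *tmp;

        for (;;) {
            tmp = *p;
            if (!tmp || tmp->start > new->end) {    /* found the gap */
                new->sibling = tmp;
                *p = new;
                return NULL;
            }
            p = &tmp->sibling;
            if (tmp->end >= new->start)             /* ranges overlap */
                return tmp;
        }
    }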
|
D | groups.c |
    118  void set_groups(struct cred *new, struct group_info *group_info)   in set_groups() argument
    120  put_group_info(new->group_info);   in set_groups()
    122  new->group_info = group_info;   in set_groups()
    136  struct cred *new;   in set_current_groups() local
    140  new = prepare_creds();   in set_current_groups()
    141  if (!new)   in set_current_groups()
    146  set_groups(new, group_info);   in set_current_groups()
    148  retval = security_task_fix_setgroups(new, old);   in set_current_groups()
    152  return commit_creds(new);   in set_current_groups()
    155  abort_creds(new);   in set_current_groups()
|
D | umh.c |
    70  struct cred *new;   in call_usermodehelper_exec_async() local
    92  new = prepare_kernel_cred(current);   in call_usermodehelper_exec_async()
    93  if (!new)   in call_usermodehelper_exec_async()
    97  new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);   in call_usermodehelper_exec_async()
    98  new->cap_inheritable = cap_intersect(usermodehelper_inheritable,   in call_usermodehelper_exec_async()
    99  new->cap_inheritable);   in call_usermodehelper_exec_async()
    103  retval = sub_info->init(sub_info, new);   in call_usermodehelper_exec_async()
    105  abort_creds(new);   in call_usermodehelper_exec_async()
    110  commit_creds(new);   in call_usermodehelper_exec_async()
    361  int (*init)(struct subprocess_info *info, struct cred *new),   in call_usermodehelper_setup() argument
|
D | tracepoint.c |
    183  struct tracepoint_func *old, *new;   in func_add() local
    205  new = allocate_probes(nr_probes + 2);   in func_add()
    206  if (new == NULL)   in func_add()
    216  new[nr_probes++] = old[iter_probes];   in func_add()
    225  new[pos] = *tp_func;   in func_add()
    226  new[nr_probes].func = NULL;   in func_add()
    227  *funcs = new;   in func_add()
    236  struct tracepoint_func *old, *new;   in func_remove() local
    267  new = allocate_probes(nr_probes - nr_del + 1);   in func_remove()
    268  if (new) {   in func_remove()
    [all …]
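func_add() never edits the live probe array: it allocates a replacement two slots larger (one for the new probe, one for the NULL terminator), copies the old entries across, and publishes the new array with a single pointer store, leaving the old array for RCU to reclaim after readers drain. A userspace sketch of just the copy-then-publish step, with reclamation elided and illustrative names:

    #include <stdlib.h>

    typedef void (*probe_fn)(void *data);

    /* Return a copy of the NULL-terminated array 'old' with 'fn'
     * appended.  The caller publishes the result in one pointer store
     * and frees the old array once readers are done (the kernel defers
     * that to an RCU grace period). */
    static probe_fn *probes_add(probe_fn *old, probe_fn fn)
    {
        size_t n = 0;
        probe_fn *new;

        while (old && old[n])
            n++;
        new = calloc(n + 2, sizeof(*new)); /* +1 probe, +1 terminator */
        if (!new)
            return NULL;
        for (size_t i = 0; i < n; i++)
            new[i] = old[i];
        new[n] = fn;                       /* new[n + 1] stays NULL */
        return new;
    }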
|
D | audit_watch.c |
    206  struct audit_watch *new;   in audit_dupe_watch() local
    212  new = audit_init_watch(path);   in audit_dupe_watch()
    213  if (IS_ERR(new)) {   in audit_dupe_watch()
    218  new->dev = old->dev;   in audit_dupe_watch()
    219  new->ino = old->ino;   in audit_dupe_watch()
    221  new->parent = old->parent;   in audit_dupe_watch()
    224  return new;   in audit_dupe_watch()
    505  int audit_dupe_exe(struct audit_krule *new, struct audit_krule *old)   in audit_dupe_exe() argument
    514  audit_mark = audit_alloc_mark(new, pathname, strlen(pathname));   in audit_dupe_exe()
    519  new->exe = audit_mark;   in audit_dupe_exe()
|
D | capability.c |
    227  struct cred *new;   in SYSCALL_DEFINE2() local
    265  new = prepare_creds();   in SYSCALL_DEFINE2()
    266  if (!new)   in SYSCALL_DEFINE2()
    269  ret = security_capset(new, current_cred(),   in SYSCALL_DEFINE2()
    274  audit_log_capset(new, current_cred());   in SYSCALL_DEFINE2()
    276  return commit_creds(new);   in SYSCALL_DEFINE2()
    279  abort_creds(new);   in SYSCALL_DEFINE2()
|
D | audit_tree.c |
    293  static void replace_chunk(struct audit_chunk *new, struct audit_chunk *old)   in replace_chunk() argument
    298  new->key = old->key;   in replace_chunk()
    299  list_splice_init(&old->trees, &new->trees);   in replace_chunk()
    300  list_for_each_entry(owner, &new->trees, same_root)   in replace_chunk()
    301  owner->root = new;   in replace_chunk()
    308  new->owners[i].owner = owner;   in replace_chunk()
    309  new->owners[i].index = old->owners[j].index - j + i;   in replace_chunk()
    313  list_replace_init(&old->owners[j].list, &new->owners[i].list);   in replace_chunk()
    315  replace_mark_chunk(old->mark, new);   in replace_chunk()
    322  list_replace_rcu(&old->hash, &new->hash);   in replace_chunk()
    [all …]
|
D | auditfilter.c |
    823  struct audit_krule *new;   in audit_dupe_rule() local
    831  new = &entry->rule;   in audit_dupe_rule()
    832  new->flags = old->flags;   in audit_dupe_rule()
    833  new->pflags = old->pflags;   in audit_dupe_rule()
    834  new->listnr = old->listnr;   in audit_dupe_rule()
    835  new->action = old->action;   in audit_dupe_rule()
    837  new->mask[i] = old->mask[i];   in audit_dupe_rule()
    838  new->prio = old->prio;   in audit_dupe_rule()
    839  new->buflen = old->buflen;   in audit_dupe_rule()
    840  new->inode_f = old->inode_f;   in audit_dupe_rule()
    [all …]
|
D | smp.c |
    199  union cfd_seq_cnt new, old;   in cfd_seq_inc() local
    201  new = CFD_SEQ(src, dst, type, 0);   in cfd_seq_inc()
    205  new.u.cnt = old.u.cnt + 1;   in cfd_seq_inc()
    206  } while (cmpxchg(&cfd_seq, old.val, new.val) != old.val);   in cfd_seq_inc()
    253  union cfd_seq_cnt new[2];   in cfd_seq_data_add() local
    256  new[0].val = val;   in cfd_seq_data_add()
    257  new[1] = CFD_SEQ(src, dst, type, new[0].u.cnt + 1);   in cfd_seq_data_add()
    260  if (new[i].u.cnt <= now)   in cfd_seq_data_add()
    261  new[i].u.cnt |= 0x80000000U;   in cfd_seq_data_add()
    263  if (new[i].u.cnt == data[j].u.cnt) {   in cfd_seq_data_add()
    [all …]
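cfd_seq_inc() is the canonical cmpxchg() retry loop: snapshot the old value, derive the new one from that snapshot, and retry if another CPU changed the word in between. The same shape in portable C11 atomics, as a sketch rather than the kernel code:

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint64_t seq;

    static uint64_t seq_inc(void)
    {
        uint64_t old = atomic_load(&seq), new;

        do {
            new = old + 1;  /* always derive 'new' from the snapshot */
        } while (!atomic_compare_exchange_weak(&seq, &old, new));
        /* on failure, 'old' was reloaded with the current value */
        return new;
    }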
|
D | pid_namespace.c |
    396  struct pid_namespace *ancestor, *new = to_pid_ns(ns);   in pidns_install() local
    398  if (!ns_capable(new->user_ns, CAP_SYS_ADMIN) ||   in pidns_install()
    410  if (new->level < active->level)   in pidns_install()
    413  ancestor = new;   in pidns_install()
    420  nsproxy->pid_ns_for_children = get_pid_ns(new);   in pidns_install()
|
/kernel/irq/ |
D | manage.c |
    1359  static int irq_setup_forced_threading(struct irqaction *new)   in irq_setup_forced_threading() argument
    1363  if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))   in irq_setup_forced_threading()
    1370  if (new->handler == irq_default_primary_handler)   in irq_setup_forced_threading()
    1373  new->flags |= IRQF_ONESHOT;   in irq_setup_forced_threading()
    1380  if (new->handler && new->thread_fn) {   in irq_setup_forced_threading()
    1382  new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);   in irq_setup_forced_threading()
    1383  if (!new->secondary)   in irq_setup_forced_threading()
    1385  new->secondary->handler = irq_forced_secondary_handler;   in irq_setup_forced_threading()
    1386  new->secondary->thread_fn = new->thread_fn;   in irq_setup_forced_threading()
    1387  new->secondary->dev_id = new->dev_id;   in irq_setup_forced_threading()
    [all …]
|
/kernel/bpf/ |
D | local_storage.c |
    106  struct rb_node **new = &(root->rb_node), *parent = NULL;   in cgroup_storage_insert() local
    108  while (*new) {   in cgroup_storage_insert()
    111  this = container_of(*new, struct bpf_cgroup_storage, node);   in cgroup_storage_insert()
    113  parent = *new;   in cgroup_storage_insert()
    116  new = &((*new)->rb_left);   in cgroup_storage_insert()
    119  new = &((*new)->rb_right);   in cgroup_storage_insert()
    126  rb_link_node(&storage->node, parent, new);   in cgroup_storage_insert()
    148  struct bpf_storage_buffer *new;   in cgroup_storage_update_elem() local
    167  new = bpf_map_kmalloc_node(map, struct_size(new, data, map->value_size),   in cgroup_storage_update_elem()
    170  if (!new)   in cgroup_storage_update_elem()
    [all …]
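cgroup_storage_insert() here, like insert_stat() in trace_stat.c further down, uses the standard kernel rbtree descent: walk a struct rb_node ** cursor down from the root, remembering the parent, until an empty slot is found, then link the node there. The sketch below shows the same descent on a plain, unbalanced binary search tree; the kernel additionally calls rb_link_node() and rb_insert_color() to rebalance:

    #include <stddef.h>

    struct node { long key; struct node *left, *right; };

    static int bst_insert(struct node **root, struct node *n)
    {
        struct node **new = root, *parent = NULL;

        while (*new) {
            parent = *new;          /* the kernel hands this to rb_link_node() */
            if (n->key < parent->key)
                new = &parent->left;
            else if (n->key > parent->key)
                new = &parent->right;
            else
                return -1;          /* duplicate key */
        }
        n->left = n->right = NULL;
        *new = n;                   /* link into the empty slot */
        (void)parent;               /* unused without rebalancing */
        return 0;
    }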
|
D | dispatcher.c |
    108  void *new, *tmp;   in bpf_dispatcher_update() local
    114  new = d->num_progs ? d->image + noff : NULL;   in bpf_dispatcher_update()
    116  if (new) {   in bpf_dispatcher_update()
    120  if (bpf_dispatcher_prepare(d, new, tmp))   in bpf_dispatcher_update()
    122  if (IS_ERR(bpf_arch_text_copy(new, tmp, PAGE_SIZE / 2)))   in bpf_dispatcher_update()
    126  __BPF_DISPATCHER_UPDATE(d, new ?: (void *)&bpf_dispatcher_nop_func);   in bpf_dispatcher_update()
    128  if (new)   in bpf_dispatcher_update()
|
/kernel/futex/ |
D | requeue.c |
    95  int old, new;   in futex_requeue_pi_prepare() local
    118  new = Q_REQUEUE_PI_IN_PROGRESS;   in futex_requeue_pi_prepare()
    119  } while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));   in futex_requeue_pi_prepare()
    127  int old, new;   in futex_requeue_pi_complete() local
    138  new = Q_REQUEUE_PI_DONE + locked;   in futex_requeue_pi_complete()
    141  new = Q_REQUEUE_PI_NONE;   in futex_requeue_pi_complete()
    145  new = Q_REQUEUE_PI_IGNORE;   in futex_requeue_pi_complete()
    147  } while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));   in futex_requeue_pi_complete()
    158  int old, new;   in futex_requeue_pi_wakeup_sync() local
    170  new = Q_REQUEUE_PI_WAIT;   in futex_requeue_pi_wakeup_sync()
    [all …]
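These futex helpers run a small lockless state machine over q->requeue_state: each one computes the next state from the state it observed and retries the atomic_try_cmpxchg() if a concurrent waker or requeuer moved the state first. A stripped-down model of a single transition step; the ST_* states are illustrative, not the kernel's Q_REQUEUE_PI_* values:

    #include <stdatomic.h>

    enum { ST_NONE, ST_IN_PROGRESS, ST_DONE, ST_IGNORE };

    /* Move ST_NONE -> ST_IN_PROGRESS, retrying if another thread
     * changed the state between the load and the cmpxchg. */
    static int mark_in_progress(_Atomic int *state)
    {
        int old = atomic_load(state), new;

        do {
            if (old != ST_NONE)
                return -1;          /* transition not legal from here */
            new = ST_IN_PROGRESS;
        } while (!atomic_compare_exchange_weak(state, &old, new));
        return 0;
    }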
|
/kernel/trace/ |
D | trace_stat.c |
    77  struct rb_node **new = &(root->rb_node), *parent = NULL;   in insert_stat() local
    89  while (*new) {   in insert_stat()
    93  this = container_of(*new, struct stat_node, node);   in insert_stat()
    96  parent = *new;   in insert_stat()
    98  new = &((*new)->rb_left);   in insert_stat()
    100  new = &((*new)->rb_right);   in insert_stat()
    103  rb_link_node(&data->node, parent, new);   in insert_stat()
|
/kernel/rcu/ |
D | rcu.h |
    136  static inline bool rcu_seq_completed_gp(unsigned long old, unsigned long new)   in rcu_seq_completed_gp() argument
    138  return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);   in rcu_seq_completed_gp()
    144  static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new)   in rcu_seq_new_gp() argument
    147  new);   in rcu_seq_new_gp()
    154  static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old)   in rcu_seq_diff() argument
    158  if (old == new)   in rcu_seq_diff()
    164  rnd_diff = (new & ~RCU_SEQ_STATE_MASK) -   in rcu_seq_diff()
    166  ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK));   in rcu_seq_diff()
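These helpers compare grace-period sequence numbers with ULONG_CMP_LT(), which stays correct when the counters wrap: it tests the modular difference against half the range rather than comparing raw magnitudes. A one-function sketch of the same test, with a worked example at the wrap point:

    #include <limits.h>

    /* Wraparound-safe "a is before b", same shape as the kernel's
     * ULONG_CMP_LT(a, b) macro. */
    static inline int ulong_before(unsigned long a, unsigned long b)
    {
        return ULONG_MAX / 2 < a - b;
    }

    /* Worked example at the wrap point:
     *   ulong_before(ULONG_MAX, 1UL) == 1
     * even though ULONG_MAX > 1 numerically, because the counter is
     * taken to have wrapped from ULONG_MAX through 0 to 1. */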
|
/kernel/kcsan/ |
D | report.c |
    391  u64 old, u64 new, u64 mask)   in print_report() argument
    474  u64 diff = old ^ new;   in print_report()
    481  hex_len, old, hex_len, new);   in print_report()
    668  int watchpoint_idx, u64 old, u64 new, u64 mask)   in kcsan_report_known_origin() argument
    692  print_report(value_change, &ai, other_info, old, new, mask);   in kcsan_report_known_origin()
    701  unsigned long ip, u64 old, u64 new, u64 mask)   in kcsan_report_unknown_origin() argument
    710  print_report(KCSAN_VALUE_CHANGE_TRUE, &ai, NULL, old, new, mask);   in kcsan_report_unknown_origin()
|
D | permissive.h |
    38  kcsan_ignore_data_race(size_t size, int type, u64 old, u64 new, u64 diff)   in kcsan_ignore_data_race() argument
    87  if (!((!old || !new) && diff == 1))   in kcsan_ignore_data_race()
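The test in the hit at line 87 is KCSAN's permissive filter for flag updates: a racy value change is only forgiven when it looks like a 0 <-> 1 flip, i.e. old ^ new == 1 and one side is zero. A tiny standalone model of that predicate:

    #include <stdint.h>
    #include <stdio.h>

    /* Forgive a racy value change only when it looks like a
     * 0 <-> 1 flag flip. */
    static int is_flag_flip(uint64_t old, uint64_t new)
    {
        uint64_t diff = old ^ new;

        return (!old || !new) && diff == 1;
    }

    int main(void)
    {
        printf("%d\n", is_flag_flip(0, 1));  /* 1: flag set */
        printf("%d\n", is_flag_flip(1, 0));  /* 1: flag cleared */
        printf("%d\n", is_flag_flip(2, 3));  /* 0: still reported */
        return 0;
    }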
|
D | kcsan.h |
    133  int watchpoint_idx, u64 old, u64 new, u64 mask);
    140  unsigned long ip, u64 old, u64 new, u64 mask);
|
/kernel/cgroup/ |
D | pids.c |
    143  int64_t new = atomic64_add_return(num, &p->counter);   in pids_charge() local
    145  pids_update_watermark(p, new);   in pids_charge()
    163  int64_t new = atomic64_add_return(num, &p->counter);   in pids_try_charge() local
    171  if (new > limit)   in pids_try_charge()
    178  pids_update_watermark(p, new);   in pids_try_charge()
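pids_try_charge() charges optimistically: atomic64_add_return() applies the charge first, the result is compared against the limit, and the charge is backed out on failure instead of holding a lock across the check. A userspace sketch of that optimistic charge-and-rollback for a single level; the real code walks the whole cgroup hierarchy and also records a watermark:

    #include <stdatomic.h>
    #include <stdint.h>

    struct pid_counter { _Atomic int64_t count; int64_t limit; };

    static int try_charge(struct pid_counter *c, int64_t num)
    {
        int64_t new = atomic_fetch_add(&c->count, num) + num;

        if (new > c->limit) {
            atomic_fetch_sub(&c->count, num); /* undo the speculative add */
            return -1;                        /* the kernel returns -EAGAIN */
        }
        return 0;
    }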
|
/kernel/locking/ |
D | rwsem.c |
    616  long count, new;   in rwsem_try_write_lock() local
    634  new = count;   in rwsem_try_write_lock()
    646  new |= RWSEM_FLAG_HANDOFF;   in rwsem_try_write_lock()
    648  new |= RWSEM_WRITER_LOCKED;   in rwsem_try_write_lock()
    649  new &= ~RWSEM_FLAG_HANDOFF;   in rwsem_try_write_lock()
    652  new &= ~RWSEM_FLAG_WAITERS;   in rwsem_try_write_lock()
    654  } while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));   in rwsem_try_write_lock()
    661  if (new & RWSEM_FLAG_HANDOFF) {   in rwsem_try_write_lock()
    760  struct task_struct *new, *owner;   in rwsem_spin_on_owner() local
    783  new = rwsem_owner_flags(sem, &new_flags);   in rwsem_spin_on_owner()
    [all …]
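rwsem_try_write_lock() builds its new count by flag algebra on the snapshot: set the writer-owned bit, clear the handoff bit, and drop the waiters bit when the wait list is emptying, then attempt a single acquire cmpxchg. A simplified model of that composition; the bit assignments below are made up for the sketch and differ from the kernel's RWSEM_* values:

    #include <stdatomic.h>

    #define WRITER_LOCKED   (1UL << 0)
    #define FLAG_WAITERS    (1UL << 1)
    #define FLAG_HANDOFF    (1UL << 2)

    static int try_write_lock(_Atomic unsigned long *count, int last_waiter)
    {
        unsigned long old = atomic_load(count), new;

        do {
            if (old & WRITER_LOCKED)
                return 0;                   /* already owned */
            new = (old | WRITER_LOCKED) & ~FLAG_HANDOFF;
            if (last_waiter)
                new &= ~FLAG_WAITERS;       /* wait list is emptying */
        } while (!atomic_compare_exchange_weak_explicit(count, &old, new,
                            memory_order_acquire, memory_order_relaxed));
        return 1;
    }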
|