/kernel/bpf/

stackmap.c
    294   u64 *ips, u32 trace_nr, bool user)  in stack_map_get_build_id_offset() argument
    327   if (!user || !current || !current->mm || irq_work_busy ||  in stack_map_get_build_id_offset()
    404   bool user = flags & BPF_F_USER_STACK;  in __bpf_get_stackid() local
    433   ips, trace_nr, user);  in __bpf_get_stackid()
    472   bool user = flags & BPF_F_USER_STACK;  in BPF_CALL_3() local
    474   bool kernel = !user;  in BPF_CALL_3()
    484   trace = get_perf_callchain(regs, 0, kernel, user, max_depth,  in BPF_CALL_3()
    520   bool kernel, user;  in BPF_CALL_3() local
    533   user = flags & BPF_F_USER_STACK;  in BPF_CALL_3()
    534   kernel = !user;  in BPF_CALL_3()
    [all …]

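The matches at lines 404-534 all derive the capture direction from one flag bit. A minimal userspace sketch of that decoding, assuming only the BPF_F_USER_STACK value from include/uapi/linux/bpf.h (redefined locally so the snippet stands alone):

    /* Sketch of the flag decoding in __bpf_get_stackid()/bpf_get_stack():
     * the BPF_F_USER_STACK bit selects whether the user or the kernel
     * stack is captured; exactly one of the two booleans is set. */
    #include <stdbool.h>
    #include <stdio.h>

    #define BPF_F_USER_STACK (1ULL << 8)   /* mirrors uapi/linux/bpf.h */

    static void decode(unsigned long long flags)
    {
        bool user = flags & BPF_F_USER_STACK;
        bool kernel = !user;

        printf("flags=%#llx -> user=%d kernel=%d\n", flags, user, kernel);
    }

    int main(void)
    {
        decode(0);                    /* kernel stack requested */
        decode(BPF_F_USER_STACK);     /* user stack requested */
        return 0;
    }
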
syscall.c
    362   static int bpf_charge_memlock(struct user_struct *user, u32 pages)  in bpf_charge_memlock() argument
    366   if (atomic_long_add_return(pages, &user->locked_vm) > memlock_limit) {  in bpf_charge_memlock()
    367   atomic_long_sub(pages, &user->locked_vm);  in bpf_charge_memlock()
    373   static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)  in bpf_uncharge_memlock() argument
    375   if (user)  in bpf_uncharge_memlock()
    376   atomic_long_sub(pages, &user->locked_vm);  in bpf_uncharge_memlock()
    382   struct user_struct *user;  in bpf_map_charge_init() local
    388   user = get_current_user();  in bpf_map_charge_init()
    389   ret = bpf_charge_memlock(user, pages);  in bpf_map_charge_init()
    391   free_uid(user);  in bpf_map_charge_init()
    [all …]

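The charge/uncharge pair above is an optimistic-update pattern: add first, then roll back if the new total exceeds the limit. A minimal userspace sketch of the same pattern with C11 atomics; the names and the limit value are illustrative stand-ins, not the kernel API:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_long locked_vm;              /* pages charged so far */
    static const long memlock_limit = 16;      /* stand-in for the rlimit-derived cap */

    static bool charge_memlock(long pages)
    {
        /* fetch_add returns the old value; old + pages is the new total,
         * matching the kernel's atomic_long_add_return() check */
        if (atomic_fetch_add(&locked_vm, pages) + pages > memlock_limit) {
            atomic_fetch_sub(&locked_vm, pages);   /* undo the optimistic add */
            return false;                          /* kernel returns -EPERM here */
        }
        return true;
    }

    static void uncharge_memlock(long pages)
    {
        atomic_fetch_sub(&locked_vm, pages);
    }

    int main(void)
    {
        printf("charge 10: %s\n", charge_memlock(10) ? "ok" : "over limit");
        printf("charge 10: %s\n", charge_memlock(10) ? "ok" : "over limit");
        uncharge_memlock(10);
        return 0;
    }
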
/kernel/printk/

printk.c
    706   struct devkmsg_user *user = file->private_data;  in devkmsg_write() local
    710   if (!user || len > LOG_LINE_MAX)  in devkmsg_write()
    719   if (!___ratelimit(&user->rs, current->comm))  in devkmsg_write()
    766   struct devkmsg_user *user = file->private_data;  in devkmsg_read() local
    767   struct printk_record *r = &user->record;  in devkmsg_read()
    771   if (!user)  in devkmsg_read()
    774   ret = mutex_lock_interruptible(&user->lock);  in devkmsg_read()
    779   if (!prb_read_valid(prb, user->seq, r)) {  in devkmsg_read()
    788   prb_read_valid(prb, user->seq, r));  in devkmsg_read()
    794   if (r->info->seq != user->seq) {  in devkmsg_read()
    [all …]

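devkmsg_read() tracks the next sequence number the reader expects (user->seq) and compares it with the sequence of the record actually returned; a mismatch at line 794 means older records were overwritten while the reader waited. A toy rendering of that gap detection, with a fabricated record type standing in for the kernel's printk ring buffer:

    #include <stdio.h>

    struct record { unsigned long long seq; const char *text; };

    static void read_record(unsigned long long *expect, const struct record *r)
    {
        if (r->seq != *expect)                     /* records were lost */
            printf("lost %llu records\n", r->seq - *expect);
        printf("seq=%llu: %s\n", r->seq, r->text);
        *expect = r->seq + 1;                      /* advance past what we consumed */
    }

    int main(void)
    {
        unsigned long long seq = 0;
        struct record a = { 0, "first" }, b = { 5, "after overwrite" };

        read_record(&seq, &a);    /* in order */
        read_record(&seq, &b);    /* gap: seqs 1..4 were overwritten */
        return 0;
    }
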
/kernel/

user.c
    123   struct user_struct *user;  in uid_hash_find() local
    125   hlist_for_each_entry(user, hashent, uidhash_node) {  in uid_hash_find()
    126   if (uid_eq(user->uid, uid)) {  in uid_hash_find()
    127   refcount_inc(&user->__count);  in uid_hash_find()
    128   return user;  in uid_hash_find()

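uid_hash_find() is a lookup-and-pin: the hash bucket is walked and a reference is taken on the entry before it is returned, so the caller's pointer stays valid. A sketch of the same pattern with a plain chain and a C11 atomic standing in for hlist and refcount_t:

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>

    struct user_entry {
        unsigned int uid;
        atomic_int __count;
        struct user_entry *next;       /* hash-bucket chain */
    };

    static struct user_entry *uid_find(struct user_entry *bucket, unsigned int uid)
    {
        for (struct user_entry *u = bucket; u; u = u->next) {
            if (u->uid == uid) {
                atomic_fetch_add(&u->__count, 1);  /* pin before returning */
                return u;
            }
        }
        return NULL;
    }

    int main(void)
    {
        struct user_entry b = { 1001, 1, NULL };
        struct user_entry a = { 1000, 1, &b };
        struct user_entry *u = uid_find(&a, 1001);

        if (u)
            printf("uid %u found, refcount now %d\n",
                   u->uid, atomic_load(&u->__count));
        return 0;
    }
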
uid16.c
    23    SYSCALL_DEFINE3(chown16, const char __user *, filename, old_uid_t, user, old_gid_t, group)  in SYSCALL_DEFINE3() argument
    25    return ksys_chown(filename, low2highuid(user), low2highgid(group));  in SYSCALL_DEFINE3()
    28    SYSCALL_DEFINE3(lchown16, const char __user *, filename, old_uid_t, user, old_gid_t, group)  in SYSCALL_DEFINE3() argument
    30    return ksys_lchown(filename, low2highuid(user), low2highgid(group));  in SYSCALL_DEFINE3()
    33    SYSCALL_DEFINE3(fchown16, unsigned int, fd, old_uid_t, user, old_gid_t, group)  in SYSCALL_DEFINE3() argument
    35    return ksys_fchown(fd, low2highuid(user), low2highgid(group));  in SYSCALL_DEFINE3()

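All three legacy 16-bit syscalls funnel through low2highuid()/low2highgid(). Assuming the usual semantics from include/linux/highuid.h, the widening preserves the (old_uid_t)-1 "no change" sentinel rather than converting it to 65535. A sketch (local typedefs avoid clashing with libc's uid_t):

    #include <stdio.h>

    typedef unsigned short old_uid_t;
    typedef unsigned int uid32_t;     /* stand-in for the kernel's uid_t */

    static uid32_t low2highuid(old_uid_t uid)
    {
        /* keep the sentinel all-ones, otherwise plain zero-extend */
        return uid == (old_uid_t)-1 ? (uid32_t)-1 : (uid32_t)uid;
    }

    int main(void)
    {
        printf("%u\n", low2highuid(1000));            /* 1000 */
        printf("%#x\n", low2highuid((old_uid_t)-1));  /* 0xffffffff, not 0xffff */
        return 0;
    }
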
cred.c
    62    .user = INIT_USER,
    123   free_uid(cred->user);  in put_cred_rcu()
    274   get_uid(new->user);  in prepare_creds()
    357   atomic_inc(&p->cred->user->processes);  in copy_creds()
    390   atomic_inc(&new->user->processes);  in copy_creds()
    491   if (new->user != old->user)  in commit_creds()
    492   atomic_inc(&new->user->processes);  in commit_creds()
    496   if (new->user != old->user)  in commit_creds()
    497   atomic_dec(&old->user->processes);  in commit_creds()
    710   get_uid(new->user);  in prepare_kernel_cred()

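In the commit_creds() lines above, a task that changes owner bumps the new user's process count (line 492) before the old user's is dropped (line 497), so neither counter dips transiently. A toy sketch of that inc-before-dec migration; the types are stand-ins, not the kernel's user_struct:

    #include <stdatomic.h>
    #include <stdio.h>

    struct user { atomic_int processes; };

    static void migrate_task(struct user *new, struct user *old)
    {
        if (new != old) {
            atomic_fetch_add(&new->processes, 1);   /* charge the new owner */
            atomic_fetch_sub(&old->processes, 1);   /* then release the old one */
        }
    }

    int main(void)
    {
        struct user root = { 3 }, alice = { 1 };

        migrate_task(&alice, &root);   /* e.g. a setuid() taking effect */
        printf("root=%d alice=%d\n",
               atomic_load(&root.processes), atomic_load(&alice.processes));
        return 0;
    }
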
watch_queue.c
    262   user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_pages);  in watch_queue_set_size()
    307   (void) account_pipe_buffers(pipe->user, nr_pages, pipe->nr_accounted);  in watch_queue_set_size()
    426   atomic_dec(&watch->cred->user->nr_watches);  in free_watch()
    473   if (atomic_inc_return(&cred->user->nr_watches) > task_rlimit(current, RLIMIT_NOFILE)) {  in add_one_watch()
    474   atomic_dec(&cred->user->nr_watches);  in add_one_watch()

sys.c
    210   struct user_struct *user;  in SYSCALL_DEFINE3() local
    248   user = cred->user;  in SYSCALL_DEFINE3()
    252   user = find_user(uid);  in SYSCALL_DEFINE3()
    253   if (!user)  in SYSCALL_DEFINE3()
    261   free_uid(user);  /* For find_user() */  in SYSCALL_DEFINE3()
    280   struct user_struct *user;  in SYSCALL_DEFINE2() local
    316   user = cred->user;  in SYSCALL_DEFINE2()
    320   user = find_user(uid);  in SYSCALL_DEFINE2()
    321   if (!user)  in SYSCALL_DEFINE2()
    332   free_uid(user);  /* for find_user() */  in SYSCALL_DEFINE2()
    [all …]

Makefile
    8     sysctl.o capability.o ptrace.o user.o \
    118   obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o

Kconfig.hz
    13    a fast response for user interaction and that may experience bus

signal.c
    419   struct user_struct *user;  in __sigqueue_alloc() local
    431   user = __task_cred(t)->user;  in __sigqueue_alloc()
    432   sigpending = atomic_inc_return(&user->sigpending);  in __sigqueue_alloc()
    434   get_uid(user);  in __sigqueue_alloc()
    444   if (atomic_dec_and_test(&user->sigpending))  in __sigqueue_alloc()
    445   free_uid(user);  in __sigqueue_alloc()
    449   q->user = user;  in __sigqueue_alloc()
    459   if (atomic_dec_and_test(&q->user->sigpending))  in __sigqueue_free()
    460   free_uid(q->user);  in __sigqueue_free()

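One plausible reading of the __sigqueue_alloc()/__sigqueue_free() pairing shown above: the per-user sigpending counter's zero-crossings bracket a reference on the user (get_uid when the count leaves zero, free_uid when dec-and-test brings it back). A toy rendering of that zero-crossing pattern; the names are illustrative, not the kernel API:

    #include <stdatomic.h>
    #include <stdio.h>

    struct user { atomic_int sigpending; atomic_int refs; };

    static void sigqueue_account(struct user *u)
    {
        /* first pending signal (0 -> 1) pins the user */
        if (atomic_fetch_add(&u->sigpending, 1) == 0)
            atomic_fetch_add(&u->refs, 1);
    }

    static void sigqueue_unaccount(struct user *u)
    {
        /* dec-and-test: the decrement that reaches zero drops the ref */
        if (atomic_fetch_sub(&u->sigpending, 1) == 1)
            atomic_fetch_sub(&u->refs, 1);
    }

    int main(void)
    {
        struct user u = { 0, 1 };   /* one baseline reference */

        sigqueue_account(&u);       /* 0 -> 1: takes an extra ref */
        sigqueue_account(&u);       /* 1 -> 2: no new ref */
        sigqueue_unaccount(&u);
        sigqueue_unaccount(&u);     /* 1 -> 0: drops the ref again */
        printf("pending=%d refs=%d\n",
               atomic_load(&u.sigpending), atomic_load(&u.refs));
        return 0;
    }
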
fork.c
    2042  if (atomic_read(&p->real_cred->user->processes) >=  in copy_process()
    2044  if (p->real_cred->user != INIT_USER &&  in copy_process()
    2455  atomic_dec(&p->cred->user->processes);  in copy_process()

/kernel/cgroup/

rstat.c
    407   u64 user = 0;  in root_cgroup_cputime() local
    412   user += cpustat[CPUTIME_USER];  in root_cgroup_cputime()
    413   user += cpustat[CPUTIME_NICE];  in root_cgroup_cputime()
    414   cputime->utime += user;  in root_cgroup_cputime()
    421   cputime->sum_exec_runtime += user;  in root_cgroup_cputime()

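root_cgroup_cputime() folds both USER and NICE ticks into the "user" total before adding it into utime and sum_exec_runtime: niced time still counts as user time. A standalone sketch of that aggregation over a fabricated two-CPU cpustat array:

    #include <stdint.h>
    #include <stdio.h>

    enum { CPUTIME_USER, CPUTIME_NICE, NR_STATS };

    int main(void)
    {
        uint64_t cpustat[2][NR_STATS] = {   /* two fake CPUs */
            { 100, 10 },
            { 200, 30 },
        };
        uint64_t utime = 0;

        for (int cpu = 0; cpu < 2; cpu++) {
            uint64_t user = 0;
            user += cpustat[cpu][CPUTIME_USER];
            user += cpustat[cpu][CPUTIME_NICE];  /* niced time is user time too */
            utime += user;
        }
        printf("utime=%llu\n", (unsigned long long)utime);   /* 340 */
        return 0;
    }
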
/kernel/bpf/preload/

Kconfig
    21    tristate "bpf_preload kernel module with user mode driver"
    26    This builds bpf_preload kernel module with embedded user mode driver.

/kernel/power/

Kconfig
    27    Skip the kernel sys_sync() before freezing user processes.
    30    user-space before invoking suspend. There's a run-time switch
    47    called "hibernation" in user interfaces. STD checkpoints the
    103   The partition specified here will be different for almost every user.
    152   Allow user space to create, activate and deactivate wakeup source
    156   int "Maximum number of user space wakeup sources (0 = no limit)"
    162   bool "Garbage collector for user space wakeup sources"
    192   fields of device objects from user space. If you are not a kernel
    267   battery status information, and user-space programs will receive

Makefile
    14    obj-$(CONFIG_HIBERNATION_SNAPSHOT_DEV) += user.o

/kernel/rcu/

tiny.c
    68    void rcu_sched_clock_irq(int user)  in rcu_sched_clock_irq() argument
    70    if (user) {  in rcu_sched_clock_irq()

tree.c
    565   static int rcu_pending(int user);
    621   static noinstr void rcu_eqs_enter(bool user)  in rcu_eqs_enter() argument
    638   WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));  in rcu_eqs_enter()
    842   static void noinstr rcu_eqs_exit(bool user)  in rcu_eqs_exit() argument
    867   WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));  in rcu_eqs_exit()
    2575  void rcu_sched_clock_irq(int user)  in rcu_sched_clock_irq() argument
    2583  if (!rcu_is_cpu_rrupt_from_idle() && !user) {  in rcu_sched_clock_irq()
    2589  rcu_flavor_sched_clock_irq(user);  in rcu_sched_clock_irq()
    2590  if (rcu_pending(user))  in rcu_sched_clock_irq()
    3773  static int rcu_pending(int user)  in rcu_pending() argument
    [all …]

Kconfig
    85    user-mode execution as quiescent states. Not for manual selection.
    92    only context switch (including preemption) and user-mode
    241   bool "Tasks Trace RCU readers use memory barriers in user and idle"

tree.h
    411   static void rcu_flavor_sched_clock_irq(int user);

tree_plugin.h
    682   static void rcu_flavor_sched_clock_irq(int user)  in rcu_flavor_sched_clock_irq() argument
    687   if (user || rcu_is_cpu_rrupt_from_idle()) {  in rcu_flavor_sched_clock_irq()
    920   static void rcu_flavor_sched_clock_irq(int user)  in rcu_flavor_sched_clock_irq() argument
    922   if (user || rcu_is_cpu_rrupt_from_idle()) {  in rcu_flavor_sched_clock_irq()

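Both rcu_flavor_sched_clock_irq() variants (and tiny.c's rcu_sched_clock_irq() above) test the same condition: if the scheduler tick interrupted user mode or the idle loop, the CPU cannot be inside an RCU read-side critical section, so a quiescent state can be reported. A pure simulation of that dispatch, with cpu_was_idle standing in for rcu_is_cpu_rrupt_from_idle():

    #include <stdbool.h>
    #include <stdio.h>

    static bool cpu_was_idle;   /* stand-in for rcu_is_cpu_rrupt_from_idle() */

    static void sched_clock_irq(int user)
    {
        if (user || cpu_was_idle)
            printf("quiescent state: CPU cannot hold an RCU read lock\n");
        else
            printf("kernel mode: may be inside a reader, no QS reported\n");
    }

    int main(void)
    {
        sched_clock_irq(1);     /* tick landed in user code    -> QS */
        cpu_was_idle = true;
        sched_clock_irq(0);     /* tick landed in the idle loop -> QS */
        cpu_was_idle = false;
        sched_clock_irq(0);     /* tick landed in kernel code  -> no QS */
        return 0;
    }
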
Kconfig.debug
    114   Say N here if you need ultimate kernel/user switch latencies

/kernel/events/

callchain.c
    180   get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,  in get_perf_callchain() argument
    203   if (user) {  in get_perf_callchain()

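get_perf_callchain()'s kernel/user booleans select which halves of the callchain are collected, kernel frames before user frames. A heavily simplified sketch of that gating; the frame values and the one-frame-per-side shape are fabricated:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_DEPTH 8

    struct callchain { unsigned long ip[MAX_DEPTH]; int nr; };

    static void get_callchain(struct callchain *c, bool kernel, bool user)
    {
        c->nr = 0;
        if (kernel)                        /* kernel-side frames first */
            c->ip[c->nr++] = 0xc0001000UL; /* pretend kernel address */
        if (user)                          /* then the user-side frames */
            c->ip[c->nr++] = 0x00401000UL; /* pretend user address */
    }

    int main(void)
    {
        struct callchain c;

        get_callchain(&c, true, true);
        for (int i = 0; i < c.nr; i++)
            printf("frame %d: %#lx\n", i, c.ip[i]);
        return 0;
    }
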
/kernel/trace/

Kconfig
    384   allowing the user to pick and choose which trace point they
    539   This allows the user to add tracing events (similar to tracepoints)
    580   This allows the user to add tracing events on top of userspace
    585   of perf tools on user space applications.
    593   This allows the user to attach BPF programs to kprobe, uprobe, and
    656   Synthetic events are user-defined trace events that can be
    693   Allow user-space to inject a specific trace event into the ring
    749   instead of their values. This can cause problems for user space tools
    750   that use this string to parse the raw data as user space does not know
    857   tracers by executing a preempt or irq disable section with a user

/kernel/time/

Kconfig
    122   This is implemented at the expense of some overhead in user <-> kernel