/kernel/trace/ |
D | trace_events_user.c |
    146  typedef void (*user_event_func_t) (struct user_event *user, struct iov_iter *i,
    275  void user_event_register_set(struct user_event *user)  in user_event_register_set() argument
    277  int i = user->index;  in user_event_register_set()
    279  user->group->register_page_data[MAP_STATUS_BYTE(i)] |= MAP_STATUS_MASK(i);  in user_event_register_set()
    283  void user_event_register_clear(struct user_event *user)  in user_event_register_clear() argument
    285  int i = user->index;  in user_event_register_clear()
    287  user->group->register_page_data[MAP_STATUS_BYTE(i)] &= ~MAP_STATUS_MASK(i);  in user_event_register_clear()
    291  bool user_event_last_ref(struct user_event *user)  in user_event_last_ref() argument
    293  return refcount_read(&user->refcnt) == 1;  in user_event_last_ref()
    312  struct user_event *user = (struct user_event *)call->data;  in user_event_get_fields() local
    [all …]
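The register_page_data hits above are a packed-bitmap update: MAP_STATUS_BYTE() turns an event index into a byte offset and MAP_STATUS_MASK() into a bit within that byte. A minimal standalone sketch of the same byte/mask arithmetic follows; the macro bodies here are plausible reconstructions for illustration, not copied from the kernel headers.

    #include <stdint.h>
    #include <stdio.h>

    /* Plausible reconstructions of the kernel macros: byte offset and bit mask. */
    #define MAP_STATUS_BYTE(index) ((index) / 8)
    #define MAP_STATUS_MASK(index) (1u << ((index) % 8))

    static uint8_t register_page_data[4096 / 8];

    static void register_set(int index)
    {
        register_page_data[MAP_STATUS_BYTE(index)] |= MAP_STATUS_MASK(index);
    }

    static void register_clear(int index)
    {
        register_page_data[MAP_STATUS_BYTE(index)] &= ~MAP_STATUS_MASK(index);
    }

    int main(void)
    {
        register_set(10);                /* index 10 -> byte 1, bit 2 */
        printf("byte 1 = 0x%02x\n", register_page_data[1]);  /* 0x04 */
        register_clear(10);
        printf("byte 1 = 0x%02x\n", register_page_data[1]);  /* 0x00 */
        return 0;
    }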
|
D | Kconfig |
    493  allowing the user to pick and choose which trace point they
    648  This allows the user to add tracing events (similar to tracepoints)
    689  This allows the user to add tracing events on top of userspace
    694  of perf tools on user space applications.
    702  This allows the user to attach BPF programs to kprobe, uprobe, and
    766  Synthetic events are user-defined trace events that can be
    783  User trace events are user-defined trace events that
    818  Allow user-space to inject a specific trace event into the ring
    874  instead of their values. This can cause problems for user space tools
    875  that use this string to parse the raw data as user space does not know
    [all …]
|
/kernel/bpf/ |
D | stackmap.c |
    128  u64 *ips, u32 trace_nr, bool user)  in stack_map_get_build_id_offset() argument
    140  if (!user || !current || !current->mm || irq_work_busy ||  in stack_map_get_build_id_offset()
    220  bool user = flags & BPF_F_USER_STACK;  in __bpf_get_stackid() local
    249  ips, trace_nr, user);  in __bpf_get_stackid()
    288  bool user = flags & BPF_F_USER_STACK;  in BPF_CALL_3() local
    290  bool kernel = !user;  in BPF_CALL_3()
    300  trace = get_perf_callchain(regs, 0, kernel, user, max_depth,  in BPF_CALL_3()
    336  bool kernel, user;  in BPF_CALL_3() local
    349  user = flags & BPF_F_USER_STACK;  in BPF_CALL_3()
    350  kernel = !user;  in BPF_CALL_3()
    [all …]
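The stackmap.c hits show the helper-flag decode used throughout the BPF stack helpers: whether to capture the user or the kernel stack is a single flag, and kernel is simply its negation. A small sketch of that decode; the flag values are written out here for the sketch and should be checked against the BPF UAPI headers.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Flag layout as in the BPF UAPI, reproduced here for the sketch. */
    #define BPF_F_SKIP_FIELD_MASK 0xffULL
    #define BPF_F_USER_STACK      (1ULL << 8)

    static void decode_stack_flags(uint64_t flags)
    {
        bool user = flags & BPF_F_USER_STACK;              /* capture user stack? */
        bool kernel = !user;                               /* otherwise kernel stack */
        unsigned int skip = flags & BPF_F_SKIP_FIELD_MASK; /* frames to skip */

        printf("user=%d kernel=%d skip=%u\n", user, kernel, skip);
    }

    int main(void)
    {
        decode_stack_flags(BPF_F_USER_STACK | 2); /* user stack, skip two frames */
        return 0;
    }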
|
D | syscall.c |
    98    res = check_zeroed_user(uaddr.user + expected_size,  in bpf_check_uarg_tail_zero()
    2030  free_uid(aux->user);  in __bpf_prog_put_rcu()
    2578  prog->aux->user = get_current_user();  in bpf_prog_load()
    2654  free_uid(prog->aux->user);  in bpf_prog_load()
    3940  prog->aux->user->uid);  in bpf_prog_get_info_by_fd()
    5002  err = bpf_prog_query(&attr, uattr.user);  in __sys_bpf()
    5005  err = bpf_prog_test_run(&attr, uattr.user);  in __sys_bpf()
    5008  err = bpf_obj_get_next_id(&attr, uattr.user,  in __sys_bpf()
    5012  err = bpf_obj_get_next_id(&attr, uattr.user,  in __sys_bpf()
    5016  err = bpf_obj_get_next_id(&attr, uattr.user,  in __sys_bpf()
    [all …]
|
/kernel/printk/ |
D | printk.c |
    663  struct devkmsg_user *user = file->private_data;  in devkmsg_write() local
    667  if (!user || len > LOG_LINE_MAX)  in devkmsg_write()
    676  if (!___ratelimit(&user->rs, current->comm))  in devkmsg_write()
    722  struct devkmsg_user *user = file->private_data;  in devkmsg_read() local
    723  struct printk_record *r = &user->record;  in devkmsg_read()
    727  if (!user)  in devkmsg_read()
    730  ret = mutex_lock_interruptible(&user->lock);  in devkmsg_read()
    734  if (!prb_read_valid(prb, atomic64_read(&user->seq), r)) {  in devkmsg_read()
    752  atomic64_read(&user->seq), r)); /* LMM(devkmsg_read:A) */  in devkmsg_read()
    757  if (r->info->seq != atomic64_read(&user->seq)) {  in devkmsg_read()
    [all …]
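devkmsg_read() above is an instance of the per-reader cursor pattern: every open /dev/kmsg file keeps its own sequence number into the shared printk ring, and a record whose sequence no longer matches the cursor means the reader was overtaken and must resync. A toy single-threaded sketch of that pattern; the ring and record types are hypothetical stand-ins, not the kernel's prb API.

    #include <stdint.h>
    #include <stdio.h>

    #define RING_SLOTS 4

    struct record { uint64_t seq; const char *text; };

    /* Hypothetical ring: slot = seq % RING_SLOTS, old slots get overwritten. */
    static struct record ring[RING_SLOTS];
    static uint64_t next_seq;

    static void ring_write(const char *text)
    {
        ring[next_seq % RING_SLOTS] = (struct record){ next_seq, text };
        next_seq++;
    }

    /* Per-reader cursor, like devkmsg_user::seq. */
    struct reader { uint64_t seq; };

    static const char *reader_read(struct reader *rd)
    {
        struct record *r = &ring[rd->seq % RING_SLOTS];

        if (rd->seq == next_seq)
            return NULL;      /* nothing new to read */
        if (r->seq != rd->seq)
            rd->seq = r->seq; /* overtaken; resync to oldest (devkmsg reports -EPIPE here) */
        r = &ring[rd->seq % RING_SLOTS];
        rd->seq++;
        return r->text;
    }

    int main(void)
    {
        struct reader rd = { 0 };
        const char *text;

        ring_write("one");
        ring_write("two");
        while ((text = reader_read(&rd)))
            printf("read: %s\n", text);
        return 0;
    }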
|
/kernel/ |
D | uid16.c |
    23  SYSCALL_DEFINE3(chown16, const char __user *, filename, old_uid_t, user, old_gid_t, group)  in SYSCALL_DEFINE3() argument
    25  return ksys_chown(filename, low2highuid(user), low2highgid(group));  in SYSCALL_DEFINE3()
    28  SYSCALL_DEFINE3(lchown16, const char __user *, filename, old_uid_t, user, old_gid_t, group)  in SYSCALL_DEFINE3() argument
    30  return ksys_lchown(filename, low2highuid(user), low2highgid(group));  in SYSCALL_DEFINE3()
    33  SYSCALL_DEFINE3(fchown16, unsigned int, fd, old_uid_t, user, old_gid_t, group)  in SYSCALL_DEFINE3() argument
    35  return ksys_fchown(fd, low2highuid(user), low2highgid(group));  in SYSCALL_DEFINE3()
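uid16.c is the compat layer that widens legacy 16-bit IDs before calling the modern ksys_*() helpers. The subtle point is that the "no change" sentinel -1 must survive the widening rather than becoming 0xffff. A sketch of the conversion as I understand it; see the kernel's include/linux/highuid.h for the authoritative macro.

    #include <stdint.h>
    #include <stdio.h>

    typedef uint16_t old_uid_t;
    typedef uint32_t new_uid_t;

    /* Widen a legacy 16-bit uid, preserving the (old_uid_t)-1 "no change" sentinel. */
    static new_uid_t low2highuid(old_uid_t uid)
    {
        return uid == (old_uid_t)-1 ? (new_uid_t)-1 : (new_uid_t)uid;
    }

    int main(void)
    {
        printf("0x%x\n", low2highuid(1000));          /* 0x3e8 */
        printf("0x%x\n", low2highuid((old_uid_t)-1)); /* 0xffffffff, not 0xffff */
        return 0;
    }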
|
D | user.c |
    122  struct user_struct *user;  in uid_hash_find() local
    124  hlist_for_each_entry(user, hashent, uidhash_node) {  in uid_hash_find()
    125  if (uid_eq(user->uid, uid)) {  in uid_hash_find()
    126  refcount_inc(&user->__count);  in uid_hash_find()
    127  return user;  in uid_hash_find()
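uid_hash_find() is the usual "look up in a bucket, take a reference before returning" pattern: the refcount must be bumped while the entry is still guaranteed alive, and the caller later drops it with free_uid(). A minimal sketch of the same pattern over a singly linked bucket, with a plain int standing in for refcount_t and an unsigned int for kuid_t.

    #include <stdio.h>

    struct user_struct {
        unsigned int uid;         /* stand-in for kuid_t */
        int refcount;             /* stand-in for refcount_t */
        struct user_struct *next; /* stand-in for the hlist linkage */
    };

    /* Walk one hash bucket; on a hit, take a reference before returning. */
    static struct user_struct *uid_hash_find(unsigned int uid, struct user_struct *bucket)
    {
        for (struct user_struct *user = bucket; user; user = user->next) {
            if (user->uid == uid) {
                user->refcount++; /* refcount_inc(&user->__count) */
                return user;
            }
        }
        return NULL;              /* caller allocates and inserts a new entry */
    }

    int main(void)
    {
        struct user_struct b = { 1000, 1, NULL };
        struct user_struct a = { 0, 1, &b };
        struct user_struct *hit = uid_hash_find(1000, &a);

        if (hit)
            printf("uid %u found, refcount now %d\n", hit->uid, hit->refcount);
        return 0;
    }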
|
D | context_tracking.c |
    124  static void noinstr ct_kernel_exit(bool user, int offset)  in ct_kernel_exit() argument
    141  WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));  in ct_kernel_exit()
    163  static void noinstr ct_kernel_enter(bool user, int offset)  in ct_kernel_enter() argument
    186  WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));  in ct_kernel_enter()
    426  static __always_inline void ct_kernel_exit(bool user, int offset) { }  in ct_kernel_exit() argument
    427  static __always_inline void ct_kernel_enter(bool user, int offset) { }  in ct_kernel_enter() argument
|
D | cred.c |
    62   .user = INIT_USER,
    124  free_uid(cred->user);  in put_cred_rcu()
    276  get_uid(new->user);  in prepare_creds()
    501  if (new->user != old->user || new->user_ns != old->user_ns)  in commit_creds()
    506  if (new->user != old->user || new->user_ns != old->user_ns)  in commit_creds()
    740  get_uid(new->user);  in prepare_kernel_cred()
|
D | watch_queue.c |
    258  user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_pages);  in watch_queue_set_size()
    301  (void) account_pipe_buffers(pipe->user, nr_pages, pipe->nr_accounted);  in watch_queue_set_size()
    420  atomic_dec(&watch->cred->user->nr_watches);  in free_watch()
    467  if (atomic_inc_return(&cred->user->nr_watches) > task_rlimit(current, RLIMIT_NOFILE)) {  in add_one_watch()
    468  atomic_dec(&cred->user->nr_watches);  in add_one_watch()
|
D | sys.c |
    216  struct user_struct *user;  in SYSCALL_DEFINE3() local
    255  user = cred->user;  in SYSCALL_DEFINE3()
    259  user = find_user(uid);  in SYSCALL_DEFINE3()
    260  if (!user)  in SYSCALL_DEFINE3()
    268  free_uid(user); /* For find_user() */  in SYSCALL_DEFINE3()
    286  struct user_struct *user;  in SYSCALL_DEFINE2() local
    323  user = cred->user;  in SYSCALL_DEFINE2()
    327  user = find_user(uid);  in SYSCALL_DEFINE2()
    328  if (!user)  in SYSCALL_DEFINE2()
    339  free_uid(user); /* for find_user() */  in SYSCALL_DEFINE2()
    [all …]
|
D | Makefile |
    8    sysctl.o capability.o ptrace.o user.o \
    117  obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o
|
D | Kconfig.hz | 13 a fast response for user interaction and that may experience bus
|
/kernel/time/ |
D | Kconfig |
    139  the expense of some overhead in user <-> kernel transitions:
    154  Track transitions between kernel and user on behalf of RCU and
    159  bool "Force user context tracking"
    164  support the user context tracking subsystem. But there are also
    169  user context tracking backend but doesn't yet fulfill all the
    172  for user context tracking and the subsystems that rely on it: RCU
    175  dynticks subsystem by forcing the user context tracking on all
    179  architecture backend for the user context tracking.
|
D | timekeeping.h | 25 extern void update_process_times(int user);
|
/kernel/cgroup/ |
D | rstat.c |
    464  u64 user = 0;  in root_cgroup_cputime() local
    469  user += cpustat[CPUTIME_USER];  in root_cgroup_cputime()
    470  user += cpustat[CPUTIME_NICE];  in root_cgroup_cputime()
    471  cputime->utime += user;  in root_cgroup_cputime()
    478  cputime->sum_exec_runtime += user;  in root_cgroup_cputime()
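root_cgroup_cputime() folds CPUTIME_NICE into the user total, the same convention /proc/stat-style accounting uses: niced time is still user time. A small sketch of that fold over per-CPU counters; the array layout and sample values are illustrative.

    #include <stdint.h>
    #include <stdio.h>

    enum { CPUTIME_USER, CPUTIME_NICE, CPUTIME_SYSTEM, NR_STATS };
    #define NR_CPUS 2

    /* Per-CPU nanosecond counters, in the spirit of kcpustat; values are made up. */
    static uint64_t cpustat[NR_CPUS][NR_STATS] = {
        { 100, 20, 50 },
        { 200, 30, 70 },
    };

    int main(void)
    {
        uint64_t utime = 0, stime = 0;

        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            uint64_t user = 0;

            user += cpustat[cpu][CPUTIME_USER];
            user += cpustat[cpu][CPUTIME_NICE]; /* niced time counts as user time */
            utime += user;
            stime += cpustat[cpu][CPUTIME_SYSTEM];
        }
        printf("utime=%llu stime=%llu\n",
               (unsigned long long)utime, (unsigned long long)stime);
        return 0;
    }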
|
/kernel/power/ |
D | Kconfig |
    27   Skip the kernel sys_sync() before freezing user processes.
    30   user-space before invoking suspend. There's a run-time switch
    47   called "hibernation" in user interfaces. STD checkpoints the
    103  The partition specified here will be different for almost every user.
    170  Allow user space to create, activate and deactivate wakeup source
    174  int "Maximum number of user space wakeup sources (0 = no limit)"
    180  bool "Garbage collector for user space wakeup sources"
    210  fields of device objects from user space. If you are not a kernel
    285  battery status information, and user-space programs will receive
|
D | Makefile | 18 obj-$(CONFIG_HIBERNATION_SNAPSHOT_DEV) += user.o
|
/kernel/rcu/ |
D | tiny.c |
    71  void rcu_sched_clock_irq(int user)  in rcu_sched_clock_irq() argument
    73  if (user) {  in rcu_sched_clock_irq()
|
D | tree_plugin.h |
    719  static void rcu_flavor_sched_clock_irq(int user)  in rcu_flavor_sched_clock_irq() argument
    964  static void rcu_flavor_sched_clock_irq(int user)  in rcu_flavor_sched_clock_irq() argument
    966  if (user || rcu_is_cpu_rrupt_from_idle()) {  in rcu_flavor_sched_clock_irq()
|
D | Kconfig |
    90   idle, and user-mode execution as quiescent states. Not for
    106  user-mode execution as quiescent states. It forces IPIs and
    297  bool "Tasks Trace RCU readers use memory barriers in user and idle"
|
D | tree.c |
    502   static int rcu_pending(int user);
    2371  void rcu_sched_clock_irq(int user)  in rcu_sched_clock_irq() argument
    2386  if (!rcu_is_cpu_rrupt_from_idle() && !user) {  in rcu_sched_clock_irq()
    2392  rcu_flavor_sched_clock_irq(user);  in rcu_sched_clock_irq()
    2393  if (rcu_pending(user))  in rcu_sched_clock_irq()
    2395  if (user || rcu_is_cpu_rrupt_from_idle())  in rcu_sched_clock_irq()
    3925  static int rcu_pending(int user)  in rcu_pending() argument
    3941  if ((user || rcu_is_cpu_rrupt_from_idle()) && rcu_nohz_full_cpu())  in rcu_pending()
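The common thread in these RCU hits is the user argument threaded through the tick path: if the timer interrupt arrived from user mode (or from idle), this CPU cannot be inside a kernel-side RCU read-side critical section, so the tick may report a quiescent state. A toy sketch of that decision; the predicates are stand-ins, and only the user/idle test mirrors the kernel logic.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in: was the tick taken while the CPU was idle? */
    static bool cpu_was_idle;

    static bool rcu_is_cpu_rrupt_from_idle(void)
    {
        return cpu_was_idle;
    }

    /*
     * Core of the rcu_sched_clock_irq() decision: a tick that interrupted
     * user-mode execution or the idle loop cannot have interrupted a kernel
     * RCU read-side critical section, so it counts as a quiescent state.
     */
    static void sched_clock_tick(int user)
    {
        if (user || rcu_is_cpu_rrupt_from_idle())
            printf("tick from %s: report quiescent state\n",
                   user ? "user mode" : "idle");
        else
            printf("tick from kernel mode: no quiescent state yet\n");
    }

    int main(void)
    {
        sched_clock_tick(1); /* interrupted user-mode execution */
        cpu_was_idle = true;
        sched_clock_tick(0); /* interrupted the idle loop */
        cpu_was_idle = false;
        sched_clock_tick(0); /* interrupted kernel code */
        return 0;
    }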
|
D | tree.h | 435 static void rcu_flavor_sched_clock_irq(int user);
|
D | Kconfig.debug | 118 Say N here if you need ultimate kernel/user switch latencies
|
/kernel/events/ |
D | callchain.c |
    180  get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,  in get_perf_callchain() argument
    203  if (user) {  in get_perf_callchain()
|