/kernel/ |
D | sysctl.c |
      203  static int bpf_stats_handler(struct ctl_table *table, int write,   in bpf_stats_handler() argument
      217  if (write && !capable(CAP_SYS_ADMIN))   in bpf_stats_handler()
      222  ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);   in bpf_stats_handler()
      223  if (write && !ret && val != saved_val) {   in bpf_stats_handler()
      238  static int bpf_unpriv_handler(struct ctl_table *table, int write,   in bpf_unpriv_handler() argument
      245  if (write && !capable(CAP_SYS_ADMIN))   in bpf_unpriv_handler()
      249  ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);   in bpf_unpriv_handler()
      250  if (write && !ret) {   in bpf_unpriv_handler()
      268  static int _proc_do_string(char *data, int maxlen, int write,   in _proc_do_string() argument
      279  if (write) {   in _proc_do_string()
      [all …]
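These sysctl.c hits, like most of the /kernel/ entries below (watchdog.c, stackleak.c, hung_task.c, pid_namespace.c, and so on), follow one recurring proc-handler pattern: reject unprivileged writes, parse the user buffer into a scratch copy of the ctl_table, and commit only when a write parsed cleanly and actually changed the value. A minimal sketch of that shape, using the handler signature shown in these hits; demo_val and demo_sysctl_handler are hypothetical names, not kernel symbols:

    /* Sketch only; assumes <linux/sysctl.h> and <linux/capability.h>. */
    static int demo_val;

    static int demo_sysctl_handler(struct ctl_table *table, int write,
                                   void *buffer, size_t *lenp, loff_t *ppos)
    {
            struct ctl_table tmp = *table;
            int val = demo_val;
            int ret;

            /* Writes are privileged; reads are not. */
            if (write && !capable(CAP_SYS_ADMIN))
                    return -EPERM;

            /*
             * Parse into a scratch variable so a failed write cannot
             * leave the live value half-updated.
             */
            tmp.data = &val;
            ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);

            /* Commit only on a successful write that changed something. */
            if (write && !ret && val != demo_val)
                    demo_val = val;

            return ret;
    }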
|
D | watchdog.c |
      653  static int proc_watchdog_common(int which, struct ctl_table *table, int write,   in proc_watchdog_common() argument
      660  if (!write) {   in proc_watchdog_common()
      666  err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);   in proc_watchdog_common()
      669  err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);   in proc_watchdog_common()
      680  int proc_watchdog(struct ctl_table *table, int write,   in proc_watchdog() argument
      684  table, write, buffer, lenp, ppos);   in proc_watchdog()
      690  int proc_nmi_watchdog(struct ctl_table *table, int write,   in proc_nmi_watchdog() argument
      693  if (!nmi_watchdog_available && write)   in proc_nmi_watchdog()
      696  table, write, buffer, lenp, ppos);   in proc_nmi_watchdog()
      702  int proc_soft_watchdog(struct ctl_table *table, int write,   in proc_soft_watchdog() argument
      [all …]
|
D | stackleak.c |
       22  int stack_erasing_sysctl(struct ctl_table *table, int write,   in stack_erasing_sysctl() argument
       31  ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);   in stack_erasing_sysctl()
       33  if (ret || !write || state == prev_state)   in stack_erasing_sysctl()
|
D | utsname_sysctl.c |
       32  static int proc_do_uts_string(struct ctl_table *table, int write,   in proc_do_uts_string() argument
       51  r = proc_dostring(&uts_table, write, buffer, lenp, ppos);   in proc_do_uts_string()
       53  if (write) {   in proc_do_uts_string()
|
D | hung_task.c |
      233  int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,   in proc_dohung_task_timeout_secs() argument
      238  ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);   in proc_dohung_task_timeout_secs()
      240  if (ret || !write)   in proc_dohung_task_timeout_secs()
|
D | pid_namespace.c |
      283  static int pid_ns_ctl_handler(struct ctl_table *table, int write,   in pid_ns_ctl_handler() argument
      290  if (write && !checkpoint_restore_ns_capable(pid_ns->user_ns))   in pid_ns_ctl_handler()
      302  ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);   in pid_ns_ctl_handler()
      303  if (!ret && write)   in pid_ns_ctl_handler()
|
D | umh.c |
      486  static int proc_cap_handler(struct ctl_table *table, int write,   in proc_cap_handler() argument
      494  if (write && (!capable(CAP_SETPCAP) ||   in proc_cap_handler()
      520  err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);   in proc_cap_handler()
      534  if (write) {   in proc_cap_handler()
|
D | latencytop.c |
      272  int sysctl_latencytop(struct ctl_table *table, int write, void *buffer,   in sysctl_latencytop() argument
      277  err = proc_dointvec(table, write, buffer, lenp, ppos);   in sysctl_latencytop()
|
/kernel/trace/ |
D | trace_stack.c |
      372  .write = stack_max_size_write,
      510  .write = ftrace_filter_write,
      518  stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,   in stack_trace_sysctl() argument
      527  ret = proc_dointvec(table, write, buffer, lenp, ppos);   in stack_trace_sysctl()
      529  if (ret || !write || (was_enabled == !!stack_tracer_enabled))   in stack_trace_sysctl()
|
D | ring_buffer.c |
      331  local_t write; /* index for next write */   member
     1412  old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);   in rb_tail_page_update()
     1442  (void)local_cmpxchg(&next_page->write, old_write, val);   in rb_tail_page_update()
     1810  return local_read(&bpage->write) & RB_WRITE_MASK;   in rb_page_write()
     2536  local_sub(length, &tail_page->write);   in rb_reset_tail()
     2571  local_sub(length, &tail_page->write);   in rb_reset_tail()
     2589  local_sub(length, &tail_page->write);   in rb_reset_tail()
     2913  local_read(&bpage->write) & ~RB_WRITE_MASK;   in rb_try_to_discard()
     2945  index = local_cmpxchg(&bpage->write, old_index, new_index);   in rb_try_to_discard()
     3290  unsigned long tail, write, w;   in __rb_reserve_next() local
      [all …]
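Here write is not a file hook but a packed per-page counter: the low bits (RB_WRITE_MASK) hold the offset of the next write into the buffer page, the high bits (stepped by RB_WRITE_INTCNT) count writers that interrupted an update, and every change goes through local_add_return()/local_cmpxchg() so nested writers on the same CPU stay consistent without locks. A rough, simplified illustration of the masking idea; the DEMO_* constants and helpers are stand-ins, not the kernel's actual values:

    /* Sketch only; assumes <asm/local.h> and <linux/types.h>. */
    #define DEMO_WRITE_INTCNT (1UL << 20)              /* high bits: nesting count */
    #define DEMO_WRITE_MASK   (DEMO_WRITE_INTCNT - 1)  /* low bits: write offset */

    /* Current write offset into the page, ignoring the nesting bits. */
    static unsigned long demo_page_write(local_t *write)
    {
            return local_read(write) & DEMO_WRITE_MASK;
    }

    /*
     * Undo a reservation: succeeds only if nobody wrote after us,
     * which the cmpxchg detects because the packed value moved.
     */
    static bool demo_try_discard(local_t *write, unsigned long old_index,
                                 unsigned long new_index)
    {
            return local_cmpxchg(write, old_index, new_index) == old_index;
    }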
|
D | trace_hwlat.c |
      522  .write = hwlat_width_write,
      528  .write = hwlat_window_write,
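Both hits register write handlers on tracefs control files through the standard struct file_operations callback. A sketch of what such a hook can look like; the demo_* names are hypothetical, and the parser shown is kstrtoull_from_user():

    static ssize_t demo_width_write(struct file *filp, const char __user *ubuf,
                                    size_t cnt, loff_t *ppos)
    {
            u64 val;
            int err;

            /* Parse a decimal u64 directly from the user buffer. */
            err = kstrtoull_from_user(ubuf, cnt, 10, &val);
            if (err)
                    return err;

            /* ... apply val to the tracer's setting here ... */

            *ppos += cnt;
            return cnt;
    }

    static const struct file_operations demo_width_fops = {
            .open   = simple_open,
            .write  = demo_width_write,
            .llseek = default_llseek,
    };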
|
D | Kconfig |
       90  bool "Register read/write tracing"
       94  Create tracepoints for IO read/write operations. These trace events
       95  can be used for logging all MMIO read/write operations.
      705  it took to write to the tracepoint and the next iteration that
      709  to keep the time the same. The initial string is simply a write of
      711  write which is not added to the rest of the calculations.
      825  a thread per cpu. Each thread will write various size events
      827  to each of the threads, where the IPI handler will also write
      847  as it will write garbage to IO memory starting at a given address.
|
D | trace.c |
      282  export->write(export, entry, size);   in trace_process_export()
      384  if (WARN_ON_ONCE(!export->write))   in register_ftrace_export()
     2836  int tracepoint_printk_sysctl(struct ctl_table *table, int write,   in tracepoint_printk_sysctl() argument
     2846  ret = proc_dointvec(table, write, buffer, lenp, ppos);   in tracepoint_printk_sysctl()
     4744  .write = tracing_write_stub,
     4856  .write = tracing_cpumask_write,
     5109  .write = tracing_trace_options_write,
     5585  .write = tracing_saved_cmdlines_size_write,
     7225  .write = tracing_thresh_write,
     7233  .write = tracing_max_lat_write,
      [all …]
|
D | trace_events.c |
     1836  .write = ftrace_event_write,
     1844  .write = ftrace_event_pid_write,
     1852  .write = ftrace_event_npid_write,
     1860  .write = event_enable_write,
     1880  .write = event_filter_write,
     1888  .write = subsystem_filter_write,
     1896  .write = system_enable_write,
     1904  .write = system_enable_write,
|
/kernel/events/ |
D | callchain.c |
      236  int perf_event_max_stack_handler(struct ctl_table *table, int write,   in perf_event_max_stack_handler() argument
      244  ret = proc_dointvec_minmax(&new_table, write, buffer, lenp, ppos);   in perf_event_max_stack_handler()
      245  if (ret || !write)   in perf_event_max_stack_handler()
|
/kernel/sched/ |
D | pelt.c |
      541  int sched_pelt_multiplier(struct ctl_table *table, int write, void *buffer,   in sched_pelt_multiplier() argument
      551  ret = proc_dointvec(table, write, buffer, lenp, ppos);   in sched_pelt_multiplier()
      554  if (!write)   in sched_pelt_multiplier()
|
D | core.c |
     1406  int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,   in sysctl_sched_uclamp_handler() argument
     1418  result = proc_dointvec(table, write, buffer, lenp, ppos);   in sysctl_sched_uclamp_handler()
     1421  if (!write)   in sysctl_sched_uclamp_handler()
     3362  int sysctl_numa_balancing(struct ctl_table *table, int write,   in sysctl_numa_balancing() argument
     3369  if (write && !capable(CAP_SYS_ADMIN))   in sysctl_numa_balancing()
     3374  err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);   in sysctl_numa_balancing()
     3377  if (write)   in sysctl_numa_balancing()
     3437  int sysctl_schedstats(struct ctl_table *table, int write, void *buffer,   in sysctl_schedstats() argument
     3444  if (write && !capable(CAP_SYS_ADMIN))   in sysctl_schedstats()
     3449  err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);   in sysctl_schedstats()
      [all …]
|
/kernel/bpf/ |
D | cgroup.c |
     1280  struct ctl_table *table, int write,   in __cgroup_bpf_run_filter_sysctl() argument
     1287  .write = write,   in __cgroup_bpf_run_filter_sysctl()
     1306  if (write && *buf && *pcount) {   in __cgroup_bpf_run_filter_sysctl()
     1657  if (!ctx->write) {   in BPF_CALL_3()
     1677  if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)   in BPF_CALL_3()
     1730  case bpf_ctx_range(struct bpf_sysctl, write):   in sysctl_is_valid_access()
     1756  case offsetof(struct bpf_sysctl, write):   in sysctl_convert_ctx_access()
     1759  bpf_target_off(struct bpf_sysctl_kern, write,   in sysctl_convert_ctx_access()
     1761  write),   in sysctl_convert_ctx_access()
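This is the kernel side of the BPF cgroup sysctl hook: __cgroup_bpf_run_filter_sysctl() fills in bpf_sysctl_kern.write, and sysctl_convert_ctx_access() rewrites program loads of bpf_sysctl.write against it. On the program side, ctx->write is what lets a cgroup/sysctl program tell reads from writes. A minimal example program (sketch; deny_sysctl_writes is a made-up name):

    // SPDX-License-Identifier: GPL-2.0
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("cgroup/sysctl")
    int deny_sysctl_writes(struct bpf_sysctl *ctx)
    {
            /* ctx->write is 1 when the sysctl is being written, 0 on read. */
            if (ctx->write)
                    return 0;       /* reject the write */
            return 1;               /* allow reads */
    }

    char LICENSE[] SEC("license") = "GPL";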
|
/kernel/printk/ |
D | printk.c |
      180  int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write,   in devkmsg_sysctl_set_loglvl() argument
      187  if (write) {   in devkmsg_sysctl_set_loglvl()
      195  err = proc_dostring(table, write, buffer, lenp, ppos);   in devkmsg_sysctl_set_loglvl()
      199  if (write) {   in devkmsg_sysctl_set_loglvl()
     1908  if (!con->write)   in call_console_drivers()
     1914  con->write(con, ext_text, ext_len);   in call_console_drivers()
     1917  con->write(con, dropped_text, dropped_len);   in call_console_drivers()
     1918  con->write(con, text, len);   in call_console_drivers()
     2169  early_console->write(early_console, buf, n);   in early_printk()
     3029  init_section_contains(con->write, 0) ||   in printk_late_init()
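The con->write calls above are the console driver contract: every registered console supplies a write method that pushes a byte range out to its device. A skeletal console wiring up that hook; the demo_* names are hypothetical:

    static void demo_console_write(struct console *con, const char *s,
                                   unsigned int count)
    {
            /* push 'count' bytes of 's' to the device FIFO here */
    }

    static struct console demo_console = {
            .name   = "demo",
            .write  = demo_console_write,
            .flags  = CON_PRINTBUFFER,
            .index  = -1,
    };

    /* Somewhere in driver init: register_console(&demo_console); */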
|
/kernel/bpf/preload/iterators/ |
D | iterators.c |
       33  if (write(to_kernel, &obj, sizeof(obj)) != sizeof(obj))   in send_link_to_kernel()
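This one is the userspace write(2) syscall, and the != sizeof(obj) comparison treats a short write as failure. On pipes and sockets write() may legitimately return fewer bytes than requested, so a more defensive sender loops until everything is out; a small sketch (write_all is a hypothetical helper, not part of iterators.c):

    #include <errno.h>
    #include <unistd.h>

    static int write_all(int fd, const void *buf, size_t len)
    {
            const char *p = buf;

            while (len > 0) {
                    ssize_t n = write(fd, p, len);

                    if (n < 0) {
                            if (errno == EINTR)
                                    continue;   /* retry interrupted writes */
                            return -1;
                    }
                    p += n;
                    len -= n;
            }
            return 0;
    }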
|
/kernel/locking/ |
D | lock_events.c |
      114  .write = lockevent_write,
|
D | locktorture.c |
      681  struct lock_stress_stats *statp, bool write)   in __torture_print_stats() argument
      688  n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;   in __torture_print_stats()
      700  write ? "Writes" : "Reads ",   in __torture_print_stats()
|
/kernel/time/ |
D | test_udelay.c |
      133  .write = udelay_test_write,
|
/kernel/cgroup/ |
D | cgroup.c |
     1516  if (cft->write_u64 || cft->write_s64 || cft->write) {   in cgroup_file_mode()
     3867  if (cft->write)   in cgroup_file_write()
     3868  return cft->write(of, buf, nbytes, off);   in cgroup_file_write()
     3944  .write = cgroup_file_write,
     3953  .write = cgroup_file_write,
     5014  .write = cgroup_type_write,
     5024  .write = cgroup_procs_write,
     5033  .write = cgroup_threads_write,
     5043  .write = cgroup_subtree_control_write,
     5054  .write = cgroup_max_descendants_write,
      [all …]
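cgroup interface files do not take file_operations directly: cgroup_file_write() dispatches to the cftype write callback being wired up in the hits above. A sketch of the receiving side, with hypothetical demo_* names and the callback signature that cgroup_file_write() dispatches to:

    static ssize_t demo_cft_write(struct kernfs_open_file *of, char *buf,
                                  size_t nbytes, loff_t off)
    {
            /* buf is a NUL-terminated kernel copy; parse and apply it. */
            return nbytes;  /* consume the whole write on success */
    }

    static struct cftype demo_files[] = {
            {
                    .name   = "demo.control",
                    .write  = demo_cft_write,
            },
            { }     /* terminating entry */
    };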
|
/kernel/gcov/ |
D | fs.c |
      406  .write = gcov_seq_write,
      555  .write = reset_write,
|