/kernel/ |
D | sysctl.c |
    212 static int proc_do_cad_pid(struct ctl_table *table, int write,
    214 static int proc_taint(struct ctl_table *table, int write,
    219 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
    223 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
    226 static int proc_dostring_coredump(struct ctl_table *table, int write,
    229 static int proc_dopipe_max_size(struct ctl_table *table, int write,
    236 static int sysrq_sysctl_handler(struct ctl_table *table, int write,  in sysrq_sysctl_handler() argument
    242 error = proc_dointvec(table, write, buffer, lenp, ppos);  in sysrq_sysctl_handler()
    246 if (write)  in sysrq_sysctl_handler()
    2009 static int _proc_do_string(char *data, int maxlen, int write,  in _proc_do_string() argument
    [all …]
|
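Most of the hits above (and in watchdog.c, stackleak.c, hung_task.c and others below) share one shape: a sysctl proc handler that takes (table, write, buffer, lenp, ppos), delegates parsing to proc_dointvec()/proc_dointvec_minmax(), and only applies side effects when the call succeeded and write is non-zero. The sketch below illustrates that shape; demo_state, demo_sysctl_handler and demo_table are invented names, and it assumes the newer prototype where buffer is a plain kernel pointer (older kernels take void __user *buffer).

#include <linux/sysctl.h>

static int demo_state;

static int demo_sysctl_handler(struct ctl_table *table, int write,
                               void *buffer, size_t *lenp, loff_t *ppos)
{
        int prev = demo_state;
        int ret;

        /* The generic helper handles both reads and writes of the
         * integer behind table->data (here: demo_state). */
        ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

        /* Apply side effects only on a successful write that changed the value. */
        if (ret || !write || demo_state == prev)
                return ret;

        /* ... react to the new demo_state here ... */
        return 0;
}

static struct ctl_table demo_table[] = {
        {
                .procname     = "demo_state",
                .data         = &demo_state,
                .maxlen       = sizeof(int),
                .mode         = 0644,
                .proc_handler = demo_sysctl_handler,
                .extra1       = SYSCTL_ZERO,    /* clamp to 0..1 */
                .extra2       = SYSCTL_ONE,
        },
        { }     /* terminator; registered elsewhere, e.g. via register_sysctl() */
};
|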
D | watchdog.c |
    680 static int proc_watchdog_common(int which, struct ctl_table *table, int write,  in proc_watchdog_common() argument
    687 if (!write) {  in proc_watchdog_common()
    693 err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);  in proc_watchdog_common()
    696 err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);  in proc_watchdog_common()
    707 int proc_watchdog(struct ctl_table *table, int write,  in proc_watchdog() argument
    711 table, write, buffer, lenp, ppos);  in proc_watchdog()
    717 int proc_nmi_watchdog(struct ctl_table *table, int write,  in proc_nmi_watchdog() argument
    720 if (!nmi_watchdog_available && write)  in proc_nmi_watchdog()
    723 table, write, buffer, lenp, ppos);  in proc_nmi_watchdog()
    729 int proc_soft_watchdog(struct ctl_table *table, int write,  in proc_soft_watchdog() argument
    [all …]
|
D | stackleak.c |
    22 int stack_erasing_sysctl(struct ctl_table *table, int write,  in stack_erasing_sysctl() argument
    31 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);  in stack_erasing_sysctl()
    33 if (ret || !write || state == prev_state)  in stack_erasing_sysctl()
|
D | utsname_sysctl.c |
    32 static int proc_do_uts_string(struct ctl_table *table, int write,  in proc_do_uts_string() argument
    51 r = proc_dostring(&uts_table, write, buffer, lenp, ppos);  in proc_do_uts_string()
    53 if (write) {  in proc_do_uts_string()
|
D | hung_task.c |
    221 int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,  in proc_dohung_task_timeout_secs() argument
    227 ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);  in proc_dohung_task_timeout_secs()
    229 if (ret || !write)  in proc_dohung_task_timeout_secs()
|
D | latencytop.c |
    261 .write = lstats_write,
    272 int sysctl_latencytop(struct ctl_table *table, int write,  in sysctl_latencytop() argument
    277 err = proc_dointvec(table, write, buffer, lenp, ppos);  in sysctl_latencytop()
|
D | pid_namespace.c |
    267 static int pid_ns_ctl_handler(struct ctl_table *table, int write,  in pid_ns_ctl_handler() argument
    274 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))  in pid_ns_ctl_handler()
    286 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);  in pid_ns_ctl_handler()
    287 if (!ret && write)  in pid_ns_ctl_handler()
|
D | umh.c |
    632 static int proc_cap_handler(struct ctl_table *table, int write,  in proc_cap_handler() argument
    640 if (write && (!capable(CAP_SETPCAP) ||  in proc_cap_handler()
    666 err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);  in proc_cap_handler()
    680 if (write) {  in proc_cap_handler()
|
D | profile.c |
    450 .write = prof_cpu_mask_proc_write,
    522 .write = write_profile,
|
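The ".write = ..." hits in latencytop.c and profile.c are file write callbacks for /proc or debugfs entries rather than sysctl handlers. A minimal sketch of that callback shape follows; demo_threshold, demo_write and demo_fops are illustrative names, and the file would be hooked up elsewhere with proc_create() or debugfs_create_file().

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uaccess.h>

static unsigned int demo_threshold;

static ssize_t demo_write(struct file *file, const char __user *ubuf,
                          size_t count, loff_t *ppos)
{
        unsigned int val;
        int err;

        /* Parse a decimal integer directly from the user buffer. */
        err = kstrtouint_from_user(ubuf, count, 10, &val);
        if (err)
                return err;

        demo_threshold = val;
        return count;   /* everything consumed */
}

static const struct file_operations demo_fops = {
        .owner  = THIS_MODULE,
        .write  = demo_write,
        .llseek = noop_llseek,
};
|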
/kernel/trace/ |
D | trace_stack.c |
    372 .write = stack_max_size_write,
    510 .write = ftrace_filter_write,
    518 stack_trace_sysctl(struct ctl_table *table, int write,  in stack_trace_sysctl() argument
    528 ret = proc_dointvec(table, write, buffer, lenp, ppos);  in stack_trace_sysctl()
    530 if (ret || !write || (was_enabled == !!stack_tracer_enabled))  in stack_trace_sysctl()
|
D | ring_buffer.c |
    321 local_t write; /* index for next write */  member
    1078 old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);  in rb_tail_page_update()
    1108 (void)local_cmpxchg(&next_page->write, old_write, val);  in rb_tail_page_update()
    1491 return local_read(&bpage->write) & RB_WRITE_MASK;  in rb_page_write()
    2149 local_sub(length, &tail_page->write);  in rb_reset_tail()
    2183 local_sub(length, &tail_page->write);  in rb_reset_tail()
    2195 local_sub(length, &tail_page->write);  in rb_reset_tail()
    2432 local_read(&bpage->write) & ~RB_WRITE_MASK;  in rb_try_to_discard()
    2442 index = local_cmpxchg(&bpage->write, old_index, new_index);  in rb_try_to_discard()
    2821 unsigned long tail, write;  in __rb_reserve_next() local
    [all …]
|
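In ring_buffer.c, "write" is a per-page local_t index advanced with local_add_return() and rolled back with local_sub()/local_cmpxchg(). The sketch below is a heavily simplified illustration of that reserve/roll-back idea only, not the real ring-buffer algorithm; struct demo_buffer_page, demo_reserve(), demo_discard() and BUF_SIZE are invented for the example.

#include <linux/types.h>
#include <asm/local.h>

#define BUF_SIZE 4096

struct demo_buffer_page {
        local_t write;          /* index of the next free byte */
        char    data[BUF_SIZE];
};

/* Reserve @len bytes; returns the start offset, or -1 if the page is full. */
static long demo_reserve(struct demo_buffer_page *page, unsigned int len)
{
        long tail = local_add_return(len, &page->write);

        if (tail > BUF_SIZE) {
                /* Does not fit: give the space back. */
                local_sub(len, &page->write);
                return -1;
        }
        return tail - len;
}

/* Discard a reservation, but only if nothing was reserved after it. */
static bool demo_discard(struct demo_buffer_page *page, long start, unsigned int len)
{
        long old = start + len;

        return local_cmpxchg(&page->write, old, start) == old;
}
|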
D | trace_hwlat.c |
    509 .write = hwlat_width_write,
    515 .write = hwlat_window_write,
|
D | trace.c |
    2554 int tracepoint_printk_sysctl(struct ctl_table *table, int write,  in tracepoint_printk_sysctl() argument
    2564 ret = proc_dointvec(table, write, buffer, lenp, ppos);  in tracepoint_printk_sysctl()
    2644 export->write(export, entry, size);  in trace_process_export()
    2731 if (WARN_ON_ONCE(!export->write))  in register_ftrace_export()
    4428 .write = tracing_write_stub,
    4518 .write = tracing_cpumask_write,
    4760 .write = tracing_trace_options_write,
    5249 .write = tracing_saved_cmdlines_size_write,
    6860 .write = tracing_thresh_write,
    6868 .write = tracing_max_lat_write,
    [all …]
|
D | trace_dynevent.c | 203 .write = dyn_event_write,
|
/kernel/events/ |
D | callchain.c |
    238 int perf_event_max_stack_handler(struct ctl_table *table, int write,  in perf_event_max_stack_handler() argument
    246 ret = proc_dointvec_minmax(&new_table, write, buffer, lenp, ppos);  in perf_event_max_stack_handler()
    247 if (ret || !write)  in perf_event_max_stack_handler()
|
/kernel/bpf/ |
D | cgroup.c |
    882 struct ctl_table *table, int write,  in __cgroup_bpf_run_filter_sysctl() argument
    890 .write = write,  in __cgroup_bpf_run_filter_sysctl()
    919 if (write && buf && *pcount) {  in __cgroup_bpf_run_filter_sysctl()
    1248 if (!ctx->write) {  in BPF_CALL_3()
    1268 if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)  in BPF_CALL_3()
    1321 case bpf_ctx_range(struct bpf_sysctl, write):  in sysctl_is_valid_access()
    1347 case offsetof(struct bpf_sysctl, write):  in sysctl_convert_ctx_access()
    1350 bpf_target_off(struct bpf_sysctl_kern, write,  in sysctl_convert_ctx_access()
    1352 write),  in sysctl_convert_ctx_access()
|
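The bpf/cgroup.c hits expose "write" as a read-only field of the bpf_sysctl context, so a BPF_PROG_TYPE_CGROUP_SYSCTL program can distinguish sysctl reads from writes. A minimal sketch of such a program (built with libbpf headers; demo_sysctl_prog is an invented name):

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* ctx->write is 0 for a sysctl read and 1 for a sysctl write. */
SEC("cgroup/sysctl")
int demo_sysctl_prog(struct bpf_sysctl *ctx)
{
        if (ctx->write)
                return 0;       /* reject writes from this cgroup */
        return 1;               /* allow reads */
}

char _license[] SEC("license") = "GPL";

It would be attached to a cgroup with the BPF_CGROUP_SYSCTL attach type, for example via bpftool cgroup attach.
|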
/kernel/irq/ |
D | proc.c |
    184 .write = irq_affinity_proc_write,
    192 .write = irq_affinity_list_proc_write,
    254 .write = default_affinity_write,
|
/kernel/printk/ |
D | printk.c |
    175 int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write,  in devkmsg_sysctl_set_loglvl() argument
    182 if (write) {  in devkmsg_sysctl_set_loglvl()
    190 err = proc_dostring(table, write, buffer, lenp, ppos);  in devkmsg_sysctl_set_loglvl()
    194 if (write) {  in devkmsg_sysctl_set_loglvl()
    1783 if (!con->write)  in call_console_drivers()
    1789 con->write(con, ext_text, ext_len);  in call_console_drivers()
    1791 con->write(con, text, len);  in call_console_drivers()
    2113 early_console->write(early_console, buf, n);  in early_printk()
    2915 init_section_contains(con->write, 0) ||  in printk_late_init()
|
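The con->write calls in printk.c are the console ->write callback that every registered console driver provides. A sketch of that interface, with an invented "democon" driver and a commented-out hardware hook:

#include <linux/console.h>
#include <linux/init.h>

static void demo_console_write(struct console *con, const char *s,
                               unsigned int count)
{
        /* Push @count bytes of @s to the (imaginary) device FIFO. */
        /* demo_hw_emit(s, count); */
}

static struct console demo_console = {
        .name  = "democon",
        .write = demo_console_write,
        .flags = CON_PRINTBUFFER,
        .index = -1,
};

static int __init demo_console_init(void)
{
        register_console(&demo_console);
        return 0;
}
console_initcall(demo_console_init);
|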
/kernel/locking/ |
D | lock_events.c | 114 .write = lockevent_write,
|
D | locktorture.c |
    688 struct lock_stress_stats *statp, bool write)  in __torture_print_stats() argument
    695 n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;  in __torture_print_stats()
    707 write ? "Writes" : "Reads ",  in __torture_print_stats()
|
/kernel/sched/ |
D | core.c |
    1114 int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,  in sysctl_sched_uclamp_handler() argument
    1126 result = proc_dointvec(table, write, buffer, lenp, ppos);  in sysctl_sched_uclamp_handler()
    1129 if (!write)  in sysctl_sched_uclamp_handler()
    2738 int sysctl_numa_balancing(struct ctl_table *table, int write,  in sysctl_numa_balancing() argument
    2745 if (write && !capable(CAP_SYS_ADMIN))  in sysctl_numa_balancing()
    2750 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);  in sysctl_numa_balancing()
    2753 if (write)  in sysctl_numa_balancing()
    2813 int sysctl_schedstats(struct ctl_table *table, int write,  in sysctl_schedstats() argument
    2820 if (write && !capable(CAP_SYS_ADMIN))  in sysctl_schedstats()
    2825 err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);  in sysctl_schedstats()
    [all …]
|
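sysctl_numa_balancing() and sysctl_schedstats() above add two refinements to the basic handler shape: writes are gated on CAP_SYS_ADMIN, and parsing runs against a stack copy of the table so a failed write cannot clobber live state. A hedged sketch of that pattern, with invented demo_* names and the same caveat as the earlier sysctl sketch about the buffer prototype:

#include <linux/capability.h>
#include <linux/sysctl.h>

static int demo_feature_enabled;

static int demo_feature_sysctl(struct ctl_table *table, int write,
                               void *buffer, size_t *lenp, loff_t *ppos)
{
        struct ctl_table t;
        int state = demo_feature_enabled;
        int err;

        /* Only privileged writers may change the knob. */
        if (write && !capable(CAP_SYS_ADMIN))
                return -EPERM;

        /* Parse into a stack copy so a failed write leaves state untouched. */
        t = *table;
        t.data = &state;
        err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
        if (err < 0)
                return err;
        if (write)
                demo_feature_enabled = state;   /* commit on success */
        return err;
}
|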
D | rt.c |
    2702 int sched_rt_handler(struct ctl_table *table, int write,  in sched_rt_handler() argument
    2714 ret = proc_dointvec(table, write, buffer, lenp, ppos);  in sched_rt_handler()
    2716 if (!ret && write) {  in sched_rt_handler()
    2742 int sched_rr_handler(struct ctl_table *table, int write,  in sched_rr_handler() argument
    2750 ret = proc_dointvec(table, write, buffer, lenp, ppos);  in sched_rr_handler()
    2755 if (!ret && write) {  in sched_rr_handler()
|
/kernel/cgroup/ |
D | cgroup.c |
    1499 if (cft->write_u64 || cft->write_s64 || cft->write) {  in cgroup_file_mode()
    3754 if (cft->write)  in cgroup_file_write()
    3755 return cft->write(of, buf, nbytes, off);  in cgroup_file_write()
    3831 .write = cgroup_file_write,
    3840 .write = cgroup_file_write,
    4843 .write = cgroup_type_write,
    4853 .write = cgroup_procs_write,
    4862 .write = cgroup_threads_write,
    4872 .write = cgroup_subtree_control_write,
    4883 .write = cgroup_max_descendants_write,
    [all …]
|
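In cgroup.c, cgroup_file_write() dispatches to a cftype ->write handler, which receives a NUL-terminated kernel buffer rather than a user pointer. A sketch of that interface with invented demo_* names (the files would be added elsewhere, e.g. via cgroup_add_dfl_cftypes()):

#include <linux/cgroup.h>
#include <linux/kernel.h>
#include <linux/string.h>

static ssize_t demo_cft_write(struct kernfs_open_file *of, char *buf,
                              size_t nbytes, loff_t off)
{
        unsigned int val;
        int err;

        /* buf is a NUL-terminated kernel copy of what user space wrote. */
        err = kstrtouint(strstrip(buf), 10, &val);
        if (err)
                return err;

        /* ... apply @val to the css behind of_css(of) ... */
        return nbytes;
}

static struct cftype demo_files[] = {
        {
                .name  = "demo.value",
                .write = demo_cft_write,
        },
        { }     /* terminator */
};
|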
/kernel/time/ |
D | test_udelay.c | 133 .write = udelay_test_write,
|
/kernel/debug/kdb/ |
D | kdb_io.c |
    712 c->write(c, cp, retlen - (cp - kdb_buffer));  in vkdb_printf()
    776 c->write(c, moreprompt, strlen(moreprompt));  in vkdb_printf()
|