#ifndef _SCHED_SYSCTL_H
#define _SCHED_SYSCTL_H

#ifdef CONFIG_DETECT_HUNG_TASK
extern int sysctl_hung_task_check_count;
extern unsigned int sysctl_hung_task_panic;
extern unsigned long sysctl_hung_task_timeout_secs;
extern int sysctl_hung_task_warnings;
extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
					 void __user *buffer,
					 size_t *lenp, loff_t *ppos);
#else
/* Avoid need for ifdefs elsewhere in the code */
enum { sysctl_hung_task_timeout_secs = 0 };
#endif
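/*
 * The enum above provides a compile-time constant of 0 when
 * CONFIG_DETECT_HUNG_TASK is off, so callers can test the timeout without
 * ifdefs of their own and the compiler drops the dead branch. The snippet
 * below is a hypothetical caller, shown only to illustrate the pattern:
 *
 *	unsigned long timeout = sysctl_hung_task_timeout_secs;
 *
 *	if (timeout)
 *		schedule_timeout_interruptible(timeout * HZ);
 */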

/*
 * Default maximum number of active map areas; this limits the number of vmas
 * per mm struct. Users can overwrite this number via sysctl, but there is a
 * caveat.
 *
 * When a program's coredump is generated in ELF format, one section is created
 * per vma. In ELF, the number of sections is represented as an unsigned short,
 * which means the number of sections must stay below 65535 at coredump time.
 * Because the kernel adds some informative sections to the program image when
 * generating the coredump, we need some margin. The number of extra sections
 * is currently 1-3 and depends on the architecture, so we use 5 as a safe
 * margin here.
 *
 * ELF extended numbering allows more than 65535 sections, so the 16-bit bound
 * is no longer a hard limit, although some userspace tools may be surprised
 * by it.
 */
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)

extern int sysctl_max_map_count;
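/*
 * Worked example of the default: USHRT_MAX is 65535, so
 * DEFAULT_MAX_MAP_COUNT evaluates to 65535 - 5 = 65530 vmas per mm,
 * which is the familiar default value of the vm.max_map_count sysctl.
 */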

extern unsigned int sysctl_sched_latency;
extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_child_runs_first;
extern unsigned int sysctl_sched_sync_hint_enable;
extern unsigned int sysctl_sched_cstate_aware;
#ifdef CONFIG_SCHED_WALT
extern unsigned int sysctl_sched_use_walt_cpu_util;
extern unsigned int sysctl_sched_use_walt_task_util;
extern unsigned int sysctl_sched_walt_init_task_load_pct;
extern unsigned int sysctl_sched_walt_cpu_high_irqload;
#endif

enum sched_tunable_scaling {
	SCHED_TUNABLESCALING_NONE,
	SCHED_TUNABLESCALING_LOG,
	SCHED_TUNABLESCALING_LINEAR,
	SCHED_TUNABLESCALING_END,
};
extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
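/*
 * The scaling mode selects how the latency/granularity tunables above are
 * adjusted for the number of online CPUs. A sketch of the factor computation
 * (illustrative; it mirrors the scheduler's internal helper rather than
 * quoting it):
 *
 *	unsigned int cpus = num_online_cpus();
 *	unsigned int factor;
 *
 *	switch (sysctl_sched_tunable_scaling) {
 *	case SCHED_TUNABLESCALING_NONE:
 *		factor = 1;
 *		break;
 *	case SCHED_TUNABLESCALING_LINEAR:
 *		factor = cpus;
 *		break;
 *	case SCHED_TUNABLESCALING_LOG:
 *	default:
 *		factor = 1 + ilog2(cpus);
 *		break;
 *	}
 */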

extern unsigned int sysctl_numa_balancing_scan_delay;
extern unsigned int sysctl_numa_balancing_scan_period_min;
extern unsigned int sysctl_numa_balancing_scan_period_max;
extern unsigned int sysctl_numa_balancing_scan_size;

#ifdef CONFIG_SCHED_DEBUG
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
extern unsigned int sysctl_sched_time_avg;
extern unsigned int sysctl_sched_shares_window;

int sched_proc_update_handler(struct ctl_table *table, int write,
			      void __user *buffer, size_t *length,
			      loff_t *ppos);
#endif

/*
 * control realtime throttling:
 *
 * /proc/sys/kernel/sched_rt_period_us
 * /proc/sys/kernel/sched_rt_runtime_us
 */
extern unsigned int sysctl_sched_rt_period;
extern int sysctl_sched_rt_runtime;
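/*
 * Worked example with the usual defaults: sched_rt_period_us = 1000000 and
 * sched_rt_runtime_us = 950000 allow realtime tasks to consume at most
 * 950000 / 1000000 = 95% of each period, leaving 5% of CPU time for
 * non-realtime work; writing -1 to sched_rt_runtime_us disables the
 * throttle entirely.
 */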

#ifdef CONFIG_CFS_BANDWIDTH
extern unsigned int sysctl_sched_cfs_bandwidth_slice;
#endif

#ifdef CONFIG_SCHED_TUNE
extern unsigned int sysctl_sched_cfs_boost;
int sysctl_sched_cfs_boost_handler(struct ctl_table *table, int write,
				   void __user *buffer, size_t *length,
				   loff_t *ppos);
static inline unsigned int get_sysctl_sched_cfs_boost(void)
{
	return sysctl_sched_cfs_boost;
}
#else
static inline unsigned int get_sysctl_sched_cfs_boost(void)
{
	return 0;
}
#endif
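/*
 * Illustrative caller of the accessor above: because the !CONFIG_SCHED_TUNE
 * stub returns 0, callers need no config checks of their own. The helper
 * below is a hypothetical example, not part of this header's API:
 *
 *	static inline bool example_boost_active(void)
 *	{
 *		return get_sysctl_sched_cfs_boost() > 0;
 *	}
 */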

#ifdef CONFIG_SCHED_AUTOGROUP
extern unsigned int sysctl_sched_autogroup_enabled;
#endif

extern int sysctl_sched_rr_timeslice;
extern int sched_rr_timeslice;

extern int sched_rr_handler(struct ctl_table *table, int write,
			    void __user *buffer, size_t *lenp,
			    loff_t *ppos);

extern int sched_rt_handler(struct ctl_table *table, int write,
			    void __user *buffer, size_t *lenp,
			    loff_t *ppos);

extern int sysctl_numa_balancing(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp,
				 loff_t *ppos);
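/*
 * The handlers declared above follow the standard proc_handler signature, so
 * they are wired into a ctl_table like any other sysctl. A sketch of such an
 * entry (illustrative only, not the actual kernel/sysctl.c table):
 *
 *	static struct ctl_table example_sched_table[] = {
 *		{
 *			.procname	= "sched_rr_timeslice_ms",
 *			.data		= &sysctl_sched_rr_timeslice,
 *			.maxlen		= sizeof(int),
 *			.mode		= 0644,
 *			.proc_handler	= sched_rr_handler,
 *		},
 *		{ }
 *	};
 */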

#endif /* _SCHED_SYSCTL_H */