// SPDX-License-Identifier: GPL-2.0
/*
 * mm/zswapd_control.c
 *
 * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd.
 */

#include <linux/memcontrol.h>
#include <linux/types.h>
#include <linux/cgroup-defs.h>
#include <linux/cgroup.h>
#include <linux/memcg_policy.h>
#include <linux/zswapd.h>

#include "zswapd_internal.h"

#define ANON_REFAULT_SNAPSHOT_MIN_INTERVAL 200
#define AREA_ANON_REFAULT_THRESHOLD 22000
#define EMPTY_ROUND_CHECK_THRESHOLD 10
#define EMPTY_ROUND_SKIP_INTERVAL 20
#define ZSWAPD_MAX_LEVEL_NUM 10
#define MAX_SKIP_INTERVAL 1000
#define MAX_RECLAIM_SIZE 100

#define INACTIVE_FILE_RATIO 90
#define ACTIVE_FILE_RATIO 70
#define COMPRESS_RATIO 30
#define ZRAM_WM_RATIO 0
#define MAX_RATIO 100

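/*
 * Helper used when validating the avail_buffers tunables below: it
 * evaluates to true ("invalid") when the lower bound @var1 exceeds a
 * non-zero upper bound @var2.
 */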
#define CHECK_BUFFER_VALID(var1, var2) (((var2) != 0) && ((var1) > (var2)))

struct zswapd_param {
	unsigned int min_score;
	unsigned int max_score;
	unsigned int ub_mem2zram_ratio;
	unsigned int ub_zram2ufs_ratio;
	unsigned int refault_threshold;
};

static struct zswapd_param zswap_param[ZSWAPD_MAX_LEVEL_NUM];
struct eventfd_ctx *zswapd_press_efd[LEVEL_COUNT];
static DEFINE_MUTEX(pressure_event_lock);
static DEFINE_MUTEX(reclaim_para_lock);

atomic_t avail_buffers = ATOMIC_INIT(0);
atomic_t min_avail_buffers = ATOMIC_INIT(0);
atomic_t high_avail_buffers = ATOMIC_INIT(0);
atomic_t max_reclaim_size = ATOMIC_INIT(MAX_RECLAIM_SIZE);

atomic_t inactive_file_ratio = ATOMIC_INIT(INACTIVE_FILE_RATIO);
atomic_t active_file_ratio = ATOMIC_INIT(ACTIVE_FILE_RATIO);
atomic_t zram_wm_ratio = ATOMIC_INIT(ZRAM_WM_RATIO);
atomic_t compress_ratio = ATOMIC_INIT(COMPRESS_RATIO);

atomic64_t zram_critical_threshold = ATOMIC_LONG_INIT(0);
atomic64_t free_swap_threshold = ATOMIC_LONG_INIT(0);
atomic64_t area_anon_refault_threshold = ATOMIC_LONG_INIT(AREA_ANON_REFAULT_THRESHOLD);
atomic64_t anon_refault_snapshot_min_interval =
	ATOMIC_LONG_INIT(ANON_REFAULT_SNAPSHOT_MIN_INTERVAL);
atomic64_t empty_round_skip_interval = ATOMIC_LONG_INIT(EMPTY_ROUND_SKIP_INTERVAL);
atomic64_t max_skip_interval = ATOMIC_LONG_INIT(MAX_SKIP_INTERVAL);
atomic64_t empty_round_check_threshold = ATOMIC_LONG_INIT(EMPTY_ROUND_CHECK_THRESHOLD);

inline unsigned int get_zram_wm_ratio(void)
{
	return atomic_read(&zram_wm_ratio);
}

inline unsigned int get_compress_ratio(void)
{
	return atomic_read(&compress_ratio);
}

inline unsigned int get_inactive_file_ratio(void)
{
	return atomic_read(&inactive_file_ratio);
}

inline unsigned int get_active_file_ratio(void)
{
	return atomic_read(&active_file_ratio);
}

inline unsigned int get_avail_buffers(void)
{
	return atomic_read(&avail_buffers);
}

inline unsigned int get_min_avail_buffers(void)
{
	return atomic_read(&min_avail_buffers);
}

inline unsigned int get_high_avail_buffers(void)
{
	return atomic_read(&high_avail_buffers);
}

inline unsigned int get_zswapd_max_reclaim_size(void)
{
	return atomic_read(&max_reclaim_size);
}

inline unsigned long long get_free_swap_threshold(void)
{
	return atomic64_read(&free_swap_threshold);
}

inline unsigned long long get_area_anon_refault_threshold(void)
{
	return atomic64_read(&area_anon_refault_threshold);
}

inline unsigned long long get_anon_refault_snapshot_min_interval(void)
{
	return atomic64_read(&anon_refault_snapshot_min_interval);
}

inline unsigned long long get_empty_round_skip_interval(void)
{
	return atomic64_read(&empty_round_skip_interval);
}

inline unsigned long long get_max_skip_interval(void)
{
	return atomic64_read(&max_skip_interval);
}

inline unsigned long long get_empty_round_check_threshold(void)
{
	return atomic64_read(&empty_round_check_threshold);
}

inline unsigned long long get_zram_critical_threshold(void)
{
	return atomic64_read(&zram_critical_threshold);
}

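/*
 * "memory.avail_buffers" write handler. Expects four space-separated
 * values: "<avail_buffers> <min_avail_buffers> <high_avail_buffers>
 * <free_swap_threshold_MB>", with min <= avail <= high for non-zero
 * bounds (enforced via CHECK_BUFFER_VALID). The threshold is given in MB
 * and stored as a page count. Hypothetical usage with placeholder values,
 * assuming the memory cgroup is mounted at /dev/memcg:
 *
 *	echo "200 100 400 512" > /dev/memcg/memory.avail_buffers
 */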
static ssize_t avail_buffers_params_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	unsigned long long threshold;
	unsigned int high_buffers;
	unsigned int min_buffers;
	unsigned int buffers;

	buf = strstrip(buf);

	if (sscanf(buf, "%u %u %u %llu", &buffers, &min_buffers, &high_buffers, &threshold) != 4)
		return -EINVAL;

	if (CHECK_BUFFER_VALID(min_buffers, buffers) ||
	    CHECK_BUFFER_VALID(min_buffers, high_buffers) ||
	    CHECK_BUFFER_VALID(buffers, high_buffers))
		return -EINVAL;

	atomic_set(&avail_buffers, buffers);
	atomic_set(&min_avail_buffers, min_buffers);
	atomic_set(&high_avail_buffers, high_buffers);
	atomic64_set(&free_swap_threshold, (threshold * (SZ_1M / PAGE_SIZE)));

	if (atomic_read(&min_avail_buffers) == 0)
		set_snapshotd_init_flag(0);
	else
		set_snapshotd_init_flag(1);

	wake_all_zswapd();

	return nbytes;
}

static ssize_t zswapd_max_reclaim_size_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	u32 max;
	int ret;

	buf = strstrip(buf);
	ret = kstrtouint(buf, 10, &max);
	if (ret)
		return -EINVAL;

	atomic_set(&max_reclaim_size, max);

	return nbytes;
}

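/*
 * "memory.buffer_ratio_params" write handler:
 * "<inactive_file_ratio> <active_file_ratio>", both percentages in 0-100.
 */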
static ssize_t buffers_ratio_params_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	unsigned int inactive;
	unsigned int active;

	buf = strstrip(buf);

	if (sscanf(buf, "%u %u", &inactive, &active) != 2)
		return -EINVAL;

	if (inactive > MAX_RATIO || active > MAX_RATIO)
		return -EINVAL;

	atomic_set(&inactive_file_ratio, inactive);
	atomic_set(&active_file_ratio, active);

	return nbytes;
}

static int area_anon_refault_threshold_write(struct cgroup_subsys_state *css,
				struct cftype *cft, u64 val)
{
	atomic64_set(&area_anon_refault_threshold, val);

	return 0;
}

static int empty_round_skip_interval_write(struct cgroup_subsys_state *css,
				struct cftype *cft, u64 val)
{
	atomic64_set(&empty_round_skip_interval, val);

	return 0;
}

static int max_skip_interval_write(struct cgroup_subsys_state *css,
				struct cftype *cft, u64 val)
{
	atomic64_set(&max_skip_interval, val);

	return 0;
}

static int empty_round_check_threshold_write(struct cgroup_subsys_state *css,
				struct cftype *cft, u64 val)
{
	atomic64_set(&empty_round_check_threshold, val);

	return 0;
}

static int anon_refault_snapshot_min_interval_write(struct cgroup_subsys_state *css,
				struct cftype *cft, u64 val)
{
	atomic64_set(&anon_refault_snapshot_min_interval, val);

	return 0;
}

static int zram_critical_thres_write(struct cgroup_subsys_state *css,
				struct cftype *cft, u64 val)
{
	atomic64_set(&zram_critical_threshold, val);

	return 0;
}

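/*
 * "memory.zswapd_pressure" write handler. Expects "<event_fd> <level>",
 * where <event_fd> is an eventfd owned by the writer and <level> is a
 * zswapd pressure level below LEVEL_COUNT. The eventfd is signalled later
 * by zswapd_pressure_report() when that level is reported. A hypothetical
 * userspace registration sketch (ctrl_fd and level are placeholder names,
 * the control file path is assumed):
 *
 *	int efd = eventfd(0, 0);
 *	dprintf(ctrl_fd, "%d %d", efd, level);
 */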
static ssize_t zswapd_pressure_event_control(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	unsigned int level;
	unsigned int efd;
	struct fd efile;
	int ret;

	buf = strstrip(buf);
	if (sscanf(buf, "%u %u", &efd, &level) != 2)
		return -EINVAL;

	if (level >= LEVEL_COUNT)
		return -EINVAL;

	mutex_lock(&pressure_event_lock);
	efile = fdget(efd);
	if (!efile.file) {
		ret = -EBADF;
		goto out;
	}

	zswapd_press_efd[level] = eventfd_ctx_fileget(efile.file);
	if (IS_ERR(zswapd_press_efd[level])) {
		ret = PTR_ERR(zswapd_press_efd[level]);
		goto out_put_efile;
	}
	fdput(efile);
	mutex_unlock(&pressure_event_lock);
	return nbytes;

out_put_efile:
	fdput(efile);
out:
	mutex_unlock(&pressure_event_lock);

	return ret;
}

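/* Signal the eventfd registered for @level, if any, with a count of one. */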
void zswapd_pressure_report(enum zswapd_pressure_level level)
{
	int ret;

	if (zswapd_press_efd[level] == NULL)
		return;

	ret = eventfd_signal(zswapd_press_efd[level], 1);
	if (ret < 0)
		pr_err("SWAP-MM: %s : level:%u, ret:%d\n", __func__, level, ret);
}

static u64 zswapd_pid_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	return get_zswapd_pid();
}

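/*
 * Propagate the per-level reclaim parameters to every memcg whose
 * app_score falls within a level's [min_score, max_score] range.
 */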
static void zswapd_memcgs_param_parse(int level_num)
{
	struct mem_cgroup *memcg = NULL;
	u64 score;
	int i;

	while ((memcg = get_next_memcg(memcg))) {
		score = atomic64_read(&memcg->memcg_reclaimed.app_score);
		for (i = 0; i < level_num; ++i)
			if (score >= zswap_param[i].min_score &&
			    score <= zswap_param[i].max_score)
				break;

		atomic_set(&memcg->memcg_reclaimed.ub_mem2zram_ratio,
			   zswap_param[i].ub_mem2zram_ratio);
		atomic_set(&memcg->memcg_reclaimed.ub_zram2ufs_ratio,
			   zswap_param[i].ub_zram2ufs_ratio);
		atomic_set(&memcg->memcg_reclaimed.refault_threshold,
			   zswap_param[i].refault_threshold);
	}
}

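/*
 * "memory.zswapd_memcgs_param" write handler. Expects "<level_num>"
 * followed by level_num groups of five values:
 * "<min_score> <max_score> <ub_mem2zram_ratio> <ub_zram2ufs_ratio>
 * <refault_threshold>". Scores are bounded by MAX_APP_SCORE and the
 * ratios by MAX_RATIO. Hypothetical single-level example with placeholder
 * values, assuming a /dev/memcg mount:
 *
 *	echo "1 0 300 80 30 100" > /dev/memcg/memory.zswapd_memcgs_param
 */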
static ssize_t zswapd_memcgs_param_write(struct kernfs_open_file *of, char *buf,
				size_t nbytes, loff_t off)
{
	char *token = NULL;
	int level_num;
	int i;

	buf = strstrip(buf);
	token = strsep(&buf, " ");

	if (!token)
		return -EINVAL;

	if (kstrtoint(token, 0, &level_num))
		return -EINVAL;

	if (level_num > ZSWAPD_MAX_LEVEL_NUM)
		return -EINVAL;

	mutex_lock(&reclaim_para_lock);
	for (i = 0; i < level_num; ++i) {
		token = strsep(&buf, " ");
		if (!token)
			goto out;

		if (kstrtoint(token, 0, &zswap_param[i].min_score) ||
		    zswap_param[i].min_score > MAX_APP_SCORE)
			goto out;

		token = strsep(&buf, " ");
		if (!token)
			goto out;

		if (kstrtoint(token, 0, &zswap_param[i].max_score) ||
		    zswap_param[i].max_score > MAX_APP_SCORE)
			goto out;

		token = strsep(&buf, " ");
		if (!token)
			goto out;

		if (kstrtoint(token, 0, &zswap_param[i].ub_mem2zram_ratio) ||
		    zswap_param[i].ub_mem2zram_ratio > MAX_RATIO)
			goto out;

		token = strsep(&buf, " ");
		if (!token)
			goto out;

		if (kstrtoint(token, 0, &zswap_param[i].ub_zram2ufs_ratio) ||
		    zswap_param[i].ub_zram2ufs_ratio > MAX_RATIO)
			goto out;

		token = strsep(&buf, " ");
		if (!token)
			goto out;

		if (kstrtoint(token, 0, &zswap_param[i].refault_threshold))
			goto out;
	}

	zswapd_memcgs_param_parse(level_num);
	mutex_unlock(&reclaim_para_lock);

	return nbytes;

out:
	mutex_unlock(&reclaim_para_lock);
	return -EINVAL;
}

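/*
 * Per-memcg "memory.zswapd_single_memcg_param" write handler:
 * "<ub_mem2zram_ratio> <ub_zram2ufs_ratio> <refault_threshold>",
 * each value capped at MAX_RATIO (100).
 */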
static ssize_t zswapd_single_memcg_param_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned int ub_mem2zram_ratio;
	unsigned int ub_zram2ufs_ratio;
	unsigned int refault_threshold;

	buf = strstrip(buf);

	if (sscanf(buf, "%u %u %u", &ub_mem2zram_ratio, &ub_zram2ufs_ratio,
		   &refault_threshold) != 3)
		return -EINVAL;

	if (ub_mem2zram_ratio > MAX_RATIO || ub_zram2ufs_ratio > MAX_RATIO ||
	    refault_threshold > MAX_RATIO)
		return -EINVAL;

	atomic_set(&memcg->memcg_reclaimed.ub_mem2zram_ratio,
		   ub_mem2zram_ratio);
	atomic_set(&memcg->memcg_reclaimed.ub_zram2ufs_ratio,
		   ub_zram2ufs_ratio);
	atomic_set(&memcg->memcg_reclaimed.refault_threshold,
		   refault_threshold);

	return nbytes;
}

static ssize_t mem_cgroup_zram_wm_ratio_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	unsigned int ratio;
	int ret;

	buf = strstrip(buf);

	ret = kstrtouint(buf, 10, &ratio);
	if (ret)
		return -EINVAL;

	if (ratio > MAX_RATIO)
		return -EINVAL;

	atomic_set(&zram_wm_ratio, ratio);

	return nbytes;
}

static ssize_t mem_cgroup_compress_ratio_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	unsigned int ratio;
	int ret;

	buf = strstrip(buf);

	ret = kstrtouint(buf, 10, &ratio);
	if (ret)
		return -EINVAL;

	if (ratio > MAX_RATIO)
		return -EINVAL;

	atomic_set(&compress_ratio, ratio);

	return nbytes;
}

static int zswapd_pressure_show(struct seq_file *m, void *v)
{
	zswapd_status_show(m);

	return 0;
}

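/*
 * One line per named memcg that has resident anon, zram or eswap pages:
 * "<name> <app_score> <anon_kB> <zram_kB> <eswap_kB> <reclaimed_pagefault>".
 */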
static int memcg_active_app_info_list_show(struct seq_file *m, void *v)
{
	struct mem_cgroup_per_node *mz = NULL;
	struct mem_cgroup *memcg = NULL;
	struct lruvec *lruvec = NULL;
	unsigned long eswap_size;
	unsigned long anon_size;
	unsigned long zram_size;

	while ((memcg = get_next_memcg(memcg))) {
		u64 score = atomic64_read(&memcg->memcg_reclaimed.app_score);

		mz = mem_cgroup_nodeinfo(memcg, 0);
		if (!mz) {
			get_next_memcg_break(memcg);
			return 0;
		}

		lruvec = &mz->lruvec;
		if (!lruvec) {
			get_next_memcg_break(memcg);
			return 0;
		}

		anon_size = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON,
				MAX_NR_ZONES) + lruvec_lru_size(lruvec,
				LRU_INACTIVE_ANON, MAX_NR_ZONES);
		eswap_size = memcg_data_size(memcg, SWAP_SIZE);
		zram_size = memcg_data_size(memcg, CACHE_SIZE);

		if (anon_size + zram_size + eswap_size == 0)
			continue;

		if (!strlen(memcg->name))
			continue;

		anon_size *= PAGE_SIZE / SZ_1K;
		zram_size *= PAGE_SIZE / SZ_1K;
		eswap_size *= PAGE_SIZE / SZ_1K;

		seq_printf(m, "%s %llu %lu %lu %lu %llu\n", memcg->name, score,
			   anon_size, zram_size, eswap_size,
			   memcg->memcg_reclaimed.reclaimed_pagefault);
	}
	return 0;
}

#ifdef CONFIG_HYPERHOLD_DEBUG
static int avail_buffers_params_show(struct seq_file *m, void *v)
{
	seq_printf(m, "avail_buffers: %u\n", atomic_read(&avail_buffers));
	seq_printf(m, "min_avail_buffers: %u\n", atomic_read(&min_avail_buffers));
	seq_printf(m, "high_avail_buffers: %u\n", atomic_read(&high_avail_buffers));
	seq_printf(m, "free_swap_threshold: %llu\n",
		   atomic64_read(&free_swap_threshold) * PAGE_SIZE / SZ_1M);

	return 0;
}

static int zswapd_max_reclaim_size_show(struct seq_file *m, void *v)
{
	seq_printf(m, "zswapd_max_reclaim_size: %u\n",
		   atomic_read(&max_reclaim_size));

	return 0;
}

static int buffers_ratio_params_show(struct seq_file *m, void *v)
{
	seq_printf(m, "inactive_file_ratio: %u\n", atomic_read(&inactive_file_ratio));
	seq_printf(m, "active_file_ratio: %u\n", atomic_read(&active_file_ratio));

	return 0;
}

static u64 area_anon_refault_threshold_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return atomic64_read(&area_anon_refault_threshold);
}

static u64 empty_round_skip_interval_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return atomic64_read(&empty_round_skip_interval);
}

static u64 max_skip_interval_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return atomic64_read(&max_skip_interval);
}

static u64 empty_round_check_threshold_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return atomic64_read(&empty_round_check_threshold);
}

static u64 anon_refault_snapshot_min_interval_read(
				struct cgroup_subsys_state *css, struct cftype *cft)
{
	return atomic64_read(&anon_refault_snapshot_min_interval);
}

static u64 zram_critical_threshold_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return atomic64_read(&zram_critical_threshold);
}

static int zswapd_memcgs_param_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < ZSWAPD_MAX_LEVEL_NUM; ++i) {
		seq_printf(m, "level %d min score: %u\n", i,
			   zswap_param[i].min_score);
		seq_printf(m, "level %d max score: %u\n", i,
			   zswap_param[i].max_score);
		seq_printf(m, "level %d ub_mem2zram_ratio: %u\n", i,
			   zswap_param[i].ub_mem2zram_ratio);
		seq_printf(m, "level %d ub_zram2ufs_ratio: %u\n", i,
			   zswap_param[i].ub_zram2ufs_ratio);
		seq_printf(m, "level %d refault_threshold: %u\n", i,
			   zswap_param[i].refault_threshold);
	}

	return 0;
}

static int zswapd_single_memcg_param_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

	seq_printf(m, "memcg score: %llu\n",
		   atomic64_read(&memcg->memcg_reclaimed.app_score));
	seq_printf(m, "memcg ub_mem2zram_ratio: %u\n",
		   atomic_read(&memcg->memcg_reclaimed.ub_mem2zram_ratio));
	seq_printf(m, "memcg ub_zram2ufs_ratio: %u\n",
		   atomic_read(&memcg->memcg_reclaimed.ub_zram2ufs_ratio));
	seq_printf(m, "memcg refault_threshold: %u\n",
		   atomic_read(&memcg->memcg_reclaimed.refault_threshold));

	return 0;
}

static int zram_wm_ratio_show(struct seq_file *m, void *v)
{
	seq_printf(m, "zram_wm_ratio: %u\n", atomic_read(&zram_wm_ratio));

	return 0;
}

static int compress_ratio_show(struct seq_file *m, void *v)
{
	seq_printf(m, "compress_ratio: %u\n", atomic_read(&compress_ratio));

	return 0;
}

static int zswapd_vmstat_show(struct seq_file *m, void *v)
{
#ifdef CONFIG_VM_EVENT_COUNTERS
	unsigned long *vm_buf = NULL;

	vm_buf = kzalloc(sizeof(struct vm_event_state), GFP_KERNEL);
	if (!vm_buf)
		return -ENOMEM;
	all_vm_events(vm_buf);

	seq_printf(m, "zswapd_wake_up:%lu\n", vm_buf[ZSWAPD_WAKEUP]);
	seq_printf(m, "zswapd_area_refault:%lu\n", vm_buf[ZSWAPD_REFAULT]);
	seq_printf(m, "zswapd_medium_press:%lu\n", vm_buf[ZSWAPD_MEDIUM_PRESS]);
	seq_printf(m, "zswapd_critical_press:%lu\n", vm_buf[ZSWAPD_CRITICAL_PRESS]);
	seq_printf(m, "zswapd_memcg_ratio_skip:%lu\n", vm_buf[ZSWAPD_MEMCG_RATIO_SKIP]);
	seq_printf(m, "zswapd_memcg_refault_skip:%lu\n", vm_buf[ZSWAPD_MEMCG_REFAULT_SKIP]);
	seq_printf(m, "zswapd_swapout:%lu\n", vm_buf[ZSWAPD_SWAPOUT]);
	seq_printf(m, "zswapd_snapshot_times:%lu\n", vm_buf[ZSWAPD_SNAPSHOT_TIMES]);
	seq_printf(m, "zswapd_reclaimed:%lu\n", vm_buf[ZSWAPD_RECLAIMED]);
	seq_printf(m, "zswapd_scanned:%lu\n", vm_buf[ZSWAPD_SCANNED]);

	kfree(vm_buf);
#endif

	return 0;
}

static int eswap_info_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
	unsigned long long eswap_size;

	eswap_size = memcg_data_size(memcg, WRITE_SIZE) / SZ_1K;
	seq_printf(m, "Total Swapout Size: %llu kB\n", eswap_size);

	return 0;
}

void memcg_eswap_info_show(struct seq_file *m)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
	struct mem_cgroup_per_node *mz = NULL;
	struct lruvec *lruvec = NULL;
	unsigned long anon;
	unsigned long file;
	unsigned long zram;
	unsigned long eswap;

	mz = mem_cgroup_nodeinfo(memcg, 0);
	if (!mz)
		return;

	lruvec = &mz->lruvec;
	if (!lruvec)
		return;

	anon = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, MAX_NR_ZONES) +
	       lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, MAX_NR_ZONES);
	file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES) +
	       lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, MAX_NR_ZONES);
	zram = memcg_data_size(memcg, CACHE_SIZE) / SZ_1K;
	eswap = memcg_data_size(memcg, SWAP_SIZE) / SZ_1K;
	anon *= PAGE_SIZE / SZ_1K;
	file *= PAGE_SIZE / SZ_1K;
	seq_printf(m, "Anon:\t%12lu kB\nFile:\t%12lu kB\nzram:\t%12lu kB\nEswap:\t%12lu kB\n",
		   anon, file, zram, eswap);
}
#endif

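/*
 * Control files registered on the memory controller (legacy hierarchy);
 * most entries are root-only and appear as memory.<name> under the memcg
 * mount point.
 */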
static struct cftype zswapd_policy_files[] = {
	{
		.name = "active_app_info_list",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = memcg_active_app_info_list_show,
	},
	{
		.name = "zram_wm_ratio",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.write = mem_cgroup_zram_wm_ratio_write,
#ifdef CONFIG_HYPERHOLD_DEBUG
		.seq_show = zram_wm_ratio_show,
#endif
	},
	{
		.name = "compress_ratio",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.write = mem_cgroup_compress_ratio_write,
#ifdef CONFIG_HYPERHOLD_DEBUG
		.seq_show = compress_ratio_show,
#endif
	},
	{
		.name = "zswapd_pressure",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.write = zswapd_pressure_event_control,
	},
	{
		.name = "zswapd_pid",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.read_u64 = zswapd_pid_read,
	},
	{
		.name = "avail_buffers",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.write = avail_buffers_params_write,
#ifdef CONFIG_HYPERHOLD_DEBUG
		.seq_show = avail_buffers_params_show,
#endif
	},
	{
		.name = "zswapd_max_reclaim_size",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.write = zswapd_max_reclaim_size_write,
#ifdef CONFIG_HYPERHOLD_DEBUG
		.seq_show = zswapd_max_reclaim_size_show,
#endif
	},
	{
		.name = "area_anon_refault_threshold",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.write_u64 = area_anon_refault_threshold_write,
#ifdef CONFIG_HYPERHOLD_DEBUG
		.read_u64 = area_anon_refault_threshold_read,
#endif
	},
	{
		.name = "empty_round_skip_interval",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.write_u64 = empty_round_skip_interval_write,
#ifdef CONFIG_HYPERHOLD_DEBUG
		.read_u64 = empty_round_skip_interval_read,
#endif
	},
	{
		.name = "max_skip_interval",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.write_u64 = max_skip_interval_write,
#ifdef CONFIG_HYPERHOLD_DEBUG
		.read_u64 = max_skip_interval_read,
#endif
	},
	{
		.name = "empty_round_check_threshold",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.write_u64 = empty_round_check_threshold_write,
#ifdef CONFIG_HYPERHOLD_DEBUG
		.read_u64 = empty_round_check_threshold_read,
#endif
	},
	{
		.name = "anon_refault_snapshot_min_interval",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.write_u64 = anon_refault_snapshot_min_interval_write,
#ifdef CONFIG_HYPERHOLD_DEBUG
		.read_u64 = anon_refault_snapshot_min_interval_read,
#endif
	},
	{
		.name = "zswapd_memcgs_param",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.write = zswapd_memcgs_param_write,
#ifdef CONFIG_HYPERHOLD_DEBUG
		.seq_show = zswapd_memcgs_param_show,
#endif
	},
	{
		.name = "zswapd_single_memcg_param",
		.write = zswapd_single_memcg_param_write,
#ifdef CONFIG_HYPERHOLD_DEBUG
		.seq_show = zswapd_single_memcg_param_show,
#endif
	},
	{
		.name = "buffer_ratio_params",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.write = buffers_ratio_params_write,
#ifdef CONFIG_HYPERHOLD_DEBUG
		.seq_show = buffers_ratio_params_show,
#endif
	},
	{
		.name = "zswapd_pressure_show",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = zswapd_pressure_show,
	},
	{
		.name = "zram_critical_threshold",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.write_u64 = zram_critical_thres_write,
#ifdef CONFIG_HYPERHOLD_DEBUG
		.read_u64 = zram_critical_threshold_read,
#endif
	},

#ifdef CONFIG_HYPERHOLD_DEBUG
	{
		.name = "zswapd_vmstat_show",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = zswapd_vmstat_show,
	},
#endif
	{
		.name = "eswap_info",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = eswap_info_show,
	},

	{ }, /* terminate */
};

static int __init zswapd_policy_init(void)
{
	if (!mem_cgroup_disabled())
		WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, zswapd_policy_files));

	return 0;
}
subsys_initcall(zswapd_policy_init);