1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Data Access Monitor
4 *
5 * Author: SeongJae Park <sjpark@amazon.de>
6 */
7
8 #define pr_fmt(fmt) "damon: " fmt
9
10 #include <linux/damon.h>
11 #include <linux/delay.h>
12 #include <linux/kthread.h>
13 #include <linux/mm.h>
14 #include <linux/slab.h>
15 #include <linux/string.h>
16
17 #define CREATE_TRACE_POINTS
18 #include <trace/events/damon.h>
19
20 #ifdef CONFIG_DAMON_KUNIT_TEST
21 #undef DAMON_MIN_REGION
22 #define DAMON_MIN_REGION 1
23 #endif
24
25 static DEFINE_MUTEX(damon_lock);
26 static int nr_running_ctxs;
27
28 /*
29 * Construct a damon_region struct
30 *
31 * Returns the pointer to the new struct on success, or NULL otherwise
32 */
33 struct damon_region *damon_new_region(unsigned long start, unsigned long end)
34 {
35 struct damon_region *region;
36
37 region = kmalloc(sizeof(*region), GFP_KERNEL);
38 if (!region)
39 return NULL;
40
41 region->ar.start = start;
42 region->ar.end = end;
43 region->nr_accesses = 0;
44 INIT_LIST_HEAD(&region->list);
45
46 region->age = 0;
47 region->last_nr_accesses = 0;
48
49 return region;
50 }
51
52 void damon_add_region(struct damon_region *r, struct damon_target *t)
53 {
54 list_add_tail(&r->list, &t->regions_list);
55 t->nr_regions++;
56 }
57
58 static void damon_del_region(struct damon_region *r, struct damon_target *t)
59 {
60 list_del(&r->list);
61 t->nr_regions--;
62 }
63
64 static void damon_free_region(struct damon_region *r)
65 {
66 kfree(r);
67 }
68
69 void damon_destroy_region(struct damon_region *r, struct damon_target *t)
70 {
71 damon_del_region(r, t);
72 damon_free_region(r);
73 }
74
75 struct damos *damon_new_scheme(
76 unsigned long min_sz_region, unsigned long max_sz_region,
77 unsigned int min_nr_accesses, unsigned int max_nr_accesses,
78 unsigned int min_age_region, unsigned int max_age_region,
79 enum damos_action action, struct damos_quota *quota,
80 struct damos_watermarks *wmarks)
81 {
82 struct damos *scheme;
83
84 scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
85 if (!scheme)
86 return NULL;
87 scheme->min_sz_region = min_sz_region;
88 scheme->max_sz_region = max_sz_region;
89 scheme->min_nr_accesses = min_nr_accesses;
90 scheme->max_nr_accesses = max_nr_accesses;
91 scheme->min_age_region = min_age_region;
92 scheme->max_age_region = max_age_region;
93 scheme->action = action;
94 scheme->stat = (struct damos_stat){};
95 INIT_LIST_HEAD(&scheme->list);
96
97 scheme->quota.ms = quota->ms;
98 scheme->quota.sz = quota->sz;
99 scheme->quota.reset_interval = quota->reset_interval;
100 scheme->quota.weight_sz = quota->weight_sz;
101 scheme->quota.weight_nr_accesses = quota->weight_nr_accesses;
102 scheme->quota.weight_age = quota->weight_age;
103 scheme->quota.total_charged_sz = 0;
104 scheme->quota.total_charged_ns = 0;
105 scheme->quota.esz = 0;
106 scheme->quota.charged_sz = 0;
107 scheme->quota.charged_from = 0;
108 scheme->quota.charge_target_from = NULL;
109 scheme->quota.charge_addr_from = 0;
110
111 scheme->wmarks.metric = wmarks->metric;
112 scheme->wmarks.interval = wmarks->interval;
113 scheme->wmarks.high = wmarks->high;
114 scheme->wmarks.mid = wmarks->mid;
115 scheme->wmarks.low = wmarks->low;
116 scheme->wmarks.activated = true;
117
118 return scheme;
119 }
120
121 void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
122 {
123 list_add_tail(&s->list, &ctx->schemes);
124 }
125
126 static void damon_del_scheme(struct damos *s)
127 {
128 list_del(&s->list);
129 }
130
131 static void damon_free_scheme(struct damos *s)
132 {
133 kfree(s);
134 }
135
136 void damon_destroy_scheme(struct damos *s)
137 {
138 damon_del_scheme(s);
139 damon_free_scheme(s);
140 }
141
142 /*
143 * Construct a damon_target struct
144 *
145 * Returns the pointer to the new struct on success, or NULL otherwise
146 */
147 struct damon_target *damon_new_target(unsigned long id)
148 {
149 struct damon_target *t;
150
151 t = kmalloc(sizeof(*t), GFP_KERNEL);
152 if (!t)
153 return NULL;
154
155 t->id = id;
156 t->nr_regions = 0;
157 INIT_LIST_HEAD(&t->regions_list);
158 INIT_LIST_HEAD(&t->list);
159
160 return t;
161 }
162
163 void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
164 {
165 list_add_tail(&t->list, &ctx->adaptive_targets);
166 }
167
168 bool damon_targets_empty(struct damon_ctx *ctx)
169 {
170 return list_empty(&ctx->adaptive_targets);
171 }
172
173 static void damon_del_target(struct damon_target *t)
174 {
175 list_del(&t->list);
176 }
177
178 void damon_free_target(struct damon_target *t)
179 {
180 struct damon_region *r, *next;
181
182 damon_for_each_region_safe(r, next, t)
183 damon_free_region(r);
184 kfree(t);
185 }
186
187 void damon_destroy_target(struct damon_target *t)
188 {
189 damon_del_target(t);
190 damon_free_target(t);
191 }
192
193 unsigned int damon_nr_regions(struct damon_target *t)
194 {
195 return t->nr_regions;
196 }
197
198 struct damon_ctx *damon_new_ctx(void)
199 {
200 struct damon_ctx *ctx;
201
202 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
203 if (!ctx)
204 return NULL;
205
206 ctx->sample_interval = 5 * 1000;
207 ctx->aggr_interval = 100 * 1000;
208 ctx->primitive_update_interval = 60 * 1000 * 1000;
209
210 ktime_get_coarse_ts64(&ctx->last_aggregation);
211 ctx->last_primitive_update = ctx->last_aggregation;
212
213 mutex_init(&ctx->kdamond_lock);
214
215 ctx->min_nr_regions = 10;
216 ctx->max_nr_regions = 1000;
217
218 INIT_LIST_HEAD(&ctx->adaptive_targets);
219 INIT_LIST_HEAD(&ctx->schemes);
220
221 return ctx;
222 }
223
224 static void damon_destroy_targets(struct damon_ctx *ctx)
225 {
226 struct damon_target *t, *next_t;
227
228 if (ctx->primitive.cleanup) {
229 ctx->primitive.cleanup(ctx);
230 return;
231 }
232
233 damon_for_each_target_safe(t, next_t, ctx)
234 damon_destroy_target(t);
235 }
236
237 void damon_destroy_ctx(struct damon_ctx *ctx)
238 {
239 struct damos *s, *next_s;
240
241 damon_destroy_targets(ctx);
242
243 damon_for_each_scheme_safe(s, next_s, ctx)
244 damon_destroy_scheme(s);
245
246 kfree(ctx);
247 }
248
249 /**
250 * damon_set_targets() - Set monitoring targets.
251 * @ctx: monitoring context
252 * @ids: array of target ids
253 * @nr_ids: number of entries in @ids
254 *
255 * This function should not be called while the kdamond is running.
256 *
257 * Return: 0 on success, negative error code otherwise.
258 */
259 int damon_set_targets(struct damon_ctx *ctx,
260 unsigned long *ids, ssize_t nr_ids)
261 {
262 ssize_t i;
263 struct damon_target *t, *next;
264
265 damon_destroy_targets(ctx);
266
267 for (i = 0; i < nr_ids; i++) {
268 t = damon_new_target(ids[i]);
269 if (!t) {
270 /* The caller should do cleanup of the ids itself */
271 damon_for_each_target_safe(t, next, ctx)
272 damon_destroy_target(t);
273 return -ENOMEM;
274 }
275 damon_add_target(ctx, t);
276 }
277
278 return 0;
279 }
280
281 /**
282 * damon_set_attrs() - Set attributes for the monitoring.
283 * @ctx: monitoring context
284 * @sample_int: time interval between samplings
285 * @aggr_int: time interval between aggregations
286 * @primitive_upd_int: time interval between monitoring primitive updates
287 * @min_nr_reg: minimal number of regions
288 * @max_nr_reg: maximum number of regions
289 *
290 * This function should not be called while the kdamond is running.
291 * Every time interval is in microseconds.
292 *
293 * Return: 0 on success, negative error code otherwise.
294 */
295 int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
296 unsigned long aggr_int, unsigned long primitive_upd_int,
297 unsigned long min_nr_reg, unsigned long max_nr_reg)
298 {
299 if (min_nr_reg < 3)
300 return -EINVAL;
301 if (min_nr_reg > max_nr_reg)
302 return -EINVAL;
303
304 ctx->sample_interval = sample_int;
305 ctx->aggr_interval = aggr_int;
306 ctx->primitive_update_interval = primitive_upd_int;
307 ctx->min_nr_regions = min_nr_reg;
308 ctx->max_nr_regions = max_nr_reg;
309
310 return 0;
311 }
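/*
 * Illustrative sketch (not part of the upstream file): a typical caller
 * configures a context before starting it.  The attribute values below
 * simply mirror the defaults set in damon_new_ctx(), and 'target_pid' is a
 * hypothetical monitoring target id:
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *	unsigned long ids[] = { target_pid };
 *
 *	if (!ctx)
 *		return -ENOMEM;
 *	damon_set_attrs(ctx, 5000, 100000, 60 * 1000 * 1000, 10, 1000);
 *	damon_set_targets(ctx, ids, 1);
 *
 * The monitoring primitive (ctx->primitive) must also be set up for the
 * monitored address space before the context is started.
 */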
312
313 /**
314 * damon_set_schemes() - Set data access monitoring based operation schemes.
315 * @ctx: monitoring context
316 * @schemes: array of the schemes
317 * @nr_schemes: number of entries in @schemes
318 *
319 * This function should not be called while the kdamond of the context is
320 * running.
321 *
322 * Return: 0 on success, or negative error code otherwise.
323 */
324 int damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
325 ssize_t nr_schemes)
326 {
327 struct damos *s, *next;
328 ssize_t i;
329
330 damon_for_each_scheme_safe(s, next, ctx)
331 damon_destroy_scheme(s);
332 for (i = 0; i < nr_schemes; i++)
333 damon_add_scheme(ctx, schemes[i]);
334 return 0;
335 }
336
337 /**
338 * damon_nr_running_ctxs() - Return number of currently running contexts.
339 */
340 int damon_nr_running_ctxs(void)
341 {
342 int nr_ctxs;
343
344 mutex_lock(&damon_lock);
345 nr_ctxs = nr_running_ctxs;
346 mutex_unlock(&damon_lock);
347
348 return nr_ctxs;
349 }
350
351 /* Returns the size upper limit for each monitoring region */
352 static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
353 {
354 struct damon_target *t;
355 struct damon_region *r;
356 unsigned long sz = 0;
357
358 damon_for_each_target(t, ctx) {
359 damon_for_each_region(r, t)
360 sz += r->ar.end - r->ar.start;
361 }
362
363 if (ctx->min_nr_regions)
364 sz /= ctx->min_nr_regions;
365 if (sz < DAMON_MIN_REGION)
366 sz = DAMON_MIN_REGION;
367
368 return sz;
369 }
370
371 static int kdamond_fn(void *data);
372
373 /*
374 * __damon_start() - Starts monitoring with given context.
375 * @ctx: monitoring context
376 *
377 * This function should be called while damon_lock is held.
378 *
379 * Return: 0 on success, negative error code otherwise.
380 */
381 static int __damon_start(struct damon_ctx *ctx)
382 {
383 int err = -EBUSY;
384
385 mutex_lock(&ctx->kdamond_lock);
386 if (!ctx->kdamond) {
387 err = 0;
388 ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d",
389 nr_running_ctxs);
390 if (IS_ERR(ctx->kdamond)) {
391 err = PTR_ERR(ctx->kdamond);
392 ctx->kdamond = NULL;
393 }
394 }
395 mutex_unlock(&ctx->kdamond_lock);
396
397 return err;
398 }
399
400 /**
401 * damon_start() - Starts monitoring for a given group of contexts.
402 * @ctxs: an array of the pointers for contexts to start monitoring
403 * @nr_ctxs: size of @ctxs
404 *
405 * This function starts a group of monitoring threads for a group of monitoring
406 * contexts. One kdamond thread is created per context and they run in
407 * parallel. The caller should handle synchronization between the threads by
408 * itself. If a group of threads created by another 'damon_start()' call is
409 * currently running, this function does nothing but returns -EBUSY.
410 *
411 * Return: 0 on success, negative error code otherwise.
412 */
413 int damon_start(struct damon_ctx **ctxs, int nr_ctxs)
414 {
415 int i;
416 int err = 0;
417
418 mutex_lock(&damon_lock);
419 if (nr_running_ctxs) {
420 mutex_unlock(&damon_lock);
421 return -EBUSY;
422 }
423
424 for (i = 0; i < nr_ctxs; i++) {
425 err = __damon_start(ctxs[i]);
426 if (err)
427 break;
428 nr_running_ctxs++;
429 }
430 mutex_unlock(&damon_lock);
431
432 return err;
433 }
434
435 /*
436 * __damon_stop() - Stops monitoring of given context.
437 * @ctx: monitoring context
438 *
439 * Return: 0 on success, negative error code otherwise.
440 */
441 static int __damon_stop(struct damon_ctx *ctx)
442 {
443 struct task_struct *tsk;
444
445 mutex_lock(&ctx->kdamond_lock);
446 tsk = ctx->kdamond;
447 if (tsk) {
448 get_task_struct(tsk);
449 mutex_unlock(&ctx->kdamond_lock);
450 kthread_stop(tsk);
451 put_task_struct(tsk);
452 return 0;
453 }
454 mutex_unlock(&ctx->kdamond_lock);
455
456 return -EPERM;
457 }
458
459 /**
460 * damon_stop() - Stops monitoring for a given group of contexts.
461 * @ctxs: an array of the pointers for contexts to stop monitoring
462 * @nr_ctxs: size of @ctxs
463 *
464 * Return: 0 on success, negative error code otherwise.
465 */
466 int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
467 {
468 int i, err = 0;
469
470 for (i = 0; i < nr_ctxs; i++) {
471 /* nr_running_ctxs is decremented in kdamond_fn */
472 err = __damon_stop(ctxs[i]);
473 if (err)
474 return err;
475 }
476
477 return err;
478 }
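/*
 * Illustrative sketch (not part of the upstream file): once contexts are
 * fully configured, monitoring is started and stopped as a group:
 *
 *	err = damon_start(&ctx, 1);
 *	...
 *	err = damon_stop(&ctx, 1);
 *
 * damon_start() returns -EBUSY if a group started by another call is still
 * running, and kdamond_fn() itself decrements nr_running_ctxs on exit.
 */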
479
480 /*
481 * damon_check_reset_time_interval() - Check if a time interval is elapsed.
482 * @baseline: the time to check whether the interval has elapsed since
483 * @interval: the time interval (microseconds)
484 *
485 * See whether the given time interval has passed since the given baseline
486 * time. If so, it also resets the baseline to the current time for the next check.
487 *
488 * Return: true if the time interval has passed, or false otherwise.
489 */
490 static bool damon_check_reset_time_interval(struct timespec64 *baseline,
491 unsigned long interval)
492 {
493 struct timespec64 now;
494
495 ktime_get_coarse_ts64(&now);
496 if ((timespec64_to_ns(&now) - timespec64_to_ns(baseline)) <
497 interval * 1000)
498 return false;
499 *baseline = now;
500 return true;
501 }
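/*
 * For example, with an interval of 100000 (100 ms, as intervals are given in
 * microseconds), this returns true only when at least 100,000,000 ns have
 * passed since *baseline, and then moves the baseline forward for the next
 * check.
 */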
502
503 /*
504 * Check whether it is time to flush the aggregated information
505 */
506 static bool kdamond_aggregate_interval_passed(struct damon_ctx *ctx)
507 {
508 return damon_check_reset_time_interval(&ctx->last_aggregation,
509 ctx->aggr_interval);
510 }
511
512 /*
513 * Reset the aggregated monitoring results ('nr_accesses' of each region).
514 */
515 static void kdamond_reset_aggregated(struct damon_ctx *c)
516 {
517 struct damon_target *t;
518 unsigned int ti = 0; /* target's index */
519
520 damon_for_each_target(t, c) {
521 struct damon_region *r;
522
523 damon_for_each_region(r, t) {
524 trace_damon_aggregated(t, ti, r, damon_nr_regions(t));
525 r->last_nr_accesses = r->nr_accesses;
526 r->nr_accesses = 0;
527 }
528 ti++;
529 }
530 }
531
532 static void damon_split_region_at(struct damon_ctx *ctx,
533 struct damon_target *t, struct damon_region *r,
534 unsigned long sz_r);
535
536 static bool __damos_valid_target(struct damon_region *r, struct damos *s)
537 {
538 unsigned long sz;
539
540 sz = r->ar.end - r->ar.start;
541 return s->min_sz_region <= sz && sz <= s->max_sz_region &&
542 s->min_nr_accesses <= r->nr_accesses &&
543 r->nr_accesses <= s->max_nr_accesses &&
544 s->min_age_region <= r->age && r->age <= s->max_age_region;
545 }
546
547 static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
548 struct damon_region *r, struct damos *s)
549 {
550 bool ret = __damos_valid_target(r, s);
551
552 if (!ret || !s->quota.esz || !c->primitive.get_scheme_score)
553 return ret;
554
555 return c->primitive.get_scheme_score(c, t, r, s) >= s->quota.min_score;
556 }
557
558 static void damon_do_apply_schemes(struct damon_ctx *c,
559 struct damon_target *t,
560 struct damon_region *r)
561 {
562 struct damos *s;
563
564 damon_for_each_scheme(s, c) {
565 struct damos_quota *quota = &s->quota;
566 unsigned long sz = r->ar.end - r->ar.start;
567 struct timespec64 begin, end;
568 unsigned long sz_applied = 0;
569
570 if (!s->wmarks.activated)
571 continue;
572
573 /* Check the quota */
574 if (quota->esz && quota->charged_sz >= quota->esz)
575 continue;
576
577 /* Skip previously charged regions */
578 if (quota->charge_target_from) {
579 if (t != quota->charge_target_from)
580 continue;
581 if (r == damon_last_region(t)) {
582 quota->charge_target_from = NULL;
583 quota->charge_addr_from = 0;
584 continue;
585 }
586 if (quota->charge_addr_from &&
587 r->ar.end <= quota->charge_addr_from)
588 continue;
589
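/*
 * Charging in the previous window stopped inside this
 * region: split off the part before charge_addr_from so
 * that only the uncharged remainder is considered here.
 */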
590 if (quota->charge_addr_from && r->ar.start <
591 quota->charge_addr_from) {
592 sz = ALIGN_DOWN(quota->charge_addr_from -
593 r->ar.start, DAMON_MIN_REGION);
594 if (!sz) {
595 if (r->ar.end - r->ar.start <=
596 DAMON_MIN_REGION)
597 continue;
598 sz = DAMON_MIN_REGION;
599 }
600 damon_split_region_at(c, t, r, sz);
601 r = damon_next_region(r);
602 sz = r->ar.end - r->ar.start;
603 }
604 quota->charge_target_from = NULL;
605 quota->charge_addr_from = 0;
606 }
607
608 if (!damos_valid_target(c, t, r, s))
609 continue;
610
611 /* Apply the scheme */
612 if (c->primitive.apply_scheme) {
613 if (quota->esz &&
614 quota->charged_sz + sz > quota->esz) {
615 sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
616 DAMON_MIN_REGION);
617 if (!sz)
618 goto update_stat;
619 damon_split_region_at(c, t, r, sz);
620 }
621 ktime_get_coarse_ts64(&begin);
622 sz_applied = c->primitive.apply_scheme(c, t, r, s);
623 ktime_get_coarse_ts64(&end);
624 quota->total_charged_ns += timespec64_to_ns(&end) -
625 timespec64_to_ns(&begin);
626 quota->charged_sz += sz;
627 if (quota->esz && quota->charged_sz >= quota->esz) {
628 quota->charge_target_from = t;
629 quota->charge_addr_from = r->ar.end + 1;
630 }
631 }
632 if (s->action != DAMOS_STAT)
633 r->age = 0;
634
635 update_stat:
636 s->stat.nr_tried++;
637 s->stat.sz_tried += sz;
638 if (sz_applied)
639 s->stat.nr_applied++;
640 s->stat.sz_applied += sz_applied;
641 }
642 }
643
644 /* Shouldn't be called if quota->ms and quota->sz are zero */
645 static void damos_set_effective_quota(struct damos_quota *quota)
646 {
647 unsigned long throughput;
648 unsigned long esz;
649
650 if (!quota->ms) {
651 quota->esz = quota->sz;
652 return;
653 }
654
655 if (quota->total_charged_ns)
656 throughput = quota->total_charged_sz * 1000000 /
657 quota->total_charged_ns;
658 else
659 throughput = PAGE_SIZE * 1024;
660 esz = throughput * quota->ms;
661
662 if (quota->sz && quota->sz < esz)
663 esz = quota->sz;
664 quota->esz = esz;
665 }
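/*
 * Worked example (illustrative): if 4 MiB were charged over 2 ms of apply
 * time (total_charged_sz == 4 MiB, total_charged_ns == 2,000,000), the
 * estimated throughput is 2 MiB per millisecond, so quota->ms == 10 gives an
 * effective size quota (esz) of 20 MiB, further capped by quota->sz if that
 * is set and smaller.
 */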
666
667 static void kdamond_apply_schemes(struct damon_ctx *c)
668 {
669 struct damon_target *t;
670 struct damon_region *r, *next_r;
671 struct damos *s;
672
673 damon_for_each_scheme(s, c) {
674 struct damos_quota *quota = &s->quota;
675 unsigned long cumulated_sz;
676 unsigned int score, max_score = 0;
677
678 if (!s->wmarks.activated)
679 continue;
680
681 if (!quota->ms && !quota->sz)
682 continue;
683
684 /* New charge window starts */
685 if (time_after_eq(jiffies, quota->charged_from +
686 msecs_to_jiffies(
687 quota->reset_interval))) {
688 if (quota->esz && quota->charged_sz >= quota->esz)
689 s->stat.qt_exceeds++;
690 quota->total_charged_sz += quota->charged_sz;
691 quota->charged_from = jiffies;
692 quota->charged_sz = 0;
693 damos_set_effective_quota(quota);
694 }
695
696 if (!c->primitive.get_scheme_score)
697 continue;
698
699 /* Fill up the score histogram */
700 memset(quota->histogram, 0, sizeof(quota->histogram));
701 damon_for_each_target(t, c) {
702 damon_for_each_region(r, t) {
703 if (!__damos_valid_target(r, s))
704 continue;
705 score = c->primitive.get_scheme_score(
706 c, t, r, s);
707 quota->histogram[score] +=
708 r->ar.end - r->ar.start;
709 if (score > max_score)
710 max_score = score;
711 }
712 }
713
714 /* Set the min score limit */
715 for (cumulated_sz = 0, score = max_score; ; score--) {
716 cumulated_sz += quota->histogram[score];
717 if (cumulated_sz >= quota->esz || !score)
718 break;
719 }
720 quota->min_score = score;
721 }
722
723 damon_for_each_target(t, c) {
724 damon_for_each_region_safe(r, next_r, t)
725 damon_do_apply_schemes(c, t, r);
726 }
727 }
728
729 static inline unsigned long sz_damon_region(struct damon_region *r)
730 {
731 return r->ar.end - r->ar.start;
732 }
733
734 /*
735 * Merge two adjacent regions into one region
736 */
737 static void damon_merge_two_regions(struct damon_target *t,
738 struct damon_region *l, struct damon_region *r)
739 {
740 unsigned long sz_l = sz_damon_region(l), sz_r = sz_damon_region(r);
741
742 l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
743 (sz_l + sz_r);
744 l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
745 l->ar.end = r->ar.end;
746 damon_destroy_region(r, t);
747 }
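/*
 * Example (illustrative): merging an 8 KiB region with nr_accesses == 10 into
 * an adjacent 4 KiB region with nr_accesses == 4 yields one 12 KiB region
 * whose nr_accesses is the size-weighted average,
 * (10 * 8192 + 4 * 4096) / 12288 == 8.
 */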
748
749 /*
750 * Merge adjacent regions having similar access frequencies
751 *
752 * t target affected by this merge operation
753 * thres '->nr_accesses' diff threshold for the merge
754 * sz_limit size upper limit of each region
755 */
756 static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
757 unsigned long sz_limit)
758 {
759 struct damon_region *r, *prev = NULL, *next;
760
761 damon_for_each_region_safe(r, next, t) {
762 if (abs(r->nr_accesses - r->last_nr_accesses) > thres)
763 r->age = 0;
764 else
765 r->age++;
766
767 if (prev && prev->ar.end == r->ar.start &&
768 abs(prev->nr_accesses - r->nr_accesses) <= thres &&
769 sz_damon_region(prev) + sz_damon_region(r) <= sz_limit)
770 damon_merge_two_regions(t, prev, r);
771 else
772 prev = r;
773 }
774 }
775
776 /*
777 * Merge adjacent regions having similar access frequencies
778 *
779 * threshold '->nr_accesses' diff threshold for the merge
780 * sz_limit size upper limit of each region
781 *
782 * This function merges monitoring target regions which are adjacent and their
783 * access frequencies are similar. This is for minimizing the monitoring
784 * overhead under the dynamically changeable access pattern. If a merge was
785 * unnecessarily made, later 'kdamond_split_regions()' will revert it.
786 */
787 static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
788 unsigned long sz_limit)
789 {
790 struct damon_target *t;
791
792 damon_for_each_target(t, c)
793 damon_merge_regions_of(t, threshold, sz_limit);
794 }
795
796 /*
797 * Split a region in two
798 *
799 * r the region to be split
800 * sz_r size of the first sub-region that will be made
801 */
802 static void damon_split_region_at(struct damon_ctx *ctx,
803 struct damon_target *t, struct damon_region *r,
804 unsigned long sz_r)
805 {
806 struct damon_region *new;
807
808 new = damon_new_region(r->ar.start + sz_r, r->ar.end);
809 if (!new)
810 return;
811
812 r->ar.end = new->ar.start;
813
814 new->age = r->age;
815 new->last_nr_accesses = r->last_nr_accesses;
816
817 damon_insert_region(new, r, damon_next_region(r), t);
818 }
819
820 /* Split every region in the given target into 'nr_subs' regions */
821 static void damon_split_regions_of(struct damon_ctx *ctx,
822 struct damon_target *t, int nr_subs)
823 {
824 struct damon_region *r, *next;
825 unsigned long sz_region, sz_sub = 0;
826 int i;
827
828 damon_for_each_region_safe(r, next, t) {
829 sz_region = r->ar.end - r->ar.start;
830
831 for (i = 0; i < nr_subs - 1 &&
832 sz_region > 2 * DAMON_MIN_REGION; i++) {
833 /*
834 * Randomly select the size of the left sub-region to be
835 * at least 10% and at most 90% of the original region
836 */
837 sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
838 sz_region / 10, DAMON_MIN_REGION);
839 /* Do not allow blank region */
840 if (sz_sub == 0 || sz_sub >= sz_region)
841 continue;
842
843 damon_split_region_at(ctx, t, r, sz_sub);
844 sz_region = sz_sub;
845 }
846 }
847 }
848
849 /*
850 * Split every target region into randomly-sized small regions
851 *
852 * This function splits every target region into random-sized small regions if
853 * the current total number of regions is equal to or smaller than half of the
854 * user-specified maximum number of regions. This is for maximizing the
855 * monitoring accuracy under the dynamically changeable access patterns. If a
856 * split was unnecessarily made, later 'kdamond_merge_regions()' will revert
857 * it.
858 */
859 static void kdamond_split_regions(struct damon_ctx *ctx)
860 {
861 struct damon_target *t;
862 unsigned int nr_regions = 0;
863 static unsigned int last_nr_regions;
864 int nr_subregions = 2;
865
866 damon_for_each_target(t, ctx)
867 nr_regions += damon_nr_regions(t);
868
869 if (nr_regions > ctx->max_nr_regions / 2)
870 return;
871
872 /* Maybe the middle of the region has different access frequency */
873 if (last_nr_regions == nr_regions &&
874 nr_regions < ctx->max_nr_regions / 3)
875 nr_subregions = 3;
876
877 damon_for_each_target(t, ctx)
878 damon_split_regions_of(ctx, t, nr_subregions);
879
880 last_nr_regions = nr_regions;
881 }
882
883 /*
884 * Check whether it is time to update the monitoring primitive (the target regions)
885 *
886 * Returns true if it is.
887 */
888 static bool kdamond_need_update_primitive(struct damon_ctx *ctx)
889 {
890 return damon_check_reset_time_interval(&ctx->last_primitive_update,
891 ctx->primitive_update_interval);
892 }
893
894 /*
895 * Check whether current monitoring should be stopped
896 *
897 * The monitoring is stopped when either the user requested to stop, or all
898 * monitoring targets are invalid.
899 *
900 * Returns true if need to stop current monitoring.
901 */
902 static bool kdamond_need_stop(struct damon_ctx *ctx)
903 {
904 struct damon_target *t;
905
906 if (kthread_should_stop())
907 return true;
908
909 if (!ctx->primitive.target_valid)
910 return false;
911
912 damon_for_each_target(t, ctx) {
913 if (ctx->primitive.target_valid(t))
914 return false;
915 }
916
917 return true;
918 }
919
920 static unsigned long damos_wmark_metric_value(enum damos_wmark_metric metric)
921 {
922 struct sysinfo i;
923
924 switch (metric) {
925 case DAMOS_WMARK_FREE_MEM_RATE:
926 si_meminfo(&i);
927 return i.freeram * 1000 / i.totalram;
928 default:
929 break;
930 }
931 return -EINVAL;
932 }
933
934 /*
935 * Returns zero if the scheme is active. Else, returns time to wait for next
936 * watermark check in micro-seconds.
937 */
938 static unsigned long damos_wmark_wait_us(struct damos *scheme)
939 {
940 unsigned long metric;
941
942 if (scheme->wmarks.metric == DAMOS_WMARK_NONE)
943 return 0;
944
945 metric = damos_wmark_metric_value(scheme->wmarks.metric);
946 /* higher than high watermark or lower than low watermark */
947 if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
948 if (scheme->wmarks.activated)
949 pr_debug("deactivate a scheme (%d) for %s wmark\n",
950 scheme->action,
951 metric > scheme->wmarks.high ?
952 "high" : "low");
953 scheme->wmarks.activated = false;
954 return scheme->wmarks.interval;
955 }
956
957 /* inactive and higher than middle watermark */
958 if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
959 !scheme->wmarks.activated)
960 return scheme->wmarks.interval;
961
962 if (!scheme->wmarks.activated)
963 pr_debug("activate a scheme (%d)\n", scheme->action);
964 scheme->wmarks.activated = true;
965 return 0;
966 }
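/*
 * Example (illustrative): DAMOS_WMARK_FREE_MEM_RATE measures free memory in
 * per-thousand of total memory.  With high == 500, mid == 400 and low == 200,
 * an inactive scheme is activated once the free memory rate falls below 400
 * (40%), and is deactivated again when it rises above 500 (50%) or drops
 * below 200 (20%).
 */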
967
968 static void kdamond_usleep(unsigned long usecs)
969 {
970 /* See Documentation/timers/timers-howto.rst for the thresholds */
971 if (usecs > 20 * USEC_PER_MSEC)
972 schedule_timeout_idle(usecs_to_jiffies(usecs));
973 else
974 usleep_idle_range(usecs, usecs + 1);
975 }
976
977 /* Returns negative error code if it's not activated but should return */
978 static int kdamond_wait_activation(struct damon_ctx *ctx)
979 {
980 struct damos *s;
981 unsigned long wait_time;
982 unsigned long min_wait_time = 0;
983
984 while (!kdamond_need_stop(ctx)) {
985 damon_for_each_scheme(s, ctx) {
986 wait_time = damos_wmark_wait_us(s);
987 if (!min_wait_time || wait_time < min_wait_time)
988 min_wait_time = wait_time;
989 }
990 if (!min_wait_time)
991 return 0;
992
993 kdamond_usleep(min_wait_time);
994 }
995 return -EBUSY;
996 }
997
998 /*
999 * The monitoring daemon that runs as a kernel thread
1000 */
1001 static int kdamond_fn(void *data)
1002 {
1003 struct damon_ctx *ctx = (struct damon_ctx *)data;
1004 struct damon_target *t;
1005 struct damon_region *r, *next;
1006 unsigned int max_nr_accesses = 0;
1007 unsigned long sz_limit = 0;
1008 bool done = false;
1009
1010 pr_debug("kdamond (%d) starts\n", current->pid);
1011
1012 if (ctx->primitive.init)
1013 ctx->primitive.init(ctx);
1014 if (ctx->callback.before_start && ctx->callback.before_start(ctx))
1015 done = true;
1016
1017 sz_limit = damon_region_sz_limit(ctx);
1018
1019 while (!kdamond_need_stop(ctx) && !done) {
1020 if (kdamond_wait_activation(ctx))
1021 continue;
1022
1023 if (ctx->primitive.prepare_access_checks)
1024 ctx->primitive.prepare_access_checks(ctx);
1025 if (ctx->callback.after_sampling &&
1026 ctx->callback.after_sampling(ctx))
1027 done = true;
1028
1029 kdamond_usleep(ctx->sample_interval);
1030
1031 if (ctx->primitive.check_accesses)
1032 max_nr_accesses = ctx->primitive.check_accesses(ctx);
1033
1034 if (kdamond_aggregate_interval_passed(ctx)) {
1035 kdamond_merge_regions(ctx,
1036 max_nr_accesses / 10,
1037 sz_limit);
1038 if (ctx->callback.after_aggregation &&
1039 ctx->callback.after_aggregation(ctx))
1040 done = true;
1041 kdamond_apply_schemes(ctx);
1042 kdamond_reset_aggregated(ctx);
1043 kdamond_split_regions(ctx);
1044 if (ctx->primitive.reset_aggregated)
1045 ctx->primitive.reset_aggregated(ctx);
1046 }
1047
1048 if (kdamond_need_update_primitive(ctx)) {
1049 if (ctx->primitive.update)
1050 ctx->primitive.update(ctx);
1051 sz_limit = damon_region_sz_limit(ctx);
1052 }
1053 }
1054 damon_for_each_target(t, ctx) {
1055 damon_for_each_region_safe(r, next, t)
1056 damon_destroy_region(r, t);
1057 }
1058
1059 if (ctx->callback.before_terminate)
1060 ctx->callback.before_terminate(ctx);
1061 if (ctx->primitive.cleanup)
1062 ctx->primitive.cleanup(ctx);
1063
1064 pr_debug("kdamond (%d) finishes\n", current->pid);
1065 mutex_lock(&ctx->kdamond_lock);
1066 ctx->kdamond = NULL;
1067 mutex_unlock(&ctx->kdamond_lock);
1068
1069 mutex_lock(&damon_lock);
1070 nr_running_ctxs--;
1071 mutex_unlock(&damon_lock);
1072
1073 return 0;
1074 }
1075
1076 #include "core-test.h"
1077