/mm/damon/
D | core-test.h
      20  struct damon_target *t;  in damon_test_regions() local
      27  t = damon_new_target(42);  in damon_test_regions()
      28  KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));  in damon_test_regions()
      30  damon_add_region(r, t);  in damon_test_regions()
      31  KUNIT_EXPECT_EQ(test, 1u, damon_nr_regions(t));  in damon_test_regions()
      33  damon_del_region(r, t);  in damon_test_regions()
      34  KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));  in damon_test_regions()
      36  damon_free_target(t);  in damon_test_regions()
      41  struct damon_target *t;  in nr_damon_targets() local
      44  damon_for_each_target(t, ctx)  in nr_damon_targets()
      [all …]
D | core.c
      52  void damon_add_region(struct damon_region *r, struct damon_target *t)  in damon_add_region() argument
      54  list_add_tail(&r->list, &t->regions_list);  in damon_add_region()
      55  t->nr_regions++;  in damon_add_region()
      58  static void damon_del_region(struct damon_region *r, struct damon_target *t)  in damon_del_region() argument
      61  t->nr_regions--;  in damon_del_region()
      69  void damon_destroy_region(struct damon_region *r, struct damon_target *t)  in damon_destroy_region() argument
      71  damon_del_region(r, t);  in damon_destroy_region()
     149  struct damon_target *t;  in damon_new_target() local
     151  t = kmalloc(sizeof(*t), GFP_KERNEL);  in damon_new_target()
     152  if (!t)  in damon_new_target()
      [all …]
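The core.c fragments above, paired with the core-test.h expectations, show DAMON's region bookkeeping: each target keeps its regions on a linked list and mirrors the list length in nr_regions, with damon_add_region() and damon_del_region() updating both together. Below is a minimal userspace sketch of that pattern; the target/region types and helper names are simplified stand-ins, and assert() takes the place of KUNIT_EXPECT_EQ():

    #include <assert.h>
    #include <stdlib.h>

    /* Simplified stand-ins for struct damon_target / struct damon_region. */
    struct region {
        struct region *prev, *next;    /* doubly linked, like list_head */
    };

    struct target {
        struct region regions_list;    /* circular list head */
        unsigned int nr_regions;
    };

    static struct target *new_target(void)
    {
        struct target *t = malloc(sizeof(*t));

        if (!t)
            return NULL;
        t->regions_list.prev = t->regions_list.next = &t->regions_list;
        t->nr_regions = 0;
        return t;
    }

    /* Mirrors damon_add_region(): tail-insert, then bump the counter. */
    static void add_region(struct region *r, struct target *t)
    {
        r->prev = t->regions_list.prev;
        r->next = &t->regions_list;
        r->prev->next = r;
        t->regions_list.prev = r;
        t->nr_regions++;
    }

    /* Mirrors damon_del_region(): unlink, then drop the counter. */
    static void del_region(struct region *r, struct target *t)
    {
        r->prev->next = r->next;
        r->next->prev = r->prev;
        t->nr_regions--;
    }

    int main(void)
    {
        struct target *t = new_target();
        struct region r;

        assert(t && t->nr_regions == 0);    /* like the 0u expectation */
        add_region(&r, t);
        assert(t->nr_regions == 1);
        del_region(&r, t);
        assert(t->nr_regions == 0);
        free(t);
        return 0;
    }

Keeping the counter beside the list is what lets a damon_nr_regions()-style query answer in constant time instead of walking the list.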
D | vaddr.c
      29  static inline struct task_struct *damon_get_task_struct(struct damon_target *t)  in damon_get_task_struct() argument
      31  return get_pid_task((struct pid *)t->id, PIDTYPE_PID);  in damon_get_task_struct()
      41  static struct mm_struct *damon_get_mm(struct damon_target *t)  in damon_get_mm() argument
      46  task = damon_get_task_struct(t);  in damon_get_mm()
      64  static int damon_va_evenly_split_region(struct damon_target *t,  in damon_va_evenly_split_region() argument
      88  damon_insert_region(n, r, next, t);  in damon_va_evenly_split_region()
     172  static int damon_va_three_regions(struct damon_target *t,  in damon_va_three_regions() argument
     178  mm = damon_get_mm(t);  in damon_va_three_regions()
     233  struct damon_target *t)  in __damon_va_init_regions() argument
     241  if (damon_va_three_regions(t, regions)) {  in __damon_va_init_regions()
      [all …]
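damon_va_evenly_split_region() appears above only as fragments, but the splitting arithmetic is easy to show on its own. A hedged userspace sketch, where MIN_REGION, the piece layout, and the error convention are assumptions rather than the kernel's exact behavior (the last piece absorbs any remainder):

    #include <stdio.h>

    #define MIN_REGION 4096UL    /* assumed minimum region granularity */

    /*
     * Split [start, end) into nr_pieces chunks whose size is a multiple
     * of MIN_REGION; the final piece takes whatever is left over.
     * Returns -1 (standing in for -EINVAL) on bad input, 0 on success.
     */
    static int evenly_split(unsigned long start, unsigned long end,
                            unsigned int nr_pieces)
    {
        unsigned long sz_piece, s;

        if (!nr_pieces || start >= end)
            return -1;
        sz_piece = ((end - start) / nr_pieces) / MIN_REGION * MIN_REGION;
        if (!sz_piece)
            return -1;    /* range too small to split this finely */
        for (s = start; s + 2 * sz_piece <= end; s += sz_piece)
            printf("[%#lx, %#lx)\n", s, s + sz_piece);
        printf("[%#lx, %#lx)\n", s, end);    /* last piece, plus remainder */
        return 0;
    }

    int main(void)
    {
        /* split a 40 KiB range into 3 pieces */
        return evenly_split(0, 10 * MIN_REGION, 3) ? 1 : 0;
    }

The bad-input checks are also why damon_test_split_evenly_fail() in vaddr-test.h below expects -EINVAL from the real function.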
D | vaddr-test.h
      98  static struct damon_region *__nth_region_of(struct damon_target *t, int idx)  in __nth_region_of() argument
     103  damon_for_each_region(r, t) {  in __nth_region_of()
     138  struct damon_target *t;  in damon_do_test_apply_three_regions() local
     142  t = damon_new_target(42);  in damon_do_test_apply_three_regions()
     145  damon_add_region(r, t);  in damon_do_test_apply_three_regions()
     148  damon_va_apply_three_regions(t, three_regions);  in damon_do_test_apply_three_regions()
     151  r = __nth_region_of(t, i);  in damon_do_test_apply_three_regions()
     254  struct damon_target *t = damon_new_target(42);  in damon_test_split_evenly_fail() local
     257  damon_add_region(r, t);  in damon_test_split_evenly_fail()
     259  damon_va_evenly_split_region(t, r, nr_pieces), -EINVAL);  in damon_test_split_evenly_fail()
      [all …]
D | dbgfs.c
     285  struct damon_target *t;  in sprint_target_ids() local
     290  damon_for_each_target(t, ctx) {  in sprint_target_ids()
     291  id = t->id;  in sprint_target_ids()
     365  struct damon_target *t, *next_t;  in dbgfs_target_ids_write() local
     410  damon_for_each_target_safe(t, next_t, ctx) {  in dbgfs_target_ids_write()
     412  put_pid((struct pid *)t->id);  in dbgfs_target_ids_write()
     413  damon_destroy_target(t);  in dbgfs_target_ids_write()
     444  struct damon_target *t;  in sprint_init_regions() local
     449  damon_for_each_target(t, c) {  in sprint_init_regions()
     450  damon_for_each_region(r, t) {  in sprint_init_regions()
      [all …]
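sprint_target_ids() above renders every monitoring target's id into one buffer for debugfs. A userspace sketch of that formatting loop; sprint_ids() and the ids[] array are hypothetical stand-ins for the function and for the targets damon_for_each_target() would visit:

    #include <stdio.h>

    /*
     * Walk the ids and render them into one buffer, tracking how much
     * has been written so far and refusing to overflow.
     */
    static int sprint_ids(char *buf, size_t len,
                          const unsigned long *ids, int nr)
    {
        int written = 0;

        for (int i = 0; i < nr; i++) {
            int rc = snprintf(buf + written, len - written, "%lu%s",
                              ids[i], i == nr - 1 ? "\n" : " ");

            if (rc < 0 || rc >= (int)(len - written))
                return -1;    /* buffer too small */
            written += rc;
        }
        return written;
    }

    int main(void)
    {
        unsigned long ids[] = { 42, 4242 };
        char buf[64];

        if (sprint_ids(buf, sizeof(buf), ids, 2) > 0)
            fputs(buf, stdout);    /* prints "42 4242" */
        return 0;
    }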
D | paddr.c
      78  struct damon_target *t;  in damon_pa_prepare_access_checks() local
      81  damon_for_each_target(t, ctx) {  in damon_pa_prepare_access_checks()
      82  damon_for_each_region(r, t)  in damon_pa_prepare_access_checks()
     197  struct damon_target *t;  in damon_pa_check_accesses() local
     201  damon_for_each_target(t, ctx) {  in damon_pa_check_accesses()
     202  damon_for_each_region(r, t) {  in damon_pa_check_accesses()
     211  bool damon_pa_target_valid(void *t)  in damon_pa_target_valid() argument
     217  struct damon_target *t, struct damon_region *r,  in damon_pa_apply_scheme() argument
     251  struct damon_target *t, struct damon_region *r,  in damon_pa_scheme_score() argument
/mm/
D | vmstat.c
     320  long t;  in __mod_zone_page_state() local
     334  t = __this_cpu_read(pcp->stat_threshold);  in __mod_zone_page_state()
     336  if (unlikely(abs(x) > t)) {  in __mod_zone_page_state()
     353  long t;  in __mod_node_page_state() local
     372  t = __this_cpu_read(pcp->stat_threshold);  in __mod_node_page_state()
     374  if (unlikely(abs(x) > t)) {  in __mod_node_page_state()
     412  s8 v, t;  in __inc_zone_state() local
     419  t = __this_cpu_read(pcp->stat_threshold);  in __inc_zone_state()
     420  if (unlikely(v > t)) {  in __inc_zone_state()
     421  s8 overstep = t >> 1;  in __inc_zone_state()
      [all …]
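The vmstat.c fragments all follow one idea: per-CPU counters accumulate small deltas cheaply, and only when a delta outgrows stat_threshold does the shared zone or node counter get touched. A userspace sketch of that batching, with a plain struct standing in for the per-CPU machinery:

    #include <stdio.h>
    #include <stdlib.h>

    /*
     * Sketch of the __mod_zone_page_state() batching: accumulate into a
     * cheap local counter and fold into the shared, expensive global
     * counter only when the local delta crosses a threshold
     * (pcp->stat_threshold in the fragments above).
     */
    struct batched_counter {
        long global;       /* stands in for the zone-wide counter */
        long local;        /* stands in for the per-CPU delta */
        long threshold;    /* stands in for pcp->stat_threshold */
    };

    static void mod_state(struct batched_counter *c, long delta)
    {
        long x = c->local + delta;
        long t = c->threshold;

        if (labs(x) > t) {     /* like: if (unlikely(abs(x) > t)) */
            c->global += x;    /* fold the whole delta in one step */
            x = 0;
        }
        c->local = x;
    }

    int main(void)
    {
        struct batched_counter c = { 0, 0, 32 };

        for (int i = 0; i < 100; i++)
            mod_state(&c, 1);
        /* prints global=99 local=1: the shared counter moved in batches */
        printf("global=%ld local=%ld\n", c.global, c.local);
        return 0;
    }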
D | memory-failure.c
     253  struct task_struct *t = tk->tsk;  in kill_proc() local
     258  pfn, t->comm, t->pid);  in kill_proc()
     261  if (t == current)  in kill_proc()
     267  addr_lsb, t);  in kill_proc()
     276  addr_lsb, t); /* synchronous? */  in kill_proc()
     280  t->comm, t->pid, ret);  in kill_proc()
     442  struct task_struct *t;  in find_early_kill_thread() local
     444  for_each_thread(tsk, t) {  in find_early_kill_thread()
     445  if (t->flags & PF_MCE_PROCESS) {  in find_early_kill_thread()
     446  if (t->flags & PF_MCE_EARLY)  in find_early_kill_thread()
      [all …]
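find_early_kill_thread() above scans a thread group for a thread that asked for early MCE killing. A sketch of that selection with an array standing in for for_each_thread(); the flag values here are made up, and only the two-level flag test mirrors the fragments:

    #include <stdio.h>

    #define PF_MCE_PROCESS 0x1    /* made-up flag values for the sketch */
    #define PF_MCE_EARLY   0x2

    struct task {
        const char *comm;
        unsigned int flags;
    };

    /*
     * Scan the thread group for a thread that opted into early MCE
     * killing. A thread that set PF_MCE_PROCESS without PF_MCE_EARLY
     * explicitly opted out.
     */
    static struct task *find_early_kill_thread(struct task *threads, int nr)
    {
        for (int i = 0; i < nr; i++) {
            if (threads[i].flags & PF_MCE_PROCESS) {
                if (threads[i].flags & PF_MCE_EARLY)
                    return &threads[i];
            }
        }
        return NULL;
    }

    int main(void)
    {
        struct task group[] = {
            { "worker-0", 0 },
            { "worker-1", PF_MCE_PROCESS | PF_MCE_EARLY },
        };
        struct task *t = find_early_kill_thread(group, 2);

        printf("%s\n", t ? t->comm : "none");    /* prints worker-1 */
        return 0;
    }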
D | slub.c
     698  struct track *t;  in get_each_object_track() local
     708  t = get_track(s, p, alloc);  in get_each_object_track()
     710  ret = fn(s, p, t, private);  in get_each_object_track()
     756  static void print_track(const char *s, struct track *t, unsigned long pr_time)  in print_track() argument
     758  if (!t->addr)  in print_track()
     762  s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);  in print_track()
     767  if (t->addrs[i])  in print_track()
     768  pr_err("\t%pS\n", (void *)t->addrs[i]);  in print_track()
    2145  void *t;  in get_partial_node() local
    2150  t = acquire_slab(s, n, page, object == NULL, &objects);  in get_partial_node()
      [all …]
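SLUB's struct track records who last allocated or freed an object, and print_track() above bails out when no record was taken (!t->addr). A userspace sketch of that record-and-report shape; the field set is trimmed to what the fragments show:

    #include <stdio.h>
    #include <time.h>

    struct track {
        unsigned long addr;    /* call site; 0 means "no record" */
        unsigned long when;    /* timestamp of the event */
        int cpu;
        int pid;
    };

    /* Report a track record, but only if one was actually taken. */
    static void print_track(const char *what, const struct track *t,
                            unsigned long now)
    {
        if (!t->addr)
            return;
        printf("%s at %#lx age=%lus cpu=%d pid=%d\n",
               what, t->addr, now - t->when, t->cpu, t->pid);
    }

    int main(void)
    {
        unsigned long now = (unsigned long)time(NULL);
        struct track alloc = { 0xdeadbeef, now - 5, 1, 1234 };
        struct track freed = { 0 };    /* never freed: prints nothing */

        print_track("Allocated", &alloc, now);
        print_track("Freed", &freed, now);
        return 0;
    }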
D | page-writeback.c
     616  static void writeout_period(struct timer_list *t)  in writeout_period() argument
     618  struct wb_domain *dom = from_timer(dom, t, period_timer);  in writeout_period()
    1422  unsigned long t;  in wb_max_pause() local
    1431  t = wb_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));  in wb_max_pause()
    1432  t++;  in wb_max_pause()
    1434  return min_t(unsigned long, t, MAX_PAUSE);  in wb_max_pause()
    1445  long t; /* target pause */  in wb_min_pause() local
    1450  t = max(1, HZ / 100);  in wb_min_pause()
    1459  t += (hi - lo) * (10 * HZ) / 1024;  in wb_min_pause()
    1479  t = min(t, 1 + max_pause / 2);  in wb_min_pause()
      [all …]
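The wb_max_pause() fragments compute how long a dirtier may sleep: roughly the time to write wb_dirty pages back at bandwidth bw, in ~HZ/8 chunks, capped at MAX_PAUSE. The same arithmetic as a runnable sketch; HZ, MAX_PAUSE, and the helper are assumed values, not the kernel's configuration:

    #include <stdio.h>

    #define HZ 100
    #define MAX_PAUSE (HZ / 5)    /* assumed cap: 200 ms worth of jiffies */

    /* Round up to the next power of two (helper for the sketch only). */
    static unsigned long roundup_pow_of_two(unsigned long x)
    {
        unsigned long r = 1;

        while (r < x)
            r <<= 1;
        return r;
    }

    /*
     * Same arithmetic as the wb_max_pause() fragment: bound the pause
     * by the time needed to write wb_dirty pages at bandwidth bw
     * (pages per second), in roughly HZ/8 chunks, capped at MAX_PAUSE.
     */
    static unsigned long wb_max_pause(unsigned long wb_dirty, unsigned long bw)
    {
        unsigned long t;

        t = wb_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
        t++;
        return t < MAX_PAUSE ? t : MAX_PAUSE;    /* like min_t() */
    }

    int main(void)
    {
        /* fast device: the pause stays at a single jiffy */
        printf("%lu jiffies\n", wb_max_pause(1000, 25600));
        /* slow device with many dirty pages: the cap kicks in */
        printf("%lu jiffies\n", wb_max_pause(100000, 256));
        return 0;
    }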
D | oom_kill.c
     142  struct task_struct *t;  in find_lock_task_mm() local
     146  for_each_thread(p, t) {  in find_lock_task_mm()
     147  task_lock(t);  in find_lock_task_mm()
     148  if (likely(t->mm))  in find_lock_task_mm()
     150  task_unlock(t);  in find_lock_task_mm()
     152  t = NULL;  in find_lock_task_mm()
     156  return t;  in find_lock_task_mm()
     499  struct task_struct *t;  in process_shares_mm() local
     501  for_each_thread(p, t) {  in process_shares_mm()
     502  struct mm_struct *t_mm = READ_ONCE(t->mm);  in process_shares_mm()
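find_lock_task_mm() above has a convention worth spelling out: it returns the first thread that still has an mm with that thread's lock held, so the mm cannot vanish before the caller is done, and the caller must unlock. A pthread sketch of the same contract (struct task, the mm pointer, and the group array are stand-ins); build with -pthread:

    #include <pthread.h>
    #include <stdio.h>

    struct task {
        pthread_mutex_t lock;    /* stands in for task_lock() */
        void *mm;                /* NULL once the thread has exited */
    };

    /*
     * Scan a thread group for a thread that still has an mm and return
     * it with its lock held. The caller must unlock it.
     */
    static struct task *find_lock_task_mm(struct task *threads, int nr)
    {
        for (int i = 0; i < nr; i++) {
            pthread_mutex_lock(&threads[i].lock);
            if (threads[i].mm)
                return &threads[i];    /* returned locked on purpose */
            pthread_mutex_unlock(&threads[i].lock);
        }
        return NULL;
    }

    int main(void)
    {
        int dummy_mm;
        struct task group[] = {
            { PTHREAD_MUTEX_INITIALIZER, NULL },       /* already exited */
            { PTHREAD_MUTEX_INITIALIZER, &dummy_mm },
        };
        struct task *t = find_lock_task_mm(group, 2);

        if (t) {
            printf("thread %ld still has an mm\n", (long)(t - group));
            pthread_mutex_unlock(&t->lock);    /* caller's duty */
        }
        return 0;
    }

The asymmetric unlock is the point: the success path deliberately leaves the lock held.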
D | util.c
     313  struct task_struct * __maybe_unused t = current;  in vma_is_stack_for_current() local
     315  return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));  in vma_is_stack_for_current()
     883  struct ctl_table t;  in overcommit_policy_handler() local
     899  t = *table;  in overcommit_policy_handler()
     900  t.data = &new_policy;  in overcommit_policy_handler()
     901  ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);  in overcommit_policy_handler()
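overcommit_policy_handler() above copies the ctl_table and points the copy's .data at a local, so the sysctl parser writes into a scratch variable that is committed only after validation. A reduced userspace sketch; struct ctl_table, parse_minmax(), and the buffer interface are simplified stand-ins for the proc_dointvec_minmax() machinery:

    #include <stdio.h>

    /* Very reduced stand-in for struct ctl_table. */
    struct ctl_table {
        void *data;
        int min, max;
    };

    static int current_policy;    /* the real knob */

    /* Stand-in for proc_dointvec_minmax(): parse and range-check. */
    static int parse_minmax(struct ctl_table *t, const char *buf)
    {
        int v;

        if (sscanf(buf, "%d", &v) != 1 || v < t->min || v > t->max)
            return -1;
        *(int *)t->data = v;
        return 0;
    }

    /*
     * Copy the table, aim .data at a local, and commit to the real
     * knob only after the parse succeeded, so a bad write never
     * leaves a half-updated value behind.
     */
    static int policy_handler(struct ctl_table *table, const char *buf)
    {
        struct ctl_table t = *table;    /* like: t = *table */
        int new_policy = -1;
        int ret;

        t.data = &new_policy;           /* parse into the local */
        ret = parse_minmax(&t, buf);
        if (ret == 0)
            current_policy = new_policy;    /* commit */
        return ret;
    }

    int main(void)
    {
        struct ctl_table table = { &current_policy, 0, 2 };

        policy_handler(&table, "2");    /* accepted */
        policy_handler(&table, "9");    /* rejected; knob untouched */
        printf("policy=%d\n", current_policy);    /* prints policy=2 */
        return 0;
    }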
D | hugetlb.c
     388  static long add_reservation_in_range(struct resv_map *resv, long f, long t,  in add_reservation_in_range() argument
     418  if (rg->from >= t)  in add_reservation_in_range()
     436  if (last_accounted_offset < t)  in add_reservation_in_range()
     438  t, h, h_cg, regions_needed);  in add_reservation_in_range()
     519  static long region_add(struct resv_map *resv, long f, long t,  in region_add() argument
     529  add_reservation_in_range(resv, f, t, NULL, NULL,  in region_add()
     548  VM_BUG_ON(t - f <= 1);  in region_add()
     558  add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);  in region_add()
     586  static long region_chg(struct resv_map *resv, long f, long t,  in region_chg() argument
     594  chg = add_reservation_in_range(resv, f, t, NULL, NULL,  in region_chg()
      [all …]
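region_add() and region_chg() above account hugepage reservations over a range [f, t) against an existing reservation map. The gap-counting core of add_reservation_in_range() can be sketched over a sorted array; the struct and function here are simplified stand-ins and leave out the cgroup charging and regions_needed parts:

    #include <stdio.h>

    struct file_region {
        long from, to;    /* an existing reservation, [from, to) */
    };

    /*
     * Given existing reservations sorted by 'from', count how many
     * pages in [f, t) are not yet covered: the gaps the real code
     * would add file_regions for.
     */
    static long pages_needed(const struct file_region *rg, int nr,
                             long f, long t)
    {
        long last = f;    /* like last_accounted_offset */
        long chg = 0;

        for (int i = 0; i < nr; i++) {
            if (rg[i].from >= t)    /* past the range: stop */
                break;
            if (rg[i].from > last)
                chg += rg[i].from - last;    /* gap before this region */
            if (rg[i].to > last)
                last = rg[i].to;
        }
        if (last < t)
            chg += t - last;    /* tail gap up to t */
        return chg;
    }

    int main(void)
    {
        struct file_region rg[] = { { 2, 4 }, { 6, 7 } };

        /* [0, 10) minus the 3 pages already reserved: prints 7 */
        printf("%ld\n", pages_needed(rg, 2, 0, 10));
        return 0;
    }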
D | Kconfig
      99  # Don't discard allocated memory used to track "memory" and "reserved" memblocks
     114  # feature. If you are not sure, don't touch it.
     225  invocations for high order memory requests. You shouldn't
     413  madvise(MADV_HUGEPAGE) but it won't risk to increase the
     444  (PFRA) would like to keep around, but can't since there isn't enough
     552  interactions don't cause any known issues on simple memory setups,
D | memcontrol.c
    4151  struct mem_cgroup_threshold_ary *t;  in __mem_cgroup_threshold() local
    4157  t = rcu_dereference(memcg->thresholds.primary);  in __mem_cgroup_threshold()
    4159  t = rcu_dereference(memcg->memsw_thresholds.primary);  in __mem_cgroup_threshold()
    4161  if (!t)  in __mem_cgroup_threshold()
    4171  i = t->current_threshold;  in __mem_cgroup_threshold()
    4179  for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)  in __mem_cgroup_threshold()
    4180  eventfd_signal(t->entries[i].eventfd, 1);  in __mem_cgroup_threshold()
    4191  for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)  in __mem_cgroup_threshold()
    4192  eventfd_signal(t->entries[i].eventfd, 1);  in __mem_cgroup_threshold()
    4195  t->current_threshold = i - 1;  in __mem_cgroup_threshold()
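__mem_cgroup_threshold() above keeps its threshold array sorted and remembers, in current_threshold, where usage last sat; on each call it walks downward past thresholds now above usage and upward past thresholds now at or below it, signalling each one crossed. A userspace sketch with printf() in place of eventfd_signal(); note the i++ between the loops, which the truncated listing omits but the walk needs:

    #include <stdio.h>

    struct thresholds {
        const long *entries;      /* sorted ascending */
        int size;
        int current_threshold;    /* index of last threshold <= usage */
    };

    static void check_thresholds(struct thresholds *t, long usage)
    {
        int i = t->current_threshold;

        /* crossed downward: entries now above usage */
        for (; i >= 0 && t->entries[i] > usage; i--)
            printf("signal (down): %ld\n", t->entries[i]);

        i++;    /* i = current_threshold + 1 */

        /* crossed upward: entries now at or below usage */
        for (; i < t->size && t->entries[i] <= usage; i++)
            printf("signal (up): %ld\n", t->entries[i]);

        t->current_threshold = i - 1;
    }

    int main(void)
    {
        const long entries[] = { 100, 200, 300 };
        struct thresholds t = { entries, 3, -1 };    /* -1: below all */

        check_thresholds(&t, 250);    /* fires 100, then 200 */
        check_thresholds(&t, 50);     /* fires 200, then 100 */
        return 0;
    }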
D | page_alloc.c
    2087  unsigned long t;  in deferred_init_maxorder() local
    2092  t = min(mo_pfn, *end_pfn);  in deferred_init_maxorder()
    2093  nr_pages += deferred_init_pages(zone, *start_pfn, t);  in deferred_init_maxorder()
    2105  unsigned long t;  in deferred_init_maxorder() local
    2110  t = min(mo_pfn, epfn);  in deferred_init_maxorder()
    2111  deferred_free_pages(spfn, t);  in deferred_init_maxorder()
    3461  unsigned int order, t;  in mark_free_pages() local
    3486  for_each_migratetype_order(order, t) {  in mark_free_pages()
    3488  &zone->free_area[order].free_list[t], buddy_list) {  in mark_free_pages()
    6979  unsigned int order, t;  in zone_init_free_lists() local
      [all …]
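deferred_init_maxorder() above clamps each step to t = min(mo_pfn, end), i.e. it processes pfns one MAX_ORDER-aligned block at a time. A sketch of that chunked walk; MAX_ORDER_NR_PAGES is an assumed power-of-two block size:

    #include <stdio.h>

    #define MAX_ORDER_NR_PAGES 1024UL    /* assumed power-of-two block size */

    /*
     * Walk [spfn, epfn) one MAX_ORDER-aligned block at a time, so a
     * single pass never crosses a max-order boundary.
     */
    static void walk_in_maxorder_chunks(unsigned long spfn, unsigned long epfn)
    {
        while (spfn < epfn) {
            /* first max-order boundary strictly above spfn */
            unsigned long mo_pfn = (spfn + MAX_ORDER_NR_PAGES) &
                                   ~(MAX_ORDER_NR_PAGES - 1);
            unsigned long t = mo_pfn < epfn ? mo_pfn : epfn;

            printf("init pfns [%lu, %lu)\n", spfn, t);
            spfn = t;
        }
    }

    int main(void)
    {
        /* prints [1000, 1024), [1024, 2048), [2048, 3000) */
        walk_in_maxorder_chunks(1000, 3000);
        return 0;
    }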
D | mempolicy.c
    1392  unsigned long t;  in get_nodes() local
    1394  if (get_bitmap(&t, &nmask[(maxnode - 1) / BITS_PER_LONG], bits))  in get_nodes()
    1401  t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);  in get_nodes()
    1403  if (t)  in get_nodes()
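The get_nodes() fragment validates the last word of a user-supplied node mask: clear the bits that are legal and fail if anything remains, i.e. if a node at or above MAX_NUMNODES was requested. The check in isolation, with MAX_NUMNODES shrunk to a demo value:

    #include <stdio.h>

    #define BITS_PER_LONG (8 * (int)sizeof(long))
    #define MAX_NUMNODES  10    /* assumed small value for the demo */

    /*
     * Clear the valid low bits of the mask's last word and fail if
     * anything remains. (Assumes MAX_NUMNODES is not a multiple of
     * BITS_PER_LONG, which the kernel path running this line also
     * relies on.)
     */
    static int tail_bits_ok(unsigned long last_word)
    {
        unsigned long t = last_word;

        t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
        return t == 0;    /* nonzero t means an invalid node bit */
    }

    int main(void)
    {
        printf("%d\n", tail_bits_ok(0x3ffUL));    /* bits 0..9 ok: 1 */
        printf("%d\n", tail_bits_ok(0x400UL));    /* bit 10 invalid: 0 */
        return 0;
    }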
D | Kconfig.debug
      28  For architectures which don't enable ARCH_SUPPORTS_DEBUG_PAGEALLOC,
D | vmalloc.c
      93  struct llist_node *t, *llnode;  in free_work() local
      95  llist_for_each_safe(llnode, t, llist_del_all(&p->list))  in free_work()
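The free_work() fragment drains a lock-free list: llist_del_all() detaches the entire list with one atomic exchange, and the _safe iterator keeps the next pointer in t so each node can be freed mid-walk. A C11-atomics sketch of the same drain; push() and the node layout are stand-ins for llist_add() and struct llist_node:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        struct node *next;
        int payload;
    };

    static _Atomic(struct node *) list_head;

    /* Lock-free push, standing in for llist_add(). */
    static void push(struct node *n)
    {
        n->next = atomic_load(&list_head);
        while (!atomic_compare_exchange_weak(&list_head, &n->next, n))
            ;    /* n->next was refreshed; retry */
    }

    /*
     * Detach the whole list in one atomic exchange, then walk it with
     * a saved next pointer (the 't' in the fragment) so each node can
     * be freed mid-walk.
     */
    static void drain(void)
    {
        struct node *llnode = atomic_exchange(&list_head, NULL);
        struct node *t;

        for (; llnode; llnode = t) {
            t = llnode->next;    /* grab next before freeing */
            printf("freeing %d\n", llnode->payload);
            free(llnode);
        }
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            struct node *n = malloc(sizeof(*n));

            if (!n)
                return 1;
            n->payload = i;
            push(n);
        }
        drain();    /* frees 2, 1, 0 (LIFO order) */
        return 0;
    }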