
Searched refs: t (Results 1 – 13 of 13) sorted by relevance

/mm/
vmstat.c
322 long t; in __mod_zone_page_state() local
326 t = __this_cpu_read(pcp->stat_threshold); in __mod_zone_page_state()
328 if (unlikely(x > t || x < -t)) { in __mod_zone_page_state()
342 long t; in __mod_node_page_state() local
346 t = __this_cpu_read(pcp->stat_threshold); in __mod_node_page_state()
348 if (unlikely(x > t || x < -t)) { in __mod_node_page_state()
383 s8 v, t; in __inc_zone_state() local
386 t = __this_cpu_read(pcp->stat_threshold); in __inc_zone_state()
387 if (unlikely(v > t)) { in __inc_zone_state()
388 s8 overstep = t >> 1; in __inc_zone_state()
[all …]
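
The vmstat.c hits are the per-CPU statistics helpers: each update accumulates a delta in a small per-CPU diff counter and only folds it into the global zone/node counter when the accumulated value crosses the per-CPU stat_threshold in either direction. A minimal, single-threaded user-space sketch of that threshold-and-fold pattern (hypothetical simplified types; the real code works on per-CPU data with this_cpu operations):

    #include <stdio.h>

    /* Hypothetical simplification of a zone counter plus one CPU's diff. */
    struct counter {
        long global;        /* authoritative value, updated rarely        */
        long cpu_diff;      /* per-CPU accumulated delta                  */
        long threshold;     /* fold when |cpu_diff| would exceed this     */
    };

    static void mod_state(struct counter *c, long delta)
    {
        long x = c->cpu_diff + delta;
        long t = c->threshold;

        if (x > t || x < -t) {  /* crossed the threshold: fold to global */
            c->global += x;
            x = 0;
        }
        c->cpu_diff = x;
    }

    int main(void)
    {
        struct counter c = { .global = 0, .cpu_diff = 0, .threshold = 8 };

        for (int i = 0; i < 20; i++)
            mod_state(&c, 1);

        /* The remainder sits in cpu_diff until a later fold or drain. */
        printf("global=%ld cpu_diff=%ld\n", c.global, c.cpu_diff);
        return 0;
    }
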
memory-failure.c
211 struct task_struct *t = tk->tsk; in kill_proc() local
216 pfn, t->comm, t->pid); in kill_proc()
218 if ((flags & MF_ACTION_REQUIRED) && t->mm == current->mm) { in kill_proc()
229 addr_lsb, t); /* synchronous? */ in kill_proc()
233 t->comm, t->pid, ret); in kill_proc()
405 struct task_struct *t; in find_early_kill_thread() local
407 for_each_thread(tsk, t) in find_early_kill_thread()
408 if ((t->flags & PF_MCE_PROCESS) && (t->flags & PF_MCE_EARLY)) in find_early_kill_thread()
409 return t; in find_early_kill_thread()
422 struct task_struct *t; in task_early_kill() local
[all …]
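
In memory-failure.c, kill_proc() signals a task that maps a poisoned page, and find_early_kill_thread() picks the thread to signal early: the first one with both PF_MCE_PROCESS and PF_MCE_EARLY set. A small sketch of that flag scan over a plain array standing in for the kernel's thread list (the flag values here are hypothetical):

    #include <stdio.h>
    #include <stddef.h>

    #define PF_MCE_PROCESS 0x1   /* hypothetical flag values */
    #define PF_MCE_EARLY   0x2

    struct task {
        int pid;
        unsigned int flags;
    };

    /* Return the first thread that asked for early handling, or NULL. */
    static struct task *find_early_kill_thread(struct task *threads, size_t n)
    {
        for (size_t i = 0; i < n; i++)
            if ((threads[i].flags & PF_MCE_PROCESS) &&
                (threads[i].flags & PF_MCE_EARLY))
                return &threads[i];
        return NULL;
    }

    int main(void)
    {
        struct task threads[] = {
            { 100, 0 },
            { 101, PF_MCE_PROCESS },
            { 102, PF_MCE_PROCESS | PF_MCE_EARLY },
        };
        struct task *t = find_early_kill_thread(threads, 3);

        printf("early-kill thread: %d\n", t ? t->pid : -1);
        return 0;
    }
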
oom_kill.c
134 struct task_struct *t; in find_lock_task_mm() local
138 for_each_thread(p, t) { in find_lock_task_mm()
139 task_lock(t); in find_lock_task_mm()
140 if (likely(t->mm)) in find_lock_task_mm()
142 task_unlock(t); in find_lock_task_mm()
144 t = NULL; in find_lock_task_mm()
148 return t; in find_lock_task_mm()
492 struct task_struct *t; in process_shares_mm() local
494 for_each_thread(p, t) { in process_shares_mm()
495 struct mm_struct *t_mm = READ_ONCE(t->mm); in process_shares_mm()
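
find_lock_task_mm() walks a process's threads, takes each thread's task lock, and returns the first thread that still has an mm, leaving that lock held; if every thread has dropped its mm it returns NULL. A user-space sketch of the same "first element with a live pointer, returned locked" idiom, using pthread mutexes and hypothetical types:

    #include <pthread.h>
    #include <stdio.h>
    #include <stddef.h>

    struct task {
        pthread_mutex_t lock;
        void *mm;               /* NULL once the thread has released it */
    };

    /* Return the first thread that still owns an mm, with its lock held,
     * or NULL if every thread has already dropped it. */
    static struct task *find_lock_task_mm(struct task *threads, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            struct task *t = &threads[i];

            pthread_mutex_lock(&t->lock);
            if (t->mm)
                return t;       /* caller must unlock when done */
            pthread_mutex_unlock(&t->lock);
        }
        return NULL;
    }

    int main(void)
    {
        int mm = 42;
        struct task threads[2] = {
            { PTHREAD_MUTEX_INITIALIZER, NULL },
            { PTHREAD_MUTEX_INITIALIZER, &mm },
        };
        struct task *t = find_lock_task_mm(threads, 2);

        if (t) {
            printf("found task with mm %p\n", t->mm);
            pthread_mutex_unlock(&t->lock);
        }
        return 0;
    }
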
page-writeback.c
628 static void writeout_period(struct timer_list *t) in writeout_period() argument
630 struct wb_domain *dom = from_timer(dom, t, period_timer); in writeout_period()
1423 unsigned long t; in wb_max_pause() local
1432 t = wb_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8)); in wb_max_pause()
1433 t++; in wb_max_pause()
1435 return min_t(unsigned long, t, MAX_PAUSE); in wb_max_pause()
1446 long t; /* target pause */ in wb_min_pause() local
1451 t = max(1, HZ / 100); in wb_min_pause()
1460 t += (hi - lo) * (10 * HZ) / 1024; in wb_min_pause()
1480 t = min(t, 1 + max_pause / 2); in wb_min_pause()
[all …]
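
wb_max_pause() bounds how long a dirtying task may be paused: roughly the time needed to write back its dirty pages at the current writeback bandwidth, plus one tick, capped at MAX_PAUSE. A standalone sketch of the arithmetic shown in the 1432-1435 fragment, with hypothetical HZ and MAX_PAUSE values and stand-ins for the kernel's roundup_pow_of_two() and min_t():

    #include <stdio.h>

    #define HZ        250UL      /* hypothetical jiffies per second */
    #define MAX_PAUSE (HZ / 5)   /* hypothetical cap: 200 ms        */

    static unsigned long roundup_pow_of_two(unsigned long n)
    {
        unsigned long p = 1;

        while (p < n)
            p <<= 1;
        return p;
    }

    static unsigned long min_ul(unsigned long a, unsigned long b)
    {
        return a < b ? a : b;
    }

    /* Pause long enough to write back wb_dirty pages at bandwidth bw
     * (pages per second), expressed in jiffies and capped at MAX_PAUSE. */
    static unsigned long wb_max_pause(unsigned long bw, unsigned long wb_dirty)
    {
        unsigned long t;

        t = wb_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
        t++;
        return min_ul(t, MAX_PAUSE);
    }

    int main(void)
    {
        printf("max pause: %lu jiffies\n", wb_max_pause(25600, 128));
        return 0;
    }
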
slub.c
583 static void print_track(const char *s, struct track *t, unsigned long pr_time) in print_track() argument
585 if (!t->addr) in print_track()
589 s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid); in print_track()
594 if (t->addrs[i]) in print_track()
595 pr_err("\t%pS\n", (void *)t->addrs[i]); in print_track()
1854 void *t; in get_partial_node() local
1859 t = acquire_slab(s, n, page, object == NULL, &objects); in get_partial_node()
1860 if (!t) in get_partial_node()
1867 object = t; in get_partial_node()
3977 struct page *t; in __kmem_cache_shrink() local
[all …]
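
print_track() dumps one SLUB allocation/free track: it bails out if no call-site address was recorded, prints the address, the record's age (pr_time - t->when), the CPU and PID, and then any saved stack addresses. A rough user-space sketch of that record and dump routine (the field sizes and stack-slot count here are hypothetical):

    #include <stdio.h>

    #define TRACK_ADDRS_COUNT 4   /* hypothetical; the kernel stores more */

    struct track {
        unsigned long addr;                      /* call site, 0 = unused  */
        unsigned long addrs[TRACK_ADDRS_COUNT];  /* partial stack trace    */
        int cpu;
        int pid;
        unsigned long when;                      /* timestamp in jiffies   */
    };

    static void print_track(const char *s, const struct track *t,
                            unsigned long pr_time)
    {
        if (!t->addr)                            /* nothing recorded */
            return;

        printf("%s in %#lx age=%lu cpu=%d pid=%d\n",
               s, t->addr, pr_time - t->when, t->cpu, t->pid);

        for (int i = 0; i < TRACK_ADDRS_COUNT; i++)
            if (t->addrs[i])
                printf("\t%#lx\n", t->addrs[i]);
    }

    int main(void)
    {
        struct track t = {
            .addr = 0xffff1000, .addrs = { 0xffff2000, 0xffff3000 },
            .cpu = 1, .pid = 42, .when = 100,
        };

        print_track("Allocated", &t, 160);
        return 0;
    }
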
hugetlb.c
262 static long region_add(struct resv_map *resv, long f, long t) in region_add() argument
280 if (&rg->link == head || t < rg->from) { in region_add()
289 nrg->to = t; in region_add()
292 add += t - f; in region_add()
305 if (rg->from > t) in region_add()
311 if (rg->to > t) in region_add()
312 t = rg->to; in region_add()
326 add += t - nrg->to; /* Added to end of region */ in region_add()
327 nrg->to = t; in region_add()
358 static long region_chg(struct resv_map *resv, long f, long t) in region_chg() argument
[all …]
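
region_add() folds the range [f, t) into a hugetlb reservation map: it finds where the range lands, extends an existing region to cover it, absorbs any following regions that now overlap, and returns how many pages the map grew by. A simplified sketch of the same merge over a sorted array of disjoint [from, to) intervals (the kernel keeps a linked list protected by the map's lock; this array structure is a stand-in):

    #include <stdio.h>

    #define MAX_REGIONS 16

    /* Disjoint, sorted [from, to) ranges; a stand-in for the resv_map list. */
    struct region {
        long from;
        long to;
    };

    /* Add [f, t) to the map and return how many new units became covered. */
    static long region_add(struct region *r, int *n, long f, long t)
    {
        long absorbed = 0;
        int i = 0, j;

        /* Skip regions that end before the new range starts. */
        while (i < *n && r[i].to < f)
            i++;

        /* Absorb every region that touches or overlaps [f, t). */
        j = i;
        while (j < *n && r[j].from <= t) {
            absorbed += r[j].to - r[j].from;
            if (r[j].from < f)
                f = r[j].from;
            if (r[j].to > t)
                t = r[j].to;
            j++;
        }

        if (j == i) {
            /* No overlap: shift the tail right and insert a new region. */
            for (int k = *n; k > i; k--)
                r[k] = r[k - 1];
            (*n)++;
        } else if (j > i + 1) {
            /* Several regions collapsed into one: close the gap. */
            for (int k = j; k < *n; k++)
                r[i + 1 + (k - j)] = r[k];
            *n -= (j - i) - 1;
        }
        r[i].from = f;
        r[i].to = t;

        return (t - f) - absorbed;
    }

    int main(void)
    {
        struct region map[MAX_REGIONS] = { { 10, 20 }, { 30, 40 } };
        int n = 2;
        long added = region_add(map, &n, 15, 35);

        printf("added %ld, map now has %d region(s): [%ld, %ld)\n",
               added, n, map[0].from, map[0].to);
        return 0;
    }
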
util.c
298 struct task_struct * __maybe_unused t = current; in vma_is_stack_for_current() local
300 return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t)); in vma_is_stack_for_current()
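
vma_is_stack_for_current() only checks whether the current task's stack pointer (KSTK_ESP) lies inside the VMA's [vm_start, vm_end] range. The test reduces to a bounds check, sketched here with hypothetical addresses:

    #include <stdbool.h>
    #include <stdio.h>

    struct vm_area {
        unsigned long vm_start;
        unsigned long vm_end;
    };

    /* True if the stack pointer sp lies within the area. */
    static bool vma_is_stack_for(const struct vm_area *vma, unsigned long sp)
    {
        return vma->vm_start <= sp && vma->vm_end >= sp;
    }

    int main(void)
    {
        struct vm_area stack = { 0x7ffd0000, 0x7fff0000 };

        printf("%d\n", vma_is_stack_for(&stack, 0x7ffe1234));  /* 1 */
        printf("%d\n", vma_is_stack_for(&stack, 0x10000000));  /* 0 */
        return 0;
    }
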
Kconfig.debug
28 For architectures which don't enable ARCH_SUPPORTS_DEBUG_PAGEALLOC,
82 bool "Only poison, don't sanity check"
Kconfig
147 # feature. If you are not sure, don't touch it.
234 invocations for high order memory requests. You shouldn't
404 madvise(MADV_HUGEPAGE) but it won't risk to increase the
439 (PFRA) would like to keep around, but can't since there isn't enough
540 interactions don't cause any known issues on simple memory setups,
mempolicy.c
1332 unsigned long t; in get_nodes() local
1360 if (get_user(t, nmask + k)) in get_nodes()
1363 if (t & endmask) in get_nodes()
1365 } else if (t) in get_nodes()
1376 if (get_user(t, nmask + nlongs - 1)) in get_nodes()
1378 if (t & valid_mask) in get_nodes()
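
get_nodes() copies a user-supplied node bitmap word by word and rejects it if any bit at or above the highest supported node is set: whole words beyond the supported range must be zero, and the last in-range word is checked against a mask of its invalid high bits. A sketch of that validation over an array of unsigned longs (the helper name and the driver values are hypothetical):

    #include <stdbool.h>
    #include <stdio.h>
    #include <limits.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    /* True if no bit at or above maxnode is set in the nlongs-word bitmap. */
    static bool nodemask_valid(const unsigned long *mask, unsigned long nlongs,
                               unsigned long maxnode)
    {
        unsigned long valid_words = (maxnode + BITS_PER_LONG - 1) / BITS_PER_LONG;

        for (unsigned long k = valid_words; k < nlongs; k++)
            if (mask[k])
                return false;    /* bits far beyond the supported nodes */

        if (maxnode % BITS_PER_LONG) {
            unsigned long invalid = ~0UL << (maxnode % BITS_PER_LONG);

            if (mask[valid_words - 1] & invalid)
                return false;    /* high bits of the last valid word */
        }
        return true;
    }

    int main(void)
    {
        unsigned long ok[2]  = { 0x7, 0 };       /* nodes 0-2           */
        unsigned long bad[2] = { 0x7, 0x1 };     /* stray bit in word 1 */

        printf("%d %d\n", nodemask_valid(ok, 2, 4), nodemask_valid(bad, 2, 4));
        return 0;
    }
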
memcontrol.c
3958 struct mem_cgroup_threshold_ary *t; in __mem_cgroup_threshold() local
3964 t = rcu_dereference(memcg->thresholds.primary); in __mem_cgroup_threshold()
3966 t = rcu_dereference(memcg->memsw_thresholds.primary); in __mem_cgroup_threshold()
3968 if (!t) in __mem_cgroup_threshold()
3978 i = t->current_threshold; in __mem_cgroup_threshold()
3986 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) in __mem_cgroup_threshold()
3987 eventfd_signal(t->entries[i].eventfd, 1); in __mem_cgroup_threshold()
3998 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) in __mem_cgroup_threshold()
3999 eventfd_signal(t->entries[i].eventfd, 1); in __mem_cgroup_threshold()
4002 t->current_threshold = i - 1; in __mem_cgroup_threshold()
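
__mem_cgroup_threshold() keeps each memcg's thresholds sorted in ascending order together with the index of the last one crossed. When usage changes it walks backwards from that index signalling every threshold now above usage, then forwards signalling every threshold at or below usage, and stores the new index as current_threshold = i - 1, as the 3986-4002 fragments show. A user-space sketch with a print callback standing in for eventfd_signal() and hypothetical types:

    #include <stdio.h>

    struct threshold {
        unsigned long threshold;
    };

    struct threshold_ary {
        int size;
        int current_threshold;       /* index of last crossed entry, or -1 */
        struct threshold entries[8]; /* sorted ascending                   */
    };

    static void signal_event(unsigned long value)
    {
        printf("crossed threshold %lu\n", value);  /* stands in for eventfd */
    }

    static void check_thresholds(struct threshold_ary *t, unsigned long usage)
    {
        int i = t->current_threshold;

        /* Usage dropped: fire entries now above usage, walking down. */
        for (; i >= 0 && t->entries[i].threshold > usage; i--)
            signal_event(t->entries[i].threshold);

        i++;

        /* Usage rose: fire entries at or below usage, walking up. */
        for (; i < t->size && t->entries[i].threshold <= usage; i++)
            signal_event(t->entries[i].threshold);

        t->current_threshold = i - 1;
    }

    int main(void)
    {
        struct threshold_ary t = {
            .size = 3, .current_threshold = -1,
            .entries = { { 100 }, { 200 }, { 300 } },
        };

        check_thresholds(&t, 250);   /* fires 100 and 200 */
        check_thresholds(&t, 50);    /* fires 200 and 100 on the way down */
        return 0;
    }
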
page_alloc.c
1745 unsigned long t; in deferred_init_maxorder() local
1750 t = min(mo_pfn, *end_pfn); in deferred_init_maxorder()
1751 nr_pages += deferred_init_pages(zone, *start_pfn, t); in deferred_init_maxorder()
1763 unsigned long t; in deferred_init_maxorder() local
1768 t = min(mo_pfn, epfn); in deferred_init_maxorder()
1769 deferred_free_pages(spfn, t); in deferred_init_maxorder()
2975 unsigned int order, t; in mark_free_pages() local
3000 for_each_migratetype_order(order, t) { in mark_free_pages()
3002 &zone->free_area[order].free_list[t], lru) { in mark_free_pages()
6030 unsigned int order, t; in zone_init_free_lists() local
[all …]
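
deferred_init_maxorder() initialises and then frees deferred struct pages one max-order block at a time: each step clamps its end PFN to the smaller of the next max-order boundary (mo_pfn) and the range's end, so no chunk crosses a block boundary. A sketch of that clamped chunking over a PFN range, with a hypothetical block size and a stub standing in for deferred_init_pages()/deferred_free_pages():

    #include <stdio.h>

    #define MAX_ORDER_NR_PAGES 1024UL   /* hypothetical pages per block */

    static unsigned long min_ul(unsigned long a, unsigned long b)
    {
        return a < b ? a : b;
    }

    /* Stub for the per-chunk work: just report what would be processed. */
    static unsigned long process_chunk(unsigned long spfn, unsigned long epfn)
    {
        printf("chunk [%lu, %lu)\n", spfn, epfn);
        return epfn - spfn;
    }

    /* Walk [start_pfn, end_pfn) in pieces that never cross a boundary. */
    static unsigned long walk_by_maxorder(unsigned long start_pfn,
                                          unsigned long end_pfn)
    {
        unsigned long nr_pages = 0;

        while (start_pfn < end_pfn) {
            /* First max-order boundary strictly after start_pfn. */
            unsigned long mo_pfn = (start_pfn / MAX_ORDER_NR_PAGES + 1) *
                                   MAX_ORDER_NR_PAGES;
            unsigned long t = min_ul(mo_pfn, end_pfn);

            nr_pages += process_chunk(start_pfn, t);
            start_pfn = t;
        }
        return nr_pages;
    }

    int main(void)
    {
        printf("total: %lu pages\n", walk_by_maxorder(1000, 3500));
        return 0;
    }
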
vmalloc.c
55 struct llist_node *t, *llnode; in free_work() local
57 llist_for_each_safe(llnode, t, llist_del_all(&p->list)) in free_work()
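
free_work() detaches vmalloc's deferred-free list in one atomic llist_del_all() and then walks it with llist_for_each_safe(), using the second cursor t to remember the next node before the current one is freed. A user-space sketch of the same detach-then-safe-walk idea with C11 atomics and malloc'd nodes (the structure is a stand-in for the kernel's <linux/llist.h> helpers):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        struct node *next;
        int payload;
    };

    /* Lock-free push onto a singly linked stack. */
    static void list_add(struct node *_Atomic *head, struct node *n)
    {
        n->next = atomic_load(head);
        while (!atomic_compare_exchange_weak(head, &n->next, n))
            ;   /* n->next was refreshed by the failed CAS; retry */
    }

    /* Detach the entire list atomically, like llist_del_all(). */
    static struct node *list_del_all(struct node *_Atomic *head)
    {
        return atomic_exchange(head, NULL);
    }

    int main(void)
    {
        struct node *_Atomic head = NULL;

        for (int i = 0; i < 3; i++) {
            struct node *n = malloc(sizeof(*n));

            n->payload = i;
            list_add(&head, n);
        }

        /* Safe walk: grab ->next before freeing the current node. */
        struct node *t, *llnode;

        for (llnode = list_del_all(&head); llnode; llnode = t) {
            t = llnode->next;
            printf("freeing node %d\n", llnode->payload);
            free(llnode);
        }
        return 0;
    }
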