Searched refs:t (Results 1 – 12 of 12) sorted by relevance

/mm/
vmstat.c
246 long t; in __mod_zone_page_state() local
250 t = __this_cpu_read(pcp->stat_threshold); in __mod_zone_page_state()
252 if (unlikely(x > t || x < -t)) { in __mod_zone_page_state()
266 long t; in __mod_node_page_state() local
270 t = __this_cpu_read(pcp->stat_threshold); in __mod_node_page_state()
272 if (unlikely(x > t || x < -t)) { in __mod_node_page_state()
307 s8 v, t; in __inc_zone_state() local
310 t = __this_cpu_read(pcp->stat_threshold); in __inc_zone_state()
311 if (unlikely(v > t)) { in __inc_zone_state()
312 s8 overstep = t >> 1; in __inc_zone_state()
[all …]
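
The vmstat.c hits all show the same pattern: __mod_zone_page_state() accumulates a per-CPU delta and only folds it into the shared zone counter once the delta exceeds the small threshold t read from pcp->stat_threshold. A minimal single-threaded sketch of that batching idea (hypothetical names, not the kernel's per-CPU machinery):

#include <stdio.h>

/* Hypothetical single-threaded analogue of the stat_threshold batching
 * seen in __mod_zone_page_state(): small deltas stay in a local counter
 * and are only folded into the global counter once |delta| exceeds t. */
static long global_count;
static long local_delta;
static const long t = 32;          /* stands in for pcp->stat_threshold */

static void mod_counter(long delta)
{
    long x = local_delta + delta;

    if (x > t || x < -t) {         /* threshold crossed: flush to global */
        global_count += x;
        x = 0;
    }
    local_delta = x;
}

int main(void)
{
    for (int i = 0; i < 100; i++)
        mod_counter(1);
    printf("global=%ld local=%ld\n", global_count, local_delta);
    return 0;
}
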
memory-failure.c
181 static int kill_proc(struct task_struct *t, unsigned long addr, int trapno, in kill_proc() argument
188 pfn, t->comm, t->pid); in kill_proc()
197 if ((flags & MF_ACTION_REQUIRED) && t->mm == current->mm) { in kill_proc()
208 ret = send_sig_info(SIGBUS, &si, t); /* synchronous? */ in kill_proc()
212 t->comm, t->pid, ret); in kill_proc()
368 struct task_struct *t; in find_early_kill_thread() local
370 for_each_thread(tsk, t) in find_early_kill_thread()
371 if ((t->flags & PF_MCE_PROCESS) && (t->flags & PF_MCE_EARLY)) in find_early_kill_thread()
372 return t; in find_early_kill_thread()
385 struct task_struct *t; in task_early_kill() local
[all …]
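
The memory-failure.c matches come from kill_proc() and find_early_kill_thread(); the latter walks a victim's thread group looking for a thread that asked for early SIGBUS delivery via the PF_MCE_PROCESS and PF_MCE_EARLY flags. A hedged sketch of that flag scan over a plain array (flag values and the fake_task type are made up for illustration):

#include <stdio.h>
#include <stddef.h>

#define PF_MCE_PROCESS 0x1   /* made-up values; the real kernel flags differ */
#define PF_MCE_EARLY   0x2

struct fake_task { int pid; unsigned int flags; };

/* Return the first thread that opted in to early MCE kill, else NULL. */
static struct fake_task *find_early_kill(struct fake_task *threads, size_t n)
{
    for (size_t i = 0; i < n; i++)
        if ((threads[i].flags & PF_MCE_PROCESS) && (threads[i].flags & PF_MCE_EARLY))
            return &threads[i];
    return NULL;
}

int main(void)
{
    struct fake_task group[] = {
        { 100, 0 },
        { 101, PF_MCE_PROCESS },
        { 102, PF_MCE_PROCESS | PF_MCE_EARLY },
    };
    struct fake_task *t = find_early_kill(group, 3);
    printf("%d\n", t ? t->pid : -1);   /* prints 102 */
    return 0;
}
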
oom_kill.c
110 struct task_struct *t; in find_lock_task_mm() local
114 for_each_thread(p, t) { in find_lock_task_mm()
115 task_lock(t); in find_lock_task_mm()
116 if (likely(t->mm)) in find_lock_task_mm()
118 task_unlock(t); in find_lock_task_mm()
120 t = NULL; in find_lock_task_mm()
124 return t; in find_lock_task_mm()
444 struct task_struct *t; in process_shares_mm() local
446 for_each_thread(p, t) { in process_shares_mm()
447 struct mm_struct *t_mm = READ_ONCE(t->mm); in process_shares_mm()
[all …]
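
The oom_kill.c hits show find_lock_task_mm(): walk a process's threads, lock each one, and return the first that still has an mm, leaving its lock held so the caller sees a stable mm. A simplified userspace analogue using pthread mutexes (fake_task and its fields are assumptions, not kernel types):

#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

/* Hypothetical analogue of find_lock_task_mm(): scan a thread group,
 * take each task's lock, and return the first task whose mm pointer is
 * non-NULL -- still locked, so the caller sees a stable mm. */
struct fake_task {
    pthread_mutex_t lock;
    void *mm;                      /* NULL once the thread has exited */
};

static struct fake_task *find_lock_task_mm(struct fake_task *tasks, size_t n)
{
    for (size_t i = 0; i < n; i++) {
        pthread_mutex_lock(&tasks[i].lock);
        if (tasks[i].mm)
            return &tasks[i];      /* returned with its lock held */
        pthread_mutex_unlock(&tasks[i].lock);
    }
    return NULL;
}

int main(void)
{
    int dummy_mm;
    struct fake_task group[2] = {
        { PTHREAD_MUTEX_INITIALIZER, NULL },
        { PTHREAD_MUTEX_INITIALIZER, &dummy_mm },
    };
    struct fake_task *t = find_lock_task_mm(group, 2);
    printf("found=%d\n", t ? (int)(t - group) : -1);   /* prints 1 */
    if (t)
        pthread_mutex_unlock(&t->lock);
    return 0;
}
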
slub.c
564 static void print_track(const char *s, struct track *t) in print_track() argument
566 if (!t->addr) in print_track()
570 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid); in print_track()
575 if (t->addrs[i]) in print_track()
576 pr_err("\t%pS\n", (void *)t->addrs[i]); in print_track()
1810 void *t; in get_partial_node() local
1815 t = acquire_slab(s, n, page, object == NULL, &objects); in get_partial_node()
1816 if (!t) in get_partial_node()
1823 object = t; in get_partial_node()
3899 struct page *t; in __kmem_cache_shrink() local
[all …]
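
In slub.c, t is the struct track that records who last allocated or freed an object, and print_track() dumps it when SLUB debugging reports a problem. A rough userspace imitation of that dump (the struct layout and TRACK_ADDRS_COUNT value are assumptions, not the kernel's definitions):

#include <stdio.h>
#include <time.h>

#define TRACK_ADDRS_COUNT 4

struct track {
    void *addr;                        /* caller address */
    void *addrs[TRACK_ADDRS_COUNT];    /* partial stack trace */
    int cpu;
    int pid;
    unsigned long when;                /* timestamp, in seconds here */
};

static void print_track(const char *s, const struct track *t, unsigned long now)
{
    if (!t->addr)
        return;                        /* nothing recorded yet */
    printf("%s in %p age=%lus cpu=%d pid=%d\n",
           s, t->addr, now - t->when, t->cpu, t->pid);
    for (int i = 0; i < TRACK_ADDRS_COUNT; i++)
        if (t->addrs[i])
            printf("\t%p\n", t->addrs[i]);
}

int main(void)
{
    int caller, frame;                 /* stand-ins for code addresses */
    struct track t = { &caller, { &frame }, 0, 1234,
                       (unsigned long)time(NULL) - 5 };
    print_track("Allocated", &t, (unsigned long)time(NULL));
    return 0;
}
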
page-writeback.c
627 static void writeout_period(unsigned long t) in writeout_period() argument
629 struct wb_domain *dom = (void *)t; in writeout_period()
1424 unsigned long t; in wb_max_pause() local
1433 t = wb_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8)); in wb_max_pause()
1434 t++; in wb_max_pause()
1436 return min_t(unsigned long, t, MAX_PAUSE); in wb_max_pause()
1447 long t; /* target pause */ in wb_min_pause() local
1452 t = max(1, HZ / 100); in wb_min_pause()
1461 t += (hi - lo) * (10 * HZ) / 1024; in wb_min_pause()
1481 t = min(t, 1 + max_pause / 2); in wb_min_pause()
[all …]
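
The page-writeback.c hits use t both as a timer argument (writeout_period()) and as a pause length in jiffies (wb_max_pause(), wb_min_pause()). The wb_max_pause() arithmetic visible above caps the pause so the dirty backlog would drain in bounded time at the current bandwidth; a userspace rendering of just that arithmetic (HZ and the MAX_PAUSE cap are assumed values):

#include <stdio.h>

#define HZ        250
#define MAX_PAUSE (HZ / 5)          /* assumed cap, roughly 200ms */

static unsigned long roundup_pow_of_two(unsigned long n)
{
    unsigned long p = 1;
    while (p < n)
        p <<= 1;
    return p;
}

static unsigned long wb_max_pause(unsigned long bw, unsigned long wb_dirty)
{
    unsigned long t;

    /* pages writable per ~1/8 second, rounded up to a power of two */
    t = wb_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
    t++;                            /* never return a zero-length pause */

    return t < MAX_PAUSE ? t : MAX_PAUSE;
}

int main(void)
{
    printf("%lu\n", wb_max_pause(1000, 200));   /* small backlog: short pause */
    printf("%lu\n", wb_max_pause(10, 100000));  /* huge backlog: clamped to MAX_PAUSE */
    return 0;
}
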
hugetlb.c
257 static long region_add(struct resv_map *resv, long f, long t) in region_add() argument
275 if (&rg->link == head || t < rg->from) { in region_add()
284 nrg->to = t; in region_add()
287 add += t - f; in region_add()
300 if (rg->from > t) in region_add()
306 if (rg->to > t) in region_add()
307 t = rg->to; in region_add()
321 add += t - nrg->to; /* Added to end of region */ in region_add()
322 nrg->to = t; in region_add()
353 static long region_chg(struct resv_map *resv, long f, long t) in region_chg() argument
[all …]
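
In hugetlb.c, f and t are the first and last page of a reservation range, and region_add() merges [f, t) into the sorted list hanging off resv_map, returning how many pages the call newly reserved. A much-simplified sketch over a fixed array instead of the kernel's locked linked list:

#include <stdio.h>

#define MAX_REGIONS 16

struct region { long from, to; };

static struct region regions[MAX_REGIONS];
static int nregions;

/* Merge [f, t) into the set of non-overlapping reservations and return
 * how many pages this call actually added. */
static long region_add(long f, long t)
{
    struct region merged[MAX_REGIONS];
    int n = 0;
    long covered = 0, added;

    for (int i = 0; i < nregions; i++) {
        if (regions[i].to < f || regions[i].from > t) {
            merged[n++] = regions[i];          /* disjoint: keep as-is */
        } else {
            if (regions[i].from < f) f = regions[i].from;
            if (regions[i].to > t)   t = regions[i].to;
            covered += regions[i].to - regions[i].from;
        }
    }
    added = (t - f) - covered;                 /* only the newly covered pages */
    merged[n].from = f;
    merged[n].to = t;
    n++;

    for (int i = 0; i < n; i++)
        regions[i] = merged[i];
    nregions = n;
    return added;
}

int main(void)
{
    printf("%ld\n", region_add(0, 10));    /* 10: nothing reserved yet */
    printf("%ld\n", region_add(5, 20));    /* 10: pages 10..19 are new */
    printf("%ld\n", region_add(0, 20));    /* 0: already fully covered */
    return 0;
}
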
Kconfig
171 Users who don't use the memory hotplug feature are fine with this
172 option on since they don't specify movable_node boot option or they
173 don't online memory as movable.
180 # feature. If you are not sure, don't touch it.
269 invocations for high order memory requests. You shouldn't
306 # have more than 4GB of memory, but we don't currently use the IOTLB to present
445 madvise(MADV_HUGEPAGE) but it won't risk to increase the
451 # We don't deposit page tables on file THP mapping,
472 (PFRA) would like to keep around, but can't since there isn't enough
575 interactions don't cause any known issues on simple memory setups,
util.c
259 struct task_struct * __maybe_unused t = current; in vma_is_stack_for_current() local
261 return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t)); in vma_is_stack_for_current()
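
The util.c hit is vma_is_stack_for_current(): a VMA counts as the current task's stack if the task's stack pointer lies inside it. A trivial userspace analogue (fake_vma is an assumption standing in for vm_area_struct):

#include <stdio.h>
#include <stdint.h>

struct fake_vma { uintptr_t vm_start, vm_end; };

/* A region is "the stack" for a task if the stack pointer falls inside it. */
static int vma_is_stack_for(const struct fake_vma *vma, uintptr_t sp)
{
    return vma->vm_start <= sp && vma->vm_end >= sp;
}

int main(void)
{
    int local;                                  /* lives on this stack */
    uintptr_t sp = (uintptr_t)&local;
    struct fake_vma stack_vma = { sp - 4096, sp + 4096 };
    struct fake_vma other_vma = { 0x1000, 0x2000 };

    printf("%d %d\n", vma_is_stack_for(&stack_vma, sp),
                      vma_is_stack_for(&other_vma, sp));   /* prints 1 0 */
    return 0;
}
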
Kconfig.debug
22 For architectures which don't enable ARCH_SUPPORTS_DEBUG_PAGEALLOC,
60 bool "Only poison, don't sanity check"
memcontrol.c
3260 struct mem_cgroup_threshold_ary *t; in __mem_cgroup_threshold() local
3266 t = rcu_dereference(memcg->thresholds.primary); in __mem_cgroup_threshold()
3268 t = rcu_dereference(memcg->memsw_thresholds.primary); in __mem_cgroup_threshold()
3270 if (!t) in __mem_cgroup_threshold()
3280 i = t->current_threshold; in __mem_cgroup_threshold()
3288 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) in __mem_cgroup_threshold()
3289 eventfd_signal(t->entries[i].eventfd, 1); in __mem_cgroup_threshold()
3300 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) in __mem_cgroup_threshold()
3301 eventfd_signal(t->entries[i].eventfd, 1); in __mem_cgroup_threshold()
3304 t->current_threshold = i - 1; in __mem_cgroup_threshold()
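
The memcontrol.c matches come from __mem_cgroup_threshold(), which walks a sorted array of memory-usage thresholds, signals an eventfd for every threshold crossed since the last check (in either direction), and records where current usage now sits. A sketch of that scan with printf standing in for eventfd_signal():

#include <stdio.h>

#define NTHRESH 4

struct threshold { unsigned long threshold; };

static struct threshold entries[NTHRESH] = { {100}, {200}, {300}, {400} };
static int current_threshold;        /* index of last threshold at or below usage */

static void signal_event(int i)      /* stands in for eventfd_signal() */
{
    printf("threshold %lu crossed\n", entries[i].threshold);
}

static void check_thresholds(unsigned long usage)
{
    int i = current_threshold;

    /* usage fell: walk down, signalling thresholds now above usage */
    for (; i >= 0 && entries[i].threshold > usage; i--)
        signal_event(i);

    i++;

    /* usage rose: walk up, signalling thresholds now at or below usage */
    for (; i < NTHRESH && entries[i].threshold <= usage; i++)
        signal_event(i);

    current_threshold = i - 1;
}

int main(void)
{
    current_threshold = -1;          /* nothing crossed yet */
    check_thresholds(250);           /* signals 100 and 200 */
    check_thresholds(150);           /* signals 200, crossed downwards */
    return 0;
}
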
mempolicy.c
1290 unsigned long t; in get_nodes() local
1291 if (get_user(t, nmask + k)) in get_nodes()
1294 if (t & endmask) in get_nodes()
1296 } else if (t) in get_nodes()
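
In mempolicy.c, get_nodes() copies a nodemask from user space one word at a time and rejects the call if any bit is set beyond the range the kernel supports. The sketch below keeps only that validation idea, over an in-memory array rather than get_user() (the helper name and semantics are hypothetical simplifications):

#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

/* Reject the mask if any bit at or above supported_bits is set. */
static int check_extra_bits(const unsigned long *nmask, int nlongs,
                            int supported_bits)
{
    int k = supported_bits / BITS_PER_LONG;          /* first word with extra bits */
    unsigned long endmask = ~0UL << (supported_bits % BITS_PER_LONG);

    for (; k < nlongs; k++) {
        unsigned long t = nmask[k];
        if (k == supported_bits / BITS_PER_LONG) {
            if (t & endmask)                         /* high bits of the boundary word */
                return -1;
        } else if (t) {                              /* whole word past the limit */
            return -1;
        }
    }
    return 0;
}

int main(void)
{
    unsigned long ok[2]  = { 0x0f, 0 };              /* nodes 0-3 only */
    unsigned long bad[2] = { 0x0f, 1 };              /* a node far past the limit */

    printf("%d %d\n", check_extra_bits(ok, 2, 8),
                      check_extra_bits(bad, 2, 8));  /* prints 0 -1 */
    return 0;
}
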
page_alloc.c
2415 unsigned int order, t; in mark_free_pages() local
2435 for_each_migratetype_order(order, t) { in mark_free_pages()
2437 &zone->free_area[order].free_list[t], lru) { in mark_free_pages()
5130 unsigned int order, t; in zone_init_free_lists() local
5131 for_each_migratetype_order(order, t) { in zone_init_free_lists()
5132 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); in zone_init_free_lists()
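
The page_alloc.c hits use t as a migratetype index: for_each_migratetype_order() is a nested loop over every (order, migratetype) bucket, and zone_init_free_lists() just initialises the list head in each bucket. A self-contained sketch of that initialisation (MAX_ORDER, MIGRATE_TYPES and the structs are illustrative stand-ins):

#include <stdio.h>

#define MAX_ORDER      11
#define MIGRATE_TYPES  4

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

struct free_area {
    struct list_head free_list[MIGRATE_TYPES];   /* one list per migratetype */
    unsigned long nr_free;
};

struct fake_zone { struct free_area free_area[MAX_ORDER]; };

static void zone_init_free_lists(struct fake_zone *zone)
{
    unsigned int order, t;

    /* nested loop over every (order, migratetype) bucket */
    for (order = 0; order < MAX_ORDER; order++) {
        for (t = 0; t < MIGRATE_TYPES; t++)
            INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
        zone->free_area[order].nr_free = 0;
    }
}

int main(void)
{
    static struct fake_zone zone;
    zone_init_free_lists(&zone);
    printf("initialised %d free lists\n", MAX_ORDER * MIGRATE_TYPES);
    return 0;
}
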