/mm/ — per-file matches for the identifier p (source line numbers shown on the left):
D | memory-failure.c |
  79  static int hwpoison_filter_dev(struct page *p)  in hwpoison_filter_dev() argument
  91  if (PageSlab(p))  in hwpoison_filter_dev()
  94  mapping = page_mapping(p);  in hwpoison_filter_dev()
  109  static int hwpoison_filter_flags(struct page *p)  in hwpoison_filter_flags() argument
  114  if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==  in hwpoison_filter_flags()
  134  static int hwpoison_filter_task(struct page *p)  in hwpoison_filter_task() argument
  143  mem = try_get_mem_cgroup_from_page(p);  in hwpoison_filter_task()
  157  static int hwpoison_filter_task(struct page *p) { return 0; }  in hwpoison_filter_task() argument
  160  int hwpoison_filter(struct page *p)  in hwpoison_filter() argument
  165  if (hwpoison_filter_dev(p))  in hwpoison_filter()
  [all …]
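The memory-failure.c matches show p flowing through a chain of filter predicates (hwpoison_filter_dev(), hwpoison_filter_flags(), hwpoison_filter_task()), which hwpoison_filter() appears to call in turn, rejecting the page as soon as one of them returns nonzero. Below is a minimal userspace sketch of that short-circuiting filter-chain shape; struct item and the filter names are made up for illustration, not the kernel API.

    #include <stdio.h>

    struct item { int dev; unsigned long flags; };

    /* Each filter returns nonzero to reject the item, 0 to let it pass. */
    static int filter_dev(const struct item *it)   { return it->dev != 0; }
    static int filter_flags(const struct item *it) { return (it->flags & 0x4) != 0; }

    /* Same shape as hwpoison_filter(): run the filters in order, stop early. */
    static int filter_item(const struct item *it)
    {
        if (filter_dev(it))
            return -1;
        if (filter_flags(it))
            return -1;
        return 0;
    }

    int main(void)
    {
        struct item a = { 0, 0x1 }, b = { 1, 0x0 };

        printf("a: %s\n", filter_item(&a) ? "rejected" : "passed");
        printf("b: %s\n", filter_item(&b) ? "rejected" : "passed");
        return 0;
    }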
D | swapfile.c |
  345  static void inc_cluster_info_page(struct swap_info_struct *p,  in inc_cluster_info_page() argument
  353  VM_BUG_ON(cluster_next(&p->free_cluster_head) != idx);  in inc_cluster_info_page()
  354  cluster_set_next_flag(&p->free_cluster_head,  in inc_cluster_info_page()
  356  if (cluster_next(&p->free_cluster_tail) == idx) {  in inc_cluster_info_page()
  357  cluster_set_null(&p->free_cluster_tail);  in inc_cluster_info_page()
  358  cluster_set_null(&p->free_cluster_head);  in inc_cluster_info_page()
  373  static void dec_cluster_info_page(struct swap_info_struct *p,  in dec_cluster_info_page() argument
  391  if ((p->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==  in dec_cluster_info_page()
  393  swap_cluster_schedule_discard(p, idx);  in dec_cluster_info_page()
  398  if (cluster_is_null(&p->free_cluster_head)) {  in dec_cluster_info_page()
  [all …]
D | oom_kill.c |
  101  struct task_struct *find_lock_task_mm(struct task_struct *p)  in find_lock_task_mm() argument
  107  for_each_thread(p, t) {  in find_lock_task_mm()
  121  static bool oom_unkillable_task(struct task_struct *p,  in oom_unkillable_task() argument
  124  if (is_global_init(p))  in oom_unkillable_task()
  126  if (p->flags & PF_KTHREAD)  in oom_unkillable_task()
  130  if (memcg && !task_in_mem_cgroup(p, memcg))  in oom_unkillable_task()
  134  if (!has_intersects_mems_allowed(p, nodemask))  in oom_unkillable_task()
  149  unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,  in oom_badness() argument
  155  if (oom_unkillable_task(p, memcg, nodemask))  in oom_badness()
  158  p = find_lock_task_mm(p);  in oom_badness()
  [all …]
D | util.c |
  73  void *p;  in kmemdup() local
  75  p = kmalloc_track_caller(len, gfp);  in kmemdup()
  76  if (p)  in kmemdup()
  77  memcpy(p, src, len);  in kmemdup()
  78  return p;  in kmemdup()
  92  void *p;  in memdup_user() local
  99  p = kmalloc_track_caller(len, GFP_KERNEL);  in memdup_user()
  100  if (!p)  in memdup_user()
  103  if (copy_from_user(p, src, len)) {  in memdup_user()
  104  kfree(p);  in memdup_user()
  [all …]
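The util.c matches are the bodies of kmemdup() and memdup_user(): allocate a buffer, copy into it, and free it again if the copy from user space fails. Here is a userspace analogue of the allocate-then-copy half of that pattern; memdup() is an illustrative name, not a libc function.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Userspace analogue of kmemdup(): allocate len bytes and copy src into
     * them; returns NULL on allocation failure, and the caller must free it. */
    static void *memdup(const void *src, size_t len)
    {
        void *p = malloc(len);

        if (p)
            memcpy(p, src, len);
        return p;
    }

    int main(void)
    {
        const char msg[] = "hello";
        char *copy = memdup(msg, sizeof(msg));

        if (copy)
            printf("%s\n", copy);
        free(copy);
        return 0;
    }

As with kmemdup(), ownership of the duplicate passes to the caller.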
D | slub.c |
  126  static inline void *fixup_red_left(struct kmem_cache *s, void *p)  in fixup_red_left() argument
  129  p += s->red_left_pad;  in fixup_red_left()
  131  return p;  in fixup_red_left()
  214  static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)  in sysfs_slab_alias() argument
  246  void *p;  in get_freepointer_safe() local
  249  probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p));  in get_freepointer_safe()
  251  p = get_freepointer(s, object);  in get_freepointer_safe()
  253  return p;  in get_freepointer_safe()
  273  static inline int slab_index(void *p, struct kmem_cache *s, void *addr)  in slab_index() argument
  275  return (p - addr) / s->size;  in slab_index()
  [all …]
D | sparse-vmemmap.c |
  106  void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node);  in vmemmap_pte_populate() local
  107  if (!p)  in vmemmap_pte_populate()
  109  entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);  in vmemmap_pte_populate()
  119  void *p = vmemmap_alloc_block(PAGE_SIZE, node);  in vmemmap_pmd_populate() local
  120  if (!p)  in vmemmap_pmd_populate()
  122  pmd_populate_kernel(&init_mm, pmd, p);  in vmemmap_pmd_populate()
  131  void *p = vmemmap_alloc_block(PAGE_SIZE, node);  in vmemmap_pud_populate() local
  132  if (!p)  in vmemmap_pud_populate()
  134  pud_populate(&init_mm, pud, p);  in vmemmap_pud_populate()
  143  void *p = vmemmap_alloc_block(PAGE_SIZE, node);  in vmemmap_pgd_populate() local
  [all …]
D | vmstat.c |
  220  s8 __percpu *p = pcp->vm_stat_diff + item;  in __mod_zone_page_state() local
  224  x = delta + __this_cpu_read(*p);  in __mod_zone_page_state()
  232  __this_cpu_write(*p, x);  in __mod_zone_page_state()
  262  s8 __percpu *p = pcp->vm_stat_diff + item;  in __inc_zone_state() local
  265  v = __this_cpu_inc_return(*p);  in __inc_zone_state()
  271  __this_cpu_write(*p, -overstep);  in __inc_zone_state()
  284  s8 __percpu *p = pcp->vm_stat_diff + item;  in __dec_zone_state() local
  287  v = __this_cpu_dec_return(*p);  in __dec_zone_state()
  293  __this_cpu_write(*p, overstep);  in __dec_zone_state()
  320  s8 __percpu *p = pcp->vm_stat_diff + item;  in mod_state() local
  [all …]
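The vmstat.c matches all follow one pattern: p points at a small per-CPU delta (vm_stat_diff) that is adjusted locally and only folded into the shared zone counter once it grows past a threshold, so the common case touches no shared cache line. A rough userspace sketch of the same idea with a per-thread delta and an atomic global; the names and the threshold value are illustrative, not the kernel's.

    #include <stdatomic.h>
    #include <stdio.h>

    #define STAT_THRESHOLD 32                   /* fold once |diff| exceeds this */

    static _Atomic long global_counter;         /* shared, like the zone counter */
    static _Thread_local long local_diff;       /* per-thread, like vm_stat_diff */

    /* Same shape as __mod_zone_page_state(): accumulate locally, fold rarely. */
    static void mod_counter(long delta)
    {
        long x = local_diff + delta;

        if (x > STAT_THRESHOLD || x < -STAT_THRESHOLD) {
            atomic_fetch_add(&global_counter, x);
            x = 0;
        }
        local_diff = x;
    }

    int main(void)
    {
        for (int i = 0; i < 1000; i++)
            mod_counter(+1);
        atomic_fetch_add(&global_counter, local_diff);  /* fold the leftover delta */
        printf("%ld\n", atomic_load(&global_counter));  /* prints 1000 */
        return 0;
    }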
D | hwpoison-inject.c |
  16  struct page *p;  in hwpoison_inject() local
  26  p = pfn_to_page(pfn);  in hwpoison_inject()
  27  hpage = compound_head(p);  in hwpoison_inject()
  37  if (!PageLRU(p) && !PageHuge(p))  in hwpoison_inject()
  38  shake_page(p, 0);  in hwpoison_inject()
  42  if (!PageLRU(p) && !PageHuge(p))  in hwpoison_inject()
D | mempolicy.c |
  126  struct mempolicy *get_task_policy(struct task_struct *p)  in get_task_policy() argument
  128  struct mempolicy *pol = p->mempolicy;  in get_task_policy()
  302  void __mpol_put(struct mempolicy *p)  in __mpol_put() argument
  304  if (!atomic_dec_and_test(&p->refcnt))  in __mpol_put()
  306  kmem_cache_free(policy_cache, p);  in __mpol_put()
  844  static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)  in get_policy_nodemask() argument
  847  if (p == &default_policy)  in get_policy_nodemask()
  850  switch (p->mode) {  in get_policy_nodemask()
  854  *nodes = p->v.nodes;  in get_policy_nodemask()
  857  if (!(p->flags & MPOL_F_LOCAL))  in get_policy_nodemask()
  [all …]
D | slab_common.c |
  848  void *slab_next(struct seq_file *m, void *p, loff_t *pos)  in slab_next() argument
  850  return seq_list_next(p, &slab_caches, pos);  in slab_next()
  853  void slab_stop(struct seq_file *m, void *p)  in slab_stop() argument
  906  static int s_show(struct seq_file *m, void *p)  in s_show() argument
  908  struct kmem_cache *s = list_entry(p, struct kmem_cache, list);  in s_show()
  957  static __always_inline void *__do_krealloc(const void *p, size_t new_size,  in __do_krealloc() argument
  963  if (p)  in __do_krealloc()
  964  ks = ksize(p);  in __do_krealloc()
  967  return (void *)p;  in __do_krealloc()
  970  if (ret && p)  in __do_krealloc()
  [all …]
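The slab_common.c matches around line 957 are __do_krealloc(): if ksize(p) says the existing allocation is already big enough, the same pointer is returned; otherwise a new block is allocated and the data copied over. Below is a userspace sketch of that reuse-if-it-fits idea, with the old usable size passed in explicitly since portable C has no ksize(); grow_buf() is a hypothetical helper, not a real API.

    #include <stdlib.h>
    #include <string.h>

    /* Grow a buffer only when it is actually too small; otherwise keep it.
     * old_size is what the caller knows the current block can hold. */
    static void *grow_buf(void *p, size_t old_size, size_t new_size)
    {
        void *ret;

        if (p && old_size >= new_size)
            return p;                          /* existing block already fits */

        ret = malloc(new_size);
        if (ret && p) {
            memcpy(ret, p, old_size);          /* old_size < new_size here */
            free(p);
        }
        return ret;
    }

    int main(void)
    {
        char *buf = grow_buf(NULL, 0, 8);
        char *bigger;

        if (buf)
            strcpy(buf, "hi");
        bigger = grow_buf(buf, 8, 64);
        if (bigger)
            buf = bigger;                      /* old block was freed by grow_buf() */
        free(buf);
        return 0;
    }

As with krealloc(), a failed grow leaves the original block intact, so the caller still owns it.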
D | vmalloc.c |
  47  struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);  in free_work() local
  48  struct llist_node *llnode = llist_del_all(&p->list);  in free_work()
  50  void *p = llnode;  in free_work() local
  52  __vunmap(p, 1);  in free_work()
  308  struct rb_node **p = &vmap_area_root.rb_node;  in __insert_vmap_area() local
  312  while (*p) {  in __insert_vmap_area()
  315  parent = *p;  in __insert_vmap_area()
  318  p = &(*p)->rb_left;  in __insert_vmap_area()
  320  p = &(*p)->rb_right;  in __insert_vmap_area()
  325  rb_link_node(&va->rb_node, parent, p);  in __insert_vmap_area()
  [all …]
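The vmalloc.c matches at lines 308–325 are the standard kernel rbtree insertion walk: p steps through struct rb_node ** link slots while remembering the parent, and once an empty slot is found the new node is hooked in with rb_link_node() (followed by rb_insert_color() to rebalance). A self-contained userspace sketch of the same link-and-parent walk, done on a plain unbalanced binary search tree because the kernel rbtree helpers are not available outside the tree; the types and names here are illustrative.

    #include <stdio.h>

    struct node {
        long key;
        struct node *left, *right;
    };

    /* Walk pointer-to-edge slots the way __insert_vmap_area() walks
     * rb_node ** slots: 'link' always points at the edge we may fill. */
    static void insert_node(struct node **root, struct node *nn)
    {
        struct node **link = root;

        while (*link) {
            struct node *parent = *link;

            if (nn->key < parent->key)
                link = &parent->left;
            else
                link = &parent->right;
        }
        nn->left = nn->right = NULL;
        *link = nn;        /* the kernel does rb_link_node() + rb_insert_color() here */
    }

    int main(void)
    {
        struct node *root = NULL;
        struct node nodes[3] = { { 5 }, { 2 }, { 9 } };

        for (int i = 0; i < 3; i++)
            insert_node(&root, &nodes[i]);
        printf("root=%ld left=%ld right=%ld\n",
               root->key, root->left->key, root->right->key);
        return 0;
    }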
D | nommu.c |
  555  struct rb_node *p, *lastp;  in validate_nommu_regions() local
  565  while ((p = rb_next(lastp))) {  in validate_nommu_regions()
  566  region = rb_entry(p, struct vm_region, vm_rb);  in validate_nommu_regions()
  573  lastp = p;  in validate_nommu_regions()
  588  struct rb_node **p, *parent;  in add_nommu_region() local
  593  p = &nommu_region_tree.rb_node;  in add_nommu_region()
  594  while (*p) {  in add_nommu_region()
  595  parent = *p;  in add_nommu_region()
  598  p = &(*p)->rb_left;  in add_nommu_region()
  600  p = &(*p)->rb_right;  in add_nommu_region()
  [all …]
D | madvise.c |
  343  struct page *p;  in madvise_hwpoison() local
  347  compound_order(compound_head(p))) {  in madvise_hwpoison()
  350  ret = get_user_pages_fast(start, 1, 0, &p);  in madvise_hwpoison()
  354  if (PageHWPoison(p)) {  in madvise_hwpoison()
  355  put_page(p);  in madvise_hwpoison()
  360  page_to_pfn(p), start);  in madvise_hwpoison()
  361  ret = soft_offline_page(p, MF_COUNT_INCREASED);  in madvise_hwpoison()
  367  page_to_pfn(p), start);  in madvise_hwpoison()
  369  memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED);  in madvise_hwpoison()
D | percpu.c |
  560  int *p;  in pcpu_alloc_area() local
  562  for (i = chunk->first_free, p = chunk->map + i; i < chunk->map_used; i++, p++) {  in pcpu_alloc_area()
  566  off = *p;  in pcpu_alloc_area()
  570  this_size = (p[1] & ~1) - off;  in pcpu_alloc_area()
  589  if (head && (head < sizeof(int) || !(p[-1] & 1))) {  in pcpu_alloc_area()
  590  *p = off += head;  in pcpu_alloc_area()
  591  if (p[-1] & 1)  in pcpu_alloc_area()
  594  max_contig = max(*p - p[-1], max_contig);  in pcpu_alloc_area()
  611  memmove(p + nr_extra + 1, p + 1,  in pcpu_alloc_area()
  620  *++p = off += head;  in pcpu_alloc_area()
  [all …]
D | slab.h |
  174  struct kmem_cache *p)  in slab_equal_or_root() argument
  176  return (p == s) ||  in slab_equal_or_root()
  177  (s->memcg_params && (p == s->memcg_params->root_cache));  in slab_equal_or_root()
  256  struct kmem_cache *p)  in slab_equal_or_root() argument
  360  void *slab_next(struct seq_file *m, void *p, loff_t *pos);
  361  void slab_stop(struct seq_file *m, void *p);
D | page_alloc.c |
  387  struct page *p = page + i;  in prep_compound_page() local
  388  set_page_count(p, 0);  in prep_compound_page()
  389  p->first_page = page;  in prep_compound_page()
  392  __SetPageTail(p);  in prep_compound_page()
  411  struct page *p = page + i;  in destroy_compound_page() local
  413  if (unlikely(!PageTail(p))) {  in destroy_compound_page()
  416  } else if (unlikely(p->first_page != page)) {  in destroy_compound_page()
  420  __ClearPageTail(p);  in destroy_compound_page()
  821  struct page *p = page;  in __free_pages_bootmem() local
  824  prefetchw(p);  in __free_pages_bootmem()
  [all …]
D | vmacache.c |
  18  struct task_struct *g, *p;  in vmacache_flush_all() local
  31  for_each_process_thread(g, p) {  in vmacache_flush_all()
  38  if (mm == p->mm)  in vmacache_flush_all()
  39  vmacache_flush(p);  in vmacache_flush_all()
D | quicklist.c |
  78  void *p = quicklist_alloc(nr, 0, NULL);  in quicklist_trim() local
  81  dtor(p);  in quicklist_trim()
  82  free_page((unsigned long)p);  in quicklist_trim()
D | slab.c |
  2360  struct list_head *p;  in drain_freelist() local
  2368  p = n->slabs_free.prev;  in drain_freelist()
  2369  if (p == &n->slabs_free) {  in drain_freelist()
  2374  page = list_entry(p, struct page, lru);  in drain_freelist()
  3331  struct list_head *p;  in cache_flusharray() local
  3333  p = n->slabs_free.next;  in cache_flusharray()
  3334  while (p != &(n->slabs_free)) {  in cache_flusharray()
  3337  page = list_entry(p, struct page, lru);  in cache_flusharray()
  3341  p = p->next;  in cache_flusharray()
  4061  unsigned long *p;  in add_caller() local
  [all …]
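The slab.c matches in drain_freelist() and cache_flusharray() show the usual intrusive-list idiom: p is a bare struct list_head * cursor, and list_entry() (a wrapper around container_of()) recovers the enclosing structure. A self-contained userspace version of the same idiom with a minimal list_head and container_of() standing in for the kernel headers; the struct slab type here is a toy stand-in.

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_entry(ptr, type, member) container_of(ptr, type, member)

    /* Toy stand-in for the structures slab.c links together. */
    struct slab { int id; struct list_head lru; };

    static void list_add_tail(struct list_head *nn, struct list_head *head)
    {
        nn->prev = head->prev;
        nn->next = head;
        head->prev->next = nn;
        head->prev = nn;
    }

    int main(void)
    {
        struct list_head free_list = { &free_list, &free_list };
        struct slab a = { 1, { NULL, NULL } }, b = { 2, { NULL, NULL } };
        struct list_head *p;

        list_add_tail(&a.lru, &free_list);
        list_add_tail(&b.lru, &free_list);

        /* Walk the list the way cache_flusharray() walks slabs_free,
         * recovering the containing structure from the embedded node. */
        for (p = free_list.next; p != &free_list; p = p->next)
            printf("slab %d\n", list_entry(p, struct slab, lru)->id);
        return 0;
    }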
D | memcontrol.c |
  711  struct rb_node **p = &mctz->rb_root.rb_node;  in __mem_cgroup_insert_exceeded() local
  721  while (*p) {  in __mem_cgroup_insert_exceeded()
  722  parent = *p;  in __mem_cgroup_insert_exceeded()
  726  p = &(*p)->rb_left;  in __mem_cgroup_insert_exceeded()
  732  p = &(*p)->rb_right;  in __mem_cgroup_insert_exceeded()
  734  rb_link_node(&mz->tree_node, parent, p);  in __mem_cgroup_insert_exceeded()
  1029  struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)  in mem_cgroup_from_task() argument
  1036  if (unlikely(!p))  in mem_cgroup_from_task()
  1039  return mem_cgroup_from_css(task_css(p, memory_cgrp_id));  in mem_cgroup_from_task()
  1446  struct task_struct *p;  in task_in_mem_cgroup() local
  [all …]
D | page-writeback.c |
  1582  int *p;  in balance_dirty_pages_ratelimited() local
  1598  p = this_cpu_ptr(&bdp_ratelimits);  in balance_dirty_pages_ratelimited()
  1600  *p = 0;  in balance_dirty_pages_ratelimited()
  1601  else if (unlikely(*p >= ratelimit_pages)) {  in balance_dirty_pages_ratelimited()
  1602  *p = 0;  in balance_dirty_pages_ratelimited()
  1610  p = this_cpu_ptr(&dirty_throttle_leaks);  in balance_dirty_pages_ratelimited()
  1611  if (*p > 0 && current->nr_dirtied < ratelimit) {  in balance_dirty_pages_ratelimited()
  1613  nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);  in balance_dirty_pages_ratelimited()
  1614  *p -= nr_pages_dirtied;  in balance_dirty_pages_ratelimited()
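The page-writeback.c matches are balance_dirty_pages_ratelimited(): a per-CPU counter (bdp_ratelimits) counts dirtied pages, the expensive throttling path only runs once the count reaches ratelimit_pages, and dirty_throttle_leaks soaks up what slips through. A stripped-down sketch of just the counting half of that idea, per thread instead of per CPU; the names and the limit are illustrative, not the kernel's.

    #include <stdio.h>

    #define RATELIMIT_PAGES 32

    static _Thread_local int pages_since_check;   /* plays the role of bdp_ratelimits */

    static void throttle(void)
    {
        /* stand-in for the expensive slow path */
        printf("throttling after %d pages\n", pages_since_check);
    }

    /* Called once per dirtied page; the slow path is only paid for once
     * every RATELIMIT_PAGES calls, as in balance_dirty_pages_ratelimited(). */
    static void dirtied_page(void)
    {
        if (++pages_since_check >= RATELIMIT_PAGES) {
            throttle();
            pages_since_check = 0;
        }
    }

    int main(void)
    {
        for (int i = 0; i < 100; i++)
            dirtied_page();
        return 0;
    }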
D | iov_iter.c |
  482  void *p;  in get_pages_alloc_iovec() local
  496  p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);  in get_pages_alloc_iovec()
  497  if (!p)  in get_pages_alloc_iovec()
  498  p = vmalloc(n * sizeof(struct page *));  in get_pages_alloc_iovec()
  499  if (!p)  in get_pages_alloc_iovec()
  502  res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);  in get_pages_alloc_iovec()
  504  kvfree(p);  in get_pages_alloc_iovec()
  507  *pages = p;  in get_pages_alloc_iovec()
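The iov_iter.c matches show the page-pointer array being allocated with kmalloc() and, if that fails, with vmalloc(), then released with kvfree(), which dispatches to the right free routine. Below is a userspace sketch of the same try-fast-then-fall-back idea, using malloc() with an mmap() fallback and a small header to remember which path was taken; fallback_alloc()/fallback_free() are hypothetical names, not a real API.

    #include <stdlib.h>
    #include <sys/mman.h>

    struct vhdr { size_t size; int mmapped; };

    /* Try malloc() first; if it fails, fall back to an anonymous mapping.
     * The header in front of the returned block records which path won. */
    static void *fallback_alloc(size_t size)
    {
        struct vhdr *h = malloc(sizeof(*h) + size);
        int mmapped = 0;

        if (!h) {
            h = mmap(NULL, sizeof(*h) + size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (h == MAP_FAILED)
                return NULL;
            mmapped = 1;
        }
        h->size = size;
        h->mmapped = mmapped;
        return h + 1;
    }

    /* Counterpart of kvfree(): undo whichever allocation path was used. */
    static void fallback_free(void *p)
    {
        struct vhdr *h;

        if (!p)
            return;
        h = (struct vhdr *)p - 1;
        if (h->mmapped)
            munmap(h, sizeof(*h) + h->size);
        else
            free(h);
    }

    int main(void)
    {
        char *buf = fallback_alloc(1 << 20);

        if (buf)
            buf[0] = 'x';
        fallback_free(buf);
        return 0;
    }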
D | migrate.c |
  1188  static struct page *new_page_node(struct page *p, unsigned long private,  in new_page_node() argument
  1193  while (pm->node != MAX_NUMNODES && pm->page != p)  in new_page_node()
  1201  if (PageHuge(p))  in new_page_node()
  1202  return alloc_huge_page_node(page_hstate(compound_head(p)),  in new_page_node()
  1338  const void __user *p;  in do_pages_move() local
  1342  if (get_user(p, pages + j + chunk_start))  in do_pages_move()
  1344  pm[j].addr = (unsigned long) p;  in do_pages_move()
D | sparse.c |
  268  unsigned long *p;  in sparse_early_usemaps_alloc_pgdat_section() local
  284  p = memblock_virt_alloc_try_nid_nopanic(size,  in sparse_early_usemaps_alloc_pgdat_section()
  287  if (!p && limit) {  in sparse_early_usemaps_alloc_pgdat_section()
  291  return p;  in sparse_early_usemaps_alloc_pgdat_section()
D | vmscan.c |
  3326  static int kswapd(void *p)  in kswapd() argument
  3332  pg_data_t *pgdat = (pg_data_t*)p;  in kswapd()
  3470  struct task_struct *p = current;  in shrink_all_memory() local
  3473  p->flags |= PF_MEMALLOC;  in shrink_all_memory()
  3476  p->reclaim_state = &reclaim_state;  in shrink_all_memory()
  3480  p->reclaim_state = NULL;  in shrink_all_memory()
  3482  p->flags &= ~PF_MEMALLOC;  in shrink_all_memory()
  3644  struct task_struct *p = current;  in __zone_reclaim() local
  3666  p->flags |= PF_MEMALLOC | PF_SWAPWRITE;  in __zone_reclaim()
  3669  p->reclaim_state = &reclaim_state;  in __zone_reclaim()
  [all …]