/mm/
D  mempool.c
    182   gfp_t gfp_mask, int node_id)    in mempool_init_node()  [argument]
    192   gfp_mask, node_id);    in mempool_init_node()
    202   element = pool->alloc(gfp_mask, pool->pool_data);    in mempool_init_node()
    263   gfp_t gfp_mask, int node_id)    in mempool_create_node()  [argument]
    267   pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);    in mempool_create_node()
    272   gfp_mask, node_id)) {    in mempool_create_node()
    375   void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)    in mempool_alloc()  [argument]
    382   VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);    in mempool_alloc()
    383   might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);    in mempool_alloc()
    385   gfp_mask |= __GFP_NOMEMALLOC; /* don't allocate emergency reserves */    in mempool_alloc()
    [all …]
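For orientation, here is a minimal caller-side sketch of the mempool API indexed above. It assumes a slab-backed pool; the names demo_cache, demo_pool and demo_obj are placeholders, not anything from this tree.

    /* Sketch only: a slab-backed mempool and an allocation with an explicit gfp mask. */
    #include <linux/mempool.h>
    #include <linux/slab.h>

    static struct kmem_cache *demo_cache;
    static mempool_t *demo_pool;

    static int demo_pool_init(void)
    {
            demo_cache = kmem_cache_create("demo_obj", 128, 0, 0, NULL);
            if (!demo_cache)
                    return -ENOMEM;

            /* Keep at least 16 elements in reserve for low-memory situations. */
            demo_pool = mempool_create_slab_pool(16, demo_cache);
            if (!demo_pool) {
                    kmem_cache_destroy(demo_cache);
                    return -ENOMEM;
            }
            return 0;
    }

    static void demo_pool_use(void)
    {
            /*
             * GFP_NOIO: may block, but must not recurse into the I/O path.
             * mempool_alloc() itself adds __GFP_NOMEMALLOC and rejects
             * __GFP_ZERO, as lines 382-385 above show.
             */
            void *obj = mempool_alloc(demo_pool, GFP_NOIO);

            if (obj)
                    mempool_free(obj, demo_pool);
    }

mempool_alloc() falls back to the pre-allocated reserve when the backing allocator fails, which is why callers on writeback or I/O paths typically pass a restricted mask such as GFP_NOIO or GFP_NOFS.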
D  page_alloc.c
    3687  static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)    in __should_fail_alloc_page()  [argument]
    3691  if (gfp_mask & __GFP_NOFAIL)    in __should_fail_alloc_page()
    3693  if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))    in __should_fail_alloc_page()
    3696  (gfp_mask & __GFP_DIRECT_RECLAIM))    in __should_fail_alloc_page()
    3727  static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)    in __should_fail_alloc_page()  [argument]
    3734  noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)    in should_fail_alloc_page()  [argument]
    3736  return __should_fail_alloc_page(gfp_mask, order);    in should_fail_alloc_page()
    3851  unsigned int alloc_flags, gfp_t gfp_mask)    in zone_watermark_fast()  [argument]
    3883  if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost    in zone_watermark_fast()
    3928  alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)    in alloc_flags_nofragment()  [argument]
    [all …]
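A small, hedged illustration of how the bits tested above arrive from callers; demo_grab_page() is a made-up helper, not kernel code.

    /* Sketch: typical caller-side gfp choices seen by the checks above. */
    #include <linux/gfp.h>

    static struct page *demo_grab_page(bool atomic_context)
    {
            /*
             * GFP_KERNEL includes __GFP_DIRECT_RECLAIM (tested at line 3696);
             * GFP_ATOMIC does not, and it sets __GFP_ATOMIC, which the order-0
             * watermark check at line 3883 treats specially.  __GFP_NOFAIL
             * requests are exempted from fail injection (line 3691).
             */
            gfp_t gfp = atomic_context ? GFP_ATOMIC : GFP_KERNEL;

            return alloc_pages(gfp | __GFP_NOWARN, 0);  /* order-0, quiet on failure */
    }

The caller would release the page with __free_pages(page, 0).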
D  page_owner.c
    26    gfp_t gfp_mask;    [member]
    192   unsigned int order, gfp_t gfp_mask)    in __set_page_owner_handle()  [argument]
    201   page_owner->gfp_mask = gfp_mask;    in __set_page_owner_handle()
    213   gfp_t gfp_mask)    in __set_page_owner()  [argument]
    218   handle = save_stack(gfp_mask);    in __set_page_owner()
    223   __set_page_owner_handle(page, page_ext, handle, order, gfp_mask);    in __set_page_owner()
    277   new_page_owner->gfp_mask = old_page_owner->gfp_mask;    in __copy_page_owner()
    362   page_mt = gfp_migratetype(page_owner->gfp_mask);    in pagetypeinfo_showmixedcount_print()
    403   page_owner->order, page_owner->gfp_mask,    in print_page_owner()
    404   &page_owner->gfp_mask, page_owner->pid,    in print_page_owner()
    [all …]
D  vmscan.c
    143   gfp_t gfp_mask;    [member]
    572   static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,    in shrink_slab_memcg()  [argument]
    592   .gfp_mask = gfp_mask,    in shrink_slab_memcg()
    647   static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,    in shrink_slab_memcg()  [argument]
    674   unsigned long shrink_slab(gfp_t gfp_mask, int nid,    in shrink_slab()  [argument]
    682   trace_android_vh_shrink_slab_bypass(gfp_mask, nid, memcg, priority, &bypass);    in shrink_slab()
    694   return shrink_slab_memcg(gfp_mask, nid, memcg, priority);    in shrink_slab()
    701   .gfp_mask = gfp_mask,    in shrink_slab()
    1165  may_enter_fs = (sc->gfp_mask & __GFP_FS) ||    in shrink_page_list()
    1166  (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));    in shrink_page_list()
    [all …]
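The .gfp_mask assignments above show shrink_slab() forwarding the reclaim gfp mask to shrinkers through struct shrink_control. A minimal sketch of a registered shrinker that consults that mask follows; the atomic counter stands in for a real object cache and the demo_* names are invented.

    #include <linux/shrinker.h>
    #include <linux/atomic.h>
    #include <linux/kernel.h>

    static atomic_long_t demo_cached_objs = ATOMIC_LONG_INIT(0);

    static unsigned long demo_count(struct shrinker *s, struct shrink_control *sc)
    {
            return atomic_long_read(&demo_cached_objs);  /* freeable objects */
    }

    static unsigned long demo_scan(struct shrinker *s, struct shrink_control *sc)
    {
            unsigned long freed;

            /* Common idiom: refuse work if reclaim cannot re-enter the filesystem. */
            if (!(sc->gfp_mask & __GFP_FS))
                    return SHRINK_STOP;

            freed = min_t(unsigned long, sc->nr_to_scan,
                          atomic_long_read(&demo_cached_objs));
            atomic_long_sub(freed, &demo_cached_objs);   /* stand-in for real freeing */
            return freed;
    }

    static struct shrinker demo_shrinker = {
            .count_objects = demo_count,
            .scan_objects  = demo_scan,
            .seeks         = DEFAULT_SEEKS,
    };

    /* register_shrinker(&demo_shrinker) at init, unregister_shrinker() at exit. */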
D  swap_state.c
    452   struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,    in __read_swap_cache_async()  [argument]
    494   page = alloc_page_vma(gfp_mask, vma, addr);    in __read_swap_cache_async()
    527   if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) {    in __read_swap_cache_async()
    532   if (mem_cgroup_charge(page, NULL, gfp_mask)) {    in __read_swap_cache_async()
    558   struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,    in read_swap_cache_async()  [argument]
    562   struct page *retpage = __read_swap_cache_async(entry, gfp_mask,    in read_swap_cache_async()
    654   struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,    in swap_cluster_readahead()  [argument]
    693   gfp_mask, vma, addr, &page_allocated);    in swap_cluster_readahead()
    709   return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);    in swap_cluster_readahead()
    837   static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,    in swap_vma_readahead()  [argument]
    [all …]
D  vmalloc.c
    1155  int node, gfp_t gfp_mask)    in alloc_vmap_area()  [argument]
    1170  gfp_mask = gfp_mask & GFP_RECLAIM_MASK;    in alloc_vmap_area()
    1172  va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);    in alloc_vmap_area()
    1180  kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);    in alloc_vmap_area()
    1206  pva = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);    in alloc_vmap_area()
    1251  if (gfpflags_allow_blocking(gfp_mask)) {    in alloc_vmap_area()
    1260  if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())    in alloc_vmap_area()
    1546  static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)    in new_vmap_block()  [argument]
    1558  gfp_mask & GFP_RECLAIM_MASK, node);    in new_vmap_block()
    1564  node, gfp_mask);    in new_vmap_block()
    [all …]
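Line 1170 above shows that only the GFP_RECLAIM_MASK bits of a caller's mask reach the internal vmap-area allocations. A hedged caller-side sketch, using the two-argument __vmalloc() this tree provides (the nommu.c stub further down shows the same signature); demo_big_table() is a made-up name.

    #include <linux/vmalloc.h>
    #include <linux/gfp.h>

    static void *demo_big_table(size_t bytes)
    {
            /* __GFP_ZERO zeroes the backing pages; release with vfree(). */
            return __vmalloc(bytes, GFP_KERNEL | __GFP_ZERO);
    }

For most users kvmalloc(bytes, GFP_KERNEL) with kvfree() is the friendlier interface, trying kmalloc() first and falling back to vmalloc().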
D  cma.c
    438   gfp_t gfp_mask)    in cma_alloc()  [argument]
    458   (void *)cma, count, align, gfp_mask);    in cma_alloc()
    486   (gfp_mask & __GFP_NORETRY))    in cma_alloc()
    516   ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, gfp_mask, &info);    in cma_alloc()
    544   if (info.failed_pfn && gfp_mask & __GFP_NORETRY) {    in cma_alloc()
    569   if (ret && !(gfp_mask & __GFP_NOWARN)) {    in cma_alloc()
    577   trace_android_vh_cma_alloc_finish(cma, page, count, align, gfp_mask, ts);    in cma_alloc()
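A caller-side sketch for this tree's gfp-aware cma_alloc(); treat the signature as an assumption, since mainline kernels take a bool no_warn instead of a mask. The cma pointer is assumed to come from a previously reserved region, and demo_grab_contig() is invented.

    #include <linux/cma.h>
    #include <linux/gfp.h>

    static struct page *demo_grab_contig(struct cma *region, size_t nr_pages)
    {
            /*
             * __GFP_NORETRY: give up early rather than keep retrying migration
             * (lines 486/544 above); __GFP_NOWARN: skip the failure report
             * (line 569 above).  Free with cma_release(region, pages, nr_pages).
             */
            return cma_alloc(region, nr_pages, 0,
                             GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
    }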
D  hugetlb.c
    1102  static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,    in dequeue_huge_page_nodemask()  [argument]
    1111  zonelist = node_zonelist(nid, gfp_mask);    in dequeue_huge_page_nodemask()
    1115  for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {    in dequeue_huge_page_nodemask()
    1118  if (!cpuset_zone_allowed(zone, gfp_mask))    in dequeue_huge_page_nodemask()
    1145  gfp_t gfp_mask;    in dequeue_huge_page_vma()  [local]
    1162  gfp_mask = htlb_alloc_mask(h);    in dequeue_huge_page_vma()
    1163  nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);    in dequeue_huge_page_vma()
    1164  page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);    in dequeue_huge_page_vma()
    1284  static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,    in alloc_gigantic_page()  [argument]
    1304  if (!(gfp_mask & __GFP_THISNODE)) {    in alloc_gigantic_page()
    [all …]
D  oom_kill.c
    260   enum zone_type highest_zoneidx = gfp_zone(oc->gfp_mask);    in constrained_alloc()
    282   if (oc->gfp_mask & __GFP_THISNODE)    in constrained_alloc()
    301   if (!cpuset_zone_allowed(zone, oc->gfp_mask))    in constrained_alloc()
    497   current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order,    in dump_header()
    1133  if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc))    in out_of_memory()
D  sparse-vmemmap.c
    52    gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;    in vmemmap_alloc_block()  [local]
    57    page = alloc_pages_node(node, gfp_mask, order);    in vmemmap_alloc_block()
    62    warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,    in vmemmap_alloc_block()
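The mask built at line 52 is a common "try hard, but tolerate failure quietly" combination. A generic sketch of the same pattern, with a made-up helper name and pr_warn() standing in for the internal warn_alloc() call at line 62:

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/printk.h>

    static void *demo_alloc_on_node(int node, unsigned int order)
    {
            gfp_t gfp_mask = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
            struct page *page = alloc_pages_node(node, gfp_mask, order);

            if (page)
                    return page_address(page);

            /* __GFP_NOWARN suppressed the generic splat, so report it ourselves. */
            pr_warn("order-%u allocation on node %d failed\n", order, node);
            return NULL;
    }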
D  compaction.c
    1001  if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page))    in isolate_migratepages_block()
    2252  cc->migratetype = gfp_migratetype(cc->gfp_mask);    in compact_zone()
    2445  gfp_t gfp_mask, enum compact_priority prio,    in compact_zone_order()  [argument]
    2453  .gfp_mask = gfp_mask,    in compact_zone_order()
    2506  enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,    in try_to_compact_pages()  [argument]
    2510  int may_perform_io = gfp_mask & __GFP_IO;    in try_to_compact_pages()
    2522  trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);    in try_to_compact_pages()
    2535  status = compact_zone_order(zone, order, gfp_mask, prio,    in try_to_compact_pages()
    2592  .gfp_mask = GFP_KERNEL,    in proactive_compact_node()
    2621  .gfp_mask = GFP_KERNEL,    in compact_node()
    [all …]
D  filemap.c
    795   int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)    in replace_page_cache_page()  [argument]
    934   pgoff_t offset, gfp_t gfp_mask)    in add_to_page_cache_locked()  [argument]
    937   gfp_mask, NULL);    in add_to_page_cache_locked()
    942   pgoff_t offset, gfp_t gfp_mask)    in add_to_page_cache_lru()  [argument]
    949   gfp_mask, &shadow);    in add_to_page_cache_lru()
    962   if (!(gfp_mask & __GFP_WRITE) && shadow)    in add_to_page_cache_lru()
    1797  int fgp_flags, gfp_t gfp_mask)    in pagecache_get_page()  [argument]
    1807  gfp_mask, page);    in pagecache_get_page()
    1844  gfp_mask |= __GFP_WRITE;    in pagecache_get_page()
    1846  gfp_mask &= ~__GFP_FS;    in pagecache_get_page()
    [all …]
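The fgp_flags/gfp_mask pair at line 1797 is what filesystems use to look up or create page-cache pages. A hedged sketch of a caller; demo_get_locked_page() is an invented helper.

    #include <linux/pagemap.h>

    static struct page *demo_get_locked_page(struct address_space *mapping,
                                             pgoff_t index)
    {
            gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NOWARN;

            /* FGP_CREAT allocates and inserts a missing page; on that path the
             * page goes through add_to_page_cache_lru() with the same mask. */
            return pagecache_get_page(mapping, index, FGP_LOCK | FGP_CREAT, gfp);
    }

    /* Caller unlocks with unlock_page() and drops the reference with put_page(). */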
D  migrate.c
    1531  gfp_t gfp_mask;    in alloc_migration_target()  [local]
    1538  gfp_mask = mtc->gfp_mask;    in alloc_migration_target()
    1546  gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);    in alloc_migration_target()
    1547  return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);    in alloc_migration_target()
    1555  gfp_mask &= ~__GFP_RECLAIM;    in alloc_migration_target()
    1556  gfp_mask |= GFP_TRANSHUGE;    in alloc_migration_target()
    1561  gfp_mask |= __GFP_HIGHMEM;    in alloc_migration_target()
    1563  new_page = __alloc_pages_nodemask(gfp_mask, order, nid, mtc->nmask);    in alloc_migration_target()
    1590  .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,    in do_move_pages_to_node()
D  readahead.c
    190   gfp_t gfp_mask = readahead_gfp_mask(mapping);    in page_cache_ra_unbounded()  [local]
    226   page = __page_cache_alloc(gfp_mask);    in page_cache_ra_unbounded()
    233   gfp_mask) < 0) {    in page_cache_ra_unbounded()
D  memcontrol.c
    1746  static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,    in mem_cgroup_out_of_memory()  [argument]
    1753  .gfp_mask = gfp_mask,    in mem_cgroup_out_of_memory()
    1777  gfp_t gfp_mask,    in mem_cgroup_soft_reclaim()  [argument]
    1815  total += mem_cgroup_shrink_node(victim, gfp_mask, false,    in mem_cgroup_soft_reclaim()
    2462  gfp_t gfp_mask)    in reclaim_high()  [argument]
    2477  gfp_mask, true);    in reclaim_high()
    2713  static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,    in try_charge()  [argument]
    2756  if (gfp_mask & __GFP_ATOMIC)    in try_charge()
    2771  if (!gfpflags_allow_blocking(gfp_mask))    in try_charge()
    2778  gfp_mask, may_swap);    in try_charge()
    [all …]
D  nommu.c
    143   void *__vmalloc(unsigned long size, gfp_t gfp_mask)    in __vmalloc()  [argument]
    149   return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);    in __vmalloc()
    154   unsigned long start, unsigned long end, gfp_t gfp_mask,    in __vmalloc_node_range()  [argument]
    158   return __vmalloc(size, gfp_mask);    in __vmalloc_node_range()
    161   void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,    in __vmalloc_node()  [argument]
    164   return __vmalloc(size, gfp_mask);    in __vmalloc_node()
D  internal.h
    250   const gfp_t gfp_mask; /* gfp mask of a direct compactor */    [member]
    663   gfp_t gfp_mask;    [member]
D  swapfile.c
    3645  int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)    in add_swap_count_continuation()  [argument]
    3660  page = alloc_page(gfp_mask | __GFP_HIGHMEM);    in add_swap_count_continuation()
    3860  void __cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)    in __cgroup_throttle_swaprate()  [argument]
    3866  if (!(gfp_mask & __GFP_IO))    in __cgroup_throttle_swaprate()
D  memory-failure.c
    1788  .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,    in __soft_offline_page()
D  memory_hotplug.c
    1385  .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,    in do_migrate_range()
D  gup.c
    1629  .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_NOWARN,    in check_and_migrate_cma_pages()
D  mempolicy.c
    1085  .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,    in migrate_to_node()
D  memory.c
    4858  .gfp_mask = __get_fault_gfp_mask(vma),    in __handle_mm_fault()
    5032  .gfp_mask = __get_fault_gfp_mask(vma),    in ___handle_speculative_fault()