/mm/ (gfp references)
slob.c
  190  static void *slob_new_pages(gfp_t gfp, int order, int node)  [in slob_new_pages(), argument]
  196  page = __alloc_pages_node(node, gfp, order);  [in slob_new_pages()]
  199  page = alloc_pages(gfp, order);  [in slob_new_pages()]
  268  static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)  [in slob_alloc(), argument]
  316  b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);  [in slob_alloc()]
  332  if (unlikely((gfp & __GFP_ZERO) && b))  [in slob_alloc()]
  427  __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)  [in __do_kmalloc_node(), argument]
  433  gfp &= gfp_allowed_mask;  [in __do_kmalloc_node()]
  435  lockdep_trace_alloc(gfp);  [in __do_kmalloc_node()]
  441  m = slob_alloc(size + align, gfp, align, node);  [in __do_kmalloc_node()]
  [all …]

zbud.c
  140  static void *zbud_zpool_create(const char *name, gfp_t gfp,  [in zbud_zpool_create(), argument]
  146  pool = zbud_create_pool(gfp, zpool_ops ? &zbud_zpool_ops : NULL);  [in zbud_zpool_create()]
  159  static int zbud_zpool_malloc(void *pool, size_t size, gfp_t gfp,  [in zbud_zpool_malloc(), argument]
  162  return zbud_alloc(pool, size, gfp, handle);  [in zbud_zpool_malloc()]
  305  struct zbud_pool *zbud_create_pool(gfp_t gfp, const struct zbud_ops *ops)  [in zbud_create_pool(), argument]
  310  pool = kzalloc(sizeof(struct zbud_pool), gfp);  [in zbud_create_pool()]
  353  int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,  [in zbud_alloc(), argument]
  361  if (!size || (gfp & __GFP_HIGHMEM))  [in zbud_alloc()]
  385  page = alloc_page(gfp);  [in zbud_alloc()]

util.c
  44   char *kstrdup(const char *s, gfp_t gfp)  [in kstrdup(), argument]
  53   buf = kmalloc_track_caller(len, gfp);  [in kstrdup()]
  69   const char *kstrdup_const(const char *s, gfp_t gfp)  [in kstrdup_const(), argument]
  74   return kstrdup(s, gfp);  [in kstrdup_const()]
  86   char *kstrndup(const char *s, size_t max, gfp_t gfp)  [in kstrndup(), argument]
  95   buf = kmalloc_track_caller(len+1, gfp);  [in kstrndup()]
  111  void *kmemdup(const void *src, size_t len, gfp_t gfp)  [in kmemdup(), argument]
  115  p = kmalloc_track_caller(len, gfp);  [in kmemdup()]
  128  char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)  [in kmemdup_nul(), argument]
  135  buf = kmalloc_track_caller(len + 1, gfp);  [in kmemdup_nul()]

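The util.c duplicators above all pass their gfp straight through to kmalloc_track_caller(), so the calling context decides whether the copy may sleep. A minimal usage sketch; the helpers copy_label() and copy_blob_atomic() are illustrative and not part of the kernel:

    #include <linux/slab.h>
    #include <linux/string.h>

    /* Duplicate a NUL-terminated label; GFP_KERNEL may sleep, so this must
     * not be called from atomic context. kstrdup() returns NULL on failure. */
    static char *copy_label(const char *label)
    {
            return kstrdup(label, GFP_KERNEL);
    }

    /* Same idea for a binary blob while holding a spinlock: GFP_ATOMIC
     * never sleeps but is more likely to fail under memory pressure. */
    static void *copy_blob_atomic(const void *src, size_t len)
    {
            return kmemdup(src, len, GFP_ATOMIC);
    }
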
backing-dev.c
  290  int blkcg_id, gfp_t gfp)  [in wb_init(), argument]
  314  wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);  [in wb_init()]
  318  err = fprop_local_init_percpu(&wb->completions, gfp);  [in wb_init()]
  323  err = percpu_counter_init(&wb->stat[i], 0, gfp);  [in wb_init()]
  399  wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)  [in wb_congested_get_create(), argument]
  434  new_congested = kzalloc(sizeof(*new_congested), gfp);  [in wb_congested_get_create()]
  519  struct cgroup_subsys_state *memcg_css, gfp_t gfp)  [in cgwb_create(), argument]
  547  wb = kmalloc(sizeof(*wb), gfp);  [in cgwb_create()]
  551  ret = wb_init(wb, bdi, blkcg_css->id, gfp);  [in cgwb_create()]
  555  ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);  [in cgwb_create()]
  [all …]

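wb_init() above threads its gfp into fprop_local_init_percpu() and percpu_counter_init() because initializing those objects allocates per-CPU backing storage. A small sketch of the same pattern, assuming a made-up stats field initialized from process context:

    #include <linux/percpu_counter.h>

    /* The gfp only bounds the per-CPU storage allocation done inside
     * percpu_counter_init(); GFP_KERNEL is the usual choice at init time.
     * percpu_counter_destroy() releases the storage again. */
    static int example_stats_init(struct percpu_counter *stat)
    {
            return percpu_counter_init(stat, 0, GFP_KERNEL);
    }
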
zpool.c
  153  struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp,  [in zpool_create_pool(), argument]
  173  zpool = kmalloc(sizeof(*zpool), gfp);  [in zpool_create_pool()]
  181  zpool->pool = driver->create(name, gfp, ops, zpool);  [in zpool_create_pool()]
  254  int zpool_malloc(struct zpool *zpool, size_t size, gfp_t gfp,  [in zpool_malloc(), argument]
  257  return zpool->driver->malloc(zpool->pool, size, gfp, handle);  [in zpool_malloc()]

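zpool is the indirection zswap uses over the zbud, z3fold and zsmalloc backends listed in these results: zpool_create_pool() uses its gfp for the pool metadata, while zpool_malloc() takes a per-allocation gfp that the backend's alloc path (zbud_alloc(), z3fold_alloc(), zs_malloc()) uses for its page allocations. A sketch assuming the zbud backend is available; the function and pool names are illustrative:

    #include <linux/zpool.h>

    static int example_store(size_t len, unsigned long *handle)
    {
            struct zpool *pool;
            int err;

            pool = zpool_create_pool("zbud", "example", GFP_KERNEL, NULL);
            if (!pool)
                    return -ENOMEM;

            /* Reclaim-friendly mask, mirroring the one zswap_pool_create()
             * uses elsewhere in these results. */
            err = zpool_malloc(pool, len,
                               __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM,
                               handle);
            if (err)
                    zpool_destroy_pool(pool);
            return err;
    }
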
z3fold.c
  223  static struct z3fold_pool *z3fold_create_pool(gfp_t gfp,  [in z3fold_create_pool(), argument]
  229  pool = kzalloc(sizeof(struct z3fold_pool), gfp);  [in z3fold_create_pool()]
  294  static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,  [in z3fold_alloc(), argument]
  302  if (!size || (gfp & __GFP_HIGHMEM))  [in z3fold_alloc()]
  345  page = alloc_page(gfp);  [in z3fold_alloc()]
  699  static void *z3fold_zpool_create(const char *name, gfp_t gfp,  [in z3fold_zpool_create(), argument]
  705  pool = z3fold_create_pool(gfp, zpool_ops ? &z3fold_zpool_ops : NULL);  [in z3fold_zpool_create()]
  718  static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,  [in z3fold_zpool_malloc(), argument]
  721  return z3fold_alloc(pool, size, gfp, handle);  [in z3fold_zpool_malloc()]

kmemleak.c
  127   #define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \  [argument]
  542   int min_count, gfp_t gfp)  [in create_object(), argument]
  548   object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));  [in create_object()]
  755   static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)  [in add_scan_area(), argument]
  768   area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));  [in add_scan_area()]
  912   gfp_t gfp)  [in kmemleak_alloc(), argument]
  917   create_object((unsigned long)ptr, size, min_count, gfp);  [in kmemleak_alloc()]
  933   gfp_t gfp)  [in kmemleak_alloc_percpu(), argument]
  946   size, 0, gfp);  [in kmemleak_alloc_percpu()]
  1095  void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)  [in kmemleak_scan_area(), argument]
  [all …]

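gfp_kmemleak_mask() clamps the caller's flags to the GFP_KERNEL/GFP_ATOMIC bits before kmemleak allocates its own tracking object, so callers of kmemleak_alloc() simply pass whatever gfp they used for the real allocation. A sketch of a private allocator registering its objects with kmemleak; example_alloc() and its page-based backing are illustrative:

    #include <linux/gfp.h>
    #include <linux/kmemleak.h>
    #include <linux/mm.h>

    /* Hand the new object to kmemleak so it is scanned and leak-checked.
     * min_count = 1 means: report it if no references are ever found. */
    static void *example_alloc(size_t size, gfp_t gfp)
    {
            void *obj = (void *)__get_free_pages(gfp, get_order(size));

            if (obj)
                    kmemleak_alloc(obj, size, 1, gfp);
            return obj;
    }
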
shmem.c
  113   static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
  114   static int shmem_replace_page(struct page **pagep, gfp_t gfp,
  118   gfp_t gfp, struct mm_struct *fault_mm, int *fault_type);
  1080  gfp_t gfp;  [in shmem_unuse_inode(), local]
  1097  gfp = mapping_gfp_mask(mapping);  [in shmem_unuse_inode()]
  1098  if (shmem_should_replace_page(*pagep, gfp)) {  [in shmem_unuse_inode()]
  1100  error = shmem_replace_page(pagep, gfp, info, index);  [in shmem_unuse_inode()]
  1369  static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,  [in shmem_swapin(), argument]
  1376  page = swapin_readahead(swap, gfp, &pvma, 0);  [in shmem_swapin()]
  1382  static struct page *shmem_alloc_hugepage(gfp_t gfp,  [in shmem_alloc_hugepage(), argument]
  [all …]

mempolicy.c
  1659  static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)  [in policy_nodemask(), argument]
  1663  apply_policy_zone(policy, gfp_zone(gfp)) &&  [in policy_nodemask()]
  1671  static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,  [in policy_zonelist(), argument]
  1686  if (unlikely(gfp & __GFP_THISNODE) &&  [in policy_zonelist()]
  1693  return node_zonelist(nd, gfp);  [in policy_zonelist()]
  1931  static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,  [in alloc_page_interleave(), argument]
  1937  zl = node_zonelist(nid, gfp);  [in alloc_page_interleave()]
  1938  page = __alloc_pages(gfp, order, zl);  [in alloc_page_interleave()]
  1968  alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,  [in alloc_pages_vma(), argument]
  1986  page = alloc_page_interleave(gfp, order, nid);  [in alloc_pages_vma()]
  [all …]

vmpressure.c
  227  void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,  [in vmpressure(), argument]
  243  if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS)))  [in vmpressure()]
  310  void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)  [in vmpressure_prio(), argument]
  326  vmpressure(gfp, memcg, true, vmpressure_win, 0);  [in vmpressure_prio()]

slab.h
  247  gfp_t gfp, int order,  [in memcg_charge_slab(), argument]
  257  ret = memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);  [in memcg_charge_slab()]
  315  static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,  [in memcg_charge_slab(), argument]
  481  gfp_t gfp);
  485  unsigned int count, gfp_t gfp)  [in cache_random_seq_create(), argument]

slab.c
  213  static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
  586  int batchcount, gfp_t gfp)  [in alloc_arraycache(), argument]
  591  ac = kmalloc_node(memsize, gfp, node);  [in alloc_arraycache()]
  642  int limit, gfp_t gfp)  [in alloc_alien_cache(), argument]
  679  int batch, gfp_t gfp)  [in __alloc_alien_cache(), argument]
  684  alc = kmalloc_node(memsize, gfp, node);  [in __alloc_alien_cache()]
  690  static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)  [in alloc_alien_cache(), argument]
  698  alc_ptr = kzalloc_node(memsize, gfp, node);  [in alloc_alien_cache()]
  705  alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);  [in alloc_alien_cache()]
  850  static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)  [in init_cache_node(), argument]
  [all …]

khugepaged.c
  751   khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)  [in khugepaged_alloc_page(), argument]
  755   *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);  [in khugepaged_alloc_page()]
  815   khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)  [in khugepaged_alloc_page(), argument]
  946   gfp_t gfp;  [in collapse_huge_page(), local]
  951   gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_OTHER_NODE | __GFP_THISNODE;  [in collapse_huge_page()]
  960   new_page = khugepaged_alloc_page(hpage, gfp, node);  [in collapse_huge_page()]
  966   if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {  [in collapse_huge_page()]
  1305  gfp_t gfp;  [in collapse_shmem(), local]
  1317  gfp = alloc_hugepage_khugepaged_gfpmask() |  [in collapse_shmem()]
  1320  new_page = khugepaged_alloc_page(hpage, gfp, node);  [in collapse_shmem()]
  [all …]

zsmalloc.c
  352   static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)  [in cache_alloc_handle(), argument]
  355   gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));  [in cache_alloc_handle()]
  388   static void *zs_zpool_create(const char *name, gfp_t gfp,  [in zs_zpool_create(), argument]
  405   static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,  [in zs_zpool_malloc(), argument]
  408   *handle = zs_malloc(pool, size, gfp);  [in zs_zpool_malloc()]
  1114  gfp_t gfp)  [in alloc_zspage(), argument]
  1118  struct zspage *zspage = cache_alloc_zspage(pool, gfp);  [in alloc_zspage()]
  1130  page = alloc_page(gfp);  [in alloc_zspage()]
  1539  unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)  [in zs_malloc(), argument]
  1549  handle = cache_alloc_handle(pool, gfp);  [in zs_malloc()]
  [all …]

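zsmalloc is the third zpool backend; note how cache_alloc_handle() masks off __GFP_HIGHMEM and __GFP_MOVABLE before allocating the handle, while alloc_zspage() uses the caller's gfp for the backing pages. A minimal sketch, assuming an already-created struct zs_pool and an illustrative helper name:

    #include <linux/zsmalloc.h>

    /* zs_malloc() returns an opaque handle, 0 on failure; the gfp covers the
     * handle allocation and any new zspage pages that have to be allocated. */
    static unsigned long example_zs_store(struct zs_pool *pool, size_t len)
    {
            return zs_malloc(pool, len, GFP_KERNEL);
    }
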
percpu-vm.c
  85  const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;  [in pcpu_alloc_pages(), local]
  93  *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);  [in pcpu_alloc_pages()]

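pcpu_alloc_pages() fixes its gfp to GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD and allocates each page on the node of the CPU it will back. A sketch of that per-CPU, node-local pattern; the helper name and array handling are illustrative and unwinding on failure is omitted:

    #include <linux/cpumask.h>
    #include <linux/gfp.h>
    #include <linux/topology.h>

    /* Allocate one order-0 page close to each possible CPU. */
    static int example_alloc_cpu_pages(struct page **pages /* nr_cpu_ids slots */)
    {
            const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
            unsigned int cpu;

            for_each_possible_cpu(cpu) {
                    pages[cpu] = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
                    if (!pages[cpu])
                            return -ENOMEM;
            }
            return 0;
    }
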
filemap.c
  761   struct page *__page_cache_alloc(gfp_t gfp)  [in __page_cache_alloc(), argument]
  771   page = __alloc_pages_node(n, gfp, 0);  [in __page_cache_alloc()]
  776   return alloc_pages(gfp, 0);  [in __page_cache_alloc()]
  2392  gfp_t gfp)  [in do_read_cache_page(), argument]
  2399  page = __page_cache_alloc(gfp | __GFP_COLD);  [in do_read_cache_page()]
  2402  err = add_to_page_cache_lru(page, mapping, index, gfp);  [in do_read_cache_page()]
  2517  gfp_t gfp)  [in read_cache_page_gfp(), argument]
  2521  return do_read_cache_page(mapping, index, filler, NULL, gfp);  [in read_cache_page_gfp()]

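read_cache_page_gfp() exists so a caller can override the gfp used when the page is missing and has to be allocated (__page_cache_alloc()) and inserted (add_to_page_cache_lru()). A sketch with a hypothetical helper name, for a context that must not recurse into the filesystem:

    #include <linux/pagemap.h>

    /* Read page 'index' of a mapping with __GFP_FS cleared (GFP_NOFS),
     * useful while holding locks that reclaim-driven writeback could also
     * need. Returns the uptodate page or an ERR_PTR(); drop the reference
     * with put_page() when done. */
    static struct page *example_read_page(struct address_space *mapping,
                                          pgoff_t index)
    {
            return read_cache_page_gfp(mapping, index, GFP_NOFS);
    }
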
zswap.c
  241  static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)  [in zswap_entry_cache_alloc(), argument]
  244  entry = kmem_cache_alloc(zswap_entry_cache, gfp);  [in zswap_entry_cache_alloc()]
  580  gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;  [in zswap_pool_create(), local]
  591  pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);  [in zswap_pool_create()]

readahead.c
  112  struct list_head *pages, unsigned int nr_pages, gfp_t gfp)  [in read_pages(), argument]
  130  if (!add_to_page_cache_lru(page, mapping, page->index, gfp))  [in read_pages()]

percpu.c
  869   gfp_t gfp)  [in pcpu_alloc(), argument]
  874   bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;  [in pcpu_alloc()]
  1027  kmemleak_alloc_percpu(ptr, size, gfp);  [in pcpu_alloc()]
  1063  void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)  [in __alloc_percpu_gfp(), argument]
  1065  return pcpu_alloc(size, align, false, gfp);  [in __alloc_percpu_gfp()]

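pcpu_alloc() treats any mask that does not contain all of GFP_KERNEL as an atomic request (line 874 above), so __alloc_percpu_gfp() with GFP_NOWAIT gives a per-CPU allocation that will not sleep. A sketch with an illustrative helper name:

    #include <linux/percpu.h>

    /* Non-sleeping per-CPU allocation: GFP_NOWAIT lacks the GFP_KERNEL
     * reclaim bits, so pcpu_alloc() takes its atomic path. Returns NULL on
     * failure; release with free_percpu(). */
    static int __percpu *example_alloc_counter(void)
    {
            return __alloc_percpu_gfp(sizeof(int), __alignof__(int), GFP_NOWAIT);
    }
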
huge_memory.c
  536  gfp_t gfp)  [in __do_huge_pmd_anonymous_page(), argument]
  545  if (mem_cgroup_try_charge(page, vma->vm_mm, gfp, &memcg, true)) {  [in __do_huge_pmd_anonymous_page()]
  647  gfp_t gfp;  [in do_huge_pmd_anonymous_page(), local]
  693  gfp = alloc_hugepage_direct_gfpmask(vma);  [in do_huge_pmd_anonymous_page()]
  694  page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);  [in do_huge_pmd_anonymous_page()]
  700  return __do_huge_pmd_anonymous_page(fe, page, gfp);  [in do_huge_pmd_anonymous_page()]

slab_common.c
  1082  gfp_t gfp)  [in cache_random_seq_create(), argument]
  1089  cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);  [in cache_random_seq_create()]

memcontrol.c
  2296  int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,  [in memcg_kmem_charge_memcg(), argument]
  2303  ret = try_charge(memcg, gfp, nr_pages);  [in memcg_kmem_charge_memcg()]
  2326  int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)  [in memcg_kmem_charge(), argument]
  2336  ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);  [in memcg_kmem_charge()]
  3611  static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)  [in memcg_wb_domain_init(), argument]
  3613  return wb_domain_init(&memcg->cgwb_domain, gfp);  [in memcg_wb_domain_init()]
  3680  static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)  [in memcg_wb_domain_init(), argument]

hugetlb.c
  1510  gfp_t gfp = htlb_alloc_mask(h)|__GFP_COMP|__GFP_REPEAT|__GFP_NOWARN;  [in __hugetlb_alloc_buddy_huge_page(), local]
  1529  gfp |= __GFP_THISNODE;  [in __hugetlb_alloc_buddy_huge_page()]
  1534  return alloc_pages_node(nid, gfp, order);  [in __hugetlb_alloc_buddy_huge_page()]
  1549  zl = huge_zonelist(vma, addr, gfp, &mpol, &nodemask);  [in __hugetlb_alloc_buddy_huge_page()]
  1551  page = __alloc_pages_nodemask(gfp, order, zl, nodemask);  [in __hugetlb_alloc_buddy_huge_page()]

rmap.c
  123  static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)  [in anon_vma_chain_alloc(), argument]
  125  return kmem_cache_alloc(anon_vma_chain_cachep, gfp);  [in anon_vma_chain_alloc()]

page-writeback.c
  646  int wb_domain_init(struct wb_domain *dom, gfp_t gfp)  [in wb_domain_init(), argument]
  658  return fprop_global_init(&dom->completions, gfp);  [in wb_domain_init()]