/mm/ |
D | slob.c |
  191  static void *slob_new_pages(gfp_t gfp, int order, int node)  in slob_new_pages() argument
  197  page = __alloc_pages_node(node, gfp, order);  in slob_new_pages()
  200  page = alloc_pages(gfp, order);  in slob_new_pages()
  301  static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,  in slob_alloc() argument
  358  b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);  in slob_alloc()
  374  if (unlikely(gfp & __GFP_ZERO))  in slob_alloc()
  469  __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)  in __do_kmalloc_node() argument
  477  gfp &= gfp_allowed_mask;  in __do_kmalloc_node()
  479  fs_reclaim_acquire(gfp);  in __do_kmalloc_node()
  480  fs_reclaim_release(gfp);  in __do_kmalloc_node()
  [all …]
|
D | zbud.c |
  141  static void *zbud_zpool_create(const char *name, gfp_t gfp,  in zbud_zpool_create() argument
  147  pool = zbud_create_pool(gfp, zpool_ops ? &zbud_zpool_ops : NULL);  in zbud_zpool_create()
  160  static int zbud_zpool_malloc(void *pool, size_t size, gfp_t gfp,  in zbud_zpool_malloc() argument
  163  return zbud_alloc(pool, size, gfp, handle);  in zbud_zpool_malloc()
  306  struct zbud_pool *zbud_create_pool(gfp_t gfp, const struct zbud_ops *ops)  in zbud_create_pool() argument
  311  pool = kzalloc(sizeof(struct zbud_pool), gfp);  in zbud_create_pool()
  354  int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,  in zbud_alloc() argument
  362  if (!size || (gfp & __GFP_HIGHMEM))  in zbud_alloc()
  385  page = alloc_page(gfp);  in zbud_alloc()
|
D | percpu-vm.c |
  83   gfp_t gfp)  in pcpu_alloc_pages() argument
  88   gfp |= __GFP_HIGHMEM;  in pcpu_alloc_pages()
  94   *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);  in pcpu_alloc_pages()
  276  int page_start, int page_end, gfp_t gfp)  in pcpu_populate_chunk() argument
  284  if (pcpu_alloc_pages(chunk, pages, page_start, page_end, gfp))  in pcpu_populate_chunk()
  332  gfp_t gfp)  in pcpu_create_chunk() argument
  337  chunk = pcpu_alloc_chunk(type, gfp);  in pcpu_create_chunk()
|
D | kmemleak.c |
  116  #define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \  argument
  417  static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)  in mem_pool_alloc() argument
  424  object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));  in mem_pool_alloc()
  572  int min_count, gfp_t gfp)  in create_object() argument
  579  object = mem_pool_alloc(gfp);  in create_object()
  786  static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)  in add_scan_area() argument
  805  area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));  in add_scan_area()
  894  gfp_t gfp)  in kmemleak_alloc() argument
  899  create_object((unsigned long)ptr, size, min_count, gfp);  in kmemleak_alloc()
  913  gfp_t gfp)  in kmemleak_alloc_percpu() argument
  [all …]
|
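The kmemleak.c hits show the caller's gfp being narrowed by gfp_kmemleak_mask() and reused for kmemleak's own tracking objects (mem_pool_alloc(), create_object()). For memory that does not come from the slab or page allocators, the public kmemleak_alloc()/kmemleak_free() hooks accept the same gfp so the metadata allocation matches the caller's context. A minimal hedged sketch; my_raw_alloc()/my_raw_free() are hypothetical placeholders for a custom allocator, not anything in this listing:

    #include <linux/slab.h>
    #include <linux/kmemleak.h>

    /* Hypothetical backing allocator, stands in for e.g. a dedicated pool. */
    extern void *my_raw_alloc(size_t size);
    extern void my_raw_free(void *obj);

    static void *my_pool_alloc(size_t size, gfp_t gfp)
    {
            void *obj = my_raw_alloc(size);

            /*
             * Register the object so kmemleak scans it; @gfp only bounds
             * the allocation of kmemleak's own tracking metadata.
             */
            if (obj)
                    kmemleak_alloc(obj, size, 1, gfp);
            return obj;
    }

    static void my_pool_free(void *obj)
    {
            kmemleak_free(obj);
            my_raw_free(obj);
    }
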
D | percpu-km.c |
  36   int page_start, int page_end, gfp_t gfp)  in pcpu_populate_chunk() argument
  48   gfp_t gfp)  in pcpu_create_chunk() argument
  56   chunk = pcpu_alloc_chunk(type, gfp);  in pcpu_create_chunk()
  60   pages = alloc_pages(gfp, order_base_2(nr_pages));  in pcpu_create_chunk()
|
D | percpu.c |
  483   static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)  in pcpu_mem_zalloc() argument
  489   return kzalloc(size, gfp);  in pcpu_mem_zalloc()
  491   return __vmalloc(size, gfp | __GFP_ZERO);  in pcpu_mem_zalloc()
  1396  static struct pcpu_chunk *pcpu_alloc_chunk(enum pcpu_chunk_type type, gfp_t gfp)  in pcpu_alloc_chunk() argument
  1401  chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp);  in pcpu_alloc_chunk()
  1410  sizeof(chunk->alloc_map[0]), gfp);  in pcpu_alloc_chunk()
  1415  sizeof(chunk->bound_map[0]), gfp);  in pcpu_alloc_chunk()
  1420  sizeof(chunk->md_blocks[0]), gfp);  in pcpu_alloc_chunk()
  1428  sizeof(struct obj_cgroup *), gfp);  in pcpu_alloc_chunk()
  1535  int page_start, int page_end, gfp_t gfp);
  [all …]
|
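pcpu_mem_zalloc() above picks kzalloc() for sub-page sizes and falls back to __vmalloc() for larger chunk metadata, forwarding the caller's gfp either way. A hedged sketch of that same small-or-vmalloc pattern, assuming the two-argument __vmalloc() form shown in the listing; in most other callers the generic kvzalloc(size, gfp) helper covers this:

    #include <linux/slab.h>
    #include <linux/vmalloc.h>
    #include <linux/mm.h>

    /*
     * Sketch of the pcpu_mem_zalloc()-style fallback: physically
     * contiguous memory for small metadata, vmalloc space beyond a page.
     */
    static void *meta_zalloc(size_t size, gfp_t gfp)
    {
            if (size <= PAGE_SIZE)
                    return kzalloc(size, gfp);
            return __vmalloc(size, gfp | __GFP_ZERO);
    }
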
D | zpool.c |
  155  struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp,  in zpool_create_pool() argument
  175  zpool = kmalloc(sizeof(*zpool), gfp);  in zpool_create_pool()
  183  zpool->pool = driver->create(name, gfp, ops, zpool);  in zpool_create_pool()
  273  int zpool_malloc(struct zpool *zpool, size_t size, gfp_t gfp,  in zpool_malloc() argument
  276  return zpool->driver->malloc(zpool->pool, size, gfp, handle);  in zpool_malloc()
|
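zpool_create_pool() and zpool_malloc() simply forward the caller's gfp to the selected backend driver (zbud, z3fold or zsmalloc). A hedged usage sketch based on the signatures visible above, roughly how zswap drives a pool; the backend name, pool name and object size are illustrative, error handling is trimmed, and the zbud backend is assumed to be built in:

    #include <linux/zpool.h>
    #include <linux/gfp.h>
    #include <linux/errno.h>

    static int example_zpool_use(void)
    {
            gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
            struct zpool *pool;
            unsigned long handle;
            int err;

            /* Backend chosen by name; @gfp bounds the pool's own allocations. */
            pool = zpool_create_pool("zbud", "example", gfp, NULL);
            if (!pool)
                    return -ENOMEM;

            /* Each object allocation passes its own gfp as well. */
            err = zpool_malloc(pool, 128, gfp, &handle);
            if (err) {
                    zpool_destroy_pool(pool);
                    return err;
            }

            zpool_free(pool, handle);
            zpool_destroy_pool(pool);
            return 0;
    }
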
D | util.c |
  55   char *kstrdup(const char *s, gfp_t gfp)  in kstrdup() argument
  64   buf = kmalloc_track_caller(len, gfp);  in kstrdup()
  82   const char *kstrdup_const(const char *s, gfp_t gfp)  in kstrdup_const() argument
  87   return kstrdup(s, gfp);  in kstrdup_const()
  101  char *kstrndup(const char *s, size_t max, gfp_t gfp)  in kstrndup() argument
  110  buf = kmalloc_track_caller(len+1, gfp);  in kstrndup()
  128  void *kmemdup(const void *src, size_t len, gfp_t gfp)  in kmemdup() argument
  132  p = kmalloc_track_caller(len, gfp);  in kmemdup()
  148  char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)  in kmemdup_nul() argument
  155  buf = kmalloc_track_caller(len + 1, gfp);  in kmemdup_nul()
|
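The util.c helpers (kstrdup(), kstrndup(), kmemdup(), kmemdup_nul()) all hand the caller's gfp straight to the underlying kmalloc_track_caller() call, so the caller decides the allocation context. A small hedged usage sketch; struct widget and its fields are illustrative, not from the listing:

    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/errno.h>

    struct widget {                 /* illustrative structure */
            char *name;
            void *blob;
    };

    static int widget_init(struct widget *w, const char *name,
                           const void *blob, size_t blob_len)
    {
            /* GFP_KERNEL: may sleep; atomic callers would pass GFP_ATOMIC. */
            w->name = kstrdup(name, GFP_KERNEL);
            if (!w->name)
                    return -ENOMEM;

            w->blob = kmemdup(blob, blob_len, GFP_KERNEL);
            if (!w->blob) {
                    kfree(w->name);
                    return -ENOMEM;
            }
            return 0;
    }
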
D | shmem.c |
  147   static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
  148   static int shmem_replace_page(struct page **pagep, gfp_t gfp,
  152   gfp_t gfp, struct vm_area_struct *vma,
  156   gfp_t gfp, struct vm_area_struct *vma,
  681   pgoff_t index, void *expected, gfp_t gfp,  in shmem_add_to_page_cache() argument
  700   error = mem_cgroup_charge(page, charge_mm, gfp);  in shmem_add_to_page_cache()
  709   cgroup_throttle_swaprate(page, gfp);  in shmem_add_to_page_cache()
  735   } while (xas_nomem(&xas, gfp));  in shmem_add_to_page_cache()
  1531  static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,  in shmem_swapin() argument
  1541  page = swap_cluster_readahead(swap, gfp, &vmf);  in shmem_swapin()
  [all …]
|
D | backing-dev.c |
  281  gfp_t gfp)  in wb_init() argument
  308  err = fprop_local_init_percpu(&wb->completions, gfp);  in wb_init()
  313  err = percpu_counter_init(&wb->stat[i], 0, gfp);  in wb_init()
  436  struct cgroup_subsys_state *memcg_css, gfp_t gfp)  in cgwb_create() argument
  464  wb = kmalloc(sizeof(*wb), gfp);  in cgwb_create()
  470  ret = wb_init(wb, bdi, gfp);  in cgwb_create()
  474  ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);  in cgwb_create()
  478  ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);  in cgwb_create()
  587  gfp_t gfp)  in wb_get_create() argument
  591  might_sleep_if(gfpflags_allow_blocking(gfp));  in wb_get_create()
  [all …]
|
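wb_init() and cgwb_create() pass the caller's gfp into every per-CPU initializer (percpu counters, fractional proportions, percpu refcounts), because each of those helpers allocates per-CPU storage internally. A hedged sketch of the same idea with a plain percpu_counter; struct stats is illustrative:

    #include <linux/percpu_counter.h>

    struct stats {                  /* illustrative */
            struct percpu_counter events;
    };

    static int stats_init(struct stats *s, gfp_t gfp)
    {
            /*
             * percpu_counter_init() allocates the per-CPU storage, so it
             * takes a gfp mask just like the wb_init() calls above.
             */
            return percpu_counter_init(&s->events, 0, gfp);
    }

    static void stats_exit(struct stats *s)
    {
            percpu_counter_destroy(&s->events);
    }
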
D | mempolicy.c |
  1884  nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)  in policy_nodemask() argument
  1888  apply_policy_zone(policy, gfp_zone(gfp)) &&  in policy_nodemask()
  1896  static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)  in policy_node() argument
  1906  WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));  in policy_node()
  2143  static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,  in alloc_page_interleave() argument
  2148  page = __alloc_pages(gfp, order, nid);  in alloc_page_interleave()
  2184  alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,  in alloc_pages_vma() argument
  2199  page = alloc_page_interleave(gfp, order, nid);  in alloc_pages_vma()
  2219  nmask = policy_nodemask(gfp, pol);  in alloc_pages_vma()
  2227  gfp | __GFP_THISNODE | __GFP_NORETRY, order);  in alloc_pages_vma()
  [all …]
|
D | slab.c |
  215  static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
  534  int batchcount, gfp_t gfp)  in alloc_arraycache() argument
  539  ac = kmalloc_node(memsize, gfp, node);  in alloc_arraycache()
  608  int limit, gfp_t gfp)  in alloc_alien_cache() argument
  645  int batch, gfp_t gfp)  in __alloc_alien_cache() argument
  650  alc = kmalloc_node(memsize, gfp, node);  in __alloc_alien_cache()
  659  static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)  in alloc_alien_cache() argument
  666  alc_ptr = kcalloc_node(nr_node_ids, sizeof(void *), gfp, node);  in alloc_alien_cache()
  673  alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);  in alloc_alien_cache()
  818  static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)  in init_cache_node() argument
  [all …]
|
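These slab.c hits are the allocator building its own per-node bookkeeping (array caches, alien caches) with kmalloc_node()/kcalloc_node(), so the data lands on the NUMA node it describes, under a caller-supplied gfp. A hedged sketch of node-local allocation with those generic helpers; struct foo and its layout are illustrative:

    #include <linux/slab.h>

    struct foo {                    /* illustrative per-node bookkeeping */
            void **slots;
            int nr_slots;
    };

    static struct foo *alloc_foo_on_node(int entries, gfp_t gfp, int node)
    {
            struct foo *f;

            /* Both the structure and its slot array are placed on @node. */
            f = kmalloc_node(sizeof(*f), gfp | __GFP_ZERO, node);
            if (!f)
                    return NULL;

            f->slots = kcalloc_node(entries, sizeof(void *), gfp, node);
            if (!f->slots) {
                    kfree(f);
                    return NULL;
            }
            f->nr_slots = entries;
            return f;
    }
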
D | vmpressure.c |
  242  void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,  in vmpressure() argument
  268  if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS)))  in vmpressure()
  335  void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)  in vmpressure_prio() argument
  351  vmpressure(gfp, memcg, true, vmpressure_win, 0);  in vmpressure_prio()
|
D | z3fold.c |
  211   gfp_t gfp)  in alloc_slots() argument
  216   (gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE)));  in alloc_slots()
  398   struct z3fold_pool *pool, gfp_t gfp)  in init_z3fold_page() argument
  412   slots = alloc_slots(pool, gfp);  in init_z3fold_page()
  989   static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,  in z3fold_create_pool() argument
  995   pool = kzalloc(sizeof(struct z3fold_pool), gfp);  in z3fold_create_pool()
  1089  static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,  in z3fold_alloc() argument
  1096  bool can_sleep = gfpflags_allow_blocking(gfp);  in z3fold_alloc()
  1147  page = alloc_page(gfp);  in z3fold_alloc()
  1152  zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);  in z3fold_alloc()
  [all …]
|
D | zswap.c |
  265   static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)  in zswap_entry_cache_alloc() argument
  268   entry = kmem_cache_alloc(zswap_entry_cache, gfp);  in zswap_entry_cache_alloc()
  535   gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;  in zswap_pool_create() local
  556   pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);  in zswap_pool_create()
  1014  gfp_t gfp;  in zswap_frontswap_store() local
  1090  gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;  in zswap_frontswap_store()
  1092  gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;  in zswap_frontswap_store()
  1093  ret = zpool_malloc(entry->pool->zpool, hlen + dlen, gfp, &handle);  in zswap_frontswap_store()
|
D | khugepaged.c |
  882   khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)  in khugepaged_alloc_page() argument
  886   *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);  in khugepaged_alloc_page()
  958   khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)  in khugepaged_alloc_page() argument
  1077  gfp_t gfp;  in collapse_huge_page() local
  1082  gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;  in collapse_huge_page()
  1091  new_page = khugepaged_alloc_page(hpage, gfp, node);  in collapse_huge_page()
  1097  if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {  in collapse_huge_page()
  1691  gfp_t gfp;  in collapse_file() local
  1703  gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;  in collapse_file()
  1705  new_page = khugepaged_alloc_page(hpage, gfp, node);  in collapse_file()
  [all …]
|
D | slab.h |
  301  gfp_t gfp);
  444  struct kmem_cache *s, gfp_t gfp)  in memcg_alloc_page_obj_cgroups() argument
  659  gfp_t gfp);
  663  unsigned int count, gfp_t gfp)  in cache_random_seq_create() argument
|
D | zsmalloc.c |
  347   static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)  in cache_alloc_handle() argument
  350   gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE|__GFP_CMA));  in cache_alloc_handle()
  383   static void *zs_zpool_create(const char *name, gfp_t gfp,  in zs_zpool_create() argument
  400   static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,  in zs_zpool_malloc() argument
  403   *handle = zs_malloc(pool, size, gfp);  in zs_zpool_malloc()
  1061  gfp_t gfp)  in alloc_zspage() argument
  1065  struct zspage *zspage = cache_alloc_zspage(pool, gfp);  in alloc_zspage()
  1077  page = alloc_page(gfp);  in alloc_zspage()
  1413  unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)  in zs_malloc() argument
  1423  handle = cache_alloc_handle(pool, gfp);  in zs_malloc()
  [all …]
|
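zs_malloc() takes the object size and a gfp, and cache_alloc_handle()/alloc_zspage() strip bits such as __GFP_HIGHMEM and __GFP_MOVABLE before allocating their own metadata. A hedged sketch of the exported zsmalloc API as a direct (non-zpool) caller might use it, assuming the version shown here where zs_malloc() returns 0 on failure; pool name and size are illustrative:

    #include <linux/zsmalloc.h>
    #include <linux/string.h>
    #include <linux/errno.h>

    static int example_zs_use(void)
    {
            struct zs_pool *pool;
            unsigned long handle;
            void *obj;

            pool = zs_create_pool("example");
            if (!pool)
                    return -ENOMEM;

            /* @gfp here constrains the zspage and handle allocations. */
            handle = zs_malloc(pool, 64, GFP_KERNEL);
            if (!handle) {
                    zs_destroy_pool(pool);
                    return -ENOMEM;
            }

            /* Objects are accessed through a temporary mapping. */
            obj = zs_map_object(pool, handle, ZS_MM_WO);
            memset(obj, 0, 64);
            zs_unmap_object(pool, handle);

            zs_free(pool, handle);
            zs_destroy_pool(pool);
            return 0;
    }
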
D | filemap.c |
  837   pgoff_t offset, gfp_t gfp,  in __add_to_page_cache_locked() argument
  854   error = mem_cgroup_charge(page, current->mm, gfp);  in __add_to_page_cache_locked()
  860   gfp &= GFP_RECLAIM_MASK;  in __add_to_page_cache_locked()
  868   order, gfp);  in __add_to_page_cache_locked()
  902   } while (xas_nomem(&xas, gfp));  in __add_to_page_cache_locked()
  971   struct page *__page_cache_alloc(gfp_t gfp)  in __page_cache_alloc() argument
  981   page = __alloc_pages_node(n, gfp, 0);  in __page_cache_alloc()
  986   return alloc_pages(gfp, 0);  in __page_cache_alloc()
  3176  gfp_t gfp)  in do_read_cache_page() argument
  3183  page = __page_cache_alloc(gfp);  in do_read_cache_page()
  [all …]
|
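__add_to_page_cache_locked() narrows the caller's gfp to GFP_RECLAIM_MASK and then retries the XArray insertion with xas_nomem(&xas, gfp), which preallocates a node with that gfp when the store failed for lack of memory; shmem.c and swap_state.c use the same loop. A hedged sketch of that generic XArray store-and-retry pattern, with an illustrative xarray and index rather than the page cache itself:

    #include <linux/xarray.h>

    static DEFINE_XARRAY(example_xa);       /* illustrative xarray */

    /* Insert @entry at @index, allocating xarray nodes with @gfp. */
    static int example_store(unsigned long index, void *entry, gfp_t gfp)
    {
            XA_STATE(xas, &example_xa, index);

            do {
                    xas_lock(&xas);
                    xas_store(&xas, entry);
                    xas_unlock(&xas);
                    /*
                     * xas_nomem() allocates a node with @gfp if the store
                     * ran out of memory and asks us to retry the store.
                     */
            } while (xas_nomem(&xas, gfp));

            return xas_error(&xas);
    }
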
D | huge_memory.c |
  582  struct page *page, gfp_t gfp)  in __do_huge_pmd_anonymous_page() argument
  591  if (mem_cgroup_charge(page, vma->vm_mm, gfp)) {  in __do_huge_pmd_anonymous_page()
  597  cgroup_throttle_swaprate(page, gfp);  in __do_huge_pmd_anonymous_page()
  714  gfp_t gfp;  in do_huge_pmd_anonymous_page() local
  762  gfp = alloc_hugepage_direct_gfpmask(vma);  in do_huge_pmd_anonymous_page()
  763  page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);  in do_huge_pmd_anonymous_page()
  769  return __do_huge_pmd_anonymous_page(vmf, page, gfp);  in do_huge_pmd_anonymous_page()
|
D | memcontrol.c |
  2935  gfp_t gfp)  in memcg_alloc_page_obj_cgroups() argument
  2940  gfp &= ~OBJCGS_CLEAR_MASK;  in memcg_alloc_page_obj_cgroups()
  2941  vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,  in memcg_alloc_page_obj_cgroups()
  3078  int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,  in __memcg_kmem_charge() argument
  3084  ret = try_charge(memcg, gfp, nr_pages);  in __memcg_kmem_charge()
  3096  if (gfp & __GFP_NOFAIL) {  in __memcg_kmem_charge()
  3127  int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)  in __memcg_kmem_charge_page() argument
  3134  ret = __memcg_kmem_charge(memcg, gfp, 1 << order);  in __memcg_kmem_charge_page()
  3266  int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)  in obj_cgroup_charge() argument
  3298  ret = __memcg_kmem_charge(memcg, gfp, nr_pages);  in obj_cgroup_charge()
  [all …]
|
D | swap_state.c |
  130  gfp_t gfp, void **shadowp)  in add_to_swap_cache() argument
  170  } while (xas_nomem(&xas, gfp));  in add_to_swap_cache()
|
D | slab_common.c |
  901  gfp_t gfp)  in cache_random_seq_create() argument
  908  cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);  in cache_random_seq_create()
|
/mm/kfence/ |
D | kfence_test.c |
  227  static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocation_policy policy)  in test_alloc() argument
  248  kunit_info(test, "%s: size=%zu, gfp=%x, policy=%s, cache=%i\n", __func__, size, gfp,  in test_alloc()
  264  alloc = kmem_cache_alloc(test_cache, gfp);  in test_alloc()
  266  alloc = kmalloc(size, gfp);  in test_alloc()
|
D | core.c |
  257  static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp)  in kfence_guarded_alloc() argument
  337  if (unlikely(slab_want_init_on_alloc(gfp, cache)))  in kfence_guarded_alloc()
|