
Searched refs:gfp (Results 1 – 25 of 31) sorted by relevance


/mm/
slob.c
191 static void *slob_new_pages(gfp_t gfp, int order, int node) in slob_new_pages() argument
197 page = __alloc_pages_node(node, gfp, order); in slob_new_pages()
200 page = alloc_pages(gfp, order); in slob_new_pages()
301 static void *slob_alloc(size_t size, gfp_t gfp, int align, int node, in slob_alloc() argument
358 b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node); in slob_alloc()
374 if (unlikely(gfp & __GFP_ZERO)) in slob_alloc()
477 __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller) in __do_kmalloc_node() argument
483 gfp &= gfp_allowed_mask; in __do_kmalloc_node()
485 might_alloc(gfp); in __do_kmalloc_node()
500 m = slob_alloc(size + minalign, gfp, align, node, minalign); in __do_kmalloc_node()
[all …]
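The slob.c hits above show a common allocator entry pattern: the caller's gfp mask is first restricted to gfp_allowed_mask, the allocation context is checked with might_alloc(), __GFP_ZERO is stripped before the page-level allocation, and zeroing is done afterwards only if requested. A minimal sketch of that idiom; tiny_alloc() is a hypothetical name, not a slob symbol:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/string.h>

/* Hypothetical single-page allocator mirroring the slob_alloc()/
 * __do_kmalloc_node() flow shown above. */
static void *tiny_alloc(size_t size, gfp_t gfp)
{
	void *p;

	if (size > PAGE_SIZE)		/* this sketch only handles one page */
		return NULL;

	gfp &= gfp_allowed_mask;	/* honour global allocation restrictions */
	might_alloc(gfp);		/* may this call site sleep/reclaim? */

	p = (void *)__get_free_page(gfp & ~__GFP_ZERO);
	if (p && (gfp & __GFP_ZERO))	/* zero only if the caller asked for it */
		memset(p, 0, PAGE_SIZE);
	return p;
}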
zbud.c
220 static struct zbud_pool *zbud_create_pool(gfp_t gfp, const struct zbud_ops *ops) in zbud_create_pool() argument
225 pool = kzalloc(sizeof(struct zbud_pool), gfp); in zbud_create_pool()
268 static int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp, in zbud_alloc() argument
276 if (!size || (gfp & __GFP_HIGHMEM)) in zbud_alloc()
299 page = alloc_page(gfp); in zbud_alloc()
539 static void *zbud_zpool_create(const char *name, gfp_t gfp, in zbud_zpool_create() argument
545 pool = zbud_create_pool(gfp, zpool_ops ? &zbud_zpool_ops : NULL); in zbud_zpool_create()
558 static int zbud_zpool_malloc(void *pool, size_t size, gfp_t gfp, in zbud_zpool_malloc() argument
561 return zbud_alloc(pool, size, gfp, handle); in zbud_zpool_malloc()
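zbud_alloc() above rejects __GFP_HIGHMEM outright because the pool relies on every page having a permanent kernel mapping. A sketch of that guard, with a hypothetical helper name:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Refuse highmem placement up front: the caller needs page_address() to work
 * at any time, which highmem pages do not guarantee. */
static void *mapped_page_alloc(gfp_t gfp)
{
	struct page *page;

	if (gfp & __GFP_HIGHMEM)
		return NULL;

	page = alloc_page(gfp);
	return page ? page_address(page) : NULL;
}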
percpu-vm.c
84 gfp_t gfp) in pcpu_alloc_pages() argument
89 gfp |= __GFP_HIGHMEM; in pcpu_alloc_pages()
95 *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0); in pcpu_alloc_pages()
277 int page_start, int page_end, gfp_t gfp) in pcpu_populate_chunk() argument
285 if (pcpu_alloc_pages(chunk, pages, page_start, page_end, gfp)) in pcpu_populate_chunk()
333 static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp) in pcpu_create_chunk() argument
338 chunk = pcpu_alloc_chunk(gfp); in pcpu_create_chunk()
kmemleak.c
116 #define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC | \ argument
423 static struct kmemleak_object *mem_pool_alloc(gfp_t gfp) in mem_pool_alloc() argument
430 object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp)); in mem_pool_alloc()
578 int min_count, gfp_t gfp) in create_object() argument
586 object = mem_pool_alloc(gfp); in create_object()
794 static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp) in add_scan_area() argument
813 area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp)); in add_scan_area()
902 gfp_t gfp) in kmemleak_alloc() argument
907 create_object((unsigned long)ptr, size, min_count, gfp); in kmemleak_alloc()
921 gfp_t gfp) in kmemleak_alloc_percpu() argument
[all …]
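gfp_kmemleak_mask() above whitelists only the bits of the caller's mask that describe its context (GFP_KERNEL vs GFP_ATOMIC) and forces conservative modifiers for the tracker's own allocations. A simplified sketch of the same idiom; the macro and helper names are hypothetical and the real mask includes a few more flags:

#include <linux/gfp.h>
#include <linux/slab.h>

/* Keep only the context bits of the caller's mask and add conservative
 * modifiers, so metadata allocation never behaves more aggressively than
 * the allocation being tracked. */
#define TRACKER_GFP(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
				 __GFP_NORETRY | __GFP_NOWARN)

static void *tracker_alloc(struct kmem_cache *cache, gfp_t gfp)
{
	return kmem_cache_alloc(cache, TRACKER_GFP(gfp));
}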
percpu-km.c
42 int page_start, int page_end, gfp_t gfp) in pcpu_populate_chunk() argument
53 static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp) in pcpu_create_chunk() argument
61 chunk = pcpu_alloc_chunk(gfp); in pcpu_create_chunk()
65 pages = alloc_pages(gfp, order_base_2(nr_pages)); in pcpu_create_chunk()
percpu.c
508 static void *pcpu_mem_zalloc(size_t size, gfp_t gfp) in pcpu_mem_zalloc() argument
514 return kzalloc(size, gfp); in pcpu_mem_zalloc()
516 return __vmalloc(size, gfp | __GFP_ZERO); in pcpu_mem_zalloc()
1441 static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp) in pcpu_alloc_chunk() argument
1446 chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp); in pcpu_alloc_chunk()
1455 sizeof(chunk->alloc_map[0]), gfp); in pcpu_alloc_chunk()
1460 sizeof(chunk->bound_map[0]), gfp); in pcpu_alloc_chunk()
1465 sizeof(chunk->md_blocks[0]), gfp); in pcpu_alloc_chunk()
1473 sizeof(struct obj_cgroup *), gfp); in pcpu_alloc_chunk()
1578 int page_start, int page_end, gfp_t gfp);
[all …]
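pcpu_mem_zalloc() above routes small requests to kzalloc() and larger ones to __vmalloc() with __GFP_ZERO added, so both paths return zeroed memory under the caller's gfp constraints. A rough sketch; the helper name and the PAGE_SIZE threshold are illustrative:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Small buffers come from the slab, large ones from vmalloc; both honour the
 * caller's gfp flags and come back zeroed. */
static void *zalloc_auto(size_t size, gfp_t gfp)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, gfp);
	return __vmalloc(size, gfp | __GFP_ZERO);
}

The matching free side must know which allocator produced the pointer; kvfree() handles either case.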
zpool.c
156 struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp, in zpool_create_pool() argument
176 zpool = kmalloc(sizeof(*zpool), gfp); in zpool_create_pool()
184 zpool->pool = driver->create(name, gfp, ops, zpool); in zpool_create_pool()
275 int zpool_malloc(struct zpool *zpool, size_t size, gfp_t gfp, in zpool_malloc() argument
278 return zpool->driver->malloc(zpool->pool, size, gfp, handle); in zpool_malloc()
mempolicy.c
1764 nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) in policy_nodemask() argument
1770 apply_policy_zone(policy, gfp_zone(gfp)) && in policy_nodemask()
1787 static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd) in policy_node() argument
1797 WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE)); in policy_node()
2027 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, in alloc_page_interleave() argument
2032 page = __alloc_pages(gfp, order, nid, NULL); in alloc_page_interleave()
2044 static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order, in alloc_pages_preferred_many() argument
2056 preferred_gfp = gfp | __GFP_NOWARN; in alloc_pages_preferred_many()
2060 page = __alloc_pages(gfp, order, numa_node_id(), NULL); in alloc_pages_preferred_many()
2081 struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, in alloc_pages_vma() argument
[all …]
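alloc_pages_preferred_many() above makes a quiet first attempt against the preferred node set (note the added __GFP_NOWARN) and falls back to an unconstrained allocation if that fails. A simplified sketch; the real code also softens the reclaim flags on the first pass:

#include <linux/gfp.h>
#include <linux/nodemask.h>
#include <linux/topology.h>

/* First pass: restrict to the preferred nodes and stay quiet on failure.
 * Second pass: retry anywhere with the caller's original flags. */
static struct page *alloc_preferred_then_any(gfp_t gfp, unsigned int order,
					     int preferred_nid,
					     nodemask_t *preferred)
{
	struct page *page;

	page = __alloc_pages(gfp | __GFP_NOWARN, order, preferred_nid, preferred);
	if (!page)
		page = __alloc_pages(gfp, order, numa_node_id(), NULL);
	return page;
}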
util.c
55 char *kstrdup(const char *s, gfp_t gfp) in kstrdup() argument
64 buf = kmalloc_track_caller(len, gfp); in kstrdup()
82 const char *kstrdup_const(const char *s, gfp_t gfp) in kstrdup_const() argument
87 return kstrdup(s, gfp); in kstrdup_const()
101 char *kstrndup(const char *s, size_t max, gfp_t gfp) in kstrndup() argument
110 buf = kmalloc_track_caller(len+1, gfp); in kstrndup()
128 void *kmemdup(const void *src, size_t len, gfp_t gfp) in kmemdup() argument
132 p = kmalloc_track_caller(len, gfp); in kmemdup()
148 char *kmemdup_nul(const char *s, size_t len, gfp_t gfp) in kmemdup_nul() argument
155 buf = kmalloc_track_caller(len + 1, gfp); in kmemdup_nul()
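The util.c helpers above all follow the same shape: compute the length, allocate with the caller's gfp, copy. A sketch; my_strdup() is a hypothetical name, and the real kstrdup() uses kmalloc_track_caller() so the allocation is attributed to its caller:

#include <linux/slab.h>
#include <linux/string.h>

/* Duplicate a string in whatever allocation context the caller is in
 * (GFP_KERNEL from process context, GFP_ATOMIC from atomic context, ...). */
static char *my_strdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}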
shmem.c
142 gfp_t gfp, struct vm_area_struct *vma,
146 gfp_t gfp, struct vm_area_struct *vma,
700 pgoff_t index, void *expected, gfp_t gfp, in shmem_add_to_page_cache() argument
719 error = mem_cgroup_charge(page, charge_mm, gfp); in shmem_add_to_page_cache()
728 cgroup_throttle_swaprate(page, gfp); in shmem_add_to_page_cache()
754 } while (xas_nomem(&xas, gfp)); in shmem_add_to_page_cache()
1516 static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp, in shmem_swapin() argument
1526 page = swap_cluster_readahead(swap, gfp, &vmf); in shmem_swapin()
1556 static struct page *shmem_alloc_hugepage(gfp_t gfp, in shmem_alloc_hugepage() argument
1570 page = alloc_pages_vma(gfp, HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), in shmem_alloc_hugepage()
[all …]
backing-dev.c
281 gfp_t gfp) in wb_init() argument
310 err = fprop_local_init_percpu(&wb->completions, gfp); in wb_init()
315 err = percpu_counter_init(&wb->stat[i], 0, gfp); in wb_init()
450 struct cgroup_subsys_state *memcg_css, gfp_t gfp) in cgwb_create() argument
478 wb = kmalloc(sizeof(*wb), gfp); in cgwb_create()
484 ret = wb_init(wb, bdi, gfp); in cgwb_create()
488 ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp); in cgwb_create()
492 ret = fprop_local_init_percpu(&wb->memcg_completions, gfp); in cgwb_create()
602 gfp_t gfp) in wb_get_create() argument
606 might_alloc(gfp); in wb_get_create()
[all …]
slab.c
215 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
534 int batchcount, gfp_t gfp) in alloc_arraycache() argument
539 ac = kmalloc_node(memsize, gfp, node); in alloc_arraycache()
608 int limit, gfp_t gfp) in alloc_alien_cache() argument
645 int batch, gfp_t gfp) in __alloc_alien_cache() argument
650 alc = kmalloc_node(memsize, gfp, node); in __alloc_alien_cache()
659 static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp) in alloc_alien_cache() argument
666 alc_ptr = kcalloc_node(nr_node_ids, sizeof(void *), gfp, node); in alloc_alien_cache()
673 alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp); in alloc_alien_cache()
818 static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp) in init_cache_node() argument
[all …]
vmpressure.c
239 void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, in vmpressure() argument
268 if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS))) in vmpressure()
335 void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio) in vmpressure_prio() argument
351 vmpressure(gfp, memcg, true, vmpressure_win, 0); in vmpressure_prio()
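vmpressure() above uses the gfp mask of the allocation that triggered reclaim to decide whether the event says anything about user-visible memory pressure. A sketch of that filter with a hypothetical name:

#include <linux/gfp.h>
#include <linux/types.h>

/* Only allocations that could have scanned page cache or user memory
 * (highmem/movable placement, or I/O- and FS-capable requests) are
 * meaningful; purely kernel-internal lowmem allocations are ignored. */
static bool pressure_event_relevant(gfp_t gfp)
{
	return !!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS));
}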
z3fold.c
213 gfp_t gfp) in alloc_slots() argument
218 (gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE))); in alloc_slots()
386 struct z3fold_pool *pool, gfp_t gfp) in init_z3fold_page() argument
400 slots = alloc_slots(pool, gfp); in init_z3fold_page()
973 static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp, in z3fold_create_pool() argument
979 pool = kzalloc(sizeof(struct z3fold_pool), gfp); in z3fold_create_pool()
1074 static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp, in z3fold_alloc() argument
1081 bool can_sleep = gfpflags_allow_blocking(gfp); in z3fold_alloc()
1132 page = alloc_page(gfp); in z3fold_alloc()
1137 zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp); in z3fold_alloc()
[all …]
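z3fold_alloc() above derives can_sleep from the mask via gfpflags_allow_blocking() and picks its locking strategy accordingly. A sketch of that idiom; the lock and function names are hypothetical:

#include <linux/gfp.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(slow_path_lock);

/* Sleep on the lock only when the gfp mask permits blocking; atomic callers
 * get a best-effort trylock and bail out instead of sleeping. */
static struct page *ctx_aware_alloc(gfp_t gfp)
{
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (can_sleep)
		mutex_lock(&slow_path_lock);
	else if (!mutex_trylock(&slow_path_lock))
		return NULL;

	/* ... pool bookkeeping would go here ... */
	mutex_unlock(&slow_path_lock);

	return alloc_page(gfp);
}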
slab.h
280 gfp_t gfp, bool new_page);
409 struct kmem_cache *s, gfp_t gfp, in memcg_alloc_page_obj_cgroups() argument
452 gfp_t gfp) in account_slab_page() argument
455 memcg_alloc_page_obj_cgroups(page, s, gfp, true); in account_slab_page()
626 gfp_t gfp);
630 unsigned int count, gfp_t gfp) in cache_random_seq_create() argument
secretmem.c
55 gfp_t gfp = vmf->gfp_mask; in secretmem_fault() local
69 page = alloc_page(gfp | __GFP_ZERO); in secretmem_fault()
83 err = add_to_page_cache_lru(page, mapping, offset, gfp); in secretmem_fault()
zswap.c
275 static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp) in zswap_entry_cache_alloc() argument
278 entry = kmem_cache_alloc(zswap_entry_cache, gfp); in zswap_entry_cache_alloc()
589 gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM; in zswap_pool_create() local
610 pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops); in zswap_pool_create()
1095 gfp_t gfp; in zswap_frontswap_store() local
1191 gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM; in zswap_frontswap_store()
1193 gfp |= __GFP_HIGHMEM | __GFP_MOVABLE; in zswap_frontswap_store()
1194 ret = zpool_malloc(entry->pool->zpool, hlen + dlen, gfp, &handle); in zswap_frontswap_store()
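zswap_frontswap_store() above composes its mask from a conservative base (no hard retries, no warnings, kswapd allowed) and adds __GFP_HIGHMEM | __GFP_MOVABLE only when the backing zpool can use such pages. A sketch; movable_ok is a hypothetical capability flag standing in for the backend check:

#include <linux/gfp.h>
#include <linux/types.h>

/* A failed store is recoverable (the page simply stays in swap), so never
 * retry hard or warn; allow highmem/movable placement only if the pool
 * backend copes with it. */
static gfp_t store_gfp(bool movable_ok)
{
	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;

	if (movable_ok)
		gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
	return gfp;
}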
khugepaged.c
884 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node) in khugepaged_alloc_page() argument
888 *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER); in khugepaged_alloc_page()
960 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node) in khugepaged_alloc_page() argument
1077 gfp_t gfp; in collapse_huge_page() local
1082 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE; in collapse_huge_page()
1091 new_page = khugepaged_alloc_page(hpage, gfp, node); in collapse_huge_page()
1097 if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) { in collapse_huge_page()
1684 gfp_t gfp; in collapse_file() local
1697 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE; in collapse_file()
1699 new_page = khugepaged_alloc_page(hpage, gfp, node); in collapse_file()
[all …]
zsmalloc.c
347 static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp) in cache_alloc_handle() argument
350 gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE|__GFP_CMA)); in cache_alloc_handle()
383 static void *zs_zpool_create(const char *name, gfp_t gfp, in zs_zpool_create() argument
400 static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp, in zs_zpool_malloc() argument
403 *handle = zs_malloc(pool, size, gfp); in zs_zpool_malloc()
1058 gfp_t gfp) in alloc_zspage() argument
1062 struct zspage *zspage = cache_alloc_zspage(pool, gfp); in alloc_zspage()
1073 page = alloc_page(gfp); in alloc_zspage()
1409 unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp) in zs_malloc() argument
1419 handle = cache_alloc_handle(pool, gfp); in zs_malloc()
[all …]
filemap.c
878 pgoff_t offset, gfp_t gfp, in __add_to_page_cache_locked() argument
895 error = mem_cgroup_charge(page, NULL, gfp); in __add_to_page_cache_locked()
901 gfp &= GFP_RECLAIM_MASK; in __add_to_page_cache_locked()
909 order, gfp); in __add_to_page_cache_locked()
941 } while (xas_nomem(&xas, gfp)); in __add_to_page_cache_locked()
1010 struct page *__page_cache_alloc(gfp_t gfp) in __page_cache_alloc() argument
1020 page = __alloc_pages_node(n, gfp, 0); in __page_cache_alloc()
1025 return alloc_pages(gfp, 0); in __page_cache_alloc()
3514 gfp_t gfp) in do_read_cache_page() argument
3521 page = __page_cache_alloc(gfp); in do_read_cache_page()
[all …]
page_alloc.c
5506 unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, in __alloc_pages_bulk() argument
5538 if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT)) in __alloc_pages_bulk()
5558 gfp &= gfp_allowed_mask; in __alloc_pages_bulk()
5559 alloc_gfp = gfp; in __alloc_pages_bulk()
5560 if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags)) in __alloc_pages_bulk()
5562 gfp = alloc_gfp; in __alloc_pages_bulk()
5569 !__cpuset_zone_allowed(zone, gfp)) { in __alloc_pages_bulk()
5581 alloc_flags, gfp)) { in __alloc_pages_bulk()
5620 prep_new_page(page, 0, gfp, 0); in __alloc_pages_bulk()
5641 page = __alloc_pages(gfp, 0, preferred_nid, nodemask); in __alloc_pages_bulk()
[all …]
memcontrol.c
2845 gfp_t gfp, bool new_page) in memcg_alloc_page_obj_cgroups() argument
2851 gfp &= ~OBJCGS_CLEAR_MASK; in memcg_alloc_page_obj_cgroups()
2852 vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp, in memcg_alloc_page_obj_cgroups()
3022 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp, in obj_cgroup_charge_pages() argument
3031 ret = try_charge_memcg(memcg, gfp, nr_pages); in obj_cgroup_charge_pages()
3043 if (gfp & __GFP_NOFAIL) { in obj_cgroup_charge_pages()
3064 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) in __memcg_kmem_charge_page() argument
3071 ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order); in __memcg_kmem_charge_page()
3275 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size) in obj_cgroup_charge() argument
3312 ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages); in obj_cgroup_charge()
[all …]
huge_memory.c
597 struct page *page, gfp_t gfp) in __do_huge_pmd_anonymous_page() argument
606 if (mem_cgroup_charge(page, vma->vm_mm, gfp)) { in __do_huge_pmd_anonymous_page()
612 cgroup_throttle_swaprate(page, gfp); in __do_huge_pmd_anonymous_page()
727 gfp_t gfp; in do_huge_pmd_anonymous_page() local
776 gfp = vma_thp_gfp_mask(vma); in do_huge_pmd_anonymous_page()
777 page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER); in do_huge_pmd_anonymous_page()
783 return __do_huge_pmd_anonymous_page(vmf, page, gfp); in do_huge_pmd_anonymous_page()
/mm/kfence/
kfence_test.c
238 static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocation_policy policy) in test_alloc() argument
259 kunit_info(test, "%s: size=%zu, gfp=%x, policy=%s, cache=%i\n", __func__, size, gfp, in test_alloc()
275 alloc = kmem_cache_alloc(test_cache, gfp); in test_alloc()
277 alloc = kmalloc(size, gfp); in test_alloc()
core.c
331 static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp, in kfence_guarded_alloc() argument
419 if (unlikely(slab_want_init_on_alloc(gfp, cache))) in kfence_guarded_alloc()
