/mm/ |
D | slob.c |
    191 static void *slob_new_pages(gfp_t gfp, int order, int node) in slob_new_pages()
    301 static void *slob_alloc(size_t size, gfp_t gfp, int align, int node, in slob_alloc()
    477 __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller) in __do_kmalloc_node()
    526 void *__kmalloc(size_t size, gfp_t gfp) in __kmalloc()
    532 void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller) in __kmalloc_track_caller()
    539 void *__kmalloc_node_track_caller(size_t size, gfp_t gfp, in __kmalloc_node_track_caller()
    643 void *__kmalloc_node(size_t size, gfp_t gfp, int node) in __kmalloc_node()
    649 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node) in kmem_cache_alloc_node()
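Every kmalloc-family entry point listed above for slob.c takes a gfp_t describing the allocation context. A minimal, illustrative sketch of how a caller chooses that mask (not code from slob.c; example_kmalloc_usage() is a hypothetical name):

    #include <linux/slab.h>
    #include <linux/gfp.h>
    #include <linux/errno.h>

    /* Sketch: GFP_KERNEL may sleep and reclaim, so it is only legal in
     * process context; GFP_ATOMIC never sleeps and is the usual choice
     * with a spinlock held or in interrupt context. */
    static int example_kmalloc_usage(void)
    {
            char *buf = kmalloc(128, GFP_KERNEL);   /* may block */
            char *irq_buf;

            if (!buf)
                    return -ENOMEM;

            irq_buf = kmalloc(64, GFP_ATOMIC);      /* never blocks */
            kfree(irq_buf);                         /* kfree(NULL) is a no-op */
            kfree(buf);
            return 0;
    }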
|
D | kmemleak.c |
    116 #define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC | \ argument
    423 static struct kmemleak_object *mem_pool_alloc(gfp_t gfp) in mem_pool_alloc()
    578 int min_count, gfp_t gfp) in create_object()
    794 static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp) in add_scan_area()
    902 gfp_t gfp) in kmemleak_alloc()
    921 gfp_t gfp) in kmemleak_alloc_percpu()
    947 void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp) in kmemleak_vmalloc()
    1096 void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) in kmemleak_scan_area()
    1133 gfp_t gfp) in kmemleak_alloc_phys()
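In kmemleak the gfp argument is only used, after masking with gfp_kmemleak_mask(), to allocate kmemleak's own metadata; kmemleak_alloc()/kmemleak_free() are normally invoked for you by the slab and vmalloc allocators. A hedged sketch of calling them by hand for memory kmemleak does not otherwise see (the raw page allocator is not instrumented); example_tracked_page()/example_tracked_page_free() are illustrative names, not kernel APIs:

    #include <linux/kmemleak.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* Sketch: register a page-allocator buffer with kmemleak; min_count = 1
     * means "report the object if no reference to it is found in a scan". */
    static void *example_tracked_page(void)
    {
            struct page *page = alloc_pages(GFP_KERNEL, 0);

            if (!page)
                    return NULL;

            kmemleak_alloc(page_address(page), PAGE_SIZE, 1, GFP_KERNEL);
            return page_address(page);
    }

    static void example_tracked_page_free(void *buf)
    {
            kmemleak_free(buf);
            free_pages((unsigned long)buf, 0);
    }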
|
D | percpu-km.c |
    42 int page_start, int page_end, gfp_t gfp) in pcpu_populate_chunk()
    53 static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp) in pcpu_create_chunk()
|
D | zbud.c |
    220 static struct zbud_pool *zbud_create_pool(gfp_t gfp, const struct zbud_ops *ops) in zbud_create_pool()
    268 static int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp, in zbud_alloc()
    539 static void *zbud_zpool_create(const char *name, gfp_t gfp, in zbud_zpool_create()
    558 static int zbud_zpool_malloc(void *pool, size_t size, gfp_t gfp, in zbud_zpool_malloc()
|
D | percpu-vm.c |
    84 gfp_t gfp) in pcpu_alloc_pages()
    277 int page_start, int page_end, gfp_t gfp) in pcpu_populate_chunk()
    333 static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp) in pcpu_create_chunk()
|
D | util.c |
    55 char *kstrdup(const char *s, gfp_t gfp) in kstrdup()
    82 const char *kstrdup_const(const char *s, gfp_t gfp) in kstrdup_const()
    101 char *kstrndup(const char *s, size_t max, gfp_t gfp) in kstrndup()
    128 void *kmemdup(const void *src, size_t len, gfp_t gfp) in kmemdup()
    148 char *kmemdup_nul(const char *s, size_t len, gfp_t gfp) in kmemdup_nul()
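util.c's duplication helpers simply forward the caller's gfp mask to the underlying kmalloc(). A short sketch of typical use (example_dup() is an illustrative name):

    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/errno.h>

    /* Sketch: duplicate a name and a binary blob. Both copies belong to the
     * caller and are released with kfree(); a kstrdup_const() result would
     * need kfree_const() instead. */
    static int example_dup(const char *name, const void *blob, size_t len)
    {
            char *name_copy = kstrdup(name, GFP_KERNEL);
            void *blob_copy = kmemdup(blob, len, GFP_KERNEL);

            if (!name_copy || !blob_copy) {
                    kfree(name_copy);
                    kfree(blob_copy);
                    return -ENOMEM;
            }

            /* ... use the copies ... */
            kfree(blob_copy);
            kfree(name_copy);
            return 0;
    }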
|
D | z3fold.c |
    213 gfp_t gfp) in alloc_slots()
    386 struct z3fold_pool *pool, gfp_t gfp) in init_z3fold_page()
    973 static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp, in z3fold_create_pool()
    1074 static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp, in z3fold_alloc()
    1712 static void *z3fold_zpool_create(const char *name, gfp_t gfp, in z3fold_zpool_create()
    1732 static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp, in z3fold_zpool_malloc()
|
D | zpool.c |
    156 struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp, in zpool_create_pool()
    275 int zpool_malloc(struct zpool *zpool, size_t size, gfp_t gfp, in zpool_malloc()
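zpool is the thin abstraction zswap uses over zbud, z3fold and zsmalloc; the gfp passed to zpool_create_pool() and zpool_malloc() is forwarded to whichever backend is selected. A hedged sketch, assuming the "zbud" backend is available (the NULL ops pointer just means no eviction callback; example_zpool() is an illustrative name):

    #include <linux/zpool.h>
    #include <linux/gfp.h>
    #include <linux/string.h>
    #include <linux/errno.h>

    /* Sketch: create a pool, stash 100 zeroed bytes in it, tear it down. */
    static int example_zpool(void)
    {
            unsigned long handle;
            struct zpool *pool;
            void *dst;
            int ret;

            pool = zpool_create_pool("zbud", "example", GFP_KERNEL, NULL);
            if (!pool)
                    return -ENOMEM;

            ret = zpool_malloc(pool, 100, GFP_KERNEL, &handle);
            if (ret)
                    goto out;

            dst = zpool_map_handle(pool, handle, ZPOOL_MM_WO);
            memset(dst, 0, 100);
            zpool_unmap_handle(pool, handle);
            zpool_free(pool, handle);
    out:
            zpool_destroy_pool(pool);
            return ret;
    }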
|
D | shmem.c |
    700 pgoff_t index, void *expected, gfp_t gfp, in shmem_add_to_page_cache()
    1516 static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp, in shmem_swapin()
    1556 static struct page *shmem_alloc_hugepage(gfp_t gfp, in shmem_alloc_hugepage()
    1580 static struct page *shmem_alloc_page(gfp_t gfp, in shmem_alloc_page()
    1593 static struct page *shmem_alloc_and_acct_page(gfp_t gfp, in shmem_alloc_and_acct_page()
    1637 static bool shmem_should_replace_page(struct page *page, gfp_t gfp) in shmem_should_replace_page()
    1642 static int shmem_replace_page(struct page **pagep, gfp_t gfp, in shmem_replace_page()
    1717 gfp_t gfp, struct vm_area_struct *vma, in shmem_swapin_page()
    1816 struct page **pagep, enum sgp_type sgp, gfp_t gfp, in shmem_getpage_gfp()
    2056 gfp_t gfp = mapping_gfp_mask(inode->i_mapping); in shmem_fault() local
    [all …]
|
D | vmpressure.c |
    241 void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, in vmpressure()
    342 void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio) in vmpressure_prio()
|
D | percpu.c |
    508 static void *pcpu_mem_zalloc(size_t size, gfp_t gfp) in pcpu_mem_zalloc()
    1441 static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp) in pcpu_alloc_chunk()
    1626 static bool pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, in pcpu_memcg_pre_alloc_hook()
    1691 pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, struct obj_cgroup **objcgp) in pcpu_memcg_pre_alloc_hook()
    1723 gfp_t gfp) in pcpu_alloc()
    1935 void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) in __alloc_percpu_gfp()
    2044 const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN; in pcpu_balance_populated() local
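pcpu_alloc() is the backend of the public per-CPU allocator; the gfp it receives decides, among other things, whether the atomic, pre-populated path is taken. A minimal sketch of the public API, assuming process context (example_percpu() is an illustrative name):

    #include <linux/percpu.h>
    #include <linux/gfp.h>
    #include <linux/errno.h>

    /* Sketch: alloc_percpu_gfp() expands to __alloc_percpu_gfp() with the
     * type's size and alignment; this_cpu_inc() bumps the copy belonging
     * to the CPU we are currently running on. */
    static int example_percpu(void)
    {
            unsigned long __percpu *counter =
                    alloc_percpu_gfp(unsigned long, GFP_KERNEL);

            if (!counter)
                    return -ENOMEM;

            this_cpu_inc(*counter);
            free_percpu(counter);
            return 0;
    }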
|
D | slab.c |
    534 int batchcount, gfp_t gfp) in alloc_arraycache()
    608 int limit, gfp_t gfp) in alloc_alien_cache()
    645 int batch, gfp_t gfp) in __alloc_alien_cache()
    659 static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp) in alloc_alien_cache()
    818 static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp) in init_cache_node()
    884 int node, gfp_t gfp, bool force_change) in setup_kmem_cache_node()
    1754 static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) in setup_cpu_cache()
    1925 gfp_t gfp; in __kmem_cache_create() local
    3811 static int setup_kmem_cache_nodes(struct kmem_cache *cachep, gfp_t gfp) in setup_kmem_cache_nodes()
    3846 int batchcount, int shared, gfp_t gfp) in do_tune_cpucache()
    [all …]
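The slab.c helpers above are internal plumbing, but the file implements the public kmem_cache API whose allocation side takes the same gfp_t. A hedged sketch of NUMA-aware allocation from a named cache (struct example_obj and example_cache_alloc() are illustrative names):

    #include <linux/slab.h>
    #include <linux/gfp.h>
    #include <linux/errno.h>

    struct example_obj {
            int id;
            char payload[56];
    };

    /* Sketch: create a cache of fixed-size objects and allocate one on a
     * given NUMA node; NUMA_NO_NODE would mean "no preference". */
    static int example_cache_alloc(int node)
    {
            struct kmem_cache *cache;
            struct example_obj *obj;

            cache = kmem_cache_create("example_obj", sizeof(struct example_obj),
                                      0, SLAB_HWCACHE_ALIGN, NULL);
            if (!cache)
                    return -ENOMEM;

            obj = kmem_cache_alloc_node(cache, GFP_KERNEL | __GFP_ZERO, node);
            if (obj)
                    kmem_cache_free(cache, obj);

            kmem_cache_destroy(cache);
            return obj ? 0 : -ENOMEM;
    }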
|
D | backing-dev.c |
    281 gfp_t gfp) in wb_init()
    450 struct cgroup_subsys_state *memcg_css, gfp_t gfp) in cgwb_create()
    602 gfp_t gfp) in wb_get_create()
|
D | mempolicy.c |
    1771 nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) in policy_nodemask()
    1794 static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd) in policy_node()
    2034 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, in alloc_page_interleave()
    2051 static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order, in alloc_pages_preferred_many()
    2088 struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, in alloc_pages_vma()
    2175 struct page *alloc_pages(gfp_t gfp, unsigned order) in alloc_pages()
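alloc_pages() and alloc_pages_vma() in mempolicy.c apply the task's or the VMA's NUMA policy before falling through to the page allocator; the gfp mask still describes the allocation constraints. A minimal, illustrative sketch of allocating and freeing an order-1 (two-page) block (example_alloc_pages() is a hypothetical name):

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/errno.h>

    /* Sketch: grab two physically contiguous, zeroed pages and release them. */
    static int example_alloc_pages(void)
    {
            struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 1);
            void *addr;

            if (!page)
                    return -ENOMEM;

            addr = page_address(page);      /* kernel mapping of the block */
            /* ... use addr ... */
            __free_pages(page, 1);
            return 0;
    }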
|
D | slab.h |
    409 struct kmem_cache *s, gfp_t gfp, in memcg_alloc_page_obj_cgroups()
    452 gfp_t gfp) in account_slab_page()
    630 unsigned int count, gfp_t gfp) in cache_random_seq_create()
|
D | zswap.c |
    275 static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp) in zswap_entry_cache_alloc()
    589 gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM; in zswap_pool_create() local
    1095 gfp_t gfp; in zswap_frontswap_store() local
|
D | khugepaged.c |
    884 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node) in khugepaged_alloc_page()
    960 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node) in khugepaged_alloc_page()
    1077 gfp_t gfp; in collapse_huge_page() local
    1684 gfp_t gfp; in collapse_file() local
|
D | secretmem.c | 55 gfp_t gfp = vmf->gfp_mask; in secretmem_fault() local
|
D | zsmalloc.c |
    347 static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp) in cache_alloc_handle()
    383 static void *zs_zpool_create(const char *name, gfp_t gfp, in zs_zpool_create()
    400 static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp, in zs_zpool_malloc()
    1058 gfp_t gfp) in alloc_zspage()
    1409 unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp) in zs_malloc()
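zs_malloc() hands back an opaque handle rather than a pointer, so the object has to be mapped before it can be touched. A hedged sketch, assuming CONFIG_ZSMALLOC and a kernel of this vintage (example_zsmalloc() is an illustrative name):

    #include <linux/zsmalloc.h>
    #include <linux/gfp.h>
    #include <linux/string.h>
    #include <linux/errno.h>

    /* Sketch: store 64 zeroed bytes in a zsmalloc pool, then tear it down. */
    static int example_zsmalloc(void)
    {
            struct zs_pool *pool = zs_create_pool("example");
            unsigned long handle;
            void *obj;

            if (!pool)
                    return -ENOMEM;

            handle = zs_malloc(pool, 64, GFP_KERNEL);
            if (!handle) {
                    zs_destroy_pool(pool);
                    return -ENOMEM;
            }

            obj = zs_map_object(pool, handle, ZS_MM_WO);
            memset(obj, 0, 64);
            zs_unmap_object(pool, handle);

            zs_free(pool, handle);
            zs_destroy_pool(pool);
            return 0;
    }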
|
D | filemap.c |
    879 pgoff_t offset, gfp_t gfp, in __add_to_page_cache_locked()
    1011 struct page *__page_cache_alloc(gfp_t gfp) in __page_cache_alloc()
    3512 gfp_t gfp) in do_read_cache_page()
    3658 gfp_t gfp) in read_cache_page_gfp()
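read_cache_page_gfp() is the public wrapper around do_read_cache_page(); the gfp it receives is used when the page has to be allocated and inserted into the page cache. A hedged sketch of a caller (example_read_page() is an illustrative name):

    #include <linux/pagemap.h>
    #include <linux/gfp.h>
    #include <linux/err.h>

    /* Sketch: read page @index of a mapping, honouring the mapping's own
     * gfp restrictions via mapping_gfp_constraint(). */
    static struct page *example_read_page(struct address_space *mapping,
                                          pgoff_t index)
    {
            gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);

            /* Returns a referenced, uptodate page or an ERR_PTR(); the
             * caller drops the reference with put_page() when done. */
            return read_cache_page_gfp(mapping, index, gfp);
    }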
|
D | memcontrol.c |
    2865 gfp_t gfp, bool new_page) in memcg_alloc_page_obj_cgroups()
    3042 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp, in obj_cgroup_charge_pages()
    3084 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) in __memcg_kmem_charge_page()
    3295 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size) in obj_cgroup_charge()
    4522 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) in memcg_wb_domain_init()
    4716 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) in memcg_wb_domain_init()
    6809 static int charge_memcg(struct page *page, struct mem_cgroup *memcg, gfp_t gfp) in charge_memcg()
    6869 gfp_t gfp, swp_entry_t entry) in mem_cgroup_swapin_charge_page()
|
D | swap_state.c | 101 gfp_t gfp, void **shadowp) in add_to_swap_cache()
|
D | slab_common.c | 1022 gfp_t gfp) in cache_random_seq_create()
|
/mm/kfence/ |
D | kfence_test.c | 238 static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocation_policy policy) in test_alloc()
|
D | core.c | 331 static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp, in kfence_guarded_alloc()
|