Searched refs:gfp (Results 1 – 17 of 17) sorted by relevance

/mm/
slob.c
190 static void *slob_new_pages(gfp_t gfp, int order, int node) in slob_new_pages() argument
196 page = alloc_pages_exact_node(node, gfp, order); in slob_new_pages()
199 page = alloc_pages(gfp, order); in slob_new_pages()
268 static void *slob_alloc(size_t size, gfp_t gfp, int align, int node) in slob_alloc() argument
316 b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node); in slob_alloc()
332 if (unlikely((gfp & __GFP_ZERO) && b)) in slob_alloc()
427 __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller) in __do_kmalloc_node() argument
433 gfp &= gfp_allowed_mask; in __do_kmalloc_node()
435 lockdep_trace_alloc(gfp); in __do_kmalloc_node()
441 m = slob_alloc(size + align, gfp, align, node); in __do_kmalloc_node()
[all …]
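
The slob_alloc() hits show a recurring gfp idiom: __GFP_ZERO is masked off before the page-level allocation, and the zeroing is done by the caller only when the flag was set and the allocation succeeded. A minimal sketch of that idiom (my_alloc() is hypothetical; size is assumed to fit in one page):

    #include <linux/gfp.h>
    #include <linux/string.h>

    static void *my_alloc(size_t size, gfp_t gfp)
    {
            /* the backend must not see __GFP_ZERO; we zero ourselves */
            void *b = (void *)__get_free_page(gfp & ~__GFP_ZERO);

            /* zero only on success, and only if the caller asked */
            if (unlikely((gfp & __GFP_ZERO) && b))
                    memset(b, 0, size);
            return b;
    }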

zbud.c
133 static void *zbud_zpool_create(char *name, gfp_t gfp, in zbud_zpool_create() argument
136 return zbud_create_pool(gfp, &zbud_zpool_ops); in zbud_zpool_create()
144 static int zbud_zpool_malloc(void *pool, size_t size, gfp_t gfp, in zbud_zpool_malloc() argument
147 return zbud_alloc(pool, size, gfp, handle); in zbud_zpool_malloc()
290 struct zbud_pool *zbud_create_pool(gfp_t gfp, struct zbud_ops *ops) in zbud_create_pool() argument
295 pool = kmalloc(sizeof(struct zbud_pool), gfp); in zbud_create_pool()
338 int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp, in zbud_alloc() argument
346 if (!size || (gfp & __GFP_HIGHMEM)) in zbud_alloc()
370 page = alloc_page(gfp); in zbud_alloc()
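
zbud_alloc() rejects __GFP_HIGHMEM because the pool needs pages it can address directly. A hedged usage sketch of this era's zbud API, from creation to teardown (the allocation size and the NULL eviction ops are illustrative):

    #include <linux/zbud.h>
    #include <linux/gfp.h>
    #include <linux/errno.h>

    static int zbud_demo(void)
    {
            struct zbud_pool *pool;
            unsigned long handle;
            int err;

            pool = zbud_create_pool(GFP_KERNEL, NULL);  /* no eviction ops */
            if (!pool)
                    return -ENOMEM;

            /* gfp must not include __GFP_HIGHMEM, per the check above */
            err = zbud_alloc(pool, 100, GFP_KERNEL, &handle);
            if (!err)
                    zbud_free(pool, handle);

            zbud_destroy_pool(pool);
            return err;
    }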

zpool.c
144 struct zpool *zpool_create_pool(char *type, char *name, gfp_t gfp, in zpool_create_pool() argument
164 zpool = kmalloc(sizeof(*zpool), gfp); in zpool_create_pool()
173 zpool->pool = driver->create(name, gfp, ops); in zpool_create_pool()
246 int zpool_malloc(struct zpool *zpool, size_t size, gfp_t gfp, in zpool_malloc() argument
249 return zpool->driver->malloc(zpool->pool, size, gfp, handle); in zpool_malloc()
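
zpool is the generic front end that the zbud and zsmalloc entries in this list implement; the caller picks a backend by name and gfp is passed through every call. A usage sketch under the signatures shown above (the backend and pool names are illustrative):

    #include <linux/zpool.h>
    #include <linux/gfp.h>
    #include <linux/errno.h>

    static int zpool_demo(void)
    {
            struct zpool *zp;
            unsigned long handle;
            int err;

            /* "zbud" backend assumed available */
            zp = zpool_create_pool("zbud", "demo", GFP_KERNEL, NULL);
            if (!zp)
                    return -ENOMEM;

            err = zpool_malloc(zp, 128, GFP_KERNEL, &handle);
            if (!err)
                    zpool_free(zp, handle);

            zpool_destroy_pool(zp);
            return err;
    }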

util.c
24 char *kstrdup(const char *s, gfp_t gfp) in kstrdup() argument
33 buf = kmalloc_track_caller(len, gfp); in kstrdup()
46 char *kstrndup(const char *s, size_t max, gfp_t gfp) in kstrndup() argument
55 buf = kmalloc_track_caller(len+1, gfp); in kstrndup()
71 void *kmemdup(const void *src, size_t len, gfp_t gfp) in kmemdup() argument
75 p = kmalloc_track_caller(len, gfp); in kmemdup()
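
All three helpers simply forward the caller's gfp to kmalloc_track_caller(), so the caller decides the allocation context. Illustrative callers (function names are hypothetical):

    #include <linux/slab.h>
    #include <linux/string.h>

    /* GFP_KERNEL may sleep; pass GFP_ATOMIC from atomic context */
    static char *copy_label(const char *label)
    {
            return kstrdup(label, GFP_KERNEL);
    }

    static void *copy_blob(const void *src, size_t len)
    {
            return kmemdup(src, len, GFP_KERNEL);
    }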

shmem.c
119 static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
120 static int shmem_replace_page(struct page **pagep, gfp_t gfp,
123 struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);
623 gfp_t gfp; in shmem_unuse_inode() local
640 gfp = mapping_gfp_mask(mapping); in shmem_unuse_inode()
641 if (shmem_should_replace_page(*pagep, gfp)) { in shmem_unuse_inode()
643 error = shmem_replace_page(pagep, gfp, info, index); in shmem_unuse_inode()
879 static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp, in shmem_swapin() argument
892 page = swapin_readahead(swap, gfp, &pvma, 0); in shmem_swapin()
900 static struct page *shmem_alloc_page(gfp_t gfp, in shmem_alloc_page() argument
[all …]
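
shmem does not hard-code a mask; it reads the per-mapping one with mapping_gfp_mask() and passes it down, as the shmem_unuse_inode() hit shows. A minimal sketch of that lookup feeding a page-cache allocation (alloc_for_mapping() is hypothetical):

    #include <linux/pagemap.h>
    #include <linux/gfp.h>

    static struct page *alloc_for_mapping(struct address_space *mapping)
    {
            /* honor the mask the mapping's owner configured */
            gfp_t gfp = mapping_gfp_mask(mapping);

            return __page_cache_alloc(gfp);
    }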

vmpressure.c
211 void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, in vmpressure() argument
227 if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS))) in vmpressure()
263 void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio) in vmpressure_prio() argument
279 vmpressure(gfp, memcg, vmpressure_win, 0); in vmpressure_prio()
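
The test above is vmpressure's accounting filter: reclaim that could not use highmem or movable zones, nor do I/O or filesystem writeback, had little chance to free memory and is not counted. The same filter written as a stand-alone predicate (the name is hypothetical):

    #include <linux/gfp.h>
    #include <linux/types.h>

    static bool counts_for_vmpressure(gfp_t gfp)
    {
            return gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS);
    }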

kmemleak.c
117 #define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \ argument
519 int min_count, gfp_t gfp) in create_object() argument
525 object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp)); in create_object()
739 static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp) in add_scan_area() argument
752 area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp)); in add_scan_area()
895 gfp_t gfp) in kmemleak_alloc() argument
900 create_object((unsigned long)ptr, size, min_count, gfp); in kmemleak_alloc()
916 gfp_t gfp) in kmemleak_alloc_percpu() argument
929 size, 0, gfp); in kmemleak_alloc_percpu()
1078 void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) in kmemleak_scan_area() argument
[all …]
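
gfp_kmemleak_mask() clamps the caller's flags to a known-safe subset before kmemleak allocates its own metadata. The macro's continuation is truncated in the hit above, so the ORed-in flag below is an assumption, not the verbatim kernel definition; the clamp-then-add idiom is the point:

    #include <linux/gfp.h>

    /* keep only the caller's basic context, then add our own policy */
    #define meta_gfp_mask(gfp) \
            (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | __GFP_NOWARN)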

mempolicy.c
1680 static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) in policy_nodemask() argument
1684 apply_policy_zone(policy, gfp_zone(gfp)) && in policy_nodemask()
1692 static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy, in policy_zonelist() argument
1707 if (unlikely(gfp & __GFP_THISNODE) && in policy_zonelist()
1714 return node_zonelist(nd, gfp); in policy_zonelist()
1967 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, in alloc_page_interleave() argument
1973 zl = node_zonelist(nid, gfp); in alloc_page_interleave()
1974 page = __alloc_pages(gfp, order, zl); in alloc_page_interleave()
2004 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, in alloc_pages_vma() argument
2020 page = alloc_page_interleave(gfp, order, nid); in alloc_pages_vma()
[all …]
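
alloc_page_interleave() resolves the chosen node to its zonelist and then allocates from it. A minimal sketch of that two-step pattern (node selection elided; alloc_on_node() is hypothetical):

    #include <linux/gfp.h>
    #include <linux/mmzone.h>

    static struct page *alloc_on_node(gfp_t gfp, unsigned int order, int nid)
    {
            /* map the node id to its zonelist for this gfp zone */
            struct zonelist *zl = node_zonelist(nid, gfp);

            return __alloc_pages(gfp, order, zl);
    }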

slab.c
250 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
679 int batchcount, gfp_t gfp) in alloc_arraycache() argument
684 ac = kmalloc_node(memsize, gfp, node); in alloc_arraycache()
834 int limit, gfp_t gfp) in alloc_alien_cache() argument
866 int batch, gfp_t gfp) in __alloc_alien_cache() argument
871 alc = kmalloc_node(memsize, gfp, node); in __alloc_alien_cache()
877 static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp) in alloc_alien_cache() argument
885 alc_ptr = kzalloc_node(memsize, gfp, node); in alloc_alien_cache()
892 alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp); in alloc_alien_cache()
2008 static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) in setup_cpu_cache() argument
[all …]

slab.h
232 gfp_t gfp, int order) in memcg_charge_slab() argument
238 return __memcg_charge_slab(s, gfp, order); in memcg_charge_slab()
277 static inline int memcg_charge_slab(struct kmem_cache *s, gfp_t gfp, int order) in memcg_charge_slab() argument

zswap.c
220 static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp) in zswap_entry_cache_alloc() argument
223 entry = kmem_cache_alloc(zswap_entry_cache, gfp); in zswap_entry_cache_alloc()
903 gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN; in init_zswap() local
910 zswap_pool = zpool_create_pool(zswap_zpool_type, "zswap", gfp, in init_zswap()
915 zswap_pool = zpool_create_pool(zswap_zpool_type, "zswap", gfp, in init_zswap()
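
init_zswap() builds a deliberately soft mask: __GFP_NORETRY gives up rather than retry hard, and __GFP_NOWARN suppresses the failure warning. A sketch applying the same modifiers on top of an ordinary sleeping allocation (kmalloc here is illustrative, not what init_zswap calls):

    #include <linux/slab.h>
    #include <linux/gfp.h>

    /* opportunistic allocation: fail fast, no console noise */
    static void *soft_alloc(size_t size)
    {
            return kmalloc(size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
    }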

filemap.c
643 struct page *__page_cache_alloc(gfp_t gfp) in __page_cache_alloc() argument
653 page = alloc_pages_exact_node(n, gfp, 0); in __page_cache_alloc()
658 return alloc_pages(gfp, 0); in __page_cache_alloc()
2145 gfp_t gfp) in __read_cache_page() argument
2152 page = __page_cache_alloc(gfp | __GFP_COLD); in __read_cache_page()
2155 err = add_to_page_cache_lru(page, mapping, index, gfp); in __read_cache_page()
2178 gfp_t gfp) in do_read_cache_page() argument
2185 page = __read_cache_page(mapping, index, filler, data, gfp); in do_read_cache_page()
2249 gfp_t gfp) in read_cache_page_gfp() argument
2253 return do_read_cache_page(mapping, index, filler, NULL, gfp); in read_cache_page_gfp()
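
read_cache_page_gfp() exists so callers can bound what the page-cache read may do; a common reason is dropping __GFP_FS to avoid re-entering the filesystem. A hedged sketch (read_nofs() is hypothetical):

    #include <linux/pagemap.h>
    #include <linux/gfp.h>

    static struct page *read_nofs(struct address_space *mapping, pgoff_t index)
    {
            /* start from the mapping's mask, forbid fs recursion */
            gfp_t gfp = mapping_gfp_mask(mapping) & ~__GFP_FS;

            return read_cache_page_gfp(mapping, index, gfp);
    }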

percpu-vm.c
85 const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD; in pcpu_alloc_pages() local
93 *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0); in pcpu_alloc_pages()
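
pcpu_alloc_pages() asks for cache-cold highmem pages on each CPU's home node. The same request for a single CPU (backing_page_for() is hypothetical):

    #include <linux/gfp.h>
    #include <linux/topology.h>

    static struct page *backing_page_for(int cpu)
    {
            const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;

            /* allocate on the node this CPU belongs to */
            return alloc_pages_node(cpu_to_node(cpu), gfp, 0);
    }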

percpu.c
871 gfp_t gfp) in pcpu_alloc() argument
876 bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL; in pcpu_alloc()
1029 kmemleak_alloc_percpu(ptr, size, gfp); in pcpu_alloc()
1065 void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) in __alloc_percpu_gfp() argument
1067 return pcpu_alloc(size, align, false, gfp); in __alloc_percpu_gfp()
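
pcpu_alloc() treats any mask weaker than a full GFP_KERNEL as atomic, which is what lets __alloc_percpu_gfp() be called from contexts that cannot sleep. A sketch of an atomic per-CPU allocation (make_counters() is hypothetical):

    #include <linux/percpu.h>
    #include <linux/gfp.h>

    static u64 __percpu *make_counters(void)
    {
            /* GFP_ATOMIC lacks the GFP_KERNEL bits, so this takes
             * pcpu_alloc()'s non-sleeping path */
            return __alloc_percpu_gfp(sizeof(u64), __alignof__(u64),
                                      GFP_ATOMIC);
    }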

zsmalloc.c
316 static void *zs_zpool_create(char *name, gfp_t gfp, struct zpool_ops *zpool_ops) in zs_zpool_create() argument
318 return zs_create_pool(name, gfp); in zs_zpool_create()
326 static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp, in zs_zpool_malloc() argument

memcontrol.c
2807 static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size) in memcg_charge_kmem() argument
2816 ret = try_charge(memcg, gfp, size >> PAGE_SHIFT); in memcg_charge_kmem()
3125 int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order) in __memcg_charge_slab() argument
3129 res = memcg_charge_kmem(cachep->memcg_params->memcg, gfp, in __memcg_charge_slab()
3156 gfp_t gfp) in __memcg_kmem_get_cache() argument
3218 __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order) in __memcg_kmem_newpage_charge() argument
3260 ret = memcg_charge_kmem(memcg, gfp, PAGE_SIZE << order); in __memcg_kmem_newpage_charge()

rmap.c
117 static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp) in anon_vma_chain_alloc() argument
119 return kmem_cache_alloc(anon_vma_chain_cachep, gfp); in anon_vma_chain_alloc()