/kernel/linux/linux-5.10/include/linux/

gfp.h
  516  __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
  520  __alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid)  in __alloc_pages() argument
  522  return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL);  in __alloc_pages()
  530  __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)  in __alloc_pages_node() argument
  533  VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid));  in __alloc_pages_node()
  535  return __alloc_pages(gfp_mask, order, nid);  in __alloc_pages_node()
  543  static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,  in alloc_pages_node() argument
  549  return __alloc_pages_node(nid, gfp_mask, order);  in __alloc_pages_node()
  553  extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);
  556  alloc_pages(gfp_t gfp_mask, unsigned int order)  in alloc_pages() argument
  [all …]

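The helpers above all funnel into __alloc_pages_nodemask(); most callers only ever touch alloc_pages() or alloc_pages_node(). A minimal usage sketch, not taken from the listing (the order and flags are illustrative):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/string.h>

static struct page *example_pages;

static int example_alloc(void)
{
	/* two contiguous, zeroed pages (order 1); GFP_KERNEL may sleep */
	example_pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!example_pages)
		return -ENOMEM;

	/* lowmem pages can be addressed directly */
	memset(page_address(example_pages), 0xab, 2 * PAGE_SIZE);
	return 0;
}

static void example_free(void)
{
	__free_pages(example_pages, 1);	/* order must match the allocation */
}
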
cpuset.h
  67   extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);
  69   static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)  in cpuset_node_allowed() argument
  72   return __cpuset_node_allowed(node, gfp_mask);  in cpuset_node_allowed()
  76   static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)  in __cpuset_zone_allowed() argument
  78   return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);  in __cpuset_zone_allowed()
  81   static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)  in cpuset_zone_allowed() argument
  84   return __cpuset_zone_allowed(z, gfp_mask);  in cpuset_zone_allowed()
  207  static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)  in cpuset_node_allowed() argument
  212  static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)  in __cpuset_zone_allowed() argument
  217  static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)  in cpuset_zone_allowed() argument

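cpuset_zone_allowed() is how allocation paths ask whether the current task's cpuset permits taking memory from a given zone; the second block of stubs (lines 207-217) is the !CONFIG_CPUSETS case, which always says yes. A hedged sketch of that kind of check, not the page allocator's real fastpath:

#include <linux/cpuset.h>
#include <linux/gfp.h>
#include <linux/mmzone.h>

/* Return the first zone the current task's cpuset allows for this gfp_mask. */
static struct zone *first_allowed_zone(gfp_t gfp_mask)
{
	struct zone *zone;

	for_each_zone(zone)
		if (cpuset_zone_allowed(zone, gfp_mask))
			return zone;
	return NULL;
}
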
mempool.h
  13   typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data);
  36   gfp_t gfp_mask, int node_id);
  44   gfp_t gfp_mask, int nid);
  48   extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc;
  56   void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
  77   void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data);
  96   void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data);

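mempool_alloc() forwards gfp_mask to the backing allocator and only dips into the pool's reserved elements when that allocation fails, which is what guarantees forward progress on I/O paths. A hedged sketch using the slab-backed helpers declared above (the cache name and element size are invented for illustration):

#include <linux/mempool.h>
#include <linux/slab.h>

static struct kmem_cache *example_cache;	/* hypothetical element cache */
static mempool_t *example_pool;

static int example_pool_init(void)
{
	example_cache = kmem_cache_create("example_elem", 256, 0, 0, NULL);
	if (!example_cache)
		return -ENOMEM;

	/* keep at least four elements in reserve */
	example_pool = mempool_create(4, mempool_alloc_slab,
				      mempool_free_slab, example_cache);
	if (!example_pool) {
		kmem_cache_destroy(example_cache);
		return -ENOMEM;
	}
	return 0;
}

static void example_pool_use(void)
{
	/* GFP_NOIO: reclaim must not issue I/O, so the reserve may be tapped */
	void *elem = mempool_alloc(example_pool, GFP_NOIO);

	if (elem)
		mempool_free(elem, example_pool);
}
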
blk-crypto.h
  86   gfp_t gfp_mask);
  115  int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask);
  128  gfp_t gfp_mask)  in bio_crypt_clone() argument
  131  return __bio_crypt_clone(dst, src, gfp_mask);  in bio_crypt_clone()

page_owner.h
  13   unsigned int order, gfp_t gfp_mask);
  28   unsigned int order, gfp_t gfp_mask)  in set_page_owner() argument
  31   __set_page_owner(page, order, gfp_mask);  in set_page_owner()
  59   unsigned int order, gfp_t gfp_mask)  in set_page_owner() argument

swap.h
  360  gfp_t gfp_mask, nodemask_t *mask);
  364  gfp_t gfp_mask,
  367  gfp_t gfp_mask, bool noswap,
  391  extern unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
  546  static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)  in add_swap_count_continuation() argument
  569  gfp_t gfp_mask, struct vm_fault *vmf)  in swap_cluster_readahead() argument
  574  static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,  in swapin_readahead() argument
  609  gfp_t gfp_mask, void **shadowp)  in add_to_swap_cache() argument
  695  extern void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask);
  697  static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)  in cgroup_throttle_swaprate() argument

/kernel/linux/linux-5.10/block/

blk-lib.c
  26   sector_t nr_sects, gfp_t gfp_mask, int flags,  in __blkdev_issue_discard() argument
  97   bio = blk_next_bio(bio, 0, gfp_mask);  in __blkdev_issue_discard()
  132  sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)  in blkdev_issue_discard() argument
  139  ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,  in blkdev_issue_discard()
  166  sector_t nr_sects, gfp_t gfp_mask, struct page *page,  in __blkdev_issue_write_same() argument
  191  bio = blk_next_bio(bio, 1, gfp_mask);  in __blkdev_issue_write_same()
  227  sector_t nr_sects, gfp_t gfp_mask,  in blkdev_issue_write_same() argument
  235  ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,  in blkdev_issue_write_same()
  247  sector_t sector, sector_t nr_sects, gfp_t gfp_mask,  in __blkdev_issue_write_zeroes() argument
  267  bio = blk_next_bio(bio, 0, gfp_mask);  in __blkdev_issue_write_zeroes()
  [all …]

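blkdev_issue_discard() and its write-same/write-zeroes siblings only use gfp_mask for the bios they build through blk_next_bio() before submitting and waiting. An illustrative call (the device pointer and range are placeholders obtained elsewhere):

#include <linux/blkdev.h>

/* Discard 1 MiB starting at 512-byte sector 2048. */
static int example_discard(struct block_device *bdev)
{
	sector_t start = 2048;
	sector_t nr_sects = (1024 * 1024) >> 9;

	return blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL, 0);
}
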
blk-map.c
  22   gfp_t gfp_mask)  in bio_alloc_map_data() argument
  29   bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);  in bio_alloc_map_data()
  131  struct iov_iter *iter, gfp_t gfp_mask)  in bio_copy_user_iov() argument
  141  bmd = bio_alloc_map_data(iter, gfp_mask);  in bio_copy_user_iov()
  158  bio = bio_kmalloc(gfp_mask, nr_pages);  in bio_copy_user_iov()
  186  page = alloc_page(rq->q->bounce_gfp | gfp_mask);  in bio_copy_user_iov()
  244  gfp_t gfp_mask)  in bio_map_user_iov() argument
  254  bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));  in bio_map_user_iov()
  382  unsigned int len, gfp_t gfp_mask)  in bio_map_kern() argument
  393  bio = bio_kmalloc(gfp_mask, nr_pages);  in bio_map_kern()
  [all …]

blk-crypto.c
  82   const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask)  in bio_crypt_set_ctx() argument
  90   WARN_ON_ONCE(!(gfp_mask & __GFP_DIRECT_RECLAIM));  in bio_crypt_set_ctx()
  92   bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);  in bio_crypt_set_ctx()
  106  int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)  in __bio_crypt_clone() argument
  108  dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);  in __bio_crypt_clone()
  295  gfp_t gfp_mask)  in __blk_crypto_rq_bio_prep() argument
  298  rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);  in __blk_crypto_rq_bio_prep()

/kernel/linux/linux-5.10/mm/

mempool.c
  180  gfp_t gfp_mask, int node_id)  in mempool_init_node() argument
  190  gfp_mask, node_id);  in mempool_init_node()
  200  element = pool->alloc(gfp_mask, pool->pool_data);  in mempool_init_node()
  261  gfp_t gfp_mask, int node_id)  in mempool_create_node() argument
  265  pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);  in mempool_create_node()
  270  gfp_mask, node_id)) {  in mempool_create_node()
  373  void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)  in mempool_alloc() argument
  380  VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);  in mempool_alloc()
  381  might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);  in mempool_alloc()
  383  gfp_mask |= __GFP_NOMEMALLOC; /* don't allocate emergency reserves */  in mempool_alloc()
  [all …]

page_owner.c
  26   gfp_t gfp_mask;  member
  170  unsigned int order, gfp_t gfp_mask)  in __set_page_owner_handle() argument
  179  page_owner->gfp_mask = gfp_mask;  in __set_page_owner_handle()
  191  gfp_t gfp_mask)  in __set_page_owner() argument
  199  handle = save_stack(gfp_mask);  in __set_page_owner()
  200  __set_page_owner_handle(page, page_ext, handle, order, gfp_mask);  in __set_page_owner()
  243  new_page_owner->gfp_mask = old_page_owner->gfp_mask;  in __copy_page_owner()
  326  page_mt = gfp_migratetype(page_owner->gfp_mask);  in pagetypeinfo_showmixedcount_print()
  364  page_owner->order, page_owner->gfp_mask,  in print_page_owner()
  365  &page_owner->gfp_mask, page_owner->pid,  in print_page_owner()
  [all …]

page_alloc.c
  3548  static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)  in __should_fail_alloc_page() argument
  3552  if (gfp_mask & __GFP_NOFAIL)  in __should_fail_alloc_page()
  3554  if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))  in __should_fail_alloc_page()
  3557  (gfp_mask & __GFP_DIRECT_RECLAIM))  in __should_fail_alloc_page()
  3588  static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)  in __should_fail_alloc_page() argument
  3595  noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)  in should_fail_alloc_page() argument
  3597  return __should_fail_alloc_page(gfp_mask, order);  in should_fail_alloc_page()
  3703  unsigned int alloc_flags, gfp_t gfp_mask)  in zone_watermark_fast() argument
  3735  if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost  in zone_watermark_fast()
  3779  alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)  in alloc_flags_nofragment() argument
  [all …]

vmscan.c
  473   static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,  in shrink_slab_memcg() argument
  493   .gfp_mask = gfp_mask,  in shrink_slab_memcg()
  548   static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,  in shrink_slab_memcg() argument
  575   unsigned long shrink_slab(gfp_t gfp_mask, int nid,  in shrink_slab() argument
  590   return shrink_slab_memcg(gfp_mask, nid, memcg, priority);  in shrink_slab()
  597   .gfp_mask = gfp_mask,  in shrink_slab()
  1054  may_enter_fs = (sc->gfp_mask & __GFP_FS) ||  in shrink_page_list()
  1055  (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));  in shrink_page_list()
  1182  if (!(sc->gfp_mask & __GFP_IO))  in shrink_page_list()
  1344  if (!try_to_release_page(page, sc->gfp_mask))  in shrink_page_list()
  [all …]

swap_state.c
  451  struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,  in __read_swap_cache_async() argument
  493  page = alloc_page_vma(gfp_mask, vma, addr);  in __read_swap_cache_async()
  526  if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) {  in __read_swap_cache_async()
  531  if (mem_cgroup_charge(page, NULL, gfp_mask)) {  in __read_swap_cache_async()
  557  struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,  in read_swap_cache_async() argument
  561  struct page *retpage = __read_swap_cache_async(entry, gfp_mask,  in read_swap_cache_async()
  649  struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,  in swap_cluster_readahead() argument
  688  gfp_mask, vma, addr, &page_allocated);  in swap_cluster_readahead()
  704  return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);  in swap_cluster_readahead()
  832  static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,  in swap_vma_readahead() argument
  [all …]

/kernel/linux/linux-5.10/fs/btrfs/

ulist.h
  48  struct ulist *ulist_alloc(gfp_t gfp_mask);
  50  int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask);
  52  u64 *old_aux, gfp_t gfp_mask);
  57  void **old_aux, gfp_t gfp_mask)  in ulist_add_merge_ptr() argument
  61  int ret = ulist_add_merge(ulist, val, (uintptr_t)aux, &old64, gfp_mask);  in ulist_add_merge_ptr()
  65  return ulist_add_merge(ulist, val, (u64)aux, (u64 *)old_aux, gfp_mask);  in ulist_add_merge_ptr()

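ulist is btrfs's small set-of-u64 helper; both ulist_alloc() and every ulist_add() take a gfp_mask because nodes are allocated lazily. A sketch of the usual allocate/add/iterate/free cycle (the values are arbitrary, and since the header is btrfs-internal this only makes sense inside fs/btrfs):

#include <linux/gfp.h>
#include <linux/printk.h>
#include "ulist.h"

static int example_ulist(void)
{
	struct ulist *ul;
	struct ulist_iterator uiter;
	struct ulist_node *node;
	int ret;

	ul = ulist_alloc(GFP_KERNEL);
	if (!ul)
		return -ENOMEM;

	ret = ulist_add(ul, 4096, 0, GFP_KERNEL);	/* val, aux */
	if (ret < 0)
		goto out;
	ret = 0;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(ul, &uiter)))
		pr_debug("val=%llu aux=%llu\n",
			 (unsigned long long)node->val,
			 (unsigned long long)node->aux);
out:
	ulist_free(ul);
	return ret;
}
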
ulist.c
  92   struct ulist *ulist_alloc(gfp_t gfp_mask)  in ulist_alloc() argument
  94   struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask);  in ulist_alloc()
  186  int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask)  in ulist_add() argument
  188  return ulist_add_merge(ulist, val, aux, NULL, gfp_mask);  in ulist_add()
  192  u64 *old_aux, gfp_t gfp_mask)  in ulist_add_merge() argument
  203  node = kmalloc(sizeof(*node), gfp_mask);  in ulist_add_merge()

/kernel/linux/linux-5.10/fs/nfs/blocklayout/

dev.c
  231  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask);
  236  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)  in bl_parse_simple() argument
  242  dev = bl_resolve_deviceid(server, v, gfp_mask);  in bl_parse_simple()
  353  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)  in bl_parse_scsi() argument
  402  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)  in bl_parse_slice() argument
  407  ret = bl_parse_deviceid(server, d, volumes, v->slice.volume, gfp_mask);  in bl_parse_slice()
  418  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)  in bl_parse_concat() argument
  431  volumes, v->concat.volumes[i], gfp_mask);  in bl_parse_concat()
  447  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)  in bl_parse_stripe() argument
  460  volumes, v->stripe.volumes[i], gfp_mask);  in bl_parse_stripe()
  [all …]

/kernel/linux/linux-5.10/lib/

generic-radix-tree.c
  79   static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask)  in genradix_alloc_node() argument
  83   node = (struct genradix_node *)__get_free_page(gfp_mask|__GFP_ZERO);  in genradix_alloc_node()
  90   kmemleak_alloc(node, PAGE_SIZE, 1, gfp_mask);  in genradix_alloc_node()
  105  gfp_t gfp_mask)  in __genradix_ptr_alloc() argument
  122  new_node = genradix_alloc_node(gfp_mask);  in __genradix_ptr_alloc()
  145  new_node = genradix_alloc_node(gfp_mask);  in __genradix_ptr_alloc()
  218  gfp_t gfp_mask)  in __genradix_prealloc() argument
  223  if (!__genradix_ptr_alloc(radix, offset, gfp_mask))  in __genradix_prealloc()

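__genradix_ptr_alloc() is the out-of-line slow path behind the genradix_ptr_alloc() macro in <linux/generic-radix-tree.h>; the gfp_mask covers each page-sized node it has to add. A hedged sketch of the public macro API under that assumption (the element type is made up):

#include <linux/generic-radix-tree.h>
#include <linux/gfp.h>
#include <linux/types.h>

struct example_entry {
	u64 id;
};

static GENRADIX(struct example_entry) example_entries;

static int example_genradix(void)
{
	struct example_entry *e;

	genradix_init(&example_entries);

	/* allocates any missing intermediate nodes with GFP_KERNEL */
	e = genradix_ptr_alloc(&example_entries, 1000, GFP_KERNEL);
	if (!e)
		return -ENOMEM;
	e->id = 42;

	genradix_free(&example_entries);
	return 0;
}
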
scatterlist.c
  149  static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)  in sg_kmalloc() argument
  161  void *ptr = (void *) __get_free_page(gfp_mask);  in sg_kmalloc()
  162  kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);  in sg_kmalloc()
  166  gfp_mask);  in sg_kmalloc()
  268  unsigned int nents_first_chunk, gfp_t gfp_mask,  in __sg_alloc_table() argument
  302  sg = alloc_fn(alloc_size, gfp_mask);  in __sg_alloc_table()
  355  int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)  in sg_alloc_table() argument
  360  NULL, 0, gfp_mask, sg_kmalloc);  in sg_alloc_table()
  371  gfp_t gfp_mask)  in get_next_sg() argument
  384  new_sg = sg_kmalloc(alloc_size, gfp_mask);  in get_next_sg()
  [all …]

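sg_alloc_table() passes gfp_mask to sg_kmalloc() for each chunk of scatterlist entries it allocates. A short sketch of building and tearing down a table over pages supplied by the caller:

#include <linux/scatterlist.h>
#include <linux/gfp.h>

static int example_sg_table(struct page **pages, unsigned int npages)
{
	struct sg_table table;
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	ret = sg_alloc_table(&table, npages, GFP_KERNEL);
	if (ret)
		return ret;

	for_each_sg(table.sgl, sg, table.nents, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	/* ... map for DMA and do the I/O here ... */

	sg_free_table(&table);
	return 0;
}
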
/kernel/linux/linux-5.10/drivers/staging/android/ion/

ion_page_pool.c
  19   return alloc_pages(pool->gfp_mask, pool->order);  in ion_page_pool_alloc_pages()
  100  int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,  in ion_page_pool_shrink() argument
  109  high = !!(gfp_mask & __GFP_HIGHMEM);  in ion_page_pool_shrink()
  134  struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)  in ion_page_pool_create() argument
  144  pool->gfp_mask = gfp_mask | __GFP_COMP;  in ion_page_pool_create()

ion.h
  94   int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
  282  gfp_t gfp_mask;  member
  287  struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
  299  int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,

/kernel/linux/linux-5.10/net/sunrpc/auth_gss/

gss_krb5_mech.c
  312  context_derive_keys_des3(struct krb5_ctx *ctx, gfp_t gfp_mask)  in context_derive_keys_des3() argument
  339  err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);  in context_derive_keys_des3()
  357  context_derive_keys_new(struct krb5_ctx *ctx, gfp_t gfp_mask)  in context_derive_keys_new() argument
  373  err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);  in context_derive_keys_new()
  388  err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);  in context_derive_keys_new()
  403  err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);  in context_derive_keys_new()
  413  err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);  in context_derive_keys_new()
  423  err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);  in context_derive_keys_new()
  433  err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);  in context_derive_keys_new()
  469  gfp_t gfp_mask)  in gss_import_v2_context() argument
  [all …]

/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx4/

icm.c
  99   gfp_t gfp_mask, int node)  in mlx4_alloc_icm_pages() argument
  103  page = alloc_pages_node(node, gfp_mask, order);  in mlx4_alloc_icm_pages()
  105  page = alloc_pages(gfp_mask, order);  in mlx4_alloc_icm_pages()
  115  int order, gfp_t gfp_mask)  in mlx4_alloc_icm_coherent() argument
  118  &buf->dma_addr, gfp_mask);  in mlx4_alloc_icm_coherent()
  133  gfp_t gfp_mask, int coherent)  in mlx4_alloc_icm() argument
  142  BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));  in mlx4_alloc_icm()
  145  gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN),  in mlx4_alloc_icm()
  149  gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));  in mlx4_alloc_icm()
  162  gfp_mask & ~(__GFP_HIGHMEM |  in mlx4_alloc_icm()
  [all …]

/kernel/linux/linux-5.10/fs/ntfs/

malloc.h
  28  static inline void *__ntfs_malloc(unsigned long size, gfp_t gfp_mask)  in __ntfs_malloc() argument
  33  return kmalloc(PAGE_SIZE, gfp_mask & ~__GFP_HIGHMEM);  in __ntfs_malloc()
  37  return __vmalloc(size, gfp_mask);  in __ntfs_malloc()

/kernel/linux/linux-5.10/drivers/connector/

connector.c
  62   gfp_t gfp_mask)  in cn_netlink_send_mult() argument
  96   skb = nlmsg_new(size, gfp_mask);  in cn_netlink_send_mult()
  114  gfp_mask);  in cn_netlink_send_mult()
  116  !gfpflags_allow_blocking(gfp_mask));  in cn_netlink_send_mult()
  122  gfp_t gfp_mask)  in cn_netlink_send() argument
  124  return cn_netlink_send_mult(msg, msg->len, portid, __group, gfp_mask);  in cn_netlink_send()

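cn_netlink_send() passes gfp_mask straight through to nlmsg_new(), so callers in atomic context use GFP_ATOMIC. A hedged sketch of sending one connector message (the cb_id index/value and the payload are placeholders, not a registered connector):

#include <linux/connector.h>
#include <linux/slab.h>
#include <linux/string.h>

static int example_cn_send(void)
{
	static const char payload[] = "hello";
	struct cn_msg *msg;
	int ret;

	msg = kzalloc(sizeof(*msg) + sizeof(payload), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->id.idx = 0x1234;	/* placeholder connector index */
	msg->id.val = 0x5678;	/* placeholder connector value */
	msg->len = sizeof(payload);
	memcpy(msg->data, payload, sizeof(payload));

	/* portid 0: multicast to whoever listens on this group */
	ret = cn_netlink_send(msg, 0, msg->id.idx, GFP_KERNEL);
	kfree(msg);
	return ret;
}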