Searched refs:gfp_mask (Results 1 – 25 of 49) sorted by relevance

/include/linux/
gfp.h
586 __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) in __alloc_pages_node() argument
589 VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid)); in __alloc_pages_node()
591 return __alloc_pages(gfp_mask, order, nid, NULL); in __alloc_pages_node()
599 static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, in alloc_pages_node() argument
605 return __alloc_pages_node(nid, gfp_mask, order); in alloc_pages_node()
610 extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
613 #define alloc_hugepage_vma(gfp_mask, vma, addr, order) \ argument
614 alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
616 static inline struct page *alloc_pages(gfp_t gfp_mask, unsigned int order) in alloc_pages() argument
618 return alloc_pages_node(numa_node_id(), gfp_mask, order); in alloc_pages()
[all …]
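
The gfp.h helpers above are the kernel's core page-allocation entry points: alloc_pages() allocates 2^order contiguous pages on the current node, while alloc_pages_node()/__alloc_pages_node() target an explicit NUMA node. A minimal sketch of typical use, assuming process context where GFP_KERNEL may sleep (the helper name is illustrative):

    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* Illustrative: allocate one page (order 0) and return its kernel mapping. */
    static void *grab_page_buffer(void)
    {
        struct page *page;

        /* GFP_KERNEL may sleep and enter reclaim; use GFP_ATOMIC in atomic context. */
        page = alloc_pages(GFP_KERNEL, 0);
        if (!page)
            return NULL;
        return page_address(page);  /* release later with __free_pages(page, 0) */
    }
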
cpuset.h
70 extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);
72 static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask) in cpuset_node_allowed() argument
75 return __cpuset_node_allowed(node, gfp_mask); in cpuset_node_allowed()
79 static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in __cpuset_zone_allowed() argument
81 return __cpuset_node_allowed(zone_to_nid(z), gfp_mask); in __cpuset_zone_allowed()
84 static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in cpuset_zone_allowed() argument
87 return __cpuset_zone_allowed(z, gfp_mask); in cpuset_zone_allowed()
213 static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask) in cpuset_node_allowed() argument
218 static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in __cpuset_zone_allowed() argument
223 static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in cpuset_zone_allowed() argument
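
These cpuset hooks let allocators honor the calling task's cpuset memory placement; the second set of stubs (the !CONFIG_CPUSETS branch) simply allows every node and zone. A hedged sketch of how a zone walker might consult them (the wrapper is illustrative):

    #include <linux/cpuset.h>
    #include <linux/mmzone.h>

    /* Illustrative: skip zones the current task's cpuset forbids. */
    static bool zone_usable(struct zone *z, gfp_t gfp_mask)
    {
        return !cpusets_enabled() || cpuset_zone_allowed(z, gfp_mask);
    }
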
mempool.h
13 typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data);
36 gfp_t gfp_mask, int node_id);
44 gfp_t gfp_mask, int nid);
48 extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc;
56 void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
77 void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data);
96 void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data);
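
A mempool keeps a reserve of preallocated elements so mempool_alloc() can make forward progress even when the backing allocator fails; the mempool_alloc_slab/mempool_free_slab pair above backs a pool with a kmem_cache. A minimal sketch (the cache name and reserve size are illustrative):

    #include <linux/mempool.h>
    #include <linux/slab.h>

    #define MY_MIN_NR 4  /* illustrative reserve size */

    static struct kmem_cache *my_cache;
    static mempool_t *my_pool;

    static int my_pool_init(void)
    {
        my_cache = kmem_cache_create("my_cache", 128, 0, 0, NULL);
        if (!my_cache)
            return -ENOMEM;
        /* mempool_alloc_slab/mempool_free_slab pair with a kmem_cache. */
        my_pool = mempool_create(MY_MIN_NR, mempool_alloc_slab,
                                 mempool_free_slab, my_cache);
        return my_pool ? 0 : -ENOMEM;
    }

At steady state, callers pair mempool_alloc(my_pool, GFP_NOIO) with mempool_free(elem, my_pool).
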
page_owner.h
15 unsigned int order, gfp_t gfp_mask);
30 unsigned int order, gfp_t gfp_mask) in set_page_owner() argument
33 __set_page_owner(page, order, gfp_mask); in set_page_owner()
61 unsigned int order, gfp_t gfp_mask) in set_page_owner() argument
blk-crypto.h
91 gfp_t gfp_mask);
125 int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask);
138 gfp_t gfp_mask) in bio_crypt_clone() argument
142 return __bio_crypt_clone(dst, src, gfp_mask); in bio_crypt_clone()
connector.h
99 int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 group, gfp_t gfp_mask);
122 int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 group, gfp_t gfp_mask);
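
cn_netlink_send() broadcasts a connector message to a netlink group, with gfp_mask selecting the allocation context for the message skb. A hedged sketch of a sender (helper name and error handling are illustrative):

    #include <linux/connector.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    /* Illustrative: broadcast a small payload to a connector group. */
    static int my_cn_notify(struct cb_id *id, const void *data, u16 len)
    {
        struct cn_msg *msg;
        int ret;

        msg = kzalloc(sizeof(*msg) + len, GFP_KERNEL);
        if (!msg)
            return -ENOMEM;
        msg->id = *id;
        msg->len = len;
        memcpy(msg->data, data, len);
        /* portid 0 broadcasts to the whole group. */
        ret = cn_netlink_send(msg, 0, id->idx, GFP_KERNEL);
        kfree(msg);
        return ret;
    }
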
swap.h
387 gfp_t gfp_mask, nodemask_t *mask);
390 gfp_t gfp_mask,
393 gfp_t gfp_mask, bool noswap,
582 static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask) in add_swap_count_continuation() argument
605 gfp_t gfp_mask, struct vm_fault *vmf) in swap_cluster_readahead() argument
610 static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask, in swapin_readahead() argument
645 gfp_t gfp_mask, void **shadowp) in add_to_swap_cache() argument
731 extern void __cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask);
732 static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask) in cgroup_throttle_swaprate() argument
736 __cgroup_throttle_swaprate(page, gfp_mask); in cgroup_throttle_swaprate()
[all …]
textsearch.h
163 gfp_t gfp_mask) in alloc_ts_config() argument
167 conf = kzalloc(TS_PRIV_ALIGN(sizeof(*conf)) + payload, gfp_mask); in alloc_ts_config()
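
alloc_ts_config() is for textsearch algorithm implementations: it kzalloc()s the ts_config plus the algorithm's private payload in one allocation, using the caller's gfp_mask. Consumers of the API go through textsearch_prepare() instead; a hedged sketch of consumer-side use (pattern and helper name are illustrative):

    #include <linux/textsearch.h>
    #include <linux/err.h>
    #include <linux/kernel.h>

    /* Illustrative: find a pattern in a linear buffer. */
    static unsigned int my_find(const void *buf, unsigned int len)
    {
        struct ts_config *conf;
        struct ts_state state;
        unsigned int pos;

        conf = textsearch_prepare("kmp", "needle", 6, GFP_KERNEL, TS_AUTOLOAD);
        if (IS_ERR(conf))
            return UINT_MAX;
        pos = textsearch_find_continuous(conf, &state, buf, len);
        textsearch_destroy(conf);
        return pos;  /* UINT_MAX if not found */
    }
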
pagemap.h
111 return mapping->gfp_mask; in mapping_gfp_mask()
116 gfp_t gfp_mask) in mapping_gfp_constraint() argument
118 return mapping_gfp_mask(mapping) & gfp_mask; in mapping_gfp_constraint()
127 m->gfp_mask = mask; in mapping_set_gfp_mask()
416 pgoff_t index, gfp_t gfp_mask) in find_or_create_page() argument
420 gfp_mask); in find_or_create_page()
506 pgoff_t index, gfp_t gfp_mask);
741 pgoff_t index, gfp_t gfp_mask);
743 pgoff_t index, gfp_t gfp_mask);
757 struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) in add_to_page_cache() argument
[all …]
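
mapping_gfp_mask() returns the allocation policy stored on an address_space, and mapping_gfp_constraint() ANDs it with a caller-supplied mask; find_or_create_page() then uses that mask when it has to instantiate a missing page. A minimal sketch (the wrapper is illustrative):

    #include <linux/pagemap.h>

    /* Illustrative: look up a page, creating it under the mapping's gfp policy. */
    static struct page *get_cache_page(struct address_space *mapping, pgoff_t index)
    {
        gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);

        /* Returned page is locked with an extra reference:
         * caller must unlock_page() and put_page(). */
        return find_or_create_page(mapping, index, gfp);
    }
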
mISDNif.h
537 mI_alloc_skb(unsigned int len, gfp_t gfp_mask) in mI_alloc_skb() argument
541 skb = alloc_skb(len + MISDN_HEADER_LEN, gfp_mask); in mI_alloc_skb()
548 _alloc_mISDN_skb(u_int prim, u_int id, u_int len, void *dp, gfp_t gfp_mask) in _alloc_mISDN_skb() argument
550 struct sk_buff *skb = mI_alloc_skb(len, gfp_mask); in _alloc_mISDN_skb()
565 u_int id, u_int len, void *dp, gfp_t gfp_mask) in _queue_data() argument
571 skb = _alloc_mISDN_skb(prim, id, len, dp, gfp_mask); in _queue_data()
shrinker.h
15 gfp_t gfp_mask; member
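
Here gfp_mask is a member of struct shrink_control: it tells a shrinker what the reclaim context allows, so a callback can bail out rather than recurse into a forbidden subsystem. A hedged sketch of a scan callback (the shrinker itself is illustrative):

    #include <linux/shrinker.h>

    /* Illustrative: refuse to shrink when the context forbids FS recursion. */
    static unsigned long my_scan(struct shrinker *s, struct shrink_control *sc)
    {
        if (!(sc->gfp_mask & __GFP_FS))
            return SHRINK_STOP;
        /* ... free up to sc->nr_to_scan objects, return the number freed ... */
        return 0;
    }
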
scatterlist.h
283 gfp_t gfp_mask);
299 unsigned int left_pages, gfp_t gfp_mask);
303 unsigned int max_segment, gfp_t gfp_mask);
329 unsigned long size, gfp_t gfp_mask) in sg_alloc_table_from_pages() argument
332 size, UINT_MAX, gfp_mask); in sg_alloc_table_from_pages()
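
sg_alloc_table_from_pages() builds a scatterlist over an existing page array, coalescing physically contiguous pages; the inline wrapper above just forwards with UINT_MAX as the maximum segment size. A minimal sketch (the helper is illustrative):

    #include <linux/scatterlist.h>

    /* Illustrative: wrap a page array into an sg_table; may sleep with GFP_KERNEL. */
    static int build_sgt(struct sg_table *sgt, struct page **pages,
                         unsigned int n_pages, unsigned long size)
    {
        /* offset 0 into the first page; free later with sg_free_table(sgt). */
        return sg_alloc_table_from_pages(sgt, pages, n_pages, 0, size, GFP_KERNEL);
    }
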
vmalloc.h
149 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask);
151 unsigned long start, unsigned long end, gfp_t gfp_mask,
154 void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
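
__vmalloc() is vmalloc() with an explicit gfp mask, useful when the caller must avoid plain GFP_KERNEL (for example, in GFP_NOFS contexts) or wants the buffer zeroed. A one-line sketch (the helper is illustrative):

    #include <linux/vmalloc.h>

    /* Illustrative: zeroed, virtually contiguous buffer without FS recursion. */
    static void *alloc_big_buf(unsigned long size)
    {
        return __vmalloc(size, GFP_NOFS | __GFP_ZERO);  /* pair with vfree() */
    }
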
umh.h
37 gfp_t gfp_mask,
oom.h
40 const gfp_t gfp_mask; member
radix-tree.h
237 int radix_tree_preload(gfp_t gfp_mask);
238 int radix_tree_maybe_preload(gfp_t gfp_mask);
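
radix_tree_preload() pre-fills per-CPU node reserves so a later insertion under a spinlock cannot fail on allocation; it returns with preemption disabled on success, balanced by radix_tree_preload_end(). The canonical pattern, sketched with illustrative names:

    #include <linux/radix-tree.h>
    #include <linux/spinlock.h>

    static RADIX_TREE(my_tree, GFP_ATOMIC);
    static DEFINE_SPINLOCK(my_lock);

    /* Illustrative: preload outside the lock, insert under it. */
    static int my_insert(unsigned long index, void *item)
    {
        int err = radix_tree_preload(GFP_KERNEL);

        if (err)
            return err;
        spin_lock(&my_lock);
        err = radix_tree_insert(&my_tree, index, item);
        spin_unlock(&my_lock);
        radix_tree_preload_end();
        return err;
    }
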
blkdev.h
355 gfp_t gfp_mask);
885 struct bio_set *bs, gfp_t gfp_mask,
1304 sector_t nr_sects, gfp_t gfp_mask, struct page *page);
1309 sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
1311 sector_t nr_sects, gfp_t gfp_mask, int flags,
1318 sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
1321 sector_t nr_sects, gfp_t gfp_mask, unsigned flags);
1324 sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags) in sb_issue_discard() argument
1331 gfp_mask, flags); in sb_issue_discard()
1334 sector_t nr_blocks, gfp_t gfp_mask) in sb_issue_zeroout() argument
[all …]
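
These are the block-layer "issue" helpers (discard, write-same, zeroout); the sb_issue_* inline wrappers convert filesystem block numbers to 512-byte sectors before calling down. A hedged sketch of discarding a block range, assuming sb is a valid super_block:

    #include <linux/blkdev.h>

    /* Illustrative: discard nr_blocks starting at block; may sleep. */
    static int discard_range(struct super_block *sb, sector_t block,
                             sector_t nr_blocks)
    {
        return sb_issue_discard(sb, block, nr_blocks, GFP_NOFS, 0);
    }
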
skbuff.h
1190 gfp_t gfp_mask);
1238 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
1243 gfp_t gfp_mask, bool fclone);
1245 gfp_t gfp_mask) in __pskb_copy() argument
1247 return __pskb_copy_fclone(skb, headroom, gfp_mask, false); in __pskb_copy()
1250 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
2929 static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask) in skb_orphan_frags() argument
2936 return skb_copy_ubufs(skb, gfp_mask); in skb_orphan_frags()
2940 static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask) in skb_orphan_frags_rx() argument
2944 return skb_copy_ubufs(skb, gfp_mask); in skb_orphan_frags_rx()
[all …]
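
pskb_expand_head() reallocates an skb's head with extra head/tail room, while skb_orphan_frags() copies user-space (zerocopy) fragments via skb_copy_ubufs() before the stack hands the skb somewhere that may hold onto it. A hedged sketch of guaranteeing headroom (real code often uses skb_cow_head() for this):

    #include <linux/skbuff.h>

    /* Illustrative: guarantee 'needed' bytes of headroom before pushing. */
    static int ensure_headroom(struct sk_buff *skb, unsigned int needed)
    {
        if (skb_headroom(skb) >= needed)
            return 0;
        /* GFP_ATOMIC: typically called from softirq context. */
        return pskb_expand_head(skb, needed - skb_headroom(skb), 0, GFP_ATOMIC);
    }
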
hugetlb.h
638 nodemask_t *nmask, gfp_t gfp_mask);
839 static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask) in htlb_modify_alloc_mask() argument
844 modified_mask |= (gfp_mask & __GFP_THISNODE); in htlb_modify_alloc_mask()
846 modified_mask |= (gfp_mask & __GFP_NOWARN); in htlb_modify_alloc_mask()
936 nodemask_t *nmask, gfp_t gfp_mask)
1049 static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
compaction.h
94 extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
/include/linux/sched/
mm.h
187 extern void fs_reclaim_acquire(gfp_t gfp_mask);
188 extern void fs_reclaim_release(gfp_t gfp_mask);
192 static inline void fs_reclaim_acquire(gfp_t gfp_mask) { } in fs_reclaim_acquire() argument
193 static inline void fs_reclaim_release(gfp_t gfp_mask) { } in fs_reclaim_release() argument
204 static inline void might_alloc(gfp_t gfp_mask) in might_alloc() argument
206 fs_reclaim_acquire(gfp_mask); in might_alloc()
207 fs_reclaim_release(gfp_mask); in might_alloc()
209 might_sleep_if(gfpflags_allow_blocking(gfp_mask)); in might_alloc()
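
might_alloc() documents and checks an allocation's context: the fs_reclaim_acquire()/fs_reclaim_release() pair teaches lockdep that this gfp mask could enter reclaim, and might_sleep_if() fires if a blocking mask is used from atomic context. A custom allocator entry point might start with it; a minimal sketch with an illustrative wrapper:

    #include <linux/sched/mm.h>
    #include <linux/slab.h>

    /* Illustrative: annotate a wrapper so lockdep sees the gfp context. */
    static void *my_alloc(size_t size, gfp_t gfp_mask)
    {
        might_alloc(gfp_mask);
        return kmalloc(size, gfp_mask);
    }
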
/include/trace/hooks/
mm.h
62 TP_PROTO(gfp_t gfp_mask, unsigned int order, unsigned long delta),
63 TP_ARGS(gfp_mask, order, delta));
74 TP_PROTO(gfp_t gfp_mask, unsigned int order, unsigned long alloc_flags,
77 TP_ARGS(gfp_mask, order, alloc_flags, migratetype, did_some_progress, bypass));
99 TP_PROTO(gfp_t gfp_mask, int order, int *alloc_flags,
101 TP_ARGS(gfp_mask, order, alloc_flags,
176 TP_PROTO(gfp_t gfp_mask, int order, int alloc_flags,
178 TP_ARGS(gfp_mask, order, alloc_flags, migratetype, page));
180 TP_PROTO(gfp_t gfp_mask, int order, int alloc_flags,
182 TP_ARGS(gfp_mask, order, alloc_flags, migratetype, page));
[all …]
vmscan.h
43 TP_PROTO(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg, int priority, bool *bypass),
44 TP_ARGS(gfp_mask, nid, memcg, priority, bypass));
/include/trace/events/
compaction.h
174 gfp_t gfp_mask,
177 TP_ARGS(order, gfp_mask, prio),
181 __field(gfp_t, gfp_mask)
187 __entry->gfp_mask = gfp_mask;
193 show_gfp_flags(__entry->gfp_mask),
/include/linux/sunrpc/
gss_api.h
53 gfp_t gfp_mask);
115 gfp_t gfp_mask);
