
Searched refs:gfp_t (Results 1 – 25 of 215) sorted by relevance
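
For context, gfp_t is the kernel's allocation-flags type, declared in include/linux/types.h roughly as below; the __bitwise annotation lets sparse catch code that mixes these flags with plain integers (a sketch, assuming a recent kernel):

typedef unsigned int __bitwise gfp_t;	/* allocation flags, e.g. GFP_KERNEL */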


/include/linux/
gfp.h
68 #define __GFP_DMA ((__force gfp_t)___GFP_DMA)
69 #define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
70 #define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
71 #define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
72 #define __GFP_CMA ((__force gfp_t)___GFP_CMA)
102 #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
103 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
104 #define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL)
105 #define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)
106 #define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT)
[all …]
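
The __GFP_* zone modifiers above are OR-ed into a gfp_t mask when calling the page allocator; GFP_KERNEL may sleep, and __GFP_DMA32 steers the allocation to ZONE_DMA32. A minimal sketch (the example_* name is hypothetical):

#include <linux/gfp.h>

/* Hypothetical helper: one zeroed page usable for 32-bit DMA.
 * GFP_KERNEL may sleep; __GFP_DMA32 restricts the zone. */
static struct page *example_get_dma32_page(void)
{
	return alloc_pages(GFP_KERNEL | __GFP_DMA32 | __GFP_ZERO, 0);
}
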
kmemleak.h
19 gfp_t gfp) __ref;
21 gfp_t gfp) __ref;
23 gfp_t gfp) __ref;
30 extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
33 gfp_t gfp) __ref;
40 gfp_t gfp) in kmemleak_alloc_recursive()
63 gfp_t gfp) in kmemleak_alloc()
68 gfp_t gfp) in kmemleak_alloc_recursive()
72 gfp_t gfp) in kmemleak_alloc_percpu()
76 gfp_t gfp) in kmemleak_vmalloc()
[all …]
slab.h
184 void * __must_check krealloc(const void *, size_t, gfp_t);
330 static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags) in kmalloc_type()
401 void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
402 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
413 int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
425 void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
426 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __m…
428 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node) in __kmalloc_node()
433 static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) in kmem_cache_alloc_node()
440 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __m…
[all …]
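
The slab.h declarations above are the core kmalloc() family; every entry threads a gfp_t through so callers can state their allocation context. A minimal usage sketch (struct example_ctx and the helper names are hypothetical):

#include <linux/slab.h>

struct example_ctx {
	int id;
	char name[32];
};

static struct example_ctx *example_ctx_new(void)
{
	/* kzalloc = kmalloc + zeroing; GFP_KERNEL may sleep */
	struct example_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return NULL;
	ctx->id = -1;
	return ctx;
}

static void example_ctx_free(struct example_ctx *ctx)
{
	kfree(ctx);
}
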
devcoredump.h
56 gfp_t gfp);
59 void *data, size_t datalen, gfp_t gfp,
65 size_t datalen, gfp_t gfp);
68 size_t datalen, gfp_t gfp) in dev_coredumpv()
75 void *data, size_t datalen, gfp_t gfp, in dev_coredumpm()
84 size_t datalen, gfp_t gfp) in dev_coredumpsg()
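
dev_coredumpv() above hands a buffer to the device-coredump framework; it takes ownership of a vmalloc()ed buffer and vfree()s it once userspace has read the dump. A sketch, with hypothetical example_* names:

#include <linux/devcoredump.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/* Hypothetical: publish a crash snapshot for this device */
static void example_report_crash(struct device *dev,
				 const void *snap, size_t len)
{
	void *buf = vmalloc(len);

	if (!buf)
		return;
	memcpy(buf, snap, len);
	dev_coredumpv(dev, buf, len, GFP_KERNEL); /* buf now owned by core */
}
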
vmpressure.h
33 extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
35 extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio);
47 static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, in vmpressure()
49 static inline void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, in vmpressure_prio()
mempool.h
13 typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data);
36 gfp_t gfp_mask, int node_id);
44 gfp_t gfp_mask, int nid);
48 extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc;
56 void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
77 void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data);
96 void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data);
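
mempool.h above pairs an allocator callback with a preallocated reserve so allocations can make forward progress under memory pressure. A sketch of a slab-backed pool using the mempool_alloc_slab/mempool_free_slab helpers declared above (the example_* names are hypothetical):

#include <linux/errno.h>
#include <linux/mempool.h>
#include <linux/slab.h>

static struct kmem_cache *example_cache;
static mempool_t *example_pool;

static int example_pool_init(void)
{
	example_cache = kmem_cache_create("example", 256, 0, 0, NULL);
	if (!example_cache)
		return -ENOMEM;

	/* keep at least 16 elements in reserve */
	example_pool = mempool_create(16, mempool_alloc_slab,
				      mempool_free_slab, example_cache);
	if (!example_pool) {
		kmem_cache_destroy(example_cache);
		return -ENOMEM;
	}
	return 0;
}
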
memregion.h
12 int memregion_alloc(gfp_t gfp);
15 static inline int memregion_alloc(gfp_t gfp) in memregion_alloc()
idr.h
32 #define IDR_RT_MARKER (ROOT_IS_IDR | (__force gfp_t) \
112 void idr_preload(gfp_t gfp_mask);
114 int idr_alloc(struct idr *, void *ptr, int start, int end, gfp_t);
116 unsigned long max, gfp_t);
117 int idr_alloc_cyclic(struct idr *, void *ptr, int start, int end, gfp_t);
257 int ida_alloc_range(struct ida *, unsigned int min, unsigned int max, gfp_t);
273 static inline int ida_alloc(struct ida *ida, gfp_t gfp) in ida_alloc()
291 static inline int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp) in ida_alloc_min()
309 static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp) in ida_alloc_max()
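
The ida_alloc() family above hands out small unique integer IDs; the gfp_t tells it how internal bitmap nodes may be allocated. A minimal sketch (example_* names are hypothetical):

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

static int example_get_id(void)
{
	/* returns the new ID (>= 0) or a negative errno */
	return ida_alloc(&example_ida, GFP_KERNEL);
}

static void example_put_id(int id)
{
	ida_free(&example_ida, id);
}
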
cpuset.h
71 extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);
73 static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask) in cpuset_node_allowed()
80 static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in __cpuset_zone_allowed()
85 static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in cpuset_zone_allowed()
217 static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask) in cpuset_node_allowed()
222 static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in __cpuset_zone_allowed()
227 static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in cpuset_zone_allowed()
kasan.h
87 void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags);
93 unsigned int order, gfp_t flags) in kasan_alloc_pages()
234 void *object, gfp_t flags, bool init);
236 struct kmem_cache *s, void *object, gfp_t flags, bool init) in kasan_slab_alloc()
244 size_t size, gfp_t flags);
246 const void *object, size_t size, gfp_t flags) in kasan_kmalloc()
254 size_t size, gfp_t flags);
256 size_t size, gfp_t flags) in kasan_kmalloc_large()
264 size_t new_size, gfp_t flags);
266 size_t new_size, gfp_t flags) in kasan_krealloc()
[all …]
zpool.h
43 gfp_t gfp, const struct zpool_ops *ops);
51 int zpool_malloc(struct zpool *pool, size_t size, gfp_t gfp,
90 gfp_t gfp,
96 int (*malloc)(void *pool, size_t size, gfp_t gfp,
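
zpool is the abstraction zswap uses over compressed-memory allocators such as zbud; zpool_malloc() above yields an opaque handle rather than a pointer. A sketch under that assumption (example_* is hypothetical; the handle out-parameter matches the truncated declaration above as I read it):

#include <linux/zpool.h>

/* Hypothetical: stash len bytes, yielding a handle for later map/free */
static int example_zpool_put(struct zpool *pool, size_t len,
			     unsigned long *handle)
{
	return zpool_malloc(pool, len, GFP_KERNEL, handle);
}
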
xarray.h
263 #define XA_FLAGS_LOCK_IRQ ((__force gfp_t)XA_LOCK_IRQ)
264 #define XA_FLAGS_LOCK_BH ((__force gfp_t)XA_LOCK_BH)
265 #define XA_FLAGS_TRACK_FREE ((__force gfp_t)4U)
266 #define XA_FLAGS_ZERO_BUSY ((__force gfp_t)8U)
267 #define XA_FLAGS_ALLOC_WRAPPED ((__force gfp_t)16U)
268 #define XA_FLAGS_ACCOUNT ((__force gfp_t)32U)
269 #define XA_FLAGS_MARK(mark) ((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \
295 gfp_t xa_flags;
348 void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
351 void *entry, gfp_t);
[all …]
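
Note how xarray.h reuses the gfp_t type for its XA_FLAGS_* bits, hence the __force casts above. For the API itself, the gfp_t passed to xa_store() covers internal node allocation. A minimal sketch (example_* names are hypothetical):

#include <linux/xarray.h>

static DEFINE_XARRAY(example_xa);

static int example_store(unsigned long index, void *item)
{
	/* xa_store() returns the old entry or an error entry */
	return xa_err(xa_store(&example_xa, index, item, GFP_KERNEL));
}

static void *example_load(unsigned long index)
{
	return xa_load(&example_xa, index);
}
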
fault-inject.h
67 int should_failslab(struct kmem_cache *s, gfp_t gfpflags);
69 extern bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags);
71 static inline bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags) in __should_failslab()
rtnetlink.h
15 u32 group, struct nlmsghdr *nlh, gfp_t flags);
21 void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags);
23 gfp_t flags, int *new_nsid, int new_ifindex);
26 gfp_t flags, int *new_nsid,
29 gfp_t flags);
swap.h
371 gfp_t gfp_mask, nodemask_t *mask);
375 gfp_t gfp_mask,
378 gfp_t gfp_mask, bool noswap,
429 gfp_t gfp, void **shadowp);
441 extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
444 extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
447 extern struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
449 extern struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
474 extern int add_swap_count_continuation(swp_entry_t, gfp_t);
539 static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask) in add_swap_count_continuation()
[all …]
dmapool.h
26 void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
42 static inline void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, in dma_pool_alloc()
52 static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags, in dma_pool_zalloc()
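
dma_pool_alloc() above takes a gfp_t alongside a dma_addr_t out-parameter, for small per-device coherent DMA buffers. A sketch (the example names and the 64-byte size/alignment are hypothetical):

#include <linux/dmapool.h>

/* Hypothetical: create a pool of 64-byte descriptors, then grab one */
static void *example_get_desc(struct device *dev, struct dma_pool **poolp,
			      dma_addr_t *dma)
{
	*poolp = dma_pool_create("example-desc", dev, 64, 64, 0);
	if (!*poolp)
		return NULL;
	return dma_pool_alloc(*poolp, GFP_KERNEL, dma);
}
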
zbud.h
13 struct zbud_pool *zbud_create_pool(gfp_t gfp, const struct zbud_ops *ops);
15 int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
kfence.h
99 void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags);
117 static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) in kfence_alloc()
210 static inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) { return NULL; } in kfence_alloc()
/include/net/sctp/
ulpevent.h
81 gfp_t gfp);
90 gfp_t gfp);
96 gfp_t gfp);
103 gfp_t gfp);
108 gfp_t gfp);
113 __u32 flags, gfp_t gfp);
116 const struct sctp_association *asoc, gfp_t gfp);
120 gfp_t gfp);
124 __u32 indication, gfp_t gfp);
127 const struct sctp_association *asoc, gfp_t gfp);
[all …]
stream_interleave.h
25 int len, __u8 flags, gfp_t gfp);
29 struct sctp_chunk *chunk, gfp_t gfp);
33 struct sctp_chunk *chunk, gfp_t gfp);
34 void (*start_pd)(struct sctp_ulpq *ulpq, gfp_t gfp);
35 void (*abort_pd)(struct sctp_ulpq *ulpq, gfp_t gfp);
ulpqueue.h
44 int sctp_ulpq_tail_data(struct sctp_ulpq *, struct sctp_chunk *, gfp_t);
50 void sctp_ulpq_renege(struct sctp_ulpq *, struct sctp_chunk *, gfp_t);
53 void sctp_ulpq_partial_delivery(struct sctp_ulpq *, gfp_t);
56 void sctp_ulpq_abort_pd(struct sctp_ulpq *, gfp_t);
auth.h
71 struct sctp_shared_key *sctp_auth_shkey_create(__u16 key_id, gfp_t gfp);
73 int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp);
79 gfp_t gfp);
80 int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp);
94 struct sctp_shared_key *ep_key, gfp_t gfp);
110 int sctp_auth_init(struct sctp_endpoint *ep, gfp_t gfp);
stream_sched.h
22 gfp_t gfp);
28 int (*init_sid)(struct sctp_stream *stream, __u16 sid, gfp_t gfp);
50 __u16 value, gfp_t gfp);
56 int sctp_sched_init_sid(struct sctp_stream *stream, __u16 sid, gfp_t gfp);
/include/drm/
drm_managed.h
49 void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp) __malloc;
61 static inline void *drmm_kzalloc(struct drm_device *dev, size_t size, gfp_t gfp) in drmm_kzalloc()
78 size_t n, size_t size, gfp_t flags) in drmm_kmalloc_array()
100 size_t n, size_t size, gfp_t flags) in drmm_kcalloc()
105 char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp);
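
drmm_kzalloc() above is a managed allocation: its lifetime is tied to the drm_device and it is freed automatically on release, so teardown paths need no kfree(). A sketch (struct example_state is hypothetical):

#include <drm/drm_managed.h>

struct example_state {
	int mode;
};

static struct example_state *example_state_alloc(struct drm_device *dev)
{
	return drmm_kzalloc(dev, sizeof(struct example_state), GFP_KERNEL);
}
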
/include/net/
hwbm.h
21 int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp);
26 static inline int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp) in hwbm_pool_refill()
