• Home
  • Raw
  • Download

Lines matching references to: gfp_t

19 #define __GFP_DMA	((__force gfp_t)0x01u)
20 #define __GFP_HIGHMEM ((__force gfp_t)0x02u)
21 #define __GFP_DMA32 ((__force gfp_t)0x04u)
37 #define __GFP_WAIT ((__force gfp_t)0x10u) /* Can wait and reschedule? */
38 #define __GFP_HIGH ((__force gfp_t)0x20u) /* Should access emergency pools? */
39 #define __GFP_IO ((__force gfp_t)0x40u) /* Can start physical IO? */
40 #define __GFP_FS ((__force gfp_t)0x80u) /* Can call down to low-level FS? */
41 #define __GFP_COLD ((__force gfp_t)0x100u) /* Cache-cold page required */
42 #define __GFP_NOWARN ((__force gfp_t)0x200u) /* Suppress page allocation failure warning */
43 #define __GFP_REPEAT ((__force gfp_t)0x400u) /* See above */
44 #define __GFP_NOFAIL ((__force gfp_t)0x800u) /* See above */
45 #define __GFP_NORETRY ((__force gfp_t)0x1000u)/* See above */
46 #define __GFP_COMP ((__force gfp_t)0x4000u)/* Add compound page metadata */
47 #define __GFP_ZERO ((__force gfp_t)0x8000u)/* Return zeroed page on success */
48 #define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
49 #define __GFP_HARDWALL ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
50 #define __GFP_THISNODE ((__force gfp_t)0x40000u)/* No fallback, no policies */
51 #define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */
52 #define __GFP_MOVABLE ((__force gfp_t)0x100000u) /* Page is movable */
55 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
76 #define GFP_THISNODE ((__force gfp_t)0)
102 static inline int allocflags_to_migratetype(gfp_t gfp_flags) in allocflags_to_migratetype()
114 static inline enum zone_type gfp_zone(gfp_t flags) in gfp_zone()
141 static inline int gfp_zonelist(gfp_t flags) in gfp_zonelist()
158 static inline struct zonelist *node_zonelist(int nid, gfp_t flags) in node_zonelist()
171 __alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
175 __alloc_pages(gfp_t gfp_mask, unsigned int order, in __alloc_pages()
182 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, in __alloc_pages_nodemask()
189 static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, in alloc_pages_node()
203 extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);
206 alloc_pages(gfp_t gfp_mask, unsigned int order) in alloc_pages()
213 extern struct page *alloc_page_vma(gfp_t gfp_mask,
222 extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
223 extern unsigned long get_zeroed_page(gfp_t gfp_mask);
225 void *alloc_pages_exact(size_t size, gfp_t gfp_mask);