Lines Matching refs:gfp_mask (mm/mempool.c)
74 gfp_t gfp_mask, int node_id) in mempool_create_node() argument
77 pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id); in mempool_create_node()
81 gfp_mask, node_id); in mempool_create_node()
99 element = pool->alloc(gfp_mask, pool->pool_data); in mempool_create_node()
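The hits above cover pool creation: the pool descriptor and its element array are allocated with the caller's gfp_mask on the requested node, and the same mask is then handed to pool->alloc() (line 99) to pre-fill the reserve. A minimal usage sketch, assuming a hypothetical struct my_request and cache name, and using the stock slab callbacks that appear further down in this listing:

        #include <linux/init.h>
        #include <linux/list.h>
        #include <linux/mempool.h>
        #include <linux/numa.h>
        #include <linux/slab.h>

        /* Hypothetical request structure; the names here are illustrative. */
        struct my_request {
                struct list_head list;
                void *data;
        };

        static struct kmem_cache *my_req_cache;
        static mempool_t *my_req_pool;

        static int __init my_pool_init(void)
        {
                my_req_cache = kmem_cache_create("my_request",
                                                 sizeof(struct my_request),
                                                 0, 0, NULL);
                if (!my_req_cache)
                        return -ENOMEM;

                /* Reserve 16 elements.  GFP_KERNEL is the mask used while the
                 * reserve is pre-filled; NUMA_NO_NODE means "any node". */
                my_req_pool = mempool_create_node(16, mempool_alloc_slab,
                                                  mempool_free_slab,
                                                  my_req_cache,
                                                  GFP_KERNEL, NUMA_NO_NODE);
                if (!my_req_pool) {
                        kmem_cache_destroy(my_req_cache);
                        return -ENOMEM;
                }
                return 0;
        }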
126 int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask) in mempool_resize() argument
148 new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask); in mempool_resize()
167 element = pool->alloc(gfp_mask, pool->pool_data); in mempool_resize()
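In mempool_resize() the caller's gfp_mask is used for the new element array (line 148) and for growing the reserve through pool->alloc() (line 167). A hedged sketch of growing the pool from the previous example (note that the version listed here still takes a gfp_mask; later kernels dropped that parameter and use GFP_KERNEL unconditionally):

        /* Continuing the sketch above: grow the reserve from 16 to 32
         * elements at runtime.  Returns -ENOMEM if the new element array or
         * the extra elements cannot be allocated. */
        static int my_pool_grow(void)
        {
                return mempool_resize(my_req_pool, 32, GFP_KERNEL);
        }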
198 void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask) in mempool_alloc() argument
205 VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO); in mempool_alloc()
206 might_sleep_if(gfp_mask & __GFP_WAIT); in mempool_alloc()
208 gfp_mask |= __GFP_NOMEMALLOC; /* don't allocate emergency reserves */ in mempool_alloc()
209 gfp_mask |= __GFP_NORETRY; /* don't loop in __alloc_pages */ in mempool_alloc()
210 gfp_mask |= __GFP_NOWARN; /* failures are OK */ in mempool_alloc()
212 gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO); in mempool_alloc()
238 if (gfp_temp != gfp_mask) { in mempool_alloc()
240 gfp_temp = gfp_mask; in mempool_alloc()
245 if (!(gfp_mask & __GFP_WAIT)) { in mempool_alloc()
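The mempool_alloc() hits show the gfp policy: __GFP_NOMEMALLOC, __GFP_NORETRY and __GFP_NOWARN are always ORed in (lines 208-210), the first pass through pool->alloc() runs with __GFP_WAIT and __GFP_IO masked off so it fails fast (line 212), and only callers without __GFP_WAIT can ever see NULL (line 245). From the caller's side, continuing the hypothetical sketch above:

        /* Sleeping context, e.g. the I/O submission path.  GFP_NOIO includes
         * __GFP_WAIT, so mempool_alloc() waits on the reserve instead of
         * returning NULL. */
        static struct my_request *my_get_request(void)
        {
                return mempool_alloc(my_req_pool, GFP_NOIO);
        }

        /* Without __GFP_WAIT (e.g. GFP_ATOMIC) the allocation can fail, and
         * the caller must check the result. */
        static struct my_request *my_try_get_request(void)
        {
                return mempool_alloc(my_req_pool, GFP_ATOMIC);
        }

        /* Elements return to the reserve (or the underlying allocator) via
         * mempool_free(). */
        static void my_put_request(struct my_request *req)
        {
                mempool_free(req, my_req_pool);
        }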
332 void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data) in mempool_alloc_slab() argument
335 return kmem_cache_alloc(mem, gfp_mask); in mempool_alloc_slab()
350 void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data) in mempool_kmalloc() argument
353 return kmalloc(size, gfp_mask); in mempool_kmalloc()
367 void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data) in mempool_alloc_pages() argument
370 return alloc_pages(gfp_mask, order); in mempool_alloc_pages()
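The last three hits are the stock allocation callbacks shipped with mempool: slab-cache-backed, kmalloc-backed, and page-allocator-backed. Each receives the gfp_mask from mempool_alloc() unchanged and passes it straight through. In practice they are rarely installed by hand; the convenience wrappers in <linux/mempool.h> pair each with its matching free callback. A brief sketch (pool sizes and names are illustrative):

        static mempool_t *buf_pool, *pg_pool, *slab_pool;

        static int my_pools_init(void)
        {
                /* mempool_kmalloc()/mempool_kfree(): 256-byte buffers. */
                buf_pool = mempool_create_kmalloc_pool(8, 256);

                /* mempool_alloc_pages()/mempool_free_pages(): order-0 pages. */
                pg_pool = mempool_create_page_pool(8, 0);

                /* mempool_alloc_slab()/mempool_free_slab(): equivalent to the
                 * mempool_create_node() call sketched earlier. */
                slab_pool = mempool_create_slab_pool(8, my_req_cache);

                if (!buf_pool || !pg_pool || !slab_pool)
                        return -ENOMEM; /* error unwind omitted for brevity */
                return 0;
        }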