/block/ (gfp_mask references, one block per file: source line, code context, enclosing function)
D | blk-lib.c
     41  sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)   in blkdev_issue_discard() argument
     78  bio = bio_alloc(gfp_mask, 1);   in blkdev_issue_discard()
    145  sector_t nr_sects, gfp_t gfp_mask,   in blkdev_issue_write_same() argument
    166  bio = bio_alloc(gfp_mask, 1);   in blkdev_issue_write_same()
    216  sector_t nr_sects, gfp_t gfp_mask)   in __blkdev_issue_zeroout() argument
    230  bio = bio_alloc(gfp_mask,   in __blkdev_issue_zeroout()
    286  sector_t nr_sects, gfp_t gfp_mask, bool discard)   in blkdev_issue_zeroout() argument
    291  blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, 0) == 0)   in blkdev_issue_zeroout()
    295  blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,   in blkdev_issue_zeroout()
    299  return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);   in blkdev_issue_zeroout()
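The blk-lib.c entries above show blkdev_issue_zeroout() forwarding its gfp_mask to blkdev_issue_discard(), then blkdev_issue_write_same(), then __blkdev_issue_zeroout() as it falls back from one zeroing strategy to the next. A minimal caller sketch follows; the function name and the 1 MiB size are illustrative assumptions, not taken from the listing.

#include <linux/blkdev.h>
#include <linux/gfp.h>

/* Zero the first 1 MiB of a block device, preferring discard when supported. */
static int zero_first_megabyte(struct block_device *bdev)
{
	sector_t nr_sects = (1024 * 1024) >> 9;	/* bytes to 512-byte sectors */

	/*
	 * GFP_KERNEL is fine from process context; the mask is handed down to
	 * every bio_alloc() in the helpers listed above.
	 */
	return blkdev_issue_zeroout(bdev, 0, nr_sects, GFP_KERNEL, true);
}
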
D | blk-map.c
     83  const struct iov_iter *iter, gfp_t gfp_mask)   in blk_rq_map_user_iov() argument
    114  bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);   in blk_rq_map_user_iov()
    116  bio = bio_map_user_iov(q, iter, gfp_mask);   in blk_rq_map_user_iov()
    148  unsigned long len, gfp_t gfp_mask)   in blk_rq_map_user() argument
    157  return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);   in blk_rq_map_user()
    207  unsigned int len, gfp_t gfp_mask)   in blk_rq_map_kern() argument
    222  bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);   in blk_rq_map_kern()
    224  bio = bio_map_kern(q, kbuf, len, gfp_mask);   in blk_rq_map_kern()
D | bio.c
    174  struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,   in bvec_alloc() argument
    211  bvl = mempool_alloc(pool, gfp_mask);   in bvec_alloc()
    214  gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);   in bvec_alloc()
    228  if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {   in bvec_alloc()
    427  struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)   in bio_alloc_bioset() argument
    429  gfp_t saved_gfp = gfp_mask;   in bio_alloc_bioset()
    443  gfp_mask);   in bio_alloc_bioset()
    474  gfp_mask &= ~__GFP_DIRECT_RECLAIM;   in bio_alloc_bioset()
    476  p = mempool_alloc(bs->bio_pool, gfp_mask);   in bio_alloc_bioset()
    477  if (!p && gfp_mask != saved_gfp) {   in bio_alloc_bioset()
    [all …]
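The bio.c matches in bvec_alloc() and bio_alloc_bioset() illustrate the same allocation pattern: save the caller's mask, clear __GFP_DIRECT_RECLAIM (and, for bvecs, __GFP_IO) for an optimistic non-blocking attempt, then retry with the original mask only if the caller actually allowed blocking. Below is a deliberately simplified sketch of that pattern; it is not the real bio.c code, which also punts already-queued bios to the bio_set's rescuer workqueue before retrying.

#include <linux/gfp.h>
#include <linux/mempool.h>

/* Simplified "try non-blocking first, then retry with the original mask" pattern. */
static void *alloc_with_reclaim_fallback(mempool_t *pool, gfp_t gfp_mask)
{
	gfp_t saved_gfp = gfp_mask;
	void *p;

	/* Optimistic attempt that will not enter direct reclaim. */
	gfp_mask &= ~__GFP_DIRECT_RECLAIM;
	p = mempool_alloc(pool, gfp_mask);

	/* Only retry if stripping the flag actually changed the mask. */
	if (!p && gfp_mask != saved_gfp)
		p = mempool_alloc(pool, saved_gfp);

	return p;
}
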
D | blk.h
     62  gfp_t gfp_mask);
    267  gfp_t gfp_mask);
    270  int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
    284  static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)   in create_io_context() argument
    288  create_task_io_context(current, gfp_mask, node);   in create_io_context()
D | bio-integrity.c
     51  gfp_t gfp_mask,   in bio_integrity_alloc() argument
     61  sizeof(struct bio_vec) * nr_vecs, gfp_mask);   in bio_integrity_alloc()
     64  bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask);   in bio_integrity_alloc()
     74  bip->bip_vec = bvec_alloc(gfp_mask, nr_vecs, &idx,   in bio_integrity_alloc()
    463  gfp_t gfp_mask)   in bio_integrity_clone() argument
    470  bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt);   in bio_integrity_clone()
D | blk-core.c
    608  static void *alloc_request_struct(gfp_t gfp_mask, void *data)   in alloc_request_struct() argument
    611  return kmem_cache_alloc_node(request_cachep, gfp_mask, nid);   in alloc_request_struct()
    620  gfp_t gfp_mask)   in blk_init_rl() argument
    633  (void *)(long)q->node, gfp_mask,   in blk_init_rl()
    647  struct request_queue *blk_alloc_queue(gfp_t gfp_mask)   in blk_alloc_queue() argument
    649  return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);   in blk_alloc_queue()
    683  struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)   in blk_alloc_queue_node() argument
    689  gfp_mask | __GFP_ZERO, node_id);   in blk_alloc_queue_node()
    693  q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);   in blk_alloc_queue_node()
   1065  struct bio *bio, gfp_t gfp_mask)   in __get_request() argument
    [all …]
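In the blk-core.c hits, blk_alloc_queue() is just blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE), and the node-aware variant reuses the caller's mask, with __GFP_ZERO added, for the queue structure and again for the ida ID. A hypothetical probe-time helper using that entry point (the function name is an illustrative assumption):

#include <linux/blkdev.h>
#include <linux/gfp.h>

/* Allocate a request queue near the device's NUMA node during probe. */
static struct request_queue *probe_alloc_queue(int node)
{
	/* GFP_KERNEL: probe runs in process context and may sleep. */
	return blk_alloc_queue_node(GFP_KERNEL, node);
}
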
D | blk-ioc.c
    358  gfp_t gfp_mask)   in ioc_create_icq() argument
    364  icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,   in ioc_create_icq()
    369  if (radix_tree_maybe_preload(gfp_mask) < 0) {   in ioc_create_icq()
D | bounce.c
     75  static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)   in mempool_alloc_pages_isa() argument
     77  return mempool_alloc_pages(gfp_mask | GFP_DMA, data);   in mempool_alloc_pages_isa()
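The two bounce.c lines are a mempool allocation callback that ORs GFP_DMA into whatever mask the mempool core passes in, so bounce pages are taken from ISA-DMA'able memory. The sketch below shows how such a callback is typically wired into a pool; the pool size and all names here are illustrative assumptions, not taken from bounce.c.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/mempool.h>

/* Allocation callback: constrain the caller's mask to ZONE_DMA. */
static void *dma_pool_alloc_pages(gfp_t gfp_mask, void *data)
{
	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
}

static mempool_t *dma_page_pool;

static int __init dma_page_pool_init(void)
{
	/* 16 reserved order-0 pages; pool_data carries the page order. */
	dma_page_pool = mempool_create(16, dma_pool_alloc_pages,
				       mempool_free_pages, (void *)0);
	return dma_page_pool ? 0 : -ENOMEM;
}
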
D | blk-cgroup.c
     92  gfp_t gfp_mask)   in blkg_alloc() argument
     98  blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);   in blkg_alloc()
    102  if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||   in blkg_alloc()
    103  blkg_rwstat_init(&blkg->stat_ios, gfp_mask))   in blkg_alloc()
    113  if (blk_init_rl(&blkg->rl, q, gfp_mask))   in blkg_alloc()
    126  pd = pol->pd_alloc_fn(gfp_mask, q->node);   in blkg_alloc()
D | blk-flush.c
    462  int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,   in blkdev_issue_flush() argument
    485  bio = bio_alloc(gfp_mask, 0);   in blkdev_issue_flush()
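blkdev_issue_flush() uses its gfp_mask only to allocate the empty flush bio (the bio_alloc(gfp_mask, 0) at line 485). A hypothetical caller, assuming the three-argument form of this kernel generation where the last parameter is an optional error_sector pointer:

#include <linux/blkdev.h>
#include <linux/gfp.h>

/* Flush the device's volatile write cache; pass NULL since the failing sector is not needed. */
static int flush_write_cache(struct block_device *bdev)
{
	return blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
}
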
D | elevator.c
    703  struct bio *bio, gfp_t gfp_mask)   in elv_set_request() argument
    708  return e->type->ops.elevator_set_req_fn(q, rq, bio, gfp_mask);   in elv_set_request()
D | cfq-iosched.c
   4360  gfp_t gfp_mask)   in cfq_set_request() argument