/block/
blk-lib.c
    27    in __blkdev_issue_discard() (argument):      sector_t nr_sects, gfp_t gfp_mask, int flags,
    87    in __blkdev_issue_discard():                 bio = next_bio(bio, 0, gfp_mask);
    130   in blkdev_issue_discard() (argument):        sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
    137   in blkdev_issue_discard():                   ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
    164   in __blkdev_issue_write_same() (argument):   sector_t nr_sects, gfp_t gfp_mask, struct page *page,
    186   in __blkdev_issue_write_same():              bio = next_bio(bio, 1, gfp_mask);
    222   in blkdev_issue_write_same() (argument):     sector_t nr_sects, gfp_t gfp_mask,
    230   in blkdev_issue_write_same():                ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
    242   in __blkdev_issue_write_zeroes() (argument): sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
    259   in __blkdev_issue_write_zeroes():            bio = next_bio(bio, 0, gfp_mask);
    [all …]
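These helpers thread the caller's gfp_mask down to next_bio() for every bio in the chain. A minimal caller-side sketch, assuming a 4.x-era kernel where blkdev_issue_discard() still takes an explicit gfp_t (the wrapper name example_discard_range is hypothetical):

    #include <linux/blkdev.h>
    #include <linux/gfp.h>

    /*
     * Discard a sector range. The gfp_mask passed here is forwarded
     * through blkdev_issue_discard() to next_bio(), which uses it to
     * allocate each bio of the discard chain.
     */
    static int example_discard_range(struct block_device *bdev,
                                     sector_t start, sector_t nr_sects)
    {
            /* GFP_KERNEL: process context, sleeping and reclaim allowed. */
            return blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL, 0);
    }

A caller on a writeback or reclaim path would pass GFP_NOIO instead, so the allocation cannot recurse back into the block layer.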
blk-map.c
    60    in __blk_rq_map_user_iov() (argument):   gfp_t gfp_mask, bool copy)
    67    in __blk_rq_map_user_iov():              bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
    69    in __blk_rq_map_user_iov():              bio = bio_map_user_iov(q, iter, gfp_mask);
    123   in blk_rq_map_user_iov() (argument):     const struct iov_iter *iter, gfp_t gfp_mask)
    143   in blk_rq_map_user_iov():                ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
    164   in blk_rq_map_user() (argument):         unsigned long len, gfp_t gfp_mask)
    173   in blk_rq_map_user():                    return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
    223   in blk_rq_map_kern() (argument):         unsigned int len, gfp_t gfp_mask)
    238   in blk_rq_map_kern():                    bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
    240   in blk_rq_map_kern():                    bio = bio_map_kern(q, kbuf, len, gfp_mask);
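blk_rq_map_kern() hands gfp_mask to bio_map_kern() or, when the buffer is unaligned, to bio_copy_kern() for the bounce copy. A hedged sketch of the kernel-buffer path (example_map_kernel_buf is a hypothetical name):

    #include <linux/blkdev.h>

    /*
     * Attach a kernel buffer to an already-allocated request.
     * gfp_mask covers the bio and any bounce pages the copy path needs.
     */
    static int example_map_kernel_buf(struct request_queue *q,
                                      struct request *rq,
                                      void *buf, unsigned int len)
    {
            /* GFP_NOIO: we may be called while servicing other I/O. */
            return blk_rq_map_kern(q, rq, buf, len, GFP_NOIO);
    }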
bio.c
    179   in bvec_alloc() (argument):          struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
    216   in bvec_alloc():                     bvl = mempool_alloc(pool, gfp_mask);
    219   in bvec_alloc():                     gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);
    233   in bvec_alloc():                     if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
    436   in bio_alloc_bioset() (argument):    struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
    439   in bio_alloc_bioset():               gfp_t saved_gfp = gfp_mask;
    452   in bio_alloc_bioset():               gfp_mask);
    484   in bio_alloc_bioset():               gfp_mask &= ~__GFP_DIRECT_RECLAIM;
    486   in bio_alloc_bioset():               p = mempool_alloc(bs->bio_pool, gfp_mask);
    487   in bio_alloc_bioset():               if (!p && gfp_mask != saved_gfp) {
    [all …]
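The bio_alloc_bioset() entries above show its retry dance: the original mask is saved, the mempool is first tried without __GFP_DIRECT_RECLAIM, and only after punting already-queued bios to the rescuer workqueue does it retry with the full mask. From the caller's side this is invisible; a minimal sketch against a private bio_set (example_alloc_bio is a hypothetical name):

    #include <linux/bio.h>

    /*
     * Allocate a bio with nr_vecs biovecs from a private bio_set.
     * bio_alloc_bioset() internally strips __GFP_DIRECT_RECLAIM for
     * the first mempool attempt and restores the saved mask on retry.
     */
    static struct bio *example_alloc_bio(struct bio_set *bs,
                                         unsigned int nr_vecs)
    {
            return bio_alloc_bioset(GFP_NOIO, nr_vecs, bs);
    }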
blk-zoned.c
    69    in blkdev_report_zones() (argument): gfp_t gfp_mask)
    115   in blkdev_report_zones():            bio = bio_alloc(gfp_mask, nr_pages);
    124   in blkdev_report_zones():            page = alloc_page(gfp_mask);
    206   in blkdev_reset_zones() (argument):  gfp_t gfp_mask)
    235   in blkdev_reset_zones():             bio = bio_alloc(gfp_mask, 0);
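Both zoned helpers use gfp_mask only for their internal allocations: bio_alloc() for the report/reset bio and alloc_page() for the zone report buffer. A hedged sketch, assuming the 4.x-era signature blkdev_report_zones(bdev, sector, zones, nr_zones, gfp_mask) (example_report_zones is a hypothetical name):

    #include <linux/kernel.h>
    #include <linux/blkdev.h>
    #include <linux/blkzoned.h>

    /* Report up to 16 zones starting at sector 0 of a zoned device. */
    static int example_report_zones(struct block_device *bdev)
    {
            struct blk_zone zones[16];
            unsigned int nr_zones = ARRAY_SIZE(zones);
            int ret;

            ret = blkdev_report_zones(bdev, 0, zones, &nr_zones, GFP_KERNEL);
            if (!ret)
                    pr_info("reported %u zones\n", nr_zones);
            return ret;
    }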
blk-core.c
    701   in alloc_request_simple() (argument): static void *alloc_request_simple(gfp_t gfp_mask, void *data)
    705   in alloc_request_simple():            return kmem_cache_alloc_node(request_cachep, gfp_mask, q->node);
    713   in alloc_request_size() (argument):   static void *alloc_request_size(gfp_t gfp_mask, void *data)
    718   in alloc_request_size():              rq = kmalloc_node(sizeof(struct request) + q->cmd_size, gfp_mask,
    720   in alloc_request_size():              if (rq && q->init_rq_fn && q->init_rq_fn(q, rq, gfp_mask) < 0) {
    737   in blk_init_rl() (argument):          gfp_t gfp_mask)
    751   in blk_init_rl():                     q, gfp_mask, q->node);
    755   in blk_init_rl():                     q, gfp_mask, q->node);
    775   in blk_alloc_queue() (argument):      struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
    777   in blk_alloc_queue():                 return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
    [all …]
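Here gfp_mask appears at two levels: blk_alloc_queue() uses it once for the queue structure itself, while alloc_request_simple()/alloc_request_size() are the mempool callbacks installed by blk_init_rl(), so they see a possibly different mask on every request allocation. A minimal sketch of the queue-level call (example_alloc_queue is a hypothetical name):

    #include <linux/blkdev.h>

    /* Allocate a bare request_queue; the NUMA node defaults to NUMA_NO_NODE. */
    static struct request_queue *example_alloc_queue(void)
    {
            return blk_alloc_queue(GFP_KERNEL);
    }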
blk.h
    62          gfp_t gfp_mask);
    286         gfp_t gfp_mask);
    289         int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
    319   in create_io_context() (argument):   static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
    323   in create_io_context():              create_task_io_context(current, gfp_mask, node);
bio-integrity.c
    52    in bio_integrity_alloc() (argument): gfp_t gfp_mask,
    61    in bio_integrity_alloc():            sizeof(struct bio_vec) * nr_vecs, gfp_mask);
    64    in bio_integrity_alloc():            bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask);
    76    in bio_integrity_alloc():            bip->bip_vec = bvec_alloc(gfp_mask, nr_vecs, &idx,
    450   in bio_integrity_clone() (argument): gfp_t gfp_mask)
    457   in bio_integrity_clone():            bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt);
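bio_integrity_alloc() uses one mask for several allocations: the payload itself (inline, kmalloc, or bs->bio_integrity_pool) and its bio_vec array via bvec_alloc(). A hedged sketch, assuming the ERR_PTR return convention of this era (example_add_integrity is a hypothetical name):

    #include <linux/bio.h>
    #include <linux/err.h>

    /* Attach a one-vector integrity payload to an existing bio. */
    static int example_add_integrity(struct bio *bio)
    {
            struct bio_integrity_payload *bip;

            bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
            if (IS_ERR(bip))
                    return PTR_ERR(bip);
            return 0;
    }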
blk-ioc.c
    390   in ioc_create_icq() (argument):      gfp_t gfp_mask)
    396   in ioc_create_icq():                 icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
    401   in ioc_create_icq():                 if (radix_tree_maybe_preload(gfp_mask) < 0) {
bounce.c
    86    in mempool_alloc_pages_isa() (argument): static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
    88    in mempool_alloc_pages_isa():            return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
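mempool_alloc_pages_isa() is the standard mempool-allocator pattern: the pool passes its caller's gfp_mask through, and the callback merely constrains it, here with GFP_DMA so bounce pages land in ISA-reachable memory. A sketch of the same pattern written against plain alloc_pages() (example_alloc_page_dma is a hypothetical name):

    #include <linux/gfp.h>
    #include <linux/mempool.h>

    /* mempool_alloc_t callback forcing allocations into the DMA zone. */
    static void *example_alloc_page_dma(gfp_t gfp_mask, void *pool_data)
    {
            return alloc_pages(gfp_mask | GFP_DMA, 0);
    }

Such a callback would be installed with mempool_create(); the stock helpers mempool_alloc_pages()/mempool_free_pages() use pool_data to carry the page order, which this order-0 sketch ignores.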
blk-cgroup.c
    93    in blkg_alloc() (argument):          gfp_t gfp_mask)
    99    in blkg_alloc():                     blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
    103   in blkg_alloc():                     if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
    104   in blkg_alloc():                         blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
    114   in blkg_alloc():                     if (blk_init_rl(&blkg->rl, q, gfp_mask))
    127   in blkg_alloc():                     pd = pol->pd_alloc_fn(gfp_mask, q->node);
blk-flush.c
    504   in blkdev_issue_flush() (argument):  int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
    527   in blkdev_issue_flush():             bio = bio_alloc(gfp_mask, 0);
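blkdev_issue_flush() needs gfp_mask only for the single empty bio it submits as a flush. The classic caller-side sketch, assuming the 4.x signature with an error_sector out-parameter (example_flush is a hypothetical name):

    #include <linux/blkdev.h>

    /* Force the device's volatile write cache out to stable storage. */
    static int example_flush(struct block_device *bdev)
    {
            /* NULL: we do not care which sector a partial flush failed at. */
            return blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
    }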
elevator.c
    756   in elv_set_request() (argument):     struct bio *bio, gfp_t gfp_mask)
    764   in elv_set_request():                return e->type->ops.sq.elevator_set_req_fn(q, rq, bio, gfp_mask);
cfq-iosched.c
    4485  in cfq_set_request() (argument):     gfp_t gfp_mask)