/block/: references to gfp_mask

Each entry below lists the file, the matching source lines by number, and the enclosing function; "argument of" marks lines where gfp_mask is a function parameter. Entries truncated by the search tool end with [all …].
blk-lib.c
    26: sector_t nr_sects, gfp_t gfp_mask, int flags,    // argument of __blkdev_issue_discard()
    97: bio = blk_next_bio(bio, 0, gfp_mask);    // in __blkdev_issue_discard()
    132: sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)    // argument of blkdev_issue_discard()
    139: ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,    // in blkdev_issue_discard()
    166: sector_t nr_sects, gfp_t gfp_mask, struct page *page,    // argument of __blkdev_issue_write_same()
    191: bio = blk_next_bio(bio, 1, gfp_mask);    // in __blkdev_issue_write_same()
    227: sector_t nr_sects, gfp_t gfp_mask,    // argument of blkdev_issue_write_same()
    235: ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,    // in blkdev_issue_write_same()
    247: sector_t sector, sector_t nr_sects, gfp_t gfp_mask,    // argument of __blkdev_issue_write_zeroes()
    267: bio = blk_next_bio(bio, 0, gfp_mask);    // in __blkdev_issue_write_zeroes()
    [all …]
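These helpers thread the caller's gfp_mask into every bio that blk_next_bio() chains up. As a hedged illustration of the five-argument blkdev_issue_discard() visible at line 132, a caller might look like the sketch below; the function name and the discard range are invented:

```c
#include <linux/blkdev.h>

/* Hypothetical caller: discard 1 MiB starting at sector 2048.
 * GFP_KERNEL lets blk_next_bio() sleep while building the bio chain. */
static int example_discard(struct block_device *bdev)
{
	sector_t start = 2048;                  /* 512-byte sectors */
	sector_t nr_sects = (1024 * 1024) >> 9; /* 1 MiB worth of sectors */

	return blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL, 0);
}
```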
blk-map.c
    22: gfp_t gfp_mask)    // argument of bio_alloc_map_data()
    29: bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);    // in bio_alloc_map_data()
    131: struct iov_iter *iter, gfp_t gfp_mask)    // argument of bio_copy_user_iov()
    141: bmd = bio_alloc_map_data(iter, gfp_mask);    // in bio_copy_user_iov()
    158: bio = bio_kmalloc(gfp_mask, nr_pages);    // in bio_copy_user_iov()
    186: page = alloc_page(rq->q->bounce_gfp | gfp_mask);    // in bio_copy_user_iov()
    244: gfp_t gfp_mask)    // argument of bio_map_user_iov()
    254: bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));    // in bio_map_user_iov()
    382: unsigned int len, gfp_t gfp_mask)    // argument of bio_map_kern()
    393: bio = bio_kmalloc(gfp_mask, nr_pages);    // in bio_map_kern()
    [all …]
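Line 29 shows the common kernel idiom of sizing a flexible-array struct with struct_size() and allocating it with the caller's mask. A generic sketch of the same idiom follows; the demo_ names are invented stand-ins for bio_map_data:

```c
#include <linux/slab.h>
#include <linux/overflow.h>
#include <linux/uio.h>

/* Invented stand-in for bio_map_data: a header plus trailing iovec array. */
struct demo_map_data {
	int nr_segs;
	struct iovec iov[];
};

static struct demo_map_data *demo_alloc_map_data(int nr_segs, gfp_t gfp_mask)
{
	struct demo_map_data *bmd;

	/* struct_size() computes sizeof(*bmd) + nr_segs * sizeof(bmd->iov[0])
	 * with overflow checking, mirroring the allocation at line 29. */
	bmd = kmalloc(struct_size(bmd, iov, nr_segs), gfp_mask);
	if (bmd)
		bmd->nr_segs = nr_segs;
	return bmd;
}
```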
blk-crypto.c
    83: const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask)    // argument of bio_crypt_set_ctx()
    91: WARN_ON_ONCE(!(gfp_mask & __GFP_DIRECT_RECLAIM));    // in bio_crypt_set_ctx()
    93: bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);    // in bio_crypt_set_ctx()
    108: int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)    // argument of __bio_crypt_clone()
    110: dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);    // in __bio_crypt_clone()
    297: gfp_t gfp_mask)    // argument of __blk_crypto_rq_bio_prep()
    300: rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);    // in __blk_crypto_rq_bio_prep()
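The WARN_ON_ONCE() at line 91 documents a contract: callers of bio_crypt_set_ctx() must pass a mask that permits direct reclaim, so the mempool allocation at line 93 cannot fail. A hedged caller sketch, assuming key and dun are prepared elsewhere:

```c
#include <linux/bio.h>
#include <linux/blk-crypto.h>

/* Sketch: attach an inline-encryption context to a bio. GFP_KERNEL
 * includes __GFP_DIRECT_RECLAIM and so satisfies the WARN_ON_ONCE()
 * check; GFP_ATOMIC or GFP_NOWAIT would trip it. */
static void example_set_crypt_ctx(struct bio *bio,
				  const struct blk_crypto_key *key,
				  const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	bio_crypt_set_ctx(bio, key, dun, GFP_KERNEL);
}
```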
bio.c
    172: struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,    // argument of bvec_alloc()
    209: bvl = mempool_alloc(pool, gfp_mask);    // in bvec_alloc()
    212: gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);    // in bvec_alloc()
    226: if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {    // in bvec_alloc()
    439: struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,    // argument of bio_alloc_bioset()
    442: gfp_t saved_gfp = gfp_mask;    // in bio_alloc_bioset()
    453: p = kmalloc(struct_size(bio, bi_inline_vecs, nr_iovecs), gfp_mask);    // in bio_alloc_bioset()
    486: gfp_mask &= ~__GFP_DIRECT_RECLAIM;    // in bio_alloc_bioset()
    488: p = mempool_alloc(&bs->bio_pool, gfp_mask);    // in bio_alloc_bioset()
    489: if (!p && gfp_mask != saved_gfp) {    // in bio_alloc_bioset()
    [all …]
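Lines 212 and 226, and the saved_gfp dance at 442 through 489, show the block layer's two-step allocation pattern: try cheaply with the reclaim flags stripped, then fall back to a path that may block only if the caller's original mask allowed it. A standalone sketch of the pattern, with invented demo_ names and the cache and pool assumed to be initialized:

```c
#include <linux/slab.h>
#include <linux/mempool.h>

/* Sketch of the bvec_alloc()-style fallback: a fast slab attempt with
 * __GFP_DIRECT_RECLAIM and __GFP_IO masked off, then a mempool retry
 * with the caller's full mask if the caller permitted blocking. */
static void *demo_two_step_alloc(struct kmem_cache *cache, mempool_t *pool,
				 gfp_t gfp_mask)
{
	gfp_t fast_gfp = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);
	void *p = kmem_cache_alloc(cache, fast_gfp);

	if (unlikely(!p && (gfp_mask & __GFP_DIRECT_RECLAIM)))
		p = mempool_alloc(pool, gfp_mask); /* may sleep, won't fail */
	return p;
}
```

The point of stripping the flags first is that the fast path fails quickly under memory pressure instead of entering reclaim, leaving the guaranteed-progress mempool as the only place that ever blocks.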
bounce.c
    95: static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)    // argument of mempool_alloc_pages_isa()
    97: return mempool_alloc_pages(gfp_mask | GFP_DMA, data);    // in mempool_alloc_pages_isa()
    217: static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,    // argument of bounce_clone_bio()
    246: bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);    // in bounce_clone_bio()
    270: if (bio_crypt_clone(bio, bio_src, gfp_mask) < 0)    // in bounce_clone_bio()
    274: bio_integrity_clone(bio, bio_src, gfp_mask) < 0)    // in bounce_clone_bio()
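Line 97 narrows where pages may come from by OR-ing a zone modifier into the caller's mask rather than replacing it, which preserves the caller's reclaim and I/O policy. A one-function sketch of the same idea; the name is invented:

```c
#include <linux/gfp.h>

/* Sketch: allocate a page from the ISA DMA zone while keeping whatever
 * reclaim behaviour the caller's gfp_mask already encodes. */
static struct page *demo_dma_page(gfp_t gfp_mask)
{
	return alloc_page(gfp_mask | GFP_DMA);
}
```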
bio-integrity.c
    51: gfp_t gfp_mask,    // argument of bio_integrity_alloc()
    62: bip = kmalloc(struct_size(bip, bip_inline_vecs, nr_vecs), gfp_mask);    // in bio_integrity_alloc()
    65: bip = mempool_alloc(&bs->bio_integrity_pool, gfp_mask);    // in bio_integrity_alloc()
    77: bip->bip_vec = bvec_alloc(gfp_mask, nr_vecs, &idx,    // in bio_integrity_alloc()
    415: gfp_t gfp_mask)    // argument of bio_integrity_clone()
    422: bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt);    // in bio_integrity_clone()
blk-crypto-internal.h
    163: gfp_t gfp_mask);
    175: gfp_t gfp_mask)    // argument of blk_crypto_rq_bio_prep()
    178: return __blk_crypto_rq_bio_prep(rq, bio, gfp_mask);    // in blk_crypto_rq_bio_prep()
blk-ioc.c
    373: gfp_t gfp_mask)    // argument of ioc_create_icq()
    379: icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,    // in ioc_create_icq()
    384: if (radix_tree_maybe_preload(gfp_mask) < 0) {    // in ioc_create_icq()
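Line 384 shows the radix tree preload idiom: nodes are reserved with the caller's mask before any lock is taken, so the later insertion never has to allocate in atomic context. A generic sketch of that pattern; tree, index, and item are placeholders:

```c
#include <linux/radix-tree.h>

/* Sketch of the ioc_create_icq() preload pattern: reserve nodes up front
 * under gfp_mask, insert (typically under a spinlock) using the reserve,
 * then release the preload state. */
static int demo_radix_insert(struct radix_tree_root *tree,
			     unsigned long index, void *item, gfp_t gfp_mask)
{
	int ret;

	if (radix_tree_maybe_preload(gfp_mask) < 0)
		return -ENOMEM;

	ret = radix_tree_insert(tree, index, item);
	radix_tree_preload_end(); /* re-enables preemption */
	return ret;
}
```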
blk-flush.c
    455: int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask)    // argument of blkdev_issue_flush()
    460: bio = bio_alloc(gfp_mask, 0);    // in blkdev_issue_flush()
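With this kernel's two-argument blkdev_issue_flush(), the mask only feeds the bio_alloc() at line 460. A hypothetical caller:

```c
#include <linux/blkdev.h>

/* Sketch: issue an empty flush bio to drain the device's volatile
 * write cache. GFP_KERNEL lets bio_alloc() sleep if memory is tight. */
static int example_flush(struct block_device *bdev)
{
	return blkdev_issue_flush(bdev, GFP_KERNEL);
}
```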
blk-zoned.c
    204: gfp_t gfp_mask)    // argument of blkdev_zone_mgmt()
    234: bio = blk_next_bio(bio, 0, gfp_mask);    // in blkdev_zone_mgmt()
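Assuming the blkdev_zone_mgmt() signature of this kernel series (bdev, op, sector, nr_sectors, gfp_mask), resetting a single zone could look like the sketch below; zone_start and zone_len are placeholders:

```c
#include <linux/blkdev.h>

/* Sketch: reset one zone of a zoned block device. GFP_KERNEL feeds the
 * blk_next_bio() chain builder at line 234 above. */
static int example_zone_reset(struct block_device *bdev,
			      sector_t zone_start, sector_t zone_len)
{
	return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET, zone_start,
				zone_len, GFP_KERNEL);
}
```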
blk.h
    296: gfp_t gfp_mask);
    299: int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
blk-cgroup.c
    152: gfp_t gfp_mask)    // argument of blkg_alloc()
    158: blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);    // in blkg_alloc()
    162: if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))    // in blkg_alloc()
    165: blkg->iostat_cpu = alloc_percpu_gfp(struct blkg_iostat_set, gfp_mask);    // in blkg_alloc()
    188: pd = pol->pd_alloc_fn(gfp_mask, q, blkcg);    // in blkg_alloc()
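blkg_alloc() threads one mask through four different allocators: node-local slab, the percpu refcount, the percpu stats, and each policy's pd_alloc_fn(). A reduced sketch of that propagation, with invented demo_ types and a caller-supplied release callback:

```c
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/percpu.h>
#include <linux/percpu-refcount.h>

struct demo_stat { u64 bytes; };

struct demo_obj {
	struct percpu_ref refcnt;
	struct demo_stat __percpu *stat;
};

/* Sketch: the same gfp_mask governs every allocation in the constructor,
 * so a non-blocking caller's constraints hold across all of them. */
static struct demo_obj *demo_obj_alloc(percpu_ref_func_t *release,
				       int node, gfp_t gfp_mask)
{
	struct demo_obj *obj = kzalloc_node(sizeof(*obj), gfp_mask, node);

	if (!obj)
		return NULL;
	if (percpu_ref_init(&obj->refcnt, release, 0, gfp_mask))
		goto out_free;
	obj->stat = alloc_percpu_gfp(struct demo_stat, gfp_mask);
	if (!obj->stat)
		goto out_ref;
	return obj;

out_ref:
	percpu_ref_exit(&obj->refcnt);
out_free:
	kfree(obj);
	return NULL;
}
```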
blk-core.c
    1618: struct bio_set *bs, gfp_t gfp_mask,    // argument of blk_rq_prep_clone()
    1628: bio = bio_clone_fast(bio_src, gfp_mask, bs);    // in blk_rq_prep_clone()
    1654: if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)    // in blk_rq_prep_clone()
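The request-clone path reuses the caller's bio_set and mask for every cloned bio. A minimal sketch of the per-bio step at line 1628; the wrapper name is invented:

```c
#include <linux/bio.h>

/* Sketch: clone a bio from the caller's bio_set without copying its
 * bvec table, as blk_rq_prep_clone() does for each source bio. */
static struct bio *demo_clone_one(struct bio *bio_src, struct bio_set *bs,
				  gfp_t gfp_mask)
{
	return bio_clone_fast(bio_src, gfp_mask, bs);
}
```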