Searched refs:gfp (Results 1 – 11 of 11) sorted by relevance

/block/

bfq-cgroup.c
19 static int bfq_stat_init(struct bfq_stat *stat, gfp_t gfp) in bfq_stat_init() argument
23 ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp); in bfq_stat_init()
462 static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp) in bfqg_stats_init() argument
464 if (blkg_rwstat_init(&stats->bytes, gfp) || in bfqg_stats_init()
465 blkg_rwstat_init(&stats->ios, gfp)) in bfqg_stats_init()
469 if (blkg_rwstat_init(&stats->merged, gfp) || in bfqg_stats_init()
470 blkg_rwstat_init(&stats->service_time, gfp) || in bfqg_stats_init()
471 blkg_rwstat_init(&stats->wait_time, gfp) || in bfqg_stats_init()
472 blkg_rwstat_init(&stats->queued, gfp) || in bfqg_stats_init()
473 bfq_stat_init(&stats->time, gfp) || in bfqg_stats_init()
[all …]
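
A pattern worth noting in the bfq-cgroup.c matches: each per-cgroup stat is
backed by a percpu counter, and the caller's gfp is threaded all the way down
so the choice between sleeping and atomic allocation is made once, at the
top-level allocation site. A minimal sketch of the same idea, with
hypothetical names (my_stat, my_stat_init):

#include <linux/gfp.h>
#include <linux/percpu_counter.h>

struct my_stat {
	struct percpu_counter cpu_cnt;
};

/* gfp comes from the caller: the per-cpu backing storage is allocated
 * under whatever context the outermost allocation site permits */
static int my_stat_init(struct my_stat *stat, gfp_t gfp)
{
	return percpu_counter_init(&stat->cpu_cnt, 0, gfp);
}

bfqg_stats_init() then chains these initializers with ||, so the first
failure short-circuits straight into the error path.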

blk-ioprio.c
120 ioprio_alloc_pd(gfp_t gfp, struct request_queue *q, struct blkcg *blkcg) in ioprio_alloc_pd() argument
124 ioprio_blkg = kzalloc(sizeof(*ioprio_blkg), gfp); in ioprio_alloc_pd()
138 static struct blkcg_policy_data *ioprio_alloc_cpd(gfp_t gfp) in ioprio_alloc_cpd() argument
142 blkcg = kzalloc(sizeof(*blkcg), gfp); in ioprio_alloc_cpd()

blk-cgroup-rwstat.c
8 int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp) in blkg_rwstat_init() argument
13 ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp); in blkg_rwstat_init()
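
blkg_rwstat_init() runs the percpu_counter_init() on line 13 above once per
request direction; if counter i fails to allocate, counters 0..i-1 must be
torn down again. A sketch of that init-with-rollback loop, where NR_COUNTERS
stands in for the kernel's BLKG_RWSTAT_NR:

#include <linux/percpu_counter.h>

#define NR_COUNTERS 4	/* illustrative; one slot per direction/op */

static int counters_init(struct percpu_counter *cnt, gfp_t gfp)
{
	int i, ret;

	for (i = 0; i < NR_COUNTERS; i++) {
		ret = percpu_counter_init(&cnt[i], 0, gfp);
		if (ret) {
			/* unwind the counters that did allocate */
			while (--i >= 0)
				percpu_counter_destroy(&cnt[i]);
			return ret;
		}
	}
	return 0;
}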

blk-lib.c
13 struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp) in blk_next_bio() argument
15 struct bio *new = bio_alloc(gfp, nr_pages); in blk_next_bio()
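
blk_next_bio() lets callers split an arbitrarily large operation across
fixed-size bios: each call allocates a fresh bio with the caller's gfp, and
the previous bio is chained to it so completions roll up correctly. A sketch
consistent with the excerpt (the chain-and-submit step is not shown above and
is an assumption here):

#include <linux/bio.h>
#include <linux/blkdev.h>

static struct bio *next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		/* completion of the old bio now rolls up into the new one */
		bio_chain(bio, new);
		submit_bio(bio);
	}
	return new;
}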

blk-cgroup-rwstat.h
42 int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp);

blk-iolatency.c
958 static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp, in iolatency_pd_alloc() argument
964 iolat = kzalloc_node(sizeof(*iolat), gfp, q->node); in iolatency_pd_alloc()
968 __alignof__(struct latency_stat), gfp); in iolatency_pd_alloc()
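
Line 968 is the tail of a per-cpu allocation: iolatency pairs the node-local
kzalloc_node() with the raw per-cpu allocator, which takes an explicit size
and alignment alongside the gfp mask. A sketch of that call, assuming
__alloc_percpu_gfp() is the allocator in play (struct lat_stat is
illustrative):

#include <linux/percpu.h>
#include <linux/types.h>

struct lat_stat {
	u64 sum;
	u64 nr;
};

static struct lat_stat __percpu *lat_stat_alloc(gfp_t gfp)
{
	/* raw per-cpu allocator: size and alignment passed explicitly */
	return __alloc_percpu_gfp(sizeof(struct lat_stat),
				  __alignof__(struct lat_stat), gfp);
}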

bio.c
171 static inline gfp_t bvec_alloc_gfp(gfp_t gfp) in bvec_alloc_gfp() argument
173 return (gfp & ~(__GFP_DIRECT_RECLAIM | __GFP_IO)) | in bvec_alloc_gfp()
1497 gfp_t gfp, struct bio_set *bs) in bio_split() argument
1508 split = bio_clone_fast(bio, gfp, bs); in bio_split()
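
bvec_alloc_gfp() weakens the caller's mask for a first, opportunistic
attempt: __GFP_DIRECT_RECLAIM and __GFP_IO are cleared so the allocation
cannot recurse into the I/O path, and the continuation of line 173 (not shown
in the excerpt) adds __GFP_NOWARN | __GFP_NORETRY so the attempt fails fast
and silently. The caller can then fall back to a mempool with the original
gfp. A sketch of that two-step pattern (pool and size are illustrative, not
bio.c's actual caller):

#include <linux/gfp.h>
#include <linux/mempool.h>
#include <linux/slab.h>

static void *alloc_fast_then_pool(size_t size, gfp_t gfp, mempool_t *pool)
{
	/* opportunistic attempt: no reclaim through the I/O path,
	 * no retries, no warning on failure */
	gfp_t fast = (gfp & ~(__GFP_DIRECT_RECLAIM | __GFP_IO)) |
		     __GFP_NOWARN | __GFP_NORETRY;
	void *p = kmalloc(size, fast);

	if (p)
		return p;
	/* fall back to the guaranteed-progress mempool */
	return mempool_alloc(pool, gfp);
}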

blk.h
381 struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);

blk-mq.c
2769 gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY; in blk_mq_alloc_hctx() local
2771 hctx = kzalloc_node(blk_mq_hw_ctx_size(set), gfp, node); in blk_mq_alloc_hctx()
2775 if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node)) in blk_mq_alloc_hctx()
2796 gfp, node); in blk_mq_alloc_hctx()
2801 gfp, node, false, false)) in blk_mq_alloc_hctx()
2809 hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp); in blk_mq_alloc_hctx()
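
blk_mq_alloc_hctx() decides its mask once (GFP_NOIO so the allocation cannot
recurse into block I/O, plus __GFP_NOWARN | __GFP_NORETRY to fail softly) and
then passes both gfp and the NUMA node to every allocation, keeping the
hardware context's memory near the CPUs that will touch it. A reduced sketch
(struct my_hctx is hypothetical):

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/slab.h>

struct my_hctx {
	cpumask_var_t cpumask;
	/* ... */
};

static struct my_hctx *my_hctx_alloc(int node)
{
	gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
	struct my_hctx *hctx;

	hctx = kzalloc_node(sizeof(*hctx), gfp, node);
	if (!hctx)
		return NULL;

	/* node-local CPU mask, same gfp policy as the struct itself */
	if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node)) {
		kfree(hctx);
		return NULL;
	}
	return hctx;
}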

blk-iocost.c
2930 static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp) in ioc_cpd_alloc() argument
2934 iocc = kzalloc(sizeof(struct ioc_cgrp), gfp); in ioc_cpd_alloc()
2947 static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q, in ioc_pd_alloc() argument
2953 iocg = kzalloc_node(struct_size(iocg, ancestors, levels), gfp, q->node); in ioc_pd_alloc()
2957 iocg->pcpu_stat = alloc_percpu_gfp(struct iocg_pcpu_stat, gfp); in ioc_pd_alloc()
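
ioc_pd_alloc() combines two gfp-aware allocators: struct_size() sizes a
struct with a trailing flexible array (one slot per cgroup ancestor) for a
single overflow-checked kzalloc_node(), and alloc_percpu_gfp() allocates the
per-CPU stats under the same mask. A sketch of the shape (names are
illustrative):

#include <linux/overflow.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/types.h>

struct pcpu_stat {
	u64 count;
};

struct my_iocg {
	struct pcpu_stat __percpu *pcpu_stat;
	void *ancestors[];	/* flexible array, one entry per level */
};

static struct my_iocg *my_iocg_alloc(int levels, gfp_t gfp, int node)
{
	struct my_iocg *iocg;

	/* struct_size() = sizeof(*iocg) + levels * sizeof(void *),
	 * with overflow checking folded in */
	iocg = kzalloc_node(struct_size(iocg, ancestors, levels), gfp, node);
	if (!iocg)
		return NULL;

	iocg->pcpu_stat = alloc_percpu_gfp(struct pcpu_stat, gfp);
	if (!iocg->pcpu_stat) {
		kfree(iocg);
		return NULL;
	}
	return iocg;
}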

blk-throttle.c
490 static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, in throtl_pd_alloc() argument
497 tg = kzalloc_node(sizeof(*tg), gfp, q->node); in throtl_pd_alloc()
501 if (blkg_rwstat_init(&tg->stat_bytes, gfp)) in throtl_pd_alloc()
504 if (blkg_rwstat_init(&tg->stat_ios, gfp)) in throtl_pd_alloc()
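
throtl_pd_alloc() shows the standard goto-based unwind for multi-step
allocation: each failed blkg_rwstat_init() jumps to a label that releases
exactly what was set up before it. A sketch of that chain, reusing the
counters_init() helper sketched under blk-cgroup-rwstat.c above (struct
my_tg, counters_destroy(), and the labels are illustrative):

#include <linux/percpu_counter.h>
#include <linux/slab.h>

#define NR_COUNTERS 4	/* as in the earlier sketch */

/* counters_init() as sketched under blk-cgroup-rwstat.c above */

static void counters_destroy(struct percpu_counter *cnt)
{
	int i;

	for (i = 0; i < NR_COUNTERS; i++)
		percpu_counter_destroy(&cnt[i]);
}

struct my_tg {
	struct percpu_counter stat_bytes[NR_COUNTERS];
	struct percpu_counter stat_ios[NR_COUNTERS];
};

static struct my_tg *my_tg_alloc(gfp_t gfp, int node)
{
	struct my_tg *tg = kzalloc_node(sizeof(*tg), gfp, node);

	if (!tg)
		return NULL;
	if (counters_init(tg->stat_bytes, gfp))
		goto err_free_tg;
	if (counters_init(tg->stat_ios, gfp))
		goto err_destroy_bytes;
	return tg;

err_destroy_bytes:
	counters_destroy(tg->stat_bytes);
err_free_tg:
	kfree(tg);
	return NULL;
}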