/block/
D | bfq-wf2q.c |
    25  struct rb_node *node = tree->rb_node;  in bfq_root_active_entity() local
    27  return rb_entry(node, struct bfq_entity, rb_node);  in bfq_root_active_entity()
   312  struct bfq_entity *bfq_entity_of(struct rb_node *node)  in bfq_entity_of() argument
   316  if (node)  in bfq_entity_of()
   317  entity = rb_entry(node, struct bfq_entity, rb_node);  in bfq_entity_of()
   371  struct rb_node **node = &root->rb_node;  in bfq_insert() local
   374  while (*node) {  in bfq_insert()
   375  parent = *node;  in bfq_insert()
   379  node = &parent->rb_left;  in bfq_insert()
   381  node = &parent->rb_right;  in bfq_insert()
   [all …]
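
The bfq_insert() fragments (lines 371-381) are the kernel's standard rbtree insertion walk, and bfq_entity_of() is the matching rb_entry() container lookup. A minimal sketch of the idiom, using an illustrative struct entity and sort key rather than the real BFQ types:

    #include <linux/rbtree.h>
    #include <linux/types.h>

    struct entity {
            struct rb_node rb_node;
            u64 key;                /* sort key, e.g. a virtual finish time */
    };

    /* Standard two-phase rbtree insert: walk down to find the slot, then
     * link the new node and let the rbtree core rebalance/recolor. */
    static void entity_insert(struct rb_root *root, struct entity *new)
    {
            struct rb_node **node = &root->rb_node, *parent = NULL;

            while (*node) {
                    struct entity *cur = rb_entry(*node, struct entity, rb_node);

                    parent = *node;
                    if (new->key < cur->key)
                            node = &parent->rb_left;
                    else
                            node = &parent->rb_right;
            }
            rb_link_node(&new->rb_node, parent, node);
            rb_insert_color(&new->rb_node, root);
    }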
|
D | blk-mq-tag.c |
   465  bool round_robin, int node)  in bt_alloc() argument
   468  node);  in bt_alloc()
   474  int node, int alloc_policy)  in blk_mq_init_bitmaps() argument
   479  if (bt_alloc(bitmap_tags, depth, round_robin, node))  in blk_mq_init_bitmaps()
   481  if (bt_alloc(breserved_tags, reserved, round_robin, node))  in blk_mq_init_bitmaps()
   492  int node, int alloc_policy)  in blk_mq_init_bitmap_tags() argument
   499  node, alloc_policy);  in blk_mq_init_bitmap_tags()
   538  int node, unsigned int flags)  in blk_mq_init_tags() argument
   548  tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);  in blk_mq_init_tags()
   559  if (blk_mq_init_bitmap_tags(tags, node, alloc_policy) < 0) {  in blk_mq_init_tags()
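
bt_alloc() forwards the depth, round-robin flag, and NUMA node down to the sbitmap layer; judging from the fragments above it is a thin wrapper along these lines (a sketch, not the verbatim source):

    #include <linux/sbitmap.h>

    /* Initialize a tag bitmap on the NUMA node that will service the queue;
     * shift = -1 lets sbitmap pick its own per-word granularity. */
    static int bt_alloc_sketch(struct sbitmap_queue *bt, unsigned int depth,
                               bool round_robin, int node)
    {
            return sbitmap_queue_init_node(bt, depth, -1, round_robin,
                                           GFP_KERNEL, node);
    }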
|
D | mq-deadline.c |
   139  struct rb_node *node = rb_prev(&rq->rb_node);  in deadline_earlier_request() local
   141  if (node)  in deadline_earlier_request()
   142  return rb_entry_rq(node);  in deadline_earlier_request()
   153  struct rb_node *node = rb_next(&rq->rb_node);  in deadline_latter_request() local
   155  if (node)  in deadline_latter_request()
   156  return rb_entry_rq(node);  in deadline_latter_request()
   168  struct rb_node *node = per_prio->sort_list[data_dir].rb_node;  in deadline_from_pos() local
   171  if (!node)  in deadline_from_pos()
   174  rq = rb_entry_rq(node);  in deadline_from_pos()
   183  while (node) {  in deadline_from_pos()
   [all …]
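
deadline_earlier_request() and deadline_latter_request() are symmetric neighbor lookups in the per-priority, sector-sorted rbtree; rb_entry_rq() is the block layer's macro (include/linux/elevator.h) that maps an rb_node back to its struct request. The rb_next() flavor, sketched:

    #include <linux/blkdev.h>
    #include <linux/elevator.h>
    #include <linux/rbtree.h>

    /* Next request in sector order, or NULL at the end of the tree. */
    static struct request *latter_request_sketch(struct request *rq)
    {
            struct rb_node *node = rb_next(&rq->rb_node);

            return node ? rb_entry_rq(node) : NULL;
    }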
|
D | blk-mq.c |
  2402  int node;  in blk_mq_alloc_rq_map() local
  2404  node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);  in blk_mq_alloc_rq_map()
  2405  if (node == NUMA_NO_NODE)  in blk_mq_alloc_rq_map()
  2406  node = set->numa_node;  in blk_mq_alloc_rq_map()
  2408  tags = blk_mq_init_tags(nr_tags, reserved_tags, node, flags);  in blk_mq_alloc_rq_map()
  2414  node);  in blk_mq_alloc_rq_map()
  2422  node);  in blk_mq_alloc_rq_map()
  2433  unsigned int hctx_idx, int node)  in blk_mq_init_request() argument
  2438  ret = set->ops->init_request(set, rq, hctx_idx, node);  in blk_mq_init_request()
  2452  int node;  in blk_mq_alloc_rqs() local
  [all …]
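
blk_mq_alloc_rq_map() shows the usual NUMA fallback: map the hardware-context index to a node via the queue map, and substitute the tag set's default node when the mapping yields NUMA_NO_NODE. A sketch of the shape; note that blk_mq_hw_queue_to_node() is declared in the block layer's private block/blk-mq.h, so this only builds inside block/:

    #include <linux/blk-mq.h>
    #include "blk-mq.h"         /* blk_mq_hw_queue_to_node(), internal header */

    /* Pick the NUMA node backing hardware queue @hctx_idx, falling back
     * to the tag set's default node when no mapping exists. */
    static int hctx_numa_node_sketch(struct blk_mq_tag_set *set,
                                     unsigned int hctx_idx)
    {
            int node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT],
                                               hctx_idx);

            return node == NUMA_NO_NODE ? set->numa_node : node;
    }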
|
D | blk-ioc.c |
   250  int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)  in create_task_io_context() argument
   256  node);  in create_task_io_context()
   305  gfp_t gfp_flags, int node)  in get_task_io_context() argument
   320  } while (!create_task_io_context(task, gfp_flags, node));  in get_task_io_context()
   381  q->node);  in ioc_create_icq()
|
D | disk-events.c |
    11  struct list_head node;  /* all disk_event's */  member
   428  list_for_each_entry(ev, &disk_events, node)  in disk_events_set_dfl_poll_msecs()
   461  INIT_LIST_HEAD(&ev->node);  in disk_alloc_events()
   479  list_add_tail(&disk->ev->node, &disk_events);  in disk_add_events()
   495  list_del_init(&disk->ev->node);  in disk_del_events()
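
disk-events.c is a textbook embedded-list setup: a struct list_head member for linkage, INIT_LIST_HEAD() at allocation, list_add_tail()/list_del_init() on registration and removal, and list_for_each_entry() to walk all registered events. A compact sketch with an illustrative struct:

    #include <linux/list.h>

    static LIST_HEAD(all_events);           /* global list, like disk_events */

    struct event {
            struct list_head node;          /* linkage into all_events */
            long poll_msecs;
    };

    static void event_add(struct event *ev)
    {
            list_add_tail(&ev->node, &all_events);
    }

    static void event_del(struct event *ev)
    {
            /* list_del_init() leaves @node self-linked, so a later
             * list_empty(&ev->node) check still behaves sanely. */
            list_del_init(&ev->node);
    }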
|
D | blk-mq-tag.h |
    35  int node, unsigned int flags);
    41  int node, int alloc_policy);
|
D | blk-zoned.c |
   157  static inline unsigned long *blk_alloc_zone_bitmap(int node,  in blk_alloc_zone_bitmap() argument
   161  GFP_NOIO, node);  in blk_alloc_zone_bitmap()
   194  need_reset = blk_alloc_zone_bitmap(q->node, q->nr_zones);  in blkdev_zone_reset_all_emulated()
   532  blk_alloc_zone_bitmap(q->node, args->nr_zones);  in blk_revalidate_zone_cb()
   542  blk_alloc_zone_bitmap(q->node, args->nr_zones);  in blk_revalidate_zone_cb()
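
Given the GFP_NOIO fragment at line 161, blk_alloc_zone_bitmap() plausibly sizes a plain unsigned-long bitmap with BITS_TO_LONGS() and allocates it on the queue's home node; a sketch of that shape:

    #include <linux/bitops.h>
    #include <linux/slab.h>

    /* One bit per zone, allocated NUMA-locally; GFP_NOIO because this can
     * be called while I/O is being processed during zone revalidation. */
    static unsigned long *alloc_zone_bitmap_sketch(int node,
                                                   unsigned int nr_zones)
    {
            return kcalloc_node(BITS_TO_LONGS(nr_zones),
                                sizeof(unsigned long), GFP_NOIO, node);
    }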
|
D | blk-throttle.c |
    69  struct list_head node;  /* service_queue->queued[] */  member
    99  #define rb_entry_tg(node) rb_entry((node), struct throtl_grp, rb_node)  argument
   398  INIT_LIST_HEAD(&qn->node);  in throtl_qnode_init()
   417  if (list_empty(&qn->node)) {  in throtl_qnode_add_bio()
   418  list_add_tail(&qn->node, queued);  in throtl_qnode_add_bio()
   435  qn = list_first_entry(queued, struct throtl_qnode, node);  in throtl_peek_queued()
   464  qn = list_first_entry(queued, struct throtl_qnode, node);  in throtl_pop_queued()
   469  list_del_init(&qn->node);  in throtl_pop_queued()
   475  list_move_tail(&qn->node, queued);  in throtl_pop_queued()
   497  tg = kzalloc_node(sizeof(*tg), gfp, q->node);  in throtl_pd_alloc()
   [all …]
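
The throtl_pop_queued() lines pair list_first_entry() with either list_del_init() or list_move_tail(): rotating a still-busy qnode to the tail gives round-robin fairness between queuing nodes. A hedged sketch of that rotation (struct and parameter names are illustrative):

    #include <linux/list.h>
    #include <linux/types.h>

    struct qnode {
            struct list_head node;
            /* ... queued bios ... */
    };

    /* Take the head qnode; rotate it to the tail if it still has work,
     * otherwise unlink it entirely. */
    static struct qnode *pop_and_rotate(struct list_head *queued, bool has_more)
    {
            struct qnode *qn = list_first_entry(queued, struct qnode, node);

            if (has_more)
                    list_move_tail(&qn->node, queued);
            else
                    list_del_init(&qn->node);
            return qn;
    }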
|
D | blk-flush.c |
   464  struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,  in blk_alloc_flush_queue() argument
   470  fq = kzalloc_node(sizeof(*fq), flags, node);  in blk_alloc_flush_queue()
   477  fq->flush_rq = kzalloc_node(rq_sz, flags, node);  in blk_alloc_flush_queue()
|
D | blk.h |
    61  struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
   331  int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
|
D | bfq-iosched.h |
  1013  struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
  1057  struct bfq_entity *bfq_entity_of(struct rb_node *node);
|
D | bfq-cgroup.c |
   528  bfqg = kzalloc_node(sizeof(*bfqg), gfp, q->node);  in bfq_pd_alloc()
  1272  struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)  in bfq_create_group_hierarchy() argument
  1474  struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)  in bfq_create_group_hierarchy() argument
  1479  bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);  in bfq_create_group_hierarchy()
|
D | sed-opal.c |
   220  struct list_head node;  member
  1075  list_for_each_entry(iter, &dev->unlk_lst, node) {  in add_suspend_info()
  1077  list_del(&iter->node);  in add_suspend_info()
  1082  list_add_tail(&sus->node, &dev->unlk_lst);  in add_suspend_info()
  2124  list_for_each_entry_safe(suspend, next, &dev->unlk_lst, node) {  in clean_opal_dev()
  2125  list_del(&suspend->node);  in clean_opal_dev()
  2561  list_for_each_entry(suspend, &dev->unlk_lst, node) {  in opal_unlock_from_suspend()
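
clean_opal_dev() walks with list_for_each_entry_safe() because it deletes entries during the traversal; the _safe variant caches the next element before each iteration body runs, so list_del() cannot break the walk. Sketch:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct suspend_info {
            struct list_head node;
            /* ... saved unlock data ... */
    };

    static void free_suspend_list(struct list_head *unlk_lst)
    {
            struct suspend_info *suspend, *next;

            /* @next is fetched before the body runs, making deletion safe */
            list_for_each_entry_safe(suspend, next, unlk_lst, node) {
                    list_del(&suspend->node);
                    kfree(suspend);
            }
    }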
|
D | genhd.c |
  1336  struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass)  in __blk_alloc_disk() argument
  1341  q = blk_alloc_queue(node);  in __blk_alloc_disk()
  1345  disk = __alloc_disk_node(q, node, lkclass);  in __blk_alloc_disk()
|
D | bfq-iosched.c |
  2565  struct rb_node *parent, *node;  in bfqq_find_close() local
  2589  node = rb_next(&__bfqq->pos_node);  in bfqq_find_close()
  2591  node = rb_prev(&__bfqq->pos_node);  in bfqq_find_close()
  2592  if (!node)  in bfqq_find_close()
  2595  __bfqq = rb_entry(node, struct bfq_queue, pos_node);  in bfqq_find_close()
  5632  bfqd->queue->node);  in bfq_get_queue()
  6949  bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);  in bfq_init_queue()
  7055  bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node);  in bfq_init_queue()
|
D | kyber-iosched.c |
   365  kqd = kzalloc_node(sizeof(*kqd), GFP_KERNEL, q->node);  in kyber_queue_data_alloc()
   384  GFP_KERNEL, q->node);  in kyber_queue_data_alloc()
|
D | bio.c |
   643  static int bio_cpu_dead(unsigned int cpu, struct hlist_node *node)  in bio_cpu_dead() argument
   647  bs = hlist_entry_safe(node, struct bio_set, cpuhp_dead);  in bio_cpu_dead()
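
bio_cpu_dead() is a CPU-hotplug callback: the cpuhp core hands back the hlist_node that the bio_set registered with it, and hlist_entry_safe() recovers the containing bio_set (returning NULL for a NULL node). The container lookup, sketched:

    #include <linux/bio.h>

    static int bio_cpu_dead_sketch(unsigned int cpu, struct hlist_node *node)
    {
            /* Map the registered hlist_node back to its embedding bio_set. */
            struct bio_set *bs = hlist_entry_safe(node, struct bio_set,
                                                  cpuhp_dead);

            if (!bs)
                    return 0;
            /* ... would flush @cpu's per-cpu bio cache for @bs here ... */
            return 0;
    }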
|
D | blk-core.c |
   558  q->node = node_id;  in blk_alloc_queue()
   884  create_task_io_context(current, GFP_ATOMIC, q->node);  in submit_bio_checks()
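
blk_alloc_queue() is where the per-queue home node gets recorded (q->node = node_id); every kzalloc_node(..., q->node) in the surrounding entries (elevator.c, blk-iolatency.c, blk-cgroup.c, blk-iocost.c, the I/O schedulers) then inherits that placement. A sketch of the consumer side of the pattern:

    #include <linux/blkdev.h>
    #include <linux/slab.h>

    /* Allocate per-queue private data on the queue's home NUMA node, the
     * same way the elevator and cgroup policy data above are placed. */
    static void *queue_private_alloc(struct request_queue *q, size_t size)
    {
            return kzalloc_node(size, GFP_KERNEL, q->node);
    }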
|
D | elevator.c |
   169  eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);  in elevator_alloc()
|
D | blk-iolatency.c |
   964  iolat = kzalloc_node(sizeof(*iolat), gfp, q->node);  in iolatency_pd_alloc()
|
D | blk-cgroup.c |
   158  blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);  in blkg_alloc()
|
D | blk-iocost.c |
  2953  iocg = kzalloc_node(struct_size(iocg, ancestors, levels), gfp, q->node);  in ioc_pd_alloc()
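
ioc_pd_alloc() combines two idioms: struct_size() computes the size of a struct ending in a flexible array member with overflow saturation, and kzalloc_node() places the result on q->node. Sketched with an illustrative type:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct gq_sketch {
            int level;
            struct gq_sketch *ancestors[];  /* flexible array member */
    };

    static struct gq_sketch *gq_alloc(int levels, gfp_t gfp, int node)
    {
            struct gq_sketch *iocg;

            /* struct_size() only inspects the pointee's type (all sizeof,
             * unevaluated), so using the not-yet-assigned pointer is the
             * standard idiom; on overflow it saturates at SIZE_MAX rather
             * than wrapping around. */
            iocg = kzalloc_node(struct_size(iocg, ancestors, levels),
                                gfp, node);
            return iocg;
    }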
|