/*
 * cgroups support for the BFQ I/O scheduler.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/elevator.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "bfq-iosched.h"

#ifdef CONFIG_BFQ_GROUP_IOSCHED

/* bfqg stats flags */
enum bfqg_stats_flags {
	BFQG_stats_waiting = 0,
	BFQG_stats_idling,
	BFQG_stats_empty,
};

#define BFQG_FLAG_FNS(name)						\
static void bfqg_stats_mark_##name(struct bfqg_stats *stats)		\
{									\
	stats->flags |= (1 << BFQG_stats_##name);			\
}									\
static void bfqg_stats_clear_##name(struct bfqg_stats *stats)		\
{									\
	stats->flags &= ~(1 << BFQG_stats_##name);			\
}									\
static int bfqg_stats_##name(struct bfqg_stats *stats)			\
{									\
	return (stats->flags & (1 << BFQG_stats_##name)) != 0;		\
}									\

BFQG_FLAG_FNS(waiting)
BFQG_FLAG_FNS(idling)
BFQG_FLAG_FNS(empty)
#undef BFQG_FLAG_FNS
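
/*
 * Each BFQG_FLAG_FNS(name) invocation above expands to three small helpers:
 * bfqg_stats_mark_<name>(), bfqg_stats_clear_<name>() and bfqg_stats_<name>(),
 * which respectively set, clear and test the corresponding bit in
 * stats->flags.
 */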

/* This should be called with the scheduler lock held. */
static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
{
	unsigned long long now;

	if (!bfqg_stats_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		blkg_stat_add(&stats->group_wait_time,
			      now - stats->start_group_wait_time);
	bfqg_stats_clear_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
						 struct bfq_group *curr_bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_waiting(stats))
		return;
	if (bfqg == curr_bfqg)
		return;
	stats->start_group_wait_time = sched_clock();
	bfqg_stats_mark_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
{
	unsigned long long now;

	if (!bfqg_stats_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		blkg_stat_add(&stats->empty_time,
			      now - stats->start_empty_time);
	bfqg_stats_clear_empty(stats);
}

void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
{
	blkg_stat_add(&bfqg->stats.dequeue, 1);
}

void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (blkg_rwstat_total(&stats->queued))
		return;

	/*
	 * The group is already marked empty. This can happen if bfqq got a
	 * new request in its parent group and moved to this group while it
	 * was being added to the service tree. Just ignore the event and
	 * move on.
	 */
	if (bfqg_stats_empty(stats))
		return;

	stats->start_empty_time = sched_clock();
	bfqg_stats_mark_empty(stats);
}

void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_idling(stats)) {
		unsigned long long now = sched_clock();

		if (time_after64(now, stats->start_idle_time))
			blkg_stat_add(&stats->idle_time,
				      now - stats->start_idle_time);
		bfqg_stats_clear_idling(stats);
	}
}

void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	stats->start_idle_time = sched_clock();
	bfqg_stats_mark_idling(stats);
}

void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	blkg_stat_add(&stats->avg_queue_size_sum,
		      blkg_rwstat_total(&stats->queued));
	blkg_stat_add(&stats->avg_queue_size_samples, 1);
	bfqg_stats_update_group_wait_time(stats);
}
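
/*
 * The average queue size itself is not stored anywhere: only the sum and the
 * sample count accumulated above are kept, and the division is performed when
 * the statistic is read back (see bfqg_prfill_avg_queue_size() below).
 */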

/*
 * blk-cgroup policy-related handlers
 * The following functions help in converting between blk-cgroup
 * internal structures and BFQ-specific structures.
 */

static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct bfq_group, pd) : NULL;
}

struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
{
	return pd_to_blkg(&bfqg->pd);
}

static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
{
	return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
}

/*
 * bfq_group handlers
 * The following functions help in navigating the bfq_group hierarchy
 * by allowing to find the parent of a bfq_group or the bfq_group
 * associated to a bfq_queue.
 */

static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
{
	struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;

	return pblkg ? blkg_to_bfqg(pblkg) : NULL;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	struct bfq_entity *group_entity = bfqq->entity.parent;

	return group_entity ? container_of(group_entity, struct bfq_group,
					   entity) :
			      bfqq->bfqd->root_group;
}

/*
 * The following two functions handle get and put of a bfq_group by
 * wrapping the related blk-cgroup hooks.
 */

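/*
 * bfqg_get()/bfqg_put() act on a private refcounter (bfqg->ref), held in
 * addition to the reference on the owning blkg: the blkg may be freed by
 * blk-cgroup while bfq_queues still point to the bfqg, so the bfqg itself
 * must disappear only once no bfq_queue refers to it any longer (see the
 * big comment in bfq_bic_update_cgroup()).
 */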
static void bfqg_get(struct bfq_group *bfqg)
{
	bfqg->ref++;
}

static void bfqg_put(struct bfq_group *bfqg)
{
	bfqg->ref--;

	if (bfqg->ref == 0)
		kfree(bfqg);
}

static void bfqg_and_blkg_get(struct bfq_group *bfqg)
{
	/* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
	bfqg_get(bfqg);

	blkg_get(bfqg_to_blkg(bfqg));
}

void bfqg_and_blkg_put(struct bfq_group *bfqg)
{
	blkg_put(bfqg_to_blkg(bfqg));

	bfqg_put(bfqg);
}

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.queued, op, 1);
	bfqg_stats_end_empty_time(&bfqg->stats);
	if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
		bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
}

void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.queued, op, -1);
}

void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.merged, op, 1);
}

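/*
 * Account one request completion: @start_time is when the request entered
 * the scheduler and @io_start_time is when it was dispatched to the device,
 * so io_start_time - start_time accumulates into wait_time and
 * now - io_start_time into service_time. All timestamps come from
 * sched_clock(), as elsewhere in this file.
 */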
void bfqg_stats_update_completion(struct bfq_group *bfqg, uint64_t start_time,
				  uint64_t io_start_time, unsigned int op)
{
	struct bfqg_stats *stats = &bfqg->stats;
	unsigned long long now = sched_clock();

	if (time_after64(now, io_start_time))
		blkg_rwstat_add(&stats->service_time, op,
				now - io_start_time);
	if (time_after64(io_start_time, start_time))
		blkg_rwstat_add(&stats->wait_time, op,
				io_start_time - start_time);
}

/* @stats = 0 */
static void bfqg_stats_reset(struct bfqg_stats *stats)
{
	/* queued stats shouldn't be cleared */
	blkg_rwstat_reset(&stats->merged);
	blkg_rwstat_reset(&stats->service_time);
	blkg_rwstat_reset(&stats->wait_time);
	blkg_stat_reset(&stats->time);
	blkg_stat_reset(&stats->avg_queue_size_sum);
	blkg_stat_reset(&stats->avg_queue_size_samples);
	blkg_stat_reset(&stats->dequeue);
	blkg_stat_reset(&stats->group_wait_time);
	blkg_stat_reset(&stats->idle_time);
	blkg_stat_reset(&stats->empty_time);
}

/* @to += @from */
static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
{
	if (!to || !from)
		return;

	/* queued stats shouldn't be cleared */
	blkg_rwstat_add_aux(&to->merged, &from->merged);
	blkg_rwstat_add_aux(&to->service_time, &from->service_time);
	blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
	blkg_stat_add_aux(&to->time, &from->time);
	blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
	blkg_stat_add_aux(&to->avg_queue_size_samples,
			  &from->avg_queue_size_samples);
	blkg_stat_add_aux(&to->dequeue, &from->dequeue);
	blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
	blkg_stat_add_aux(&to->idle_time, &from->idle_time);
	blkg_stat_add_aux(&to->empty_time, &from->empty_time);
}

/*
 * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
 * recursive stats can still account for the amount used by this bfqg after
 * it's gone.
 */
static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
{
	struct bfq_group *parent;

	if (!bfqg) /* root_group */
		return;

	parent = bfqg_parent(bfqg);

	lockdep_assert_held(bfqg_to_blkg(bfqg)->q->queue_lock);

	if (unlikely(!parent))
		return;

	bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
	bfqg_stats_reset(&bfqg->stats);
}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
		/*
		 * Make sure that bfqg and its associated blkg do not
		 * disappear before entity.
		 */
		bfqg_and_blkg_get(bfqg);
	}
	entity->parent = bfqg->my_entity; /* NULL for root group */
	entity->sched_data = &bfqg->sched_data;
}

static void bfqg_stats_exit(struct bfqg_stats *stats)
{
	blkg_rwstat_exit(&stats->merged);
	blkg_rwstat_exit(&stats->service_time);
	blkg_rwstat_exit(&stats->wait_time);
	blkg_rwstat_exit(&stats->queued);
	blkg_stat_exit(&stats->time);
	blkg_stat_exit(&stats->avg_queue_size_sum);
	blkg_stat_exit(&stats->avg_queue_size_samples);
	blkg_stat_exit(&stats->dequeue);
	blkg_stat_exit(&stats->group_wait_time);
	blkg_stat_exit(&stats->idle_time);
	blkg_stat_exit(&stats->empty_time);
}

static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
{
	if (blkg_rwstat_init(&stats->merged, gfp) ||
	    blkg_rwstat_init(&stats->service_time, gfp) ||
	    blkg_rwstat_init(&stats->wait_time, gfp) ||
	    blkg_rwstat_init(&stats->queued, gfp) ||
	    blkg_stat_init(&stats->time, gfp) ||
	    blkg_stat_init(&stats->avg_queue_size_sum, gfp) ||
	    blkg_stat_init(&stats->avg_queue_size_samples, gfp) ||
	    blkg_stat_init(&stats->dequeue, gfp) ||
	    blkg_stat_init(&stats->group_wait_time, gfp) ||
	    blkg_stat_init(&stats->idle_time, gfp) ||
	    blkg_stat_init(&stats->empty_time, gfp)) {
		bfqg_stats_exit(stats);
		return -ENOMEM;
	}

	return 0;
}

static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
{
	return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
}

static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
{
	return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
}

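/*
 * The blkcg policy framework keeps two kinds of per-policy data: a
 * blkcg_policy_data (cpd, wrapped here in bfq_group_data) per blkcg, holding
 * the per-cgroup default weight, and a blkg_policy_data (pd, wrapped in
 * bfq_group) per (blkcg, request_queue) pair. The *_alloc/init/free hooks
 * below manage their lifetimes on behalf of blk-cgroup.
 */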
static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
{
	struct bfq_group_data *bgd;

	bgd = kzalloc(sizeof(*bgd), gfp);
	if (!bgd)
		return NULL;
	return &bgd->pd;
}

static void bfq_cpd_init(struct blkcg_policy_data *cpd)
{
	struct bfq_group_data *d = cpd_to_bfqgd(cpd);

	d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
		CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL;
}

static void bfq_cpd_free(struct blkcg_policy_data *cpd)
{
	kfree(cpd_to_bfqgd(cpd));
}

static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
{
	struct bfq_group *bfqg;

	bfqg = kzalloc_node(sizeof(*bfqg), gfp, node);
	if (!bfqg)
		return NULL;

	if (bfqg_stats_init(&bfqg->stats, gfp)) {
		kfree(bfqg);
		return NULL;
	}

	/* see comments in bfq_bic_update_cgroup for why refcounting */
	bfqg_get(bfqg);
	return &bfqg->pd;
}

static void bfq_pd_init(struct blkg_policy_data *pd)
{
	struct blkcg_gq *blkg = pd_to_blkg(pd);
	struct bfq_group *bfqg = blkg_to_bfqg(blkg);
	struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
	struct bfq_entity *entity = &bfqg->entity;
	struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);

	entity->orig_weight = entity->weight = entity->new_weight = d->weight;
	entity->my_sched_data = &bfqg->sched_data;
	bfqg->my_entity = entity; /*
				   * the root_group's will be set to NULL
				   * in bfq_init_queue()
				   */
	bfqg->bfqd = bfqd;
	bfqg->active_entities = 0;
	bfqg->rq_pos_tree = RB_ROOT;
}

static void bfq_pd_free(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_exit(&bfqg->stats);
	bfqg_put(bfqg);
}

static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_reset(&bfqg->stats);
}

static void bfq_group_set_parent(struct bfq_group *bfqg,
				 struct bfq_group *parent)
{
	struct bfq_entity *entity;

	entity = &bfqg->entity;
	entity->parent = parent->my_entity;
	entity->sched_data = &parent->sched_data;
}

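/*
 * Look up the bfq_group of @blkcg on @bfqd's queue. This may return NULL:
 * blk-cgroup instantiates blkgs lazily, so no bfq_group exists yet for a
 * cgroup that has not issued I/O on this device.
 */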
static struct bfq_group *bfq_lookup_bfqg(struct bfq_data *bfqd,
					 struct blkcg *blkcg)
{
	struct blkcg_gq *blkg;

	blkg = blkg_lookup(blkcg, bfqd->queue);
	if (likely(blkg))
		return blkg_to_bfqg(blkg);
	return NULL;
}

struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
				     struct blkcg *blkcg)
{
	struct bfq_group *bfqg, *parent;
	struct bfq_entity *entity;

	bfqg = bfq_lookup_bfqg(bfqd, blkcg);

	if (unlikely(!bfqg))
		return NULL;

	/*
	 * Update chain of bfq_groups as we might be handling a leaf group
	 * which, along with some of its relatives, has not been hooked yet
	 * to the private hierarchy of BFQ.
	 */
	entity = &bfqg->entity;
	for_each_entity(entity) {
		struct bfq_group *curr_bfqg = container_of(entity,
						struct bfq_group, entity);
		if (curr_bfqg != bfqd->root_group) {
			parent = bfqg_parent(curr_bfqg);
			if (!parent)
				parent = bfqd->root_group;
			bfq_group_set_parent(curr_bfqg, parent);
		}
	}

	return bfqg;
}

/**
 * bfq_bfqq_move - migrate @bfqq to @bfqg.
 * @bfqd: queue descriptor.
 * @bfqq: the queue to move.
 * @bfqg: the group to move to.
 *
 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
 * it on the new one. Avoid putting the entity on the old group idle tree.
 *
 * Must be called under the scheduler lock, to make sure that the blkg
 * owning @bfqg does not disappear (see comments in
 * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
 * objects).
 */
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg)
{
	struct bfq_entity *entity = &bfqq->entity;

	/* If bfqq is empty, then bfq_bfqq_expire also invokes
	 * bfq_del_bfqq_busy, thereby removing bfqq and its entity
	 * from data structures related to current group. Otherwise we
	 * need to remove bfqq explicitly with bfq_deactivate_bfqq, as
	 * we do below.
	 */
	if (bfqq == bfqd->in_service_queue)
		bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
				false, BFQQE_PREEMPTED);

	if (bfq_bfqq_busy(bfqq))
		bfq_deactivate_bfqq(bfqd, bfqq, false, false);
	else if (entity->on_st)
		bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
	bfqg_and_blkg_put(bfqq_group(bfqq));

	entity->parent = bfqg->my_entity;
	entity->sched_data = &bfqg->sched_data;
	/* pin down bfqg and its associated blkg */
	bfqg_and_blkg_get(bfqg);

	if (bfq_bfqq_busy(bfqq)) {
		bfq_pos_tree_add_move(bfqd, bfqq);
		bfq_activate_bfqq(bfqd, bfqq);
	}

	if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
		bfq_schedule_dispatch(bfqd);
}

/**
 * __bfq_bic_change_cgroup - move @bic to @cgroup.
 * @bfqd: the queue descriptor.
 * @bic: the bic to move.
 * @blkcg: the blk-cgroup to move to.
 *
 * Move bic to blkcg, assuming that bfqd->lock is held, which makes
 * sure that the reference to cgroup is valid across the call (see
 * comments in bfq_bic_update_cgroup on this issue)
 *
 * NOTE: an alternative approach might have been to store the current
 * cgroup in bfqq and getting a reference to it, reducing the lookup
 * time here, at the price of slightly more complex code.
 */
static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
						 struct bfq_io_cq *bic,
						 struct blkcg *blkcg)
{
	struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
	struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
	struct bfq_group *bfqg;
	struct bfq_entity *entity;

	bfqg = bfq_find_set_group(bfqd, blkcg);

	if (unlikely(!bfqg))
		bfqg = bfqd->root_group;

	if (async_bfqq) {
		entity = &async_bfqq->entity;

		if (entity->sched_data != &bfqg->sched_data) {
			bic_set_bfqq(bic, NULL, 0);
			bfq_log_bfqq(bfqd, async_bfqq,
				     "bic_change_group: %p %d",
				     async_bfqq, async_bfqq->ref);
			bfq_put_queue(async_bfqq);
		}
	}

	if (sync_bfqq) {
		entity = &sync_bfqq->entity;
		if (entity->sched_data != &bfqg->sched_data)
			bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
	}

	return bfqg;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
{
	struct bfq_data *bfqd = bic_to_bfqd(bic);
	struct bfq_group *bfqg = NULL;
	uint64_t serial_nr;

	rcu_read_lock();
	serial_nr = bio_blkcg(bio)->css.serial_nr;

	/*
	 * Check whether blkcg has changed. The condition may trigger
	 * spuriously on a newly created bic but there's no harm.
	 */
	if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
		goto out;

	bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
	/*
	 * Update blkg_path for bfq_log_* functions. We cache this
	 * path, and update it here, for the following
	 * reasons. Operations on blkg objects in blk-cgroup are
	 * protected with the request_queue lock, and not with the
	 * lock that protects the instances of this scheduler
	 * (bfqd->lock). This exposes BFQ to the following sort of
	 * race.
	 *
	 * The blkg_lookup performed in bfq_get_queue, protected
	 * through rcu, may happen to return the address of a copy of
	 * the original blkg. If this is the case, then the
	 * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
	 * the blkg, is useless: it does not prevent blk-cgroup code
	 * from destroying both the original blkg and all objects
	 * directly or indirectly referred by the copy of the
	 * blkg.
	 *
	 * On the bright side, destroy operations on a blkg invoke, as
	 * a first step, hooks of the scheduler associated with the
	 * blkg. And these hooks are executed with bfqd->lock held for
	 * BFQ. As a consequence, for any blkg associated with the
	 * request queue this instance of the scheduler is attached
	 * to, we are guaranteed that such a blkg is not destroyed, and
	 * that all the pointers it contains are consistent, while we
	 * are holding bfqd->lock. A blkg_lookup performed with
	 * bfqd->lock held then returns a fully consistent blkg, which
	 * remains consistent as long as this lock is held.
	 *
	 * Thanks to the last fact, and to the fact that: (1) bfqg has
	 * been obtained through a blkg_lookup in the above
	 * assignment, and (2) bfqd->lock is being held, here we can
	 * safely use the policy data for the involved blkg (i.e., the
	 * field bfqg->pd) to get to the blkg associated with bfqg,
	 * and then we can safely use any field of blkg. After we
	 * release bfqd->lock, even just getting blkg through this
	 * bfqg may cause dangling references to be traversed, as
	 * bfqg->pd may not exist any more.
	 *
	 * In view of the above facts, here we cache, in the bfqg, any
	 * blkg data we may need for this bic, and for its associated
	 * bfq_queue. As of now, we need to cache only the path of the
	 * blkg, which is used in the bfq_log_* functions.
	 *
	 * Finally, note that bfqg itself needs to be protected from
	 * destruction on the blkg_free of the original blkg (which
	 * invokes bfq_pd_free). We use an additional private
	 * refcounter for bfqg, to let it disappear only after no
	 * bfq_queue refers to it any longer.
	 */
	blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
	bic->blkcg_serial_nr = serial_nr;
out:
	rcu_read_unlock();
}

/**
 * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
 * @st: the service tree being flushed.
 */
static void bfq_flush_idle_tree(struct bfq_service_tree *st)
{
	struct bfq_entity *entity = st->first_idle;

	for (; entity ; entity = st->first_idle)
		__bfq_deactivate_entity(entity, false);
}

/**
 * bfq_reparent_leaf_entity - move leaf entity to the root_group.
 * @bfqd: the device data structure with the root group.
 * @entity: the entity to move.
 */
static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
				     struct bfq_entity *entity)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
}

/**
 * bfq_reparent_active_entities - move to the root group all active
 * entities.
 * @bfqd: the device data structure with the root group.
 * @bfqg: the group to move from.
 * @st: the service tree with the entities.
 */
static void bfq_reparent_active_entities(struct bfq_data *bfqd,
					 struct bfq_group *bfqg,
					 struct bfq_service_tree *st)
{
	struct rb_root *active = &st->active;
	struct bfq_entity *entity = NULL;

	if (!RB_EMPTY_ROOT(&st->active))
		entity = bfq_entity_of(rb_first(active));

	for (; entity ; entity = bfq_entity_of(rb_first(active)))
		bfq_reparent_leaf_entity(bfqd, entity);

	if (bfqg->sched_data.in_service_entity)
		bfq_reparent_leaf_entity(bfqd,
					 bfqg->sched_data.in_service_entity);
}

/**
 * bfq_pd_offline - deactivate the entity associated with @pd,
 * and reparent its children entities.
 * @pd: descriptor of the policy going offline.
 *
 * blkio already grabs the queue_lock for us, so no need to use
 * RCU-based magic
 */
static void bfq_pd_offline(struct blkg_policy_data *pd)
{
	struct bfq_service_tree *st;
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	struct bfq_data *bfqd = bfqg->bfqd;
	struct bfq_entity *entity = bfqg->my_entity;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&bfqd->lock, flags);

	if (!entity) /* root group */
		goto put_async_queues;

	/*
	 * Empty all service_trees belonging to this group before
	 * deactivating the group itself.
	 */
	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
		st = bfqg->sched_data.service_tree + i;

		/*
		 * The idle tree may still contain bfq_queues belonging
		 * to exited tasks because they never migrated to a different
		 * cgroup from the one being destroyed now.
		 */
		bfq_flush_idle_tree(st);

		/*
		 * It may happen that some queues are still active
		 * (busy) upon group destruction (if the corresponding
		 * processes have been forced to terminate). We move
		 * all the leaf entities corresponding to these queues
		 * to the root_group.
		 * Also, it may happen that the group has an entity
		 * in service, which is disconnected from the active
		 * tree: it must be moved, too.
		 * There is no need to put the sync queues, as the
		 * scheduler has taken no reference.
		 */
		bfq_reparent_active_entities(bfqd, bfqg, st);
	}

	__bfq_deactivate_entity(entity, false);

put_async_queues:
	bfq_put_async_queues(bfqd, bfqg);

	spin_unlock_irqrestore(&bfqd->lock, flags);
	/*
	 * @blkg is going offline and will be ignored by
	 * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
	 * that they don't get lost. If IOs complete after this point, the
	 * stats for them will be lost. Oh well...
	 */
	bfqg_stats_xfer_dead(bfqg);
}

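/*
 * End weight raising ("wr") for the async queues of every group attached to
 * this device, by walking the queue's blkg list, and finally for the root
 * group.
 */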
void bfq_end_wr_async(struct bfq_data *bfqd)
{
	struct blkcg_gq *blkg;

	list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		bfq_end_wr_async_queues(bfqd, bfqg);
	}
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

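/*
 * cgroup interface: read side of the weight files (both the legacy and the
 * default-hierarchy variants point here). Only the per-cgroup default weight
 * kept in bfq_group_data is reported.
 */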
static int bfq_io_show_weight(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	unsigned int val = 0;

	if (bfqgd)
		val = bfqgd->weight;

	seq_printf(sf, "%u\n", val);

	return 0;
}

static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
				    struct cftype *cftype,
				    u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	struct blkcg_gq *blkg;
	int ret = -ERANGE;

	if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
		return ret;

	ret = 0;
	spin_lock_irq(&blkcg->lock);
	bfqgd->weight = (unsigned short)val;
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		if (!bfqg)
			continue;
		/*
		 * Setting the prio_changed flag of the entity
		 * to 1 with new_weight == weight would re-set
		 * the value of the weight to its ioprio mapping.
		 * Set the flag only if necessary.
		 */
		if ((unsigned short)val != bfqg->entity.new_weight) {
			bfqg->entity.new_weight = (unsigned short)val;
			/*
			 * Make sure that the above new value has been
			 * stored in bfqg->entity.new_weight before
			 * setting the prio_changed flag. In fact,
			 * this flag may be read asynchronously (in
			 * critical sections protected by a different
			 * lock than that held here), and finding this
			 * flag set may cause the execution of the code
			 * for updating parameters whose value may
			 * depend also on bfqg->entity.new_weight (in
			 * __bfq_entity_update_weight_prio).
			 * This barrier makes sure that the new value
			 * of bfqg->entity.new_weight is correctly
			 * seen in that code.
			 */
			smp_wmb();
			bfqg->entity.prio_changed = 1;
		}
	}
	spin_unlock_irq(&blkcg->lock);

	return ret;
}

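/*
 * Write side for the default hierarchy: parse the weight from the written
 * string and reuse the legacy path above to apply it to all of the blkcg's
 * groups.
 */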
static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
				 char *buf, size_t nbytes,
				 loff_t off)
{
	u64 weight;
	/* First unsigned long found in the file is used */
	int ret = kstrtoull(strim(buf), 0, &weight);

	if (ret)
		return ret;

	ret = bfq_io_set_weight_legacy(of_css(of), NULL, weight);
	return ret ?: nbytes;
}

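/*
 * The helpers below feed blkcg_print_blkgs() with per-group printing
 * callbacks: the plain variants report only the group's own counters, while
 * the _recursive variants also include the group's descendants (live ones
 * plus the aux counters transferred from dead ones by bfqg_stats_xfer_dead()).
 */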
static int bfqg_print_stat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
			  &blkcg_policy_bfq, seq_cft(sf)->private, false);
	return 0;
}

static int bfqg_print_rwstat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
			  &blkcg_policy_bfq, seq_cft(sf)->private, true);
	return 0;
}

static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd),
					  &blkcg_policy_bfq, off);
	return __blkg_prfill_u64(sf, pd, sum);
}

static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
					struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd_to_blkg(pd),
							   &blkcg_policy_bfq,
							   off);
	return __blkg_prfill_rwstat(sf, pd, &sum);
}

static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
			  seq_cft(sf)->private, false);
	return 0;
}

static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
			  seq_cft(sf)->private, true);
	return 0;
}

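/*
 * The byte counters kept by blk-cgroup are converted to 512-byte sectors for
 * the bfq.sectors files, hence the >> 9 below.
 */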
static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes);

	return __blkg_prfill_u64(sf, pd, sum >> 9);
}

static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false);
	return 0;
}

static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
					 struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL,
					offsetof(struct blkcg_gq, stat_bytes));
	u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
		atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);

	return __blkg_prfill_u64(sf, pd, sum >> 9);
}

static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0,
			  false);
	return 0;
}

static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	u64 samples = blkg_stat_read(&bfqg->stats.avg_queue_size_samples);
	u64 v = 0;

	if (samples) {
		v = blkg_stat_read(&bfqg->stats.avg_queue_size_sum);
		v = div64_u64(v, samples);
	}
	__blkg_prfill_u64(sf, pd, v);
	return 0;
}

/* print avg_queue_size */
static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_avg_queue_size, &blkcg_policy_bfq,
			  0, false);
	return 0;
}

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	int ret;

	ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq);
	if (ret)
		return NULL;

	return blkg_to_bfqg(bfqd->queue->root_blkg);
}

struct blkcg_policy blkcg_policy_bfq = {
	.dfl_cftypes = bfq_blkg_files,
	.legacy_cftypes = bfq_blkcg_legacy_files,

	.cpd_alloc_fn = bfq_cpd_alloc,
	.cpd_init_fn = bfq_cpd_init,
	.cpd_bind_fn = bfq_cpd_init,
	.cpd_free_fn = bfq_cpd_free,

	.pd_alloc_fn = bfq_pd_alloc,
	.pd_init_fn = bfq_pd_init,
	.pd_offline_fn = bfq_pd_offline,
	.pd_free_fn = bfq_pd_free,
	.pd_reset_stats_fn = bfq_pd_reset_stats,
};

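/*
 * Legacy (cgroup v1) files, registered as legacy_cftypes, so they appear with
 * the blkio controller prefix (e.g. blkio.bfq.weight, blkio.bfq.io_serviced).
 */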
struct cftype bfq_blkcg_legacy_files[] = {
	{
		.name = "bfq.weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight,
		.write_u64 = bfq_io_set_weight_legacy,
	},

	/* statistics, covers only the tasks in the bfqg */
	{
		.name = "bfq.time",
		.private = offsetof(struct bfq_group, stats.time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.sectors",
		.seq_show = bfqg_print_stat_sectors,
	},
	{
		.name = "bfq.io_service_bytes",
		.private = (unsigned long)&blkcg_policy_bfq,
		.seq_show = blkg_print_stat_bytes,
	},
	{
		.name = "bfq.io_serviced",
		.private = (unsigned long)&blkcg_policy_bfq,
		.seq_show = blkg_print_stat_ios,
	},
	{
		.name = "bfq.io_service_time",
		.private = offsetof(struct bfq_group, stats.service_time),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_wait_time",
		.private = offsetof(struct bfq_group, stats.wait_time),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_merged",
		.private = offsetof(struct bfq_group, stats.merged),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_queued",
		.private = offsetof(struct bfq_group, stats.queued),
		.seq_show = bfqg_print_rwstat,
	},

	/* the same statistics, covering the bfqg and its descendants */
	{
		.name = "bfq.time_recursive",
		.private = offsetof(struct bfq_group, stats.time),
		.seq_show = bfqg_print_stat_recursive,
	},
	{
		.name = "bfq.sectors_recursive",
		.seq_show = bfqg_print_stat_sectors_recursive,
	},
	{
		.name = "bfq.io_service_bytes_recursive",
		.private = (unsigned long)&blkcg_policy_bfq,
		.seq_show = blkg_print_stat_bytes_recursive,
	},
	{
		.name = "bfq.io_serviced_recursive",
		.private = (unsigned long)&blkcg_policy_bfq,
		.seq_show = blkg_print_stat_ios_recursive,
	},
	{
		.name = "bfq.io_service_time_recursive",
		.private = offsetof(struct bfq_group, stats.service_time),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_wait_time_recursive",
		.private = offsetof(struct bfq_group, stats.wait_time),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_merged_recursive",
		.private = offsetof(struct bfq_group, stats.merged),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_queued_recursive",
		.private = offsetof(struct bfq_group, stats.queued),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.avg_queue_size",
		.seq_show = bfqg_print_avg_queue_size,
	},
	{
		.name = "bfq.group_wait_time",
		.private = offsetof(struct bfq_group, stats.group_wait_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.idle_time",
		.private = offsetof(struct bfq_group, stats.idle_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.empty_time",
		.private = offsetof(struct bfq_group, stats.empty_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.dequeue",
		.private = offsetof(struct bfq_group, stats.dequeue),
		.seq_show = bfqg_print_stat,
	},
	{ } /* terminate */
};

struct cftype bfq_blkg_files[] = {
	{
		.name = "bfq.weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight,
		.write = bfq_io_set_weight,
	},
	{} /* terminate */
};

#else /* CONFIG_BFQ_GROUP_IOSCHED */

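/*
 * When CONFIG_BFQ_GROUP_IOSCHED is not set, only the root group exists and
 * the hooks below collapse into no-op stubs or trivial wrappers around it,
 * so that bfq-iosched.c can call them unconditionally.
 */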
void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      unsigned int op) { }
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_completion(struct bfq_group *bfqg, uint64_t start_time,
				  uint64_t io_start_time, unsigned int op) { }
void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }

void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg) {}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
	}
	entity->sched_data = &bfqg->sched_data;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, struct blkcg *blkcg)
{
	return bfqd->root_group;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	return bfqq->bfqd->root_group;
}

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	struct bfq_group *bfqg;
	int i;

	bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
	if (!bfqg)
		return NULL;

	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
		bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;

	return bfqg;
}
#endif /* CONFIG_BFQ_GROUP_IOSCHED */