1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Hierarchical Budget Worst-case Fair Weighted Fair Queueing
4 * (B-WF2Q+): hierarchical scheduling algorithm by which the BFQ I/O
5 * scheduler schedules generic entities. The latter can represent
6 * either single bfq queues (associated with processes) or groups of
7 * bfq queues (associated with cgroups).
8 */
9 #include "bfq-iosched.h"
10
11 /**
12 * bfq_gt - compare two timestamps.
13 * @a: first ts.
14 * @b: second ts.
15 *
16 * Return @a > @b, dealing with wrapping correctly.
17 */
18 static int bfq_gt(u64 a, u64 b)
19 {
20 return (s64)(a - b) > 0;
21 }
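/*
 * For illustration (example values are hypothetical, not from the code
 * above): the cast to s64 makes the comparison robust to timestamp
 * wraparound. With a = 2 and b = U64_MAX (i.e., b was assigned just
 * before the counter wrapped), a - b evaluates to 3 in unsigned
 * arithmetic, (s64)3 > 0 holds, and bfq_gt(a, b) correctly reports that
 * a is logically later than b even though a < b numerically.
 */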
22
23 static struct bfq_entity *bfq_root_active_entity(struct rb_root *tree)
24 {
25 struct rb_node *node = tree->rb_node;
26
27 return rb_entry(node, struct bfq_entity, rb_node);
28 }
29
30 static unsigned int bfq_class_idx(struct bfq_entity *entity)
31 {
32 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
33
34 return bfqq ? bfqq->ioprio_class - 1 :
35 BFQ_DEFAULT_GRP_CLASS - 1;
36 }
37
38 unsigned int bfq_tot_busy_queues(struct bfq_data *bfqd)
39 {
40 return bfqd->busy_queues[0] + bfqd->busy_queues[1] +
41 bfqd->busy_queues[2];
42 }
43
44 static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
45 bool expiration);
46
47 static bool bfq_update_parent_budget(struct bfq_entity *next_in_service);
48
49 /**
50 * bfq_update_next_in_service - update sd->next_in_service
51 * @sd: sched_data for which to perform the update.
52 * @new_entity: if not NULL, pointer to the entity whose activation,
53 * requeueing or repositioning triggered the invocation of
54 * this function.
55 * @expiration: if true, this function is being invoked after the
56 * expiration of the in-service entity
57 *
58 * This function is called to update sd->next_in_service, which, in
59 * its turn, may change as a consequence of the insertion or
60 * extraction of an entity into/from one of the active trees of
61 * sd. These insertions/extractions occur as a consequence of
62 * activations/deactivations of entities, with some activations being
63 * 'true' activations, and other activations being requeueings (i.e.,
64 * implementing the second, requeueing phase of the mechanism used to
65 * reposition an entity in its active tree; see comments on
66 * __bfq_activate_entity and __bfq_requeue_entity for details). In
67 * both the last two activation sub-cases, new_entity points to the
68 * just activated or requeued entity.
69 *
70 * Returns true if sd->next_in_service changes in such a way that
71 * entity->parent may become the next_in_service entity for its parent
72 * entity.
73 */
74 static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
75 struct bfq_entity *new_entity,
76 bool expiration)
77 {
78 struct bfq_entity *next_in_service = sd->next_in_service;
79 bool parent_sched_may_change = false;
80 bool change_without_lookup = false;
81
82 /*
83 * If this update is triggered by the activation, requeueing
84 * or repositioning of an entity that does not coincide with
85 * sd->next_in_service, then a full lookup in the active tree
86 * can be avoided. In fact, it is enough to check whether the
87 * just-modified entity has the same priority as
88 * sd->next_in_service, is eligible and has a lower virtual
89 * finish time than sd->next_in_service. If this compound
90 * condition holds, then the new entity becomes the new
91 * next_in_service. Otherwise no change is needed.
92 */
93 if (new_entity && new_entity != sd->next_in_service) {
94 /*
95 * Flag used to decide whether to replace
96 * sd->next_in_service with new_entity. Tentatively
97 * set to true, and left as true if
98 * sd->next_in_service is NULL.
99 */
100 change_without_lookup = true;
101
102 /*
103 * If there is already a next_in_service candidate
104 * entity, then compare timestamps to decide whether
105 * to replace sd->next_in_service with new_entity.
106 */
107 if (next_in_service) {
108 unsigned int new_entity_class_idx =
109 bfq_class_idx(new_entity);
110 struct bfq_service_tree *st =
111 sd->service_tree + new_entity_class_idx;
112
113 change_without_lookup =
114 (new_entity_class_idx ==
115 bfq_class_idx(next_in_service)
116 &&
117 !bfq_gt(new_entity->start, st->vtime)
118 &&
119 bfq_gt(next_in_service->finish,
120 new_entity->finish));
121 }
122
123 if (change_without_lookup)
124 next_in_service = new_entity;
125 }
126
127 if (!change_without_lookup) /* lookup needed */
128 next_in_service = bfq_lookup_next_entity(sd, expiration);
129
130 if (next_in_service) {
131 bool new_budget_triggers_change =
132 bfq_update_parent_budget(next_in_service);
133
134 parent_sched_may_change = !sd->next_in_service ||
135 new_budget_triggers_change;
136 }
137
138 sd->next_in_service = next_in_service;
139
140 return parent_sched_may_change;
144 }
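/*
 * Illustration of the lookup-free path above (example values are
 * hypothetical): suppose sd->next_in_service is an entity of the same
 * ioprio class with finish time 200, the class service tree has
 * vtime 100, and new_entity has start 90 and finish 150. Then
 * new_entity is eligible (start <= vtime) and finishes earlier, so it
 * replaces sd->next_in_service directly, with no walk of the active
 * tree.
 */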
145
146 #ifdef CONFIG_BFQ_GROUP_IOSCHED
147
148 struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq)
149 {
150 struct bfq_entity *group_entity = bfqq->entity.parent;
151
152 if (!group_entity)
153 group_entity = &bfqq->bfqd->root_group->entity;
154
155 return container_of(group_entity, struct bfq_group, entity);
156 }
157
158 /*
159 * Returns true if this budget change may let next_in_service->parent
160 * become the next_in_service entity for its parent entity.
161 */
162 static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
163 {
164 struct bfq_entity *bfqg_entity;
165 struct bfq_group *bfqg;
166 struct bfq_sched_data *group_sd;
167 bool ret = false;
168
169 group_sd = next_in_service->sched_data;
170
171 bfqg = container_of(group_sd, struct bfq_group, sched_data);
172 /*
173 * bfq_group's my_entity field is not NULL only if the group
174 * is not the root group. We must not touch the root entity
175 * as it must never become an in-service entity.
176 */
177 bfqg_entity = bfqg->my_entity;
178 if (bfqg_entity) {
179 if (bfqg_entity->budget > next_in_service->budget)
180 ret = true;
181 bfqg_entity->budget = next_in_service->budget;
182 }
183
184 return ret;
185 }
186
187 /*
188 * This function tells whether entity stops being a candidate for next
189 * service, according to the restrictive definition of the field
190 * next_in_service. In particular, this function is invoked for an
191 * entity that is about to be set in service.
192 *
193 * If entity is a queue, then the entity is no longer a candidate for
194 * next service according to that definition, because entity is
195 * about to become the in-service queue. This function then returns
196 * true if entity is a queue.
197 *
198 * In contrast, entity could still be a candidate for next service if
199 * it is not a queue, and has more than one active child. In fact,
200 * even if one of its children is about to be set in service, other
201 * active children may still be the next to serve, for the parent
202 * entity, even according to the above definition. As a consequence, a
203 * non-queue entity is not a candidate for next-service only if it has
204 * only one active child. And only if this condition holds, then this
205 * function returns true for a non-queue entity.
206 */
207 static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
208 {
209 struct bfq_group *bfqg;
210
211 if (bfq_entity_to_bfqq(entity))
212 return true;
213
214 bfqg = container_of(entity, struct bfq_group, entity);
215
216 /*
217 * The field active_entities does not always contain the
218 * actual number of active children entities: it happens to
219 * not account for the in-service entity in case the latter is
220 * removed from its active tree (which may get done after
221 * invoking the function bfq_no_longer_next_in_service in
222 * bfq_get_next_queue). Fortunately, here, i.e., while
223 * bfq_no_longer_next_in_service is not yet completed in
224 * bfq_get_next_queue, bfq_active_extract has not yet been
225 * invoked, and thus active_entities still coincides with the
226 * actual number of active entities.
227 */
228 if (bfqg->active_entities == 1)
229 return true;
230
231 return false;
232 }
233
234 #else /* CONFIG_BFQ_GROUP_IOSCHED */
235
236 struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq)
237 {
238 return bfqq->bfqd->root_group;
239 }
240
241 static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
242 {
243 return false;
244 }
245
246 static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
247 {
248 return true;
249 }
250
251 #endif /* CONFIG_BFQ_GROUP_IOSCHED */
252
253 /*
254 * Shift for timestamp calculations. This actually limits the maximum
255 * service allowed in one timestamp delta (small shift values increase it),
256 * the maximum total weight that can be used for the queues in the system
257 * (big shift values increase it), and the period of virtual time
258 * wraparounds.
259 */
260 #define WFQ_SERVICE_SHIFT 22
261
262 struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity)
263 {
264 struct bfq_queue *bfqq = NULL;
265
266 if (!entity->my_sched_data)
267 bfqq = container_of(entity, struct bfq_queue, entity);
268
269 return bfqq;
270 }
271
272
273 /**
274 * bfq_delta - map service into the virtual time domain.
275 * @service: amount of service.
276 * @weight: scale factor (weight of an entity or weight sum).
277 */
278 static u64 bfq_delta(unsigned long service, unsigned long weight)
279 {
280 u64 d = (u64)service << WFQ_SERVICE_SHIFT;
281
282 do_div(d, weight);
283 return d;
284 }
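/*
 * Worked example (hypothetical numbers, added for illustration): with
 * WFQ_SERVICE_SHIFT = 22, an entity of weight 100 that receives 1000
 * units of service advances by (1000 << 22) / 100 = 41943040 units of
 * virtual time, while an entity of weight 200 would advance by half
 * that amount for the same service. Virtual time thus grows inversely
 * to weight, which is what makes the schedule weight-proportional.
 */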
285
286 /**
287 * bfq_calc_finish - assign the finish time to an entity.
288 * @entity: the entity to act upon.
289 * @service: the service to be charged to the entity.
290 */
291 static void bfq_calc_finish(struct bfq_entity *entity, unsigned long service)
292 {
293 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
294
295 entity->finish = entity->start +
296 bfq_delta(service, entity->weight);
297
298 if (bfqq) {
299 bfq_log_bfqq(bfqq->bfqd, bfqq,
300 "calc_finish: serv %lu, w %d",
301 service, entity->weight);
302 bfq_log_bfqq(bfqq->bfqd, bfqq,
303 "calc_finish: start %llu, finish %llu, delta %llu",
304 entity->start, entity->finish,
305 bfq_delta(service, entity->weight));
306 }
307 }
308
309 /**
310 * bfq_entity_of - get an entity from a node.
311 * @node: the node field of the entity.
312 *
313 * Convert a node pointer to the corresponding entity. This is used only
314 * to simplify the logic of some functions and not as the generic
315 * conversion mechanism because, e.g., in the tree walking functions,
316 * the check for a %NULL value would be redundant.
317 */
318 struct bfq_entity *bfq_entity_of(struct rb_node *node)
319 {
320 struct bfq_entity *entity = NULL;
321
322 if (node)
323 entity = rb_entry(node, struct bfq_entity, rb_node);
324
325 return entity;
326 }
327
328 /**
329 * bfq_extract - remove an entity from a tree.
330 * @root: the tree root.
331 * @entity: the entity to remove.
332 */
333 static void bfq_extract(struct rb_root *root, struct bfq_entity *entity)
334 {
335 entity->tree = NULL;
336 rb_erase(&entity->rb_node, root);
337 }
338
339 /**
340 * bfq_idle_extract - extract an entity from the idle tree.
341 * @st: the service tree of the owning @entity.
342 * @entity: the entity being removed.
343 */
344 static void bfq_idle_extract(struct bfq_service_tree *st,
345 struct bfq_entity *entity)
346 {
347 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
348 struct rb_node *next;
349
350 if (entity == st->first_idle) {
351 next = rb_next(&entity->rb_node);
352 st->first_idle = bfq_entity_of(next);
353 }
354
355 if (entity == st->last_idle) {
356 next = rb_prev(&entity->rb_node);
357 st->last_idle = bfq_entity_of(next);
358 }
359
360 bfq_extract(&st->idle, entity);
361
362 if (bfqq)
363 list_del(&bfqq->bfqq_list);
364 }
365
366 /**
367 * bfq_insert - generic tree insertion.
368 * @root: tree root.
369 * @entity: entity to insert.
370 *
371 * This is used for the idle and the active tree, since they are both
372 * ordered by finish time.
373 */
374 static void bfq_insert(struct rb_root *root, struct bfq_entity *entity)
375 {
376 struct bfq_entity *entry;
377 struct rb_node **node = &root->rb_node;
378 struct rb_node *parent = NULL;
379
380 while (*node) {
381 parent = *node;
382 entry = rb_entry(parent, struct bfq_entity, rb_node);
383
384 if (bfq_gt(entry->finish, entity->finish))
385 node = &parent->rb_left;
386 else
387 node = &parent->rb_right;
388 }
389
390 rb_link_node(&entity->rb_node, parent, node);
391 rb_insert_color(&entity->rb_node, root);
392
393 entity->tree = root;
394 }
395
396 /**
397 * bfq_update_min - update the min_start field of an entity.
398 * @entity: the entity to update.
399 * @node: one of its children.
400 *
401 * This function is called when @entity may store an invalid value for
402 * min_start due to updates to the active tree. The function assumes
403 * that the subtree rooted at @node (which may be its left or its right
404 * child) has a valid min_start value.
405 */
406 static void bfq_update_min(struct bfq_entity *entity, struct rb_node *node)
407 {
408 struct bfq_entity *child;
409
410 if (node) {
411 child = rb_entry(node, struct bfq_entity, rb_node);
412 if (bfq_gt(entity->min_start, child->min_start))
413 entity->min_start = child->min_start;
414 }
415 }
416
417 /**
418 * bfq_update_active_node - recalculate min_start.
419 * @node: the node to update.
420 *
421 * @node may have changed position or one of its children may have moved;
422 * this function updates its min_start value. The left and right subtrees
423 * are assumed to hold a correct min_start value.
424 */
425 static void bfq_update_active_node(struct rb_node *node)
426 {
427 struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node);
428
429 entity->min_start = entity->start;
430 bfq_update_min(entity, node->rb_right);
431 bfq_update_min(entity, node->rb_left);
432 }
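/*
 * Example of the min_start invariant being restored (values are
 * hypothetical): if the node's own start time is 10, its left child's
 * min_start is 5 and its right child's min_start is 7, the node's
 * min_start becomes 5, i.e., the smallest start time anywhere in the
 * subtree rooted at the node. bfq_first_active_entity() relies on this
 * augmented key to prune subtrees that contain no eligible entity.
 */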
433
434 /**
435 * bfq_update_active_tree - update min_start for the whole active tree.
436 * @node: the starting node.
437 *
438 * @node must be the deepest modified node after an update. This function
439 * updates its min_start using the values held by its children, assuming
440 * that they did not change, and then updates all the nodes that may have
441 * changed in the path to the root. The only nodes that may have changed
442 * are the ones in the path or their siblings.
443 */
444 static void bfq_update_active_tree(struct rb_node *node)
445 {
446 struct rb_node *parent;
447
448 up:
449 bfq_update_active_node(node);
450
451 parent = rb_parent(node);
452 if (!parent)
453 return;
454
455 if (node == parent->rb_left && parent->rb_right)
456 bfq_update_active_node(parent->rb_right);
457 else if (parent->rb_left)
458 bfq_update_active_node(parent->rb_left);
459
460 node = parent;
461 goto up;
462 }
463
464 /**
465 * bfq_active_insert - insert an entity in the active tree of its
466 * group/device.
467 * @st: the service tree of the entity.
468 * @entity: the entity being inserted.
469 *
470 * The active tree is ordered by finish time, but an extra key is kept
471 * for each node, containing the minimum value for the start times of
472 * its children (and the node itself), so it's possible to search for
473 * the eligible node with the lowest finish time in logarithmic time.
474 */
475 static void bfq_active_insert(struct bfq_service_tree *st,
476 struct bfq_entity *entity)
477 {
478 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
479 struct rb_node *node = &entity->rb_node;
480 #ifdef CONFIG_BFQ_GROUP_IOSCHED
481 struct bfq_sched_data *sd = NULL;
482 struct bfq_group *bfqg = NULL;
483 struct bfq_data *bfqd = NULL;
484 #endif
485
486 bfq_insert(&st->active, entity);
487
488 if (node->rb_left)
489 node = node->rb_left;
490 else if (node->rb_right)
491 node = node->rb_right;
492
493 bfq_update_active_tree(node);
494
495 #ifdef CONFIG_BFQ_GROUP_IOSCHED
496 sd = entity->sched_data;
497 bfqg = container_of(sd, struct bfq_group, sched_data);
498 bfqd = (struct bfq_data *)bfqg->bfqd;
499 #endif
500 if (bfqq)
501 list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list);
502 #ifdef CONFIG_BFQ_GROUP_IOSCHED
503 if (bfqg != bfqd->root_group)
504 bfqg->active_entities++;
505 #endif
506 }
507
508 /**
509 * bfq_ioprio_to_weight - calc a weight from an ioprio.
510 * @ioprio: the ioprio value to convert.
511 */
512 unsigned short bfq_ioprio_to_weight(int ioprio)
513 {
514 return (IOPRIO_BE_NR - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF;
515 }
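/*
 * For illustration, assuming the in-tree values IOPRIO_BE_NR == 8 and
 * BFQ_WEIGHT_CONVERSION_COEFF == 10: ioprio 0 (highest priority) maps
 * to weight 80, the default ioprio 4 maps to weight 40, and ioprio 7
 * (lowest priority) maps to weight 10. Lower ioprio values therefore
 * receive proportionally larger shares of the device.
 */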
516
517 /**
518 * bfq_weight_to_ioprio - calc an ioprio from a weight.
519 * @weight: the weight value to convert.
520 *
521 * To preserve as much as possible the old only-ioprio user interface,
522 * 0 is used as an escape ioprio value for weights (numerically) equal to or
523 * larger than IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF.
524 */
525 static unsigned short bfq_weight_to_ioprio(int weight)
526 {
527 return max_t(int, 0,
528 IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF - weight);
529 }
530
531 static void bfq_get_entity(struct bfq_entity *entity)
532 {
533 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
534
535 if (bfqq) {
536 bfqq->ref++;
537 bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
538 bfqq, bfqq->ref);
539 }
540 }
541
542 /**
543 * bfq_find_deepest - find the deepest node that an extraction can modify.
544 * @node: the node being removed.
545 *
546 * Do the first step of an extraction in an rb tree, looking for the
547 * node that will replace @node, and returning the deepest node that
548 * the following modifications to the tree can touch. If @node is the
549 * last node in the tree return %NULL.
550 */
551 static struct rb_node *bfq_find_deepest(struct rb_node *node)
552 {
553 struct rb_node *deepest;
554
555 if (!node->rb_right && !node->rb_left)
556 deepest = rb_parent(node);
557 else if (!node->rb_right)
558 deepest = node->rb_left;
559 else if (!node->rb_left)
560 deepest = node->rb_right;
561 else {
562 deepest = rb_next(node);
563 if (deepest->rb_right)
564 deepest = deepest->rb_right;
565 else if (rb_parent(deepest) != node)
566 deepest = rb_parent(deepest);
567 }
568
569 return deepest;
570 }
571
572 /**
573 * bfq_active_extract - remove an entity from the active tree.
574 * @st: the service_tree containing the tree.
575 * @entity: the entity being removed.
576 */
577 static void bfq_active_extract(struct bfq_service_tree *st,
578 struct bfq_entity *entity)
579 {
580 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
581 struct rb_node *node;
582 #ifdef CONFIG_BFQ_GROUP_IOSCHED
583 struct bfq_sched_data *sd = NULL;
584 struct bfq_group *bfqg = NULL;
585 struct bfq_data *bfqd = NULL;
586 #endif
587
588 node = bfq_find_deepest(&entity->rb_node);
589 bfq_extract(&st->active, entity);
590
591 if (node)
592 bfq_update_active_tree(node);
593
594 #ifdef CONFIG_BFQ_GROUP_IOSCHED
595 sd = entity->sched_data;
596 bfqg = container_of(sd, struct bfq_group, sched_data);
597 bfqd = (struct bfq_data *)bfqg->bfqd;
598 #endif
599 if (bfqq)
600 list_del(&bfqq->bfqq_list);
601 #ifdef CONFIG_BFQ_GROUP_IOSCHED
602 if (bfqg != bfqd->root_group)
603 bfqg->active_entities--;
604 #endif
605 }
606
607 /**
608 * bfq_idle_insert - insert an entity into the idle tree.
609 * @st: the service tree containing the tree.
610 * @entity: the entity to insert.
611 */
612 static void bfq_idle_insert(struct bfq_service_tree *st,
613 struct bfq_entity *entity)
614 {
615 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
616 struct bfq_entity *first_idle = st->first_idle;
617 struct bfq_entity *last_idle = st->last_idle;
618
619 if (!first_idle || bfq_gt(first_idle->finish, entity->finish))
620 st->first_idle = entity;
621 if (!last_idle || bfq_gt(entity->finish, last_idle->finish))
622 st->last_idle = entity;
623
624 bfq_insert(&st->idle, entity);
625
626 if (bfqq)
627 list_add(&bfqq->bfqq_list, &bfqq->bfqd->idle_list);
628 }
629
630 /**
631 * bfq_forget_entity - do not consider entity any longer for scheduling
632 * @st: the service tree.
633 * @entity: the entity being removed.
634 * @is_in_service: true if entity is currently the in-service entity.
635 *
636 * Forget everything about @entity. In addition, if entity represents
637 * a queue, and the latter is not in service, then release the service
638 * reference to the queue (the one taken through bfq_get_entity). In
639 * fact, in this case, there is really no more service reference to
640 * the queue, as the latter is also outside any service tree. If,
641 * instead, the queue is in service, then __bfq_bfqd_reset_in_service
642 * will take care of putting the reference when the queue finally
643 * stops being served.
644 */
645 static void bfq_forget_entity(struct bfq_service_tree *st,
646 struct bfq_entity *entity,
647 bool is_in_service)
648 {
649 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
650
651 entity->on_st = false;
652 st->wsum -= entity->weight;
653 if (bfqq && !is_in_service)
654 bfq_put_queue(bfqq);
655 }
656
657 /**
658 * bfq_put_idle_entity - release the idle tree ref of an entity.
659 * @st: service tree for the entity.
660 * @entity: the entity being released.
661 */
662 void bfq_put_idle_entity(struct bfq_service_tree *st, struct bfq_entity *entity)
663 {
664 bfq_idle_extract(st, entity);
665 bfq_forget_entity(st, entity,
666 entity == entity->sched_data->in_service_entity);
667 }
668
669 /**
670 * bfq_forget_idle - update the idle tree if necessary.
671 * @st: the service tree to act upon.
672 *
673 * To preserve the global O(log N) complexity we only remove one entry here;
674 * as the idle tree will not grow indefinitely this can be done safely.
675 */
676 static void bfq_forget_idle(struct bfq_service_tree *st)
677 {
678 struct bfq_entity *first_idle = st->first_idle;
679 struct bfq_entity *last_idle = st->last_idle;
680
681 if (RB_EMPTY_ROOT(&st->active) && last_idle &&
682 !bfq_gt(last_idle->finish, st->vtime)) {
683 /*
684 * Forget the whole idle tree, increasing the vtime past
685 * the last finish time of idle entities.
686 */
687 st->vtime = last_idle->finish;
688 }
689
690 if (first_idle && !bfq_gt(first_idle->finish, st->vtime))
691 bfq_put_idle_entity(st, first_idle);
692 }
693
694 struct bfq_service_tree *bfq_entity_service_tree(struct bfq_entity *entity)
695 {
696 struct bfq_sched_data *sched_data = entity->sched_data;
697 unsigned int idx = bfq_class_idx(entity);
698
699 return sched_data->service_tree + idx;
700 }
701
702 /*
703 * Update weight and priority of entity. If update_class_too is true,
704 * then update the ioprio_class of entity too.
705 *
706 * The reason why the update of ioprio_class is controlled through the
707 * last parameter is as follows. Changing the ioprio class of an
708 * entity implies changing the destination service trees for that
709 * entity. If such a change occurred when the entity is already on one
710 * of the service trees for its previous class, then the state of the
711 * entity would become more complex: none of the new possible service
712 * trees for the entity, according to bfq_entity_service_tree(), would
713 * match any of the possible service trees on which the entity
714 * is. Complex operations involving these trees, such as entity
715 * activations and deactivations, should take into account this
716 * additional complexity. To avoid this issue, this function is
717 * invoked with update_class_too unset in the points in the code where
718 * entity may happen to be on some tree.
719 */
720 struct bfq_service_tree *
721 __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
722 struct bfq_entity *entity,
723 bool update_class_too)
724 {
725 struct bfq_service_tree *new_st = old_st;
726
727 if (entity->prio_changed) {
728 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
729 unsigned int prev_weight, new_weight;
730 struct bfq_data *bfqd = NULL;
731 struct rb_root_cached *root;
732 #ifdef CONFIG_BFQ_GROUP_IOSCHED
733 struct bfq_sched_data *sd;
734 struct bfq_group *bfqg;
735 #endif
736
737 if (bfqq)
738 bfqd = bfqq->bfqd;
739 #ifdef CONFIG_BFQ_GROUP_IOSCHED
740 else {
741 sd = entity->my_sched_data;
742 bfqg = container_of(sd, struct bfq_group, sched_data);
743 bfqd = (struct bfq_data *)bfqg->bfqd;
744 }
745 #endif
746
747 /* Matches the smp_wmb() in bfq_group_set_weight. */
748 smp_rmb();
749 old_st->wsum -= entity->weight;
750
751 if (entity->new_weight != entity->orig_weight) {
752 if (entity->new_weight < BFQ_MIN_WEIGHT ||
753 entity->new_weight > BFQ_MAX_WEIGHT) {
754 pr_crit("update_weight_prio: new_weight %d\n",
755 entity->new_weight);
756 if (entity->new_weight < BFQ_MIN_WEIGHT)
757 entity->new_weight = BFQ_MIN_WEIGHT;
758 else
759 entity->new_weight = BFQ_MAX_WEIGHT;
760 }
761 entity->orig_weight = entity->new_weight;
762 if (bfqq)
763 bfqq->ioprio =
764 bfq_weight_to_ioprio(entity->orig_weight);
765 }
766
767 if (bfqq && update_class_too)
768 bfqq->ioprio_class = bfqq->new_ioprio_class;
769
770 /*
771 * Reset prio_changed only if the ioprio_class change
772 * is not pending any longer.
773 */
774 if (!bfqq || bfqq->ioprio_class == bfqq->new_ioprio_class)
775 entity->prio_changed = 0;
776
777 /*
778 * NOTE: here we may be changing the weight too early,
779 * this will cause unfairness. The correct approach
780 * would have required additional complexity to defer
781 * weight changes to the proper time instants (i.e.,
782 * when entity->finish <= old_st->vtime).
783 */
784 new_st = bfq_entity_service_tree(entity);
785
786 prev_weight = entity->weight;
787 new_weight = entity->orig_weight *
788 (bfqq ? bfqq->wr_coeff : 1);
789 /*
790 * If the weight of the entity changes, and the entity is a
791 * queue, remove the entity from its old weight counter (if
792 * there is a counter associated with the entity).
793 */
794 if (prev_weight != new_weight && bfqq) {
795 root = &bfqd->queue_weights_tree;
796 __bfq_weights_tree_remove(bfqd, bfqq, root);
797 }
798 entity->weight = new_weight;
799 /*
800 * Add the entity, if it is not a weight-raised queue,
801 * to the counter associated with its new weight.
802 */
803 if (prev_weight != new_weight && bfqq && bfqq->wr_coeff == 1) {
804 /* If we get here, root has been initialized. */
805 bfq_weights_tree_add(bfqd, bfqq, root);
806 }
807
808 new_st->wsum += entity->weight;
809
810 if (new_st != old_st)
811 entity->start = new_st->vtime;
812 }
813
814 return new_st;
815 }
816
817 /**
818 * bfq_bfqq_served - update the scheduler status after selection for
819 * service.
820 * @bfqq: the queue being served.
821 * @served: bytes to transfer.
822 *
823 * NOTE: this can be optimized, as the timestamps of upper level entities
824 * are synchronized every time a new bfqq is selected for service. For now,
825 * we keep it to better check consistency.
826 */
827 void bfq_bfqq_served(struct bfq_queue *bfqq, int served)
828 {
829 struct bfq_entity *entity = &bfqq->entity;
830 struct bfq_service_tree *st;
831
832 if (!bfqq->service_from_backlogged)
833 bfqq->first_IO_time = jiffies;
834
835 if (bfqq->wr_coeff > 1)
836 bfqq->service_from_wr += served;
837
838 bfqq->service_from_backlogged += served;
839 for_each_entity(entity) {
840 st = bfq_entity_service_tree(entity);
841
842 entity->service += served;
843
844 st->vtime += bfq_delta(served, st->wsum);
845 bfq_forget_idle(st);
846 }
847 bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %d secs", served);
848 }
849
850 /**
851 * bfq_bfqq_charge_time - charge an amount of service equivalent to the length
852 * of the time interval during which bfqq has been in
853 * service.
854 * @bfqd: the device
855 * @bfqq: the queue that needs a service update.
856 * @time_ms: the amount of time during which the queue has received service
857 *
858 * If a queue does not consume its budget fast enough, then providing
859 * the queue with service fairness may impair throughput, more or less
860 * severely. For this reason, queues that consume their budget slowly
861 * are provided with time fairness instead of service fairness. This
862 * goal is achieved through the BFQ scheduling engine, even if such an
863 * engine works in the service, and not in the time domain. The trick
864 * is charging these queues with an inflated amount of service, equal
865 * to the amount of service that they would have received during their
866 * service slot if they had been fast, i.e., if their requests had
867 * been dispatched at a rate equal to the estimated peak rate.
868 *
869 * It is worth noting that time fairness can cause important
870 * distortions in terms of bandwidth distribution, on devices with
871 * internal queueing. The reason is that I/O requests dispatched
872 * during the service slot of a queue may be served after that service
873 * slot is finished, and may have a total processing time loosely
874 * correlated with the duration of the service slot. This is
875 * especially true for short service slots.
876 */
877 void bfq_bfqq_charge_time(struct bfq_data *bfqd, struct bfq_queue *bfqq,
878 unsigned long time_ms)
879 {
880 struct bfq_entity *entity = &bfqq->entity;
881 unsigned long timeout_ms = jiffies_to_msecs(bfq_timeout);
882 unsigned long bounded_time_ms = min(time_ms, timeout_ms);
883 int serv_to_charge_for_time =
884 (bfqd->bfq_max_budget * bounded_time_ms) / timeout_ms;
885 int tot_serv_to_charge = max(serv_to_charge_for_time, entity->service);
886
887 /* Increase budget to avoid inconsistencies */
888 if (tot_serv_to_charge > entity->budget)
889 entity->budget = tot_serv_to_charge;
890
891 bfq_bfqq_served(bfqq,
892 max_t(int, 0, tot_serv_to_charge - entity->service));
893 }
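/*
 * Numerical sketch (hypothetical values, for illustration only): with
 * bfqd->bfq_max_budget = 16384 sectors and a budget timeout of 125 ms,
 * a slow queue that held the device for 25 ms is charged
 * 16384 * 25 / 125 = 3276 sectors of service (or its already
 * accumulated service, if that is larger), as if it had consumed one
 * fifth of the maximum budget at the estimated peak rate.
 */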
894
895 static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
896 struct bfq_service_tree *st,
897 bool backshifted)
898 {
899 struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
900
901 /*
902 * When this function is invoked, entity is not in any service
903 * tree, so it is safe to invoke the next function with the last
904 * parameter set (see the comments on the function).
905 */
906 st = __bfq_entity_update_weight_prio(st, entity, true);
907 bfq_calc_finish(entity, entity->budget);
908
909 /*
910 * If some queues enjoy backshifting for a while, then their
911 * (virtual) finish timestamps may happen to become lower and
912 * lower than the system virtual time. In particular, if
913 * these queues often happen to be idle for short time
914 * periods, and during such time periods other queues with
915 * higher timestamps happen to be busy, then the backshifted
916 * timestamps of the former queues can become much lower than
917 * the system virtual time. In fact, to serve the queues with
918 * higher timestamps while the ones with lower timestamps are
919 * idle, the system virtual time may be pushed-up to much
920 * higher values than the finish timestamps of the idle
921 * queues. As a consequence, the finish timestamps of all new
922 * or newly activated queues may end up being much larger than
923 * those of lucky queues with backshifted timestamps. The
924 * latter queues may then monopolize the device for a lot of
925 * time. This would simply break service guarantees.
926 *
927 * To reduce this problem, push up a little bit the
928 * backshifted timestamps of the queue associated with this
929 * entity (only a queue can happen to have the backshifted
930 * flag set): just enough to let the finish timestamp of the
931 * queue be equal to the current value of the system virtual
932 * time. This may introduce a little unfairness among queues
933 * with backshifted timestamps, but it does not break
934 * worst-case fairness guarantees.
935 *
936 * As a special case, if bfqq is weight-raised, push up
937 * timestamps much less, to keep very low the probability that
938 * this push up causes the backshifted finish timestamps of
939 * weight-raised queues to become higher than the backshifted
940 * finish timestamps of non weight-raised queues.
941 */
942 if (backshifted && bfq_gt(st->vtime, entity->finish)) {
943 unsigned long delta = st->vtime - entity->finish;
944
945 if (bfqq)
946 delta /= bfqq->wr_coeff;
947
948 entity->start += delta;
949 entity->finish += delta;
950 }
951
952 bfq_active_insert(st, entity);
953 }
954
955 /**
956 * __bfq_activate_entity - handle activation of entity.
957 * @entity: the entity being activated.
958 * @non_blocking_wait_rq: true if entity was waiting for a request
959 *
960 * Called for a 'true' activation, i.e., if entity is not active and
961 * one of its children receives a new request.
962 *
963 * Basically, this function updates the timestamps of entity and
964 * inserts entity into its active tree, after possibly extracting it
965 * from its idle tree.
966 */
967 static void __bfq_activate_entity(struct bfq_entity *entity,
968 bool non_blocking_wait_rq)
969 {
970 struct bfq_service_tree *st = bfq_entity_service_tree(entity);
971 bool backshifted = false;
972 unsigned long long min_vstart;
973
974 /* See comments on bfq_bfqq_update_budg_for_activation */
975 if (non_blocking_wait_rq && bfq_gt(st->vtime, entity->finish)) {
976 backshifted = true;
977 min_vstart = entity->finish;
978 } else
979 min_vstart = st->vtime;
980
981 if (entity->tree == &st->idle) {
982 /*
983 * Must be on the idle tree, bfq_idle_extract() will
984 * check for that.
985 */
986 bfq_idle_extract(st, entity);
987 entity->start = bfq_gt(min_vstart, entity->finish) ?
988 min_vstart : entity->finish;
989 } else {
990 /*
991 * The finish time of the entity may be invalid, and
992 * it is in the past for sure, otherwise the queue
993 * would have been on the idle tree.
994 */
995 entity->start = min_vstart;
996 st->wsum += entity->weight;
997 /*
998 * entity is about to be inserted into a service tree,
999 * and then set in service: get a reference to make
1000 * sure entity does not disappear until it is no
1001 * longer in service or scheduled for service.
1002 */
1003 bfq_get_entity(entity);
1004
1005 entity->on_st = true;
1006 }
1007
1008 #ifdef CONFIG_BFQ_GROUP_IOSCHED
1009 if (!bfq_entity_to_bfqq(entity)) { /* bfq_group */
1010 struct bfq_group *bfqg =
1011 container_of(entity, struct bfq_group, entity);
1012 struct bfq_data *bfqd = bfqg->bfqd;
1013
1014 if (!entity->in_groups_with_pending_reqs) {
1015 entity->in_groups_with_pending_reqs = true;
1016 bfqd->num_groups_with_pending_reqs++;
1017 }
1018 }
1019 #endif
1020
1021 bfq_update_fin_time_enqueue(entity, st, backshifted);
1022 }
1023
1024 /**
1025 * __bfq_requeue_entity - handle requeueing or repositioning of an entity.
1026 * @entity: the entity being requeued or repositioned.
1027 *
1028 * Requeueing is needed if this entity stops being served, which
1029 * happens if a leaf descendant entity has expired. On the other hand,
1030 * repositioning is needed if the next_in_service entity for the child
1031 * entity has changed. See the comments inside the function for
1032 * details.
1033 *
1034 * Basically, this function: 1) removes entity from its active tree if
1035 * present there, 2) updates the timestamps of entity and 3) inserts
1036 * entity back into its active tree (in the new, right position for
1037 * the new values of the timestamps).
1038 */
1039 static void __bfq_requeue_entity(struct bfq_entity *entity)
1040 {
1041 struct bfq_sched_data *sd = entity->sched_data;
1042 struct bfq_service_tree *st = bfq_entity_service_tree(entity);
1043
1044 if (entity == sd->in_service_entity) {
1045 /*
1046 * We are requeueing the current in-service entity,
1047 * which may have to be done for one of the following
1048 * reasons:
1049 * - entity represents the in-service queue, and the
1050 * in-service queue is being requeued after an
1051 * expiration;
1052 * - entity represents a group, and its budget has
1053 * changed because one of its child entities has
1054 * just been either activated or requeued for some
1055 * reason; the timestamps of the entity need then to
1056 * be updated, and the entity needs to be enqueued
1057 * or repositioned accordingly.
1058 *
1059 * In particular, before requeueing, the start time of
1060 * the entity must be moved forward to account for the
1061 * service that the entity has received while in
1062 * service. This is done by the next instructions. The
1063 * finish time will then be updated according to this
1064 * new value of the start time, and to the budget of
1065 * the entity.
1066 */
1067 bfq_calc_finish(entity, entity->service);
1068 entity->start = entity->finish;
1069 /*
1070 * In addition, if the entity had more than one child
1071 * when set in service, then it was not extracted from
1072 * the active tree. This implies that the position of
1073 * the entity in the active tree may need to be
1074 * changed now, because we have just updated the start
1075 * time of the entity, and we will update its finish
1076 * time in a moment (the requeueing is then, more
1077 * precisely, a repositioning in this case). To
1078 * implement this repositioning, we: 1) dequeue the
1079 * entity here, 2) update the finish time and requeue
1080 * the entity according to the new timestamps below.
1081 */
1082 if (entity->tree)
1083 bfq_active_extract(st, entity);
1084 } else { /* The entity is already active, and not in service */
1085 /*
1086 * In this case, this function gets called only if the
1087 * next_in_service entity below this entity has
1088 * changed, and this change has caused the budget of
1089 * this entity to change, which, finally implies that
1090 * the finish time of this entity must be
1091 * updated. Such an update may cause the scheduling,
1092 * i.e., the position in the active tree, of this
1093 * entity to change. We handle this change by: 1)
1094 * dequeueing the entity here, 2) updating the finish
1095 * time and requeueing the entity according to the new
1096 * timestamps below. This is the same approach as the
1097 * non-extracted-entity sub-case above.
1098 */
1099 bfq_active_extract(st, entity);
1100 }
1101
1102 bfq_update_fin_time_enqueue(entity, st, false);
1103 }
1104
1105 static void __bfq_activate_requeue_entity(struct bfq_entity *entity,
1106 struct bfq_sched_data *sd,
1107 bool non_blocking_wait_rq)
1108 {
1109 struct bfq_service_tree *st = bfq_entity_service_tree(entity);
1110
1111 if (sd->in_service_entity == entity || entity->tree == &st->active)
1112 /*
1113 * in service or already queued on the active tree,
1114 * requeue or reposition
1115 */
1116 __bfq_requeue_entity(entity);
1117 else
1118 /*
1119 * Not in service and not queued on its active tree:
1120 * the entity is idle and this is a true activation.
1121 */
1122 __bfq_activate_entity(entity, non_blocking_wait_rq);
1123 }
1124
1125
1126 /**
1127 * bfq_activate_requeue_entity - activate or requeue an entity representing a
1128 * bfq_queue, and activate, requeue or reposition
1129 * all ancestors for which such an update becomes
1130 * necessary.
1131 * @entity: the entity to activate.
1132 * @non_blocking_wait_rq: true if this entity was waiting for a request
1133 * @requeue: true if this is a requeue, which implies that bfqq is
1134 * being expired; thus ALL its ancestors stop being served and must
1135 * therefore be requeued
1136 * @expiration: true if this function is being invoked in the expiration path
1137 * of the in-service queue
1138 */
1139 static void bfq_activate_requeue_entity(struct bfq_entity *entity,
1140 bool non_blocking_wait_rq,
1141 bool requeue, bool expiration)
1142 {
1143 struct bfq_sched_data *sd;
1144
1145 for_each_entity(entity) {
1146 sd = entity->sched_data;
1147 __bfq_activate_requeue_entity(entity, sd, non_blocking_wait_rq);
1148
1149 if (!bfq_update_next_in_service(sd, entity, expiration) &&
1150 !requeue)
1151 break;
1152 }
1153 }
1154
1155 /**
1156 * __bfq_deactivate_entity - update sched_data and service trees for
1157 * entity, so as to represent entity as inactive
1158 * @entity: the entity being deactivated.
1159 * @ins_into_idle_tree: if false, the entity will not be put into the
1160 * idle tree.
1161 *
1162 * If necessary and allowed, puts entity into the idle tree. NOTE:
1163 * entity may be on no tree if in service.
1164 */
1165 bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
1166 {
1167 struct bfq_sched_data *sd = entity->sched_data;
1168 struct bfq_service_tree *st;
1169 bool is_in_service;
1170
1171 if (!entity->on_st) /* entity never activated, or already inactive */
1172 return false;
1173
1174 /*
1175 * If we get here, then entity is active, which implies that
1176 * bfq_group_set_parent has already been invoked for the group
1177 * represented by entity. Therefore, the field
1178 * entity->sched_data has been set, and we can safely use it.
1179 */
1180 st = bfq_entity_service_tree(entity);
1181 is_in_service = entity == sd->in_service_entity;
1182
1183 bfq_calc_finish(entity, entity->service);
1184
1185 if (is_in_service)
1186 sd->in_service_entity = NULL;
1187 else
1188 /*
1189 * Non in-service entity: nobody will take care of
1190 * resetting its service counter on expiration. Do it
1191 * now.
1192 */
1193 entity->service = 0;
1194
1195 if (entity->tree == &st->active)
1196 bfq_active_extract(st, entity);
1197 else if (!is_in_service && entity->tree == &st->idle)
1198 bfq_idle_extract(st, entity);
1199
1200 if (!ins_into_idle_tree || !bfq_gt(entity->finish, st->vtime))
1201 bfq_forget_entity(st, entity, is_in_service);
1202 else
1203 bfq_idle_insert(st, entity);
1204
1205 return true;
1206 }
1207
1208 /**
1209 * bfq_deactivate_entity - deactivate an entity representing a bfq_queue.
1210 * @entity: the entity to deactivate.
1211 * @ins_into_idle_tree: true if the entity can be put into the idle tree
1212 * @expiration: true if this function is being invoked in the expiration path
1213 * of the in-service queue
1214 */
1215 static void bfq_deactivate_entity(struct bfq_entity *entity,
1216 bool ins_into_idle_tree,
1217 bool expiration)
1218 {
1219 struct bfq_sched_data *sd;
1220 struct bfq_entity *parent = NULL;
1221
1222 for_each_entity_safe(entity, parent) {
1223 sd = entity->sched_data;
1224
1225 if (!__bfq_deactivate_entity(entity, ins_into_idle_tree)) {
1226 /*
1227 * entity is not in any tree any more, so
1228 * this deactivation is a no-op, and there is
1229 * nothing to change for upper-level entities
1230 * (in case of expiration, this can never
1231 * happen).
1232 */
1233 return;
1234 }
1235
1236 if (sd->next_in_service == entity)
1237 /*
1238 * entity was the next_in_service entity,
1239 * then, since entity has just been
1240 * deactivated, a new one must be found.
1241 */
1242 bfq_update_next_in_service(sd, NULL, expiration);
1243
1244 if (sd->next_in_service || sd->in_service_entity) {
1245 /*
1246 * The parent entity is still active, because
1247 * either next_in_service or in_service_entity
1248 * is not NULL. So, no further upwards
1249 * deactivation must be performed. Yet,
1250 * next_in_service has changed. Then the
1251 * schedule does need to be updated upwards.
1252 *
1253 * NOTE If in_service_entity is not NULL, then
1254 * next_in_service may happen to be NULL,
1255 * although the parent entity is evidently
1256 * active. This happens if 1) the entity
1257 * pointed by in_service_entity is the only
1258 * active entity in the parent entity, and 2)
1259 * according to the definition of
1260 * next_in_service, the in_service_entity
1261 * cannot be considered as
1262 * next_in_service. See the comments on the
1263 * definition of next_in_service for details.
1264 */
1265 break;
1266 }
1267
1268 /*
1269 * If we get here, then the parent is no more
1270 * backlogged and we need to propagate the
1271 * deactivation upwards. Thus let the loop go on.
1272 */
1273
1274 /*
1275 * Also let parent be queued into the idle tree on
1276 * deactivation, to preserve service guarantees, and
1277 * assuming that who invoked this function does not
1278 * need parent entities too to be removed completely.
1279 */
1280 ins_into_idle_tree = true;
1281 }
1282
1283 /*
1284 * If the deactivation loop is fully executed, then there are
1285 * no more entities to touch and next loop is not executed at
1286 * all. Otherwise, requeue remaining entities if they are
1287 * about to stop receiving service, or reposition them if this
1288 * is not the case.
1289 */
1290 entity = parent;
1291 for_each_entity(entity) {
1292 /*
1293 * Invoke __bfq_requeue_entity on entity, even if
1294 * already active, to requeue/reposition it in the
1295 * active tree (because sd->next_in_service has
1296 * changed)
1297 */
1298 __bfq_requeue_entity(entity);
1299
1300 sd = entity->sched_data;
1301 if (!bfq_update_next_in_service(sd, entity, expiration) &&
1302 !expiration)
1303 /*
1304 * next_in_service unchanged or not causing
1305 * any change in entity->parent->sd, and no
1306 * requeueing needed for expiration: stop
1307 * here.
1308 */
1309 break;
1310 }
1311 }
1312
1313 /**
1314 * bfq_calc_vtime_jump - compute the value to which the vtime should jump,
1315 * if needed, to have at least one entity eligible.
1316 * @st: the service tree to act upon.
1317 *
1318 * Assumes that st is not empty.
1319 */
1320 static u64 bfq_calc_vtime_jump(struct bfq_service_tree *st)
1321 {
1322 struct bfq_entity *root_entity = bfq_root_active_entity(&st->active);
1323
1324 if (bfq_gt(root_entity->min_start, st->vtime))
1325 return root_entity->min_start;
1326
1327 return st->vtime;
1328 }
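/*
 * Example (hypothetical values): if st->vtime is 100 but the smallest
 * start time in the active tree (the root's min_start) is 180, the
 * returned value is 180; __bfq_lookup_next_entity() may then push the
 * virtual time forward to that value so that at least one entity
 * becomes eligible. If some entity already has start <= 100, the
 * current vtime is returned unchanged.
 */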
1329
1330 static void bfq_update_vtime(struct bfq_service_tree *st, u64 new_value)
1331 {
1332 if (new_value > st->vtime) {
1333 st->vtime = new_value;
1334 bfq_forget_idle(st);
1335 }
1336 }
1337
1338 /**
1339 * bfq_first_active_entity - find the eligible entity with
1340 * the smallest finish time
1341 * @st: the service tree to select from.
1342 * @vtime: the system virtual time to use as a reference for eligibility
1343 *
1344 * This function searches for the first schedulable entity, starting from
1345 * the root of the tree and descending to the left whenever the left subtree
1346 * contains at least one eligible (start <= vtime) entity. The path on
1347 * the right is followed only if a) the left subtree contains no eligible
1348 * entities and b) no eligible entity has been found yet.
1349 */
1350 static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st,
1351 u64 vtime)
1352 {
1353 struct bfq_entity *entry, *first = NULL;
1354 struct rb_node *node = st->active.rb_node;
1355
1356 while (node) {
1357 entry = rb_entry(node, struct bfq_entity, rb_node);
1358 left:
1359 if (!bfq_gt(entry->start, vtime))
1360 first = entry;
1361
1362 if (node->rb_left) {
1363 entry = rb_entry(node->rb_left,
1364 struct bfq_entity, rb_node);
1365 if (!bfq_gt(entry->min_start, vtime)) {
1366 node = node->rb_left;
1367 goto left;
1368 }
1369 }
1370 if (first)
1371 break;
1372 node = node->rb_right;
1373 }
1374
1375 return first;
1376 }
1377
1378 /**
1379 * __bfq_lookup_next_entity - return the first eligible entity in @st.
1380 * @st: the service tree.
1381 *
1382 * If there is no in-service entity for the sched_data st belongs to,
1383 * then return the entity that will be set in service if:
1384 * 1) the parent entity this st belongs to is set in service;
1385 * 2) no entity belonging to such parent entity undergoes a state change
1386 * that would influence the timestamps of the entity (e.g., becomes idle,
1387 * becomes backlogged, changes its budget, ...).
1388 *
1389 * In this first case, update the virtual time in @st too (see the
1390 * comments on this update inside the function).
1391 *
1392 * In contrast, if there is an in-service entity, then return the
1393 * entity that would be set in service if not only the above
1394 * conditions, but also the next one held true: the currently
1395 * in-service entity, on expiration,
1396 * 1) gets a finish time equal to the current one, or
1397 * 2) is not eligible any more, or
1398 * 3) is idle.
1399 */
1400 static struct bfq_entity *
1401 __bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service)
1402 {
1403 struct bfq_entity *entity;
1404 u64 new_vtime;
1405
1406 if (RB_EMPTY_ROOT(&st->active))
1407 return NULL;
1408
1409 /*
1410 * Get the value of the system virtual time for which at
1411 * least one entity is eligible.
1412 */
1413 new_vtime = bfq_calc_vtime_jump(st);
1414
1415 /*
1416 * If there is no in-service entity for the sched_data this
1417 * active tree belongs to, then push the system virtual time
1418 * up to the value that guarantees that at least one entity is
1419 * eligible. If, instead, there is an in-service entity, then
1420 * do not make any such update, because there is already an
1421 * eligible entity, namely the in-service one (even if the
1422 * entity is not on st, because it was extracted when set in
1423 * service).
1424 */
1425 if (!in_service)
1426 bfq_update_vtime(st, new_vtime);
1427
1428 entity = bfq_first_active_entity(st, new_vtime);
1429
1430 return entity;
1431 }
1432
1433 /**
1434 * bfq_lookup_next_entity - return the first eligible entity in @sd.
1435 * @sd: the sched_data.
1436 * @expiration: true if we are on the expiration path of the in-service queue
1437 *
1438 * This function is invoked when there has been a change in the trees
1439 * for sd, and we need to know what is the new next entity to serve
1440 * after this change.
1441 */
1442 static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
1443 bool expiration)
1444 {
1445 struct bfq_service_tree *st = sd->service_tree;
1446 struct bfq_service_tree *idle_class_st = st + (BFQ_IOPRIO_CLASSES - 1);
1447 struct bfq_entity *entity = NULL;
1448 int class_idx = 0;
1449
1450 /*
1451 * Choose from idle class, if needed to guarantee a minimum
1452 * bandwidth to this class (and if there is some active entity
1453 * in idle class). This should also mitigate
1454 * priority-inversion problems in case a low priority task is
1455 * holding file system resources.
1456 */
1457 if (time_is_before_jiffies(sd->bfq_class_idle_last_service +
1458 BFQ_CL_IDLE_TIMEOUT)) {
1459 if (!RB_EMPTY_ROOT(&idle_class_st->active))
1460 class_idx = BFQ_IOPRIO_CLASSES - 1;
1461 /* About to be served if backlogged, or not yet backlogged */
1462 sd->bfq_class_idle_last_service = jiffies;
1463 }
1464
1465 /*
1466 * Find the next entity to serve for the highest-priority
1467 * class, unless the idle class needs to be served.
1468 */
1469 for (; class_idx < BFQ_IOPRIO_CLASSES; class_idx++) {
1470 /*
1471 * If expiration is true, then bfq_lookup_next_entity
1472 * is being invoked as a part of the expiration path
1473 * of the in-service queue. In this case, even if
1474 * sd->in_service_entity is not NULL,
1475 * sd->in_service_entity at this point is actually not
1476 * in service any more, and, if needed, has already
1477 * been properly queued or requeued into the right
1478 * tree. The reason why sd->in_service_entity is still
1479 * not NULL here, even if expiration is true, is that
1480 * sd->in_service_entity is reset as a last step in the
1481 * expiration path. So, if expiration is true, tell
1482 * __bfq_lookup_next_entity that there is no
1483 * sd->in_service_entity.
1484 */
1485 entity = __bfq_lookup_next_entity(st + class_idx,
1486 sd->in_service_entity &&
1487 !expiration);
1488
1489 if (entity)
1490 break;
1491 }
1492
1493 return entity;
1497 }
1498
1499 bool next_queue_may_preempt(struct bfq_data *bfqd)
1500 {
1501 struct bfq_sched_data *sd = &bfqd->root_group->sched_data;
1502
1503 return sd->next_in_service != sd->in_service_entity;
1504 }
1505
1506 /*
1507 * Get next queue for service.
1508 */
1509 struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
1510 {
1511 struct bfq_entity *entity = NULL;
1512 struct bfq_sched_data *sd;
1513 struct bfq_queue *bfqq;
1514
1515 if (bfq_tot_busy_queues(bfqd) == 0)
1516 return NULL;
1517
1518 /*
1519 * Traverse the path from the root to the leaf entity to
1520 * serve. Set in service all the entities visited along the
1521 * way.
1522 */
1523 sd = &bfqd->root_group->sched_data;
1524 for (; sd ; sd = entity->my_sched_data) {
1525 /*
1526 * WARNING. We are about to set the in-service entity
1527 * to sd->next_in_service, i.e., to the (cached) value
1528 * returned by bfq_lookup_next_entity(sd) the last
1529 * time it was invoked, i.e., the last time when the
1530 * service order in sd changed as a consequence of the
1531 * activation or deactivation of an entity. In this
1532 * respect, if we execute bfq_lookup_next_entity(sd)
1533 * in this very moment, it may, although with low
1534 * probability, yield a different entity than that
1535 * pointed to by sd->next_in_service. This rare event
1536 * happens in case there was no CLASS_IDLE entity to
1537 * serve for sd when bfq_lookup_next_entity(sd) was
1538 * invoked for the last time, while there is now one
1539 * such entity.
1540 *
1541 * If the above event happens, then the scheduling of
1542 * such entity in CLASS_IDLE is postponed until the
1543 * service of the sd->next_in_service entity
1544 * finishes. In fact, when the latter is expired,
1545 * bfq_lookup_next_entity(sd) gets called again,
1546 * exactly to update sd->next_in_service.
1547 */
1548
1549 /* Make next_in_service entity become in_service_entity */
1550 entity = sd->next_in_service;
1551 sd->in_service_entity = entity;
1552
1553 /*
1554 * If entity is no longer a candidate for next
1555 * service, then it must be extracted from its active
1556 * tree, so as to make sure that it won't be
1557 * considered when computing next_in_service. See the
1558 * comments on the function
1559 * bfq_no_longer_next_in_service() for details.
1560 */
1561 if (bfq_no_longer_next_in_service(entity))
1562 bfq_active_extract(bfq_entity_service_tree(entity),
1563 entity);
1564
1565 /*
1566 * Even if entity is not to be extracted according to
1567 * the above check, a descendant entity may get
1568 * extracted in one of the next iterations of this
1569 * loop. Such an event could cause a change in
1570 * next_in_service for the level of the descendant
1571 * entity, and thus possibly back to this level.
1572 *
1573 * However, we cannot perform the resulting needed
1574 * update of next_in_service for this level before the
1575 * end of the whole loop, because, to know which is
1576 * the correct next-to-serve candidate entity for each
1577 * level, we need first to find the leaf entity to set
1578 * in service. In fact, only after we know which is
1579 * the next-to-serve leaf entity, we can discover
1580 * whether the parent entity of the leaf entity
1581 * becomes the next-to-serve, and so on.
1582 */
1583 }
1584
1585 bfqq = bfq_entity_to_bfqq(entity);
1586
1587 /*
1588 * We can finally update all next-to-serve entities along the
1589 * path from the leaf entity just set in service to the root.
1590 */
1591 for_each_entity(entity) {
1592 struct bfq_sched_data *sd = entity->sched_data;
1593
1594 if (!bfq_update_next_in_service(sd, NULL, false))
1595 break;
1596 }
1597
1598 return bfqq;
1599 }
1600
1601 /* returns true if the in-service queue gets freed */
1602 bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
1603 {
1604 struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue;
1605 struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity;
1606 struct bfq_entity *entity = in_serv_entity;
1607
1608 bfq_clear_bfqq_wait_request(in_serv_bfqq);
1609 hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
1610 bfqd->in_service_queue = NULL;
1611
1612 /*
1613 * When this function is called, all in-service entities have
1614 * been properly deactivated or requeued, so we can safely
1615 * execute the final step: reset in_service_entity along the
1616 * path from entity to the root.
1617 */
1618 for_each_entity(entity)
1619 entity->sched_data->in_service_entity = NULL;
1620
1621 /*
1622 * in_serv_entity is no longer in service, so, if it is in no
1623 * service tree either, then release the service reference to
1624 * the queue it represents (taken with bfq_get_entity).
1625 */
1626 if (!in_serv_entity->on_st) {
1627 /*
1628 * If no process is referencing in_serv_bfqq any
1629 * longer, then the service reference may be the only
1630 * reference to the queue. If this is the case, then
1631 * bfqq gets freed here.
1632 */
1633 int ref = in_serv_bfqq->ref;
1634 bfq_put_queue(in_serv_bfqq);
1635 if (ref == 1)
1636 return true;
1637 }
1638
1639 return false;
1640 }
1641
1642 void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1643 bool ins_into_idle_tree, bool expiration)
1644 {
1645 struct bfq_entity *entity = &bfqq->entity;
1646
1647 bfq_deactivate_entity(entity, ins_into_idle_tree, expiration);
1648 }
1649
1650 void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
1651 {
1652 struct bfq_entity *entity = &bfqq->entity;
1653
1654 bfq_activate_requeue_entity(entity, bfq_bfqq_non_blocking_wait_rq(bfqq),
1655 false, false);
1656 bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
1657 }
1658
1659 void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1660 bool expiration)
1661 {
1662 struct bfq_entity *entity = &bfqq->entity;
1663
1664 bfq_activate_requeue_entity(entity, false,
1665 bfqq == bfqd->in_service_queue, expiration);
1666 }
1667
1668 /*
1669 * Called when the bfqq no longer has requests pending; remove it from
1670 * the service tree. As a special case, it can be invoked during an
1671 * expiration.
1672 */
1673 void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1674 bool expiration)
1675 {
1676 bfq_log_bfqq(bfqd, bfqq, "del from busy");
1677
1678 bfq_clear_bfqq_busy(bfqq);
1679
1680 bfqd->busy_queues[bfqq->ioprio_class - 1]--;
1681
1682 if (bfqq->wr_coeff > 1)
1683 bfqd->wr_busy_queues--;
1684
1685 bfqg_stats_update_dequeue(bfqq_group(bfqq));
1686
1687 bfq_deactivate_bfqq(bfqd, bfqq, true, expiration);
1688
1689 if (!bfqq->dispatched)
1690 bfq_weights_tree_remove(bfqd, bfqq);
1691 }
1692
1693 /*
1694 * Called when an inactive queue receives a new request.
1695 */
1696 void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
1697 {
1698 bfq_log_bfqq(bfqd, bfqq, "add to busy");
1699
1700 bfq_activate_bfqq(bfqd, bfqq);
1701
1702 bfq_mark_bfqq_busy(bfqq);
1703 bfqd->busy_queues[bfqq->ioprio_class - 1]++;
1704
1705 if (!bfqq->dispatched)
1706 if (bfqq->wr_coeff == 1)
1707 bfq_weights_tree_add(bfqd, bfqq,
1708 &bfqd->queue_weights_tree);
1709
1710 if (bfqq->wr_coeff > 1)
1711 bfqd->wr_busy_queues++;
1712 }
1713