1  // SPDX-License-Identifier: GPL-2.0-or-later
2  /*
3   * Hierarchical Budget Worst-case Fair Weighted Fair Queueing
4   * (B-WF2Q+): hierarchical scheduling algorithm by which the BFQ I/O
5   * scheduler schedules generic entities. The latter can represent
6   * either single bfq queues (associated with processes) or groups of
7   * bfq queues (associated with cgroups).
8   */
9  #include "bfq-iosched.h"
10  
11  /**
12   * bfq_gt - compare two timestamps.
13   * @a: first ts.
14   * @b: second ts.
15   *
16   * Return @a > @b, dealing with wrapping correctly.
17   */
18  static int bfq_gt(u64 a, u64 b)
19  {
20  	return (s64)(a - b) > 0;
21  }
22  
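/*
 * Return the entity at the root node of an active tree. Callers such as
 * bfq_calc_vtime_jump() invoke this only on non-empty trees, so the root
 * node is not checked for NULL here.
 */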
23  static struct bfq_entity *bfq_root_active_entity(struct rb_root *tree)
24  {
25  	struct rb_node *node = tree->rb_node;
26  
27  	return rb_entry(node, struct bfq_entity, rb_node);
28  }
29  
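/*
 * Return the zero-based service-class index of an entity: the ioprio
 * class of the associated queue if the entity represents a queue, or
 * BFQ_DEFAULT_GRP_CLASS if it represents a group.
 */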
30  static unsigned int bfq_class_idx(struct bfq_entity *entity)
31  {
32  	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
33  
34  	return bfqq ? bfqq->ioprio_class - 1 :
35  		BFQ_DEFAULT_GRP_CLASS - 1;
36  }
37  
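/*
 * Return the total number of busy queues on the device, summed over
 * the three ioprio classes.
 */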
38  unsigned int bfq_tot_busy_queues(struct bfq_data *bfqd)
39  {
40  	return bfqd->busy_queues[0] + bfqd->busy_queues[1] +
41  		bfqd->busy_queues[2];
42  }
43  
44  static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
45  						 bool expiration);
46  
47  static bool bfq_update_parent_budget(struct bfq_entity *next_in_service);
48  
49  /**
50   * bfq_update_next_in_service - update sd->next_in_service
51   * @sd: sched_data for which to perform the update.
52   * @new_entity: if not NULL, pointer to the entity whose activation,
53   *		requeueing or repositioning triggered the invocation of
54   *		this function.
55   * @expiration: if true, this function is being invoked after the
56   *             expiration of the in-service entity
57   *
58   * This function is called to update sd->next_in_service, which, in
59   * its turn, may change as a consequence of the insertion or
60   * extraction of an entity into/from one of the active trees of
61   * sd. These insertions/extractions occur as a consequence of
62   * activations/deactivations of entities, with some activations being
63   * 'true' activations, and other activations being requeueings (i.e.,
64   * implementing the second, requeueing phase of the mechanism used to
65   * reposition an entity in its active tree; see comments on
66   * __bfq_activate_entity and __bfq_requeue_entity for details). In
67   * both the last two activation sub-cases, new_entity points to the
68   * just activated or requeued entity.
69   *
70   * Returns true if sd->next_in_service changes in such a way that
71   * entity->parent may become the next_in_service entity for its parent
72   * entity.
73   */
74  static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
75  				       struct bfq_entity *new_entity,
76  				       bool expiration)
77  {
78  	struct bfq_entity *next_in_service = sd->next_in_service;
79  	bool parent_sched_may_change = false;
80  	bool change_without_lookup = false;
81  
82  	/*
83  	 * If this update is triggered by the activation, requeueing
84  	 * or repositioning of an entity that does not coincide with
85  	 * sd->next_in_service, then a full lookup in the active tree
86  	 * can be avoided. In fact, it is enough to check whether the
87  	 * just-modified entity has the same priority as
88  	 * sd->next_in_service, is eligible and has a lower virtual
89  	 * finish time than sd->next_in_service. If this compound
90  	 * condition holds, then the new entity becomes the new
91  	 * next_in_service. Otherwise no change is needed.
92  	 */
93  	if (new_entity && new_entity != sd->next_in_service) {
94  		/*
95  		 * Flag used to decide whether to replace
96  		 * sd->next_in_service with new_entity. Tentatively
97  		 * set to true, and left as true if
98  		 * sd->next_in_service is NULL.
99  		 */
100  		change_without_lookup = true;
101  
102  		/*
103  		 * If there is already a next_in_service candidate
104  		 * entity, then compare timestamps to decide whether
105  		 * to replace sd->next_in_service with new_entity.
106  		 */
107  		if (next_in_service) {
108  			unsigned int new_entity_class_idx =
109  				bfq_class_idx(new_entity);
110  			struct bfq_service_tree *st =
111  				sd->service_tree + new_entity_class_idx;
112  
113  			change_without_lookup =
114  				(new_entity_class_idx ==
115  				 bfq_class_idx(next_in_service)
116  				 &&
117  				 !bfq_gt(new_entity->start, st->vtime)
118  				 &&
119  				 bfq_gt(next_in_service->finish,
120  					new_entity->finish));
121  		}
122  
123  		if (change_without_lookup)
124  			next_in_service = new_entity;
125  	}
126  
127  	if (!change_without_lookup) /* lookup needed */
128  		next_in_service = bfq_lookup_next_entity(sd, expiration);
129  
130  	if (next_in_service) {
131  		bool new_budget_triggers_change =
132  			bfq_update_parent_budget(next_in_service);
133  
134  		parent_sched_may_change = !sd->next_in_service ||
135  			new_budget_triggers_change;
136  	}
137  
138  	sd->next_in_service = next_in_service;
139  
140  	return parent_sched_may_change;
141  }
142  
143  #ifdef CONFIG_BFQ_GROUP_IOSCHED
144  
145  /*
146   * Returns true if this budget change may let next_in_service->parent
147   * become the next_in_service entity for its parent entity.
148   */
149  static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
150  {
151  	struct bfq_entity *bfqg_entity;
152  	struct bfq_group *bfqg;
153  	struct bfq_sched_data *group_sd;
154  	bool ret = false;
155  
156  	group_sd = next_in_service->sched_data;
157  
158  	bfqg = container_of(group_sd, struct bfq_group, sched_data);
159  	/*
160  	 * bfq_group's my_entity field is not NULL only if the group
161  	 * is not the root group. We must not touch the root entity
162  	 * as it must never become an in-service entity.
163  	 */
164  	bfqg_entity = bfqg->my_entity;
165  	if (bfqg_entity) {
166  		if (bfqg_entity->budget > next_in_service->budget)
167  			ret = true;
168  		bfqg_entity->budget = next_in_service->budget;
169  	}
170  
171  	return ret;
172  }
173  
174  /*
175   * This function tells whether entity stops being a candidate for next
176   * service, according to the restrictive definition of the field
177   * next_in_service. In particular, this function is invoked for an
178   * entity that is about to be set in service.
179   *
180   * If entity is a queue, then the entity is no longer a candidate for
181   * next service according to that definition, because entity is
182   * about to become the in-service queue. This function then returns
183   * true if entity is a queue.
184   *
185   * In contrast, entity could still be a candidate for next service if
186   * it is not a queue, and has more than one active child. In fact,
187   * even if one of its children is about to be set in service, other
188   * active children may still be the next to serve, for the parent
189   * entity, even according to the above definition. As a consequence, a
190   * non-queue entity is not a candidate for next-service only if it has
191   * only one active child. Only if this condition holds does this
192   * function return true for a non-queue entity.
193   */
194  static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
195  {
196  	struct bfq_group *bfqg;
197  
198  	if (bfq_entity_to_bfqq(entity))
199  		return true;
200  
201  	bfqg = container_of(entity, struct bfq_group, entity);
202  
203  	/*
204  	 * The field active_entities does not always contain the
205  	 * actual number of active children entities: it happens to
206  	 * not account for the in-service entity in case the latter is
207  	 * removed from its active tree (which may get done after
208  	 * invoking the function bfq_no_longer_next_in_service in
209  	 * bfq_get_next_queue). Fortunately, here, i.e., while
210  	 * bfq_no_longer_next_in_service is not yet completed in
211  	 * bfq_get_next_queue, bfq_active_extract has not yet been
212  	 * invoked, and thus active_entities still coincides with the
213  	 * actual number of active entities.
214  	 */
215  	if (bfqg->active_entities == 1)
216  		return true;
217  
218  	return false;
219  }
220  
221  #else /* CONFIG_BFQ_GROUP_IOSCHED */
222  
223  static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
224  {
225  	return false;
226  }
227  
228  static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
229  {
230  	return true;
231  }
232  
233  #endif /* CONFIG_BFQ_GROUP_IOSCHED */
234  
235  /*
236   * Shift for timestamp calculations.  This actually limits the maximum
237   * service allowed in one timestamp delta (small shift values increase it),
238   * the maximum total weight that can be used for the queues in the system
239   * (big shift values increase it), and the period of virtual time
240   * wraparounds.
241   */
242  #define WFQ_SERVICE_SHIFT	22
243  
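/*
 * Return the bfq_queue associated with the entity, or NULL if the
 * entity has its own sched_data, i.e., if it represents a group.
 */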
244  struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity)
245  {
246  	struct bfq_queue *bfqq = NULL;
247  
248  	if (!entity->my_sched_data)
249  		bfqq = container_of(entity, struct bfq_queue, entity);
250  
251  	return bfqq;
252  }
253  
254  
255  /**
256   * bfq_delta - map service into the virtual time domain.
257   * @service: amount of service.
258   * @weight: scale factor (weight of an entity or weight sum).
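 *
 * For example, with WFQ_SERVICE_SHIFT equal to 22, charging 4096 units
 * of service to an entity of weight 100 advances virtual time by
 * (4096 << 22) / 100 = 171798691 units.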
259   */
260  static u64 bfq_delta(unsigned long service, unsigned long weight)
261  {
262  	return div64_ul((u64)service << WFQ_SERVICE_SHIFT, weight);
263  }
264  
265  /**
266   * bfq_calc_finish - assign the finish time to an entity.
267   * @entity: the entity to act upon.
268   * @service: the service to be charged to the entity.
269   */
270  static void bfq_calc_finish(struct bfq_entity *entity, unsigned long service)
271  {
272  	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
273  
274  	entity->finish = entity->start +
275  		bfq_delta(service, entity->weight);
276  
277  	if (bfqq) {
278  		bfq_log_bfqq(bfqq->bfqd, bfqq,
279  			"calc_finish: serv %lu, w %d",
280  			service, entity->weight);
281  		bfq_log_bfqq(bfqq->bfqd, bfqq,
282  			"calc_finish: start %llu, finish %llu, delta %llu",
283  			entity->start, entity->finish,
284  			bfq_delta(service, entity->weight));
285  	}
286  }
287  
288  /**
289   * bfq_entity_of - get an entity from a node.
290   * @node: the node field of the entity.
291   *
292   * Convert a node pointer to the relative entity.  This is used only
293   * to simplify the logic of some functions and not as the generic
294   * conversion mechanism because, e.g., in the tree walking functions,
295   * the check for a %NULL value would be redundant.
296   */
297  struct bfq_entity *bfq_entity_of(struct rb_node *node)
298  {
299  	struct bfq_entity *entity = NULL;
300  
301  	if (node)
302  		entity = rb_entry(node, struct bfq_entity, rb_node);
303  
304  	return entity;
305  }
306  
307  /**
308   * bfq_extract - remove an entity from a tree.
309   * @root: the tree root.
310   * @entity: the entity to remove.
311   */
312  static void bfq_extract(struct rb_root *root, struct bfq_entity *entity)
313  {
314  	entity->tree = NULL;
315  	rb_erase(&entity->rb_node, root);
316  }
317  
318  /**
319   * bfq_idle_extract - extract an entity from the idle tree.
320   * @st: the service tree of the owning @entity.
321   * @entity: the entity being removed.
322   */
323  static void bfq_idle_extract(struct bfq_service_tree *st,
324  			     struct bfq_entity *entity)
325  {
326  	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
327  	struct rb_node *next;
328  
329  	if (entity == st->first_idle) {
330  		next = rb_next(&entity->rb_node);
331  		st->first_idle = bfq_entity_of(next);
332  	}
333  
334  	if (entity == st->last_idle) {
335  		next = rb_prev(&entity->rb_node);
336  		st->last_idle = bfq_entity_of(next);
337  	}
338  
339  	bfq_extract(&st->idle, entity);
340  
341  	if (bfqq)
342  		list_del(&bfqq->bfqq_list);
343  }
344  
345  /**
346   * bfq_insert - generic tree insertion.
347   * @root: tree root.
348   * @entity: entity to insert.
349   *
350   * This is used for the idle and the active tree, since they are both
351   * ordered by finish time.
352   */
353  static void bfq_insert(struct rb_root *root, struct bfq_entity *entity)
354  {
355  	struct bfq_entity *entry;
356  	struct rb_node **node = &root->rb_node;
357  	struct rb_node *parent = NULL;
358  
359  	while (*node) {
360  		parent = *node;
361  		entry = rb_entry(parent, struct bfq_entity, rb_node);
362  
363  		if (bfq_gt(entry->finish, entity->finish))
364  			node = &parent->rb_left;
365  		else
366  			node = &parent->rb_right;
367  	}
368  
369  	rb_link_node(&entity->rb_node, parent, node);
370  	rb_insert_color(&entity->rb_node, root);
371  
372  	entity->tree = root;
373  }
374  
375  /**
376   * bfq_update_min - update the min_start field of an entity.
377   * @entity: the entity to update.
378   * @node: one of its children.
379   *
380   * This function is called when @entity may store an invalid value for
381   * min_start due to updates to the active tree.  The function assumes
382   * that the subtree rooted at @node (which may be its left or its right
383   * child) has a valid min_start value.
384   */
385  static void bfq_update_min(struct bfq_entity *entity, struct rb_node *node)
386  {
387  	struct bfq_entity *child;
388  
389  	if (node) {
390  		child = rb_entry(node, struct bfq_entity, rb_node);
391  		if (bfq_gt(entity->min_start, child->min_start))
392  			entity->min_start = child->min_start;
393  	}
394  }
395  
396  /**
397   * bfq_update_active_node - recalculate min_start.
398   * @node: the node to update.
399   *
400   * @node may have changed position or one of its children may have moved;
401   * this function updates its min_start value.  The left and right subtrees
402   * are assumed to hold a correct min_start value.
403   */
404  static void bfq_update_active_node(struct rb_node *node)
405  {
406  	struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node);
407  
408  	entity->min_start = entity->start;
409  	bfq_update_min(entity, node->rb_right);
410  	bfq_update_min(entity, node->rb_left);
411  }
412  
413  /**
414   * bfq_update_active_tree - update min_start for the whole active tree.
415   * @node: the starting node.
416   *
417   * @node must be the deepest modified node after an update.  This function
418   * updates its min_start using the values held by its children, assuming
419   * that they did not change, and then updates all the nodes that may have
420   * changed in the path to the root.  The only nodes that may have changed
421   * are the ones in the path or their siblings.
422   */
423  static void bfq_update_active_tree(struct rb_node *node)
424  {
425  	struct rb_node *parent;
426  
427  up:
428  	bfq_update_active_node(node);
429  
430  	parent = rb_parent(node);
431  	if (!parent)
432  		return;
433  
434  	if (node == parent->rb_left && parent->rb_right)
435  		bfq_update_active_node(parent->rb_right);
436  	else if (parent->rb_left)
437  		bfq_update_active_node(parent->rb_left);
438  
439  	node = parent;
440  	goto up;
441  }
442  
443  /**
444   * bfq_active_insert - insert an entity in the active tree of its
445   *                     group/device.
446   * @st: the service tree of the entity.
447   * @entity: the entity being inserted.
448   *
449   * The active tree is ordered by finish time, but an extra key is kept
450   * per each node, containing the minimum value for the start times of
451   * its children (and the node itself), so it's possible to search for
452   * the eligible node with the lowest finish time in logarithmic time.
453   */
454  static void bfq_active_insert(struct bfq_service_tree *st,
455  			      struct bfq_entity *entity)
456  {
457  	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
458  	struct rb_node *node = &entity->rb_node;
459  #ifdef CONFIG_BFQ_GROUP_IOSCHED
460  	struct bfq_sched_data *sd = NULL;
461  	struct bfq_group *bfqg = NULL;
462  	struct bfq_data *bfqd = NULL;
463  #endif
464  
465  	bfq_insert(&st->active, entity);
466  
467  	if (node->rb_left)
468  		node = node->rb_left;
469  	else if (node->rb_right)
470  		node = node->rb_right;
471  
472  	bfq_update_active_tree(node);
473  
474  #ifdef CONFIG_BFQ_GROUP_IOSCHED
475  	sd = entity->sched_data;
476  	bfqg = container_of(sd, struct bfq_group, sched_data);
477  	bfqd = (struct bfq_data *)bfqg->bfqd;
478  #endif
479  	if (bfqq)
480  		list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list);
481  #ifdef CONFIG_BFQ_GROUP_IOSCHED
482  	if (bfqg != bfqd->root_group)
483  		bfqg->active_entities++;
484  #endif
485  }
486  
487  /**
488   * bfq_ioprio_to_weight - calc a weight from an ioprio.
489   * @ioprio: the ioprio value to convert.
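 *
 * For example, assuming IOPRIO_NR_LEVELS == 8 and
 * BFQ_WEIGHT_CONVERSION_COEFF == 10, ioprio 4 maps to weight
 * (8 - 4) * 10 = 40.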
490   */
491  unsigned short bfq_ioprio_to_weight(int ioprio)
492  {
493  	return (IOPRIO_NR_LEVELS - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF;
494  }
495  
496  /**
497   * bfq_weight_to_ioprio - calc an ioprio from a weight.
498   * @weight: the weight value to convert.
499   *
500   * To preserve as much as possible the old only-ioprio user interface,
501   * 0 is used as an escape ioprio value for weights (numerically) equal or
502   * larger than IOPRIO_NR_LEVELS * BFQ_WEIGHT_CONVERSION_COEFF.
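 *
 * For example, under the same assumptions as above, weight 40 maps back
 * to ioprio 8 - 40 / 10 = 4, while any weight of 80 or more maps to 0.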
503   */
504  static unsigned short bfq_weight_to_ioprio(int weight)
505  {
506  	return max_t(int, 0,
507  		     IOPRIO_NR_LEVELS - weight / BFQ_WEIGHT_CONVERSION_COEFF);
508  }
509  
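/*
 * Take a reference on the queue associated with the entity, if any.
 * Group entities are not reference-counted here.
 */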
510  static void bfq_get_entity(struct bfq_entity *entity)
511  {
512  	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
513  
514  	if (bfqq) {
515  		bfqq->ref++;
516  		bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
517  			     bfqq, bfqq->ref);
518  	}
519  }
520  
521  /**
522   * bfq_find_deepest - find the deepest node that an extraction can modify.
523   * @node: the node being removed.
524   *
525   * Do the first step of an extraction in an rb tree, looking for the
526   * node that will replace @node, and returning the deepest node that
527   * the following modifications to the tree can touch.  If @node is the
528   * last node in the tree return %NULL.
529   */
530  static struct rb_node *bfq_find_deepest(struct rb_node *node)
531  {
532  	struct rb_node *deepest;
533  
534  	if (!node->rb_right && !node->rb_left)
535  		deepest = rb_parent(node);
536  	else if (!node->rb_right)
537  		deepest = node->rb_left;
538  	else if (!node->rb_left)
539  		deepest = node->rb_right;
540  	else {
541  		deepest = rb_next(node);
542  		if (deepest->rb_right)
543  			deepest = deepest->rb_right;
544  		else if (rb_parent(deepest) != node)
545  			deepest = rb_parent(deepest);
546  	}
547  
548  	return deepest;
549  }
550  
551  /**
552   * bfq_active_extract - remove an entity from the active tree.
553   * @st: the service_tree containing the tree.
554   * @entity: the entity being removed.
555   */
556  static void bfq_active_extract(struct bfq_service_tree *st,
557  			       struct bfq_entity *entity)
558  {
559  	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
560  	struct rb_node *node;
561  #ifdef CONFIG_BFQ_GROUP_IOSCHED
562  	struct bfq_sched_data *sd = NULL;
563  	struct bfq_group *bfqg = NULL;
564  	struct bfq_data *bfqd = NULL;
565  #endif
566  
567  	node = bfq_find_deepest(&entity->rb_node);
568  	bfq_extract(&st->active, entity);
569  
570  	if (node)
571  		bfq_update_active_tree(node);
572  
573  #ifdef CONFIG_BFQ_GROUP_IOSCHED
574  	sd = entity->sched_data;
575  	bfqg = container_of(sd, struct bfq_group, sched_data);
576  	bfqd = (struct bfq_data *)bfqg->bfqd;
577  #endif
578  	if (bfqq)
579  		list_del(&bfqq->bfqq_list);
580  #ifdef CONFIG_BFQ_GROUP_IOSCHED
581  	if (bfqg != bfqd->root_group)
582  		bfqg->active_entities--;
583  #endif
584  }
585  
586  /**
587   * bfq_idle_insert - insert an entity into the idle tree.
588   * @st: the service tree containing the tree.
589   * @entity: the entity to insert.
590   */
591  static void bfq_idle_insert(struct bfq_service_tree *st,
592  			    struct bfq_entity *entity)
593  {
594  	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
595  	struct bfq_entity *first_idle = st->first_idle;
596  	struct bfq_entity *last_idle = st->last_idle;
597  
598  	if (!first_idle || bfq_gt(first_idle->finish, entity->finish))
599  		st->first_idle = entity;
600  	if (!last_idle || bfq_gt(entity->finish, last_idle->finish))
601  		st->last_idle = entity;
602  
603  	bfq_insert(&st->idle, entity);
604  
605  	if (bfqq)
606  		list_add(&bfqq->bfqq_list, &bfqq->bfqd->idle_list);
607  }
608  
609  /**
610   * bfq_forget_entity - do not consider entity any longer for scheduling
611   * @st: the service tree.
612   * @entity: the entity being removed.
613   * @is_in_service: true if entity is currently the in-service entity.
614   *
615   * Forget everything about @entity. In addition, if entity represents
616   * a queue, and the latter is not in service, then release the service
617   * reference to the queue (the one taken through bfq_get_entity). In
618   * fact, in this case, there is really no more service reference to
619   * the queue, as the latter is also outside any service tree. If,
620   * instead, the queue is in service, then __bfq_bfqd_reset_in_service
621   * will take care of putting the reference when the queue finally
622   * stops being served.
623   */
624  static void bfq_forget_entity(struct bfq_service_tree *st,
625  			      struct bfq_entity *entity,
626  			      bool is_in_service)
627  {
628  	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
629  
630  	entity->on_st_or_in_serv = false;
631  	st->wsum -= entity->weight;
632  	if (bfqq && !is_in_service)
633  		bfq_put_queue(bfqq);
634  }
635  
636  /**
637   * bfq_put_idle_entity - release the idle tree ref of an entity.
638   * @st: service tree for the entity.
639   * @entity: the entity being released.
640   */
641  void bfq_put_idle_entity(struct bfq_service_tree *st, struct bfq_entity *entity)
642  {
643  	bfq_idle_extract(st, entity);
644  	bfq_forget_entity(st, entity,
645  			  entity == entity->sched_data->in_service_entity);
646  }
647  
648  /**
649   * bfq_forget_idle - update the idle tree if necessary.
650   * @st: the service tree to act upon.
651   *
652   * To preserve the global O(log N) complexity we only remove one entry here;
653   * as the idle tree will not grow indefinitely this can be done safely.
654   */
655  static void bfq_forget_idle(struct bfq_service_tree *st)
656  {
657  	struct bfq_entity *first_idle = st->first_idle;
658  	struct bfq_entity *last_idle = st->last_idle;
659  
660  	if (RB_EMPTY_ROOT(&st->active) && last_idle &&
661  	    !bfq_gt(last_idle->finish, st->vtime)) {
662  		/*
663  		 * Forget the whole idle tree, increasing the vtime past
664  		 * the last finish time of idle entities.
665  		 */
666  		st->vtime = last_idle->finish;
667  	}
668  
669  	if (first_idle && !bfq_gt(first_idle->finish, st->vtime))
670  		bfq_put_idle_entity(st, first_idle);
671  }
672  
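/*
 * Return the service tree, within the entity's sched_data, that matches
 * the ioprio class of the entity.
 */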
673  struct bfq_service_tree *bfq_entity_service_tree(struct bfq_entity *entity)
674  {
675  	struct bfq_sched_data *sched_data = entity->sched_data;
676  	unsigned int idx = bfq_class_idx(entity);
677  
678  	return sched_data->service_tree + idx;
679  }
680  
681  /*
682   * Update weight and priority of entity. If update_class_too is true,
683   * then update the ioprio_class of entity too.
684   *
685   * The reason why the update of ioprio_class is controlled through the
686   * last parameter is as follows. Changing the ioprio class of an
687   * entity implies changing the destination service trees for that
688   * entity. If such a change occurred when the entity is already on one
689   * of the service trees for its previous class, then the state of the
690   * entity would become more complex: none of the new possible service
691   * trees for the entity, according to bfq_entity_service_tree(), would
692   * match any of the possible service trees on which the entity
693   * is. Complex operations involving these trees, such as entity
694   * activations and deactivations, should take into account this
695   * additional complexity.  To avoid this issue, this function is
696   * invoked with update_class_too unset in the points in the code where
697   * entity may happen to be on some tree.
698   */
699  struct bfq_service_tree *
700  __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
701  				struct bfq_entity *entity,
702  				bool update_class_too)
703  {
704  	struct bfq_service_tree *new_st = old_st;
705  
706  	if (entity->prio_changed) {
707  		struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
708  		unsigned int prev_weight, new_weight;
709  		struct bfq_data *bfqd = NULL;
710  		struct rb_root_cached *root;
711  #ifdef CONFIG_BFQ_GROUP_IOSCHED
712  		struct bfq_sched_data *sd;
713  		struct bfq_group *bfqg;
714  #endif
715  
716  		if (bfqq)
717  			bfqd = bfqq->bfqd;
718  #ifdef CONFIG_BFQ_GROUP_IOSCHED
719  		else {
720  			sd = entity->my_sched_data;
721  			bfqg = container_of(sd, struct bfq_group, sched_data);
722  			bfqd = (struct bfq_data *)bfqg->bfqd;
723  		}
724  #endif
725  
726  		/* Matches the smp_wmb() in bfq_group_set_weight. */
727  		smp_rmb();
728  		old_st->wsum -= entity->weight;
729  
730  		if (entity->new_weight != entity->orig_weight) {
731  			if (entity->new_weight < BFQ_MIN_WEIGHT ||
732  			    entity->new_weight > BFQ_MAX_WEIGHT) {
733  				pr_crit("update_weight_prio: new_weight %d\n",
734  					entity->new_weight);
735  				if (entity->new_weight < BFQ_MIN_WEIGHT)
736  					entity->new_weight = BFQ_MIN_WEIGHT;
737  				else
738  					entity->new_weight = BFQ_MAX_WEIGHT;
739  			}
740  			entity->orig_weight = entity->new_weight;
741  			if (bfqq)
742  				bfqq->ioprio =
743  				  bfq_weight_to_ioprio(entity->orig_weight);
744  		}
745  
746  		if (bfqq && update_class_too)
747  			bfqq->ioprio_class = bfqq->new_ioprio_class;
748  
749  		/*
750  		 * Reset prio_changed only if the ioprio_class change
751  		 * is not pending any longer.
752  		 */
753  		if (!bfqq || bfqq->ioprio_class == bfqq->new_ioprio_class)
754  			entity->prio_changed = 0;
755  
756  		/*
757  		 * NOTE: here we may be changing the weight too early,
758  		 * this will cause unfairness.  The correct approach
759  		 * would have required additional complexity to defer
760  		 * weight changes to the proper time instants (i.e.,
761  		 * when entity->finish <= old_st->vtime).
762  		 */
763  		new_st = bfq_entity_service_tree(entity);
764  
765  		prev_weight = entity->weight;
766  		new_weight = entity->orig_weight *
767  			     (bfqq ? bfqq->wr_coeff : 1);
768  		/*
769  		 * If the weight of the entity changes, and the entity is a
770  		 * queue, remove the entity from its old weight counter (if
771  		 * there is a counter associated with the entity).
772  		 */
773  		if (prev_weight != new_weight && bfqq) {
774  			root = &bfqd->queue_weights_tree;
775  			__bfq_weights_tree_remove(bfqd, bfqq, root);
776  		}
777  		entity->weight = new_weight;
778  		/*
779  		 * Add the entity, if it is not a weight-raised queue,
780  		 * to the counter associated with its new weight.
781  		 */
782  		if (prev_weight != new_weight && bfqq && bfqq->wr_coeff == 1) {
783  			/* If we get here, root has been initialized. */
784  			bfq_weights_tree_add(bfqd, bfqq, root);
785  		}
786  
787  		new_st->wsum += entity->weight;
788  
789  		if (new_st != old_st)
790  			entity->start = new_st->vtime;
791  	}
792  
793  	return new_st;
794  }
795  
796  /**
797   * bfq_bfqq_served - update the scheduler status after selection for
798   *                   service.
799   * @bfqq: the queue being served.
800   * @served: bytes to transfer.
801   *
802   * NOTE: this can be optimized, as the timestamps of upper level entities
803   * are synchronized every time a new bfqq is selected for service.  For now,
804   * we keep it to better check consistency.
805   */
806  void bfq_bfqq_served(struct bfq_queue *bfqq, int served)
807  {
808  	struct bfq_entity *entity = &bfqq->entity;
809  	struct bfq_service_tree *st;
810  
811  	if (!bfqq->service_from_backlogged)
812  		bfqq->first_IO_time = jiffies;
813  
814  	if (bfqq->wr_coeff > 1)
815  		bfqq->service_from_wr += served;
816  
817  	bfqq->service_from_backlogged += served;
818  	for_each_entity(entity) {
819  		st = bfq_entity_service_tree(entity);
820  
821  		entity->service += served;
822  
823  		st->vtime += bfq_delta(served, st->wsum);
824  		bfq_forget_idle(st);
825  	}
826  	bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %d secs", served);
827  }
828  
829  /**
830   * bfq_bfqq_charge_time - charge an amount of service equivalent to the length
831   *			  of the time interval during which bfqq has been in
832   *			  service.
833   * @bfqd: the device
834   * @bfqq: the queue that needs a service update.
835   * @time_ms: the amount of time during which the queue has received service
836   *
837   * If a queue does not consume its budget fast enough, then providing
838   * the queue with service fairness may impair throughput, more or less
839   * severely. For this reason, queues that consume their budget slowly
840   * are provided with time fairness instead of service fairness. This
841   * goal is achieved through the BFQ scheduling engine, even if such an
842   * engine works in the service domain, and not in the time domain. The trick
843   * is charging these queues with an inflated amount of service, equal
844   * to the amount of service that they would have received during their
845   * service slot if they had been fast, i.e., if their requests had
846   * been dispatched at a rate equal to the estimated peak rate.
847   *
848   * It is worth noting that time fairness can cause important
849   * distortions in terms of bandwidth distribution, on devices with
850   * internal queueing. The reason is that I/O requests dispatched
851   * during the service slot of a queue may be served after that service
852   * slot is finished, and may have a total processing time loosely
853   * correlated with the duration of the service slot. This is
854   * especially true for short service slots.
855   */
856  void bfq_bfqq_charge_time(struct bfq_data *bfqd, struct bfq_queue *bfqq,
857  			  unsigned long time_ms)
858  {
859  	struct bfq_entity *entity = &bfqq->entity;
860  	unsigned long timeout_ms = jiffies_to_msecs(bfq_timeout);
861  	unsigned long bounded_time_ms = min(time_ms, timeout_ms);
862  	int serv_to_charge_for_time =
863  		(bfqd->bfq_max_budget * bounded_time_ms) / timeout_ms;
864  	int tot_serv_to_charge = max(serv_to_charge_for_time, entity->service);
865  
866  	/* Increase budget to avoid inconsistencies */
867  	if (tot_serv_to_charge > entity->budget)
868  		entity->budget = tot_serv_to_charge;
869  
870  	bfq_bfqq_served(bfqq,
871  			max_t(int, 0, tot_serv_to_charge - entity->service));
872  }
873  
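/*
 * Update the weight/priority of the entity, compute its new finish time,
 * possibly push up backshifted timestamps (see the comments below), and
 * finally insert the entity into the active tree of its service tree.
 */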
874  static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
875  					struct bfq_service_tree *st,
876  					bool backshifted)
877  {
878  	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
879  
880  	/*
881  	 * When this function is invoked, entity is not in any service
882  	 * tree, then it is safe to invoke next function with the last
883  	 * parameter set (see the comments on the function).
884  	 */
885  	st = __bfq_entity_update_weight_prio(st, entity, true);
886  	bfq_calc_finish(entity, entity->budget);
887  
888  	/*
889  	 * If some queues enjoy backshifting for a while, then their
890  	 * (virtual) finish timestamps may happen to become lower and
891  	 * lower than the system virtual time.	In particular, if
892  	 * these queues often happen to be idle for short time
893  	 * periods, and during such time periods other queues with
894  	 * higher timestamps happen to be busy, then the backshifted
895  	 * timestamps of the former queues can become much lower than
896  	 * the system virtual time. In fact, to serve the queues with
897  	 * higher timestamps while the ones with lower timestamps are
898  	 * idle, the system virtual time may be pushed-up to much
899  	 * higher values than the finish timestamps of the idle
900  	 * queues. As a consequence, the finish timestamps of all new
901  	 * or newly activated queues may end up being much larger than
902  	 * those of lucky queues with backshifted timestamps. The
903  	 * latter queues may then monopolize the device for a lot of
904  	 * time. This would simply break service guarantees.
905  	 *
906  	 * To reduce this problem, push up a little bit the
907  	 * backshifted timestamps of the queue associated with this
908  	 * entity (only a queue can happen to have the backshifted
909  	 * flag set): just enough to let the finish timestamp of the
910  	 * queue be equal to the current value of the system virtual
911  	 * time. This may introduce a little unfairness among queues
912  	 * with backshifted timestamps, but it does not break
913  	 * worst-case fairness guarantees.
914  	 *
915  	 * As a special case, if bfqq is weight-raised, push up
916  	 * timestamps much less, to keep very low the probability that
917  	 * this push up causes the backshifted finish timestamps of
918  	 * weight-raised queues to become higher than the backshifted
919  	 * finish timestamps of non weight-raised queues.
920  	 */
921  	if (backshifted && bfq_gt(st->vtime, entity->finish)) {
922  		unsigned long delta = st->vtime - entity->finish;
923  
924  		if (bfqq)
925  			delta /= bfqq->wr_coeff;
926  
927  		entity->start += delta;
928  		entity->finish += delta;
929  	}
930  
931  	bfq_active_insert(st, entity);
932  }
933  
934  /**
935   * __bfq_activate_entity - handle activation of entity.
936   * @entity: the entity being activated.
937   * @non_blocking_wait_rq: true if entity was waiting for a request
938   *
939   * Called for a 'true' activation, i.e., if entity is not active and
940   * one of its children receives a new request.
941   *
942   * Basically, this function updates the timestamps of entity and
943   * inserts entity into its active tree, after possibly extracting it
944   * from its idle tree.
945   */
946  static void __bfq_activate_entity(struct bfq_entity *entity,
947  				  bool non_blocking_wait_rq)
948  {
949  	struct bfq_service_tree *st = bfq_entity_service_tree(entity);
950  	bool backshifted = false;
951  	unsigned long long min_vstart;
952  
953  	/* See comments on bfq_bfqq_update_budg_for_activation */
954  	if (non_blocking_wait_rq && bfq_gt(st->vtime, entity->finish)) {
955  		backshifted = true;
956  		min_vstart = entity->finish;
957  	} else
958  		min_vstart = st->vtime;
959  
960  	if (entity->tree == &st->idle) {
961  		/*
962  		 * Must be on the idle tree, bfq_idle_extract() will
963  		 * check for that.
964  		 */
965  		bfq_idle_extract(st, entity);
966  		entity->start = bfq_gt(min_vstart, entity->finish) ?
967  			min_vstart : entity->finish;
968  	} else {
969  		/*
970  		 * The finish time of the entity may be invalid, and
971  		 * it is in the past for sure, otherwise the queue
972  		 * would have been on the idle tree.
973  		 */
974  		entity->start = min_vstart;
975  		st->wsum += entity->weight;
976  		/*
977  		 * entity is about to be inserted into a service tree,
978  		 * and then set in service: get a reference to make
979  		 * sure entity does not disappear until it is no
980  		 * longer in service or scheduled for service.
981  		 */
982  		bfq_get_entity(entity);
983  
984  		entity->on_st_or_in_serv = true;
985  	}
986  
987  #ifdef CONFIG_BFQ_GROUP_IOSCHED
988  	if (!bfq_entity_to_bfqq(entity)) { /* bfq_group */
989  		struct bfq_group *bfqg =
990  			container_of(entity, struct bfq_group, entity);
991  		struct bfq_data *bfqd = bfqg->bfqd;
992  
993  		if (!entity->in_groups_with_pending_reqs) {
994  			entity->in_groups_with_pending_reqs = true;
995  			bfqd->num_groups_with_pending_reqs++;
996  		}
997  	}
998  #endif
999  
1000  	bfq_update_fin_time_enqueue(entity, st, backshifted);
1001  }
1002  
1003  /**
1004   * __bfq_requeue_entity - handle requeueing or repositioning of an entity.
1005   * @entity: the entity being requeued or repositioned.
1006   *
1007   * Requeueing is needed if this entity stops being served, which
1008   * happens if a leaf descendant entity has expired. On the other hand,
1009   * repositioning is needed if the next_in_service entity for the child
1010   * entity has changed. See the comments inside the function for
1011   * details.
1012   *
1013   * Basically, this function: 1) removes entity from its active tree if
1014   * present there, 2) updates the timestamps of entity and 3) inserts
1015   * entity back into its active tree (in the new, right position for
1016   * the new values of the timestamps).
1017   */
1018  static void __bfq_requeue_entity(struct bfq_entity *entity)
1019  {
1020  	struct bfq_sched_data *sd = entity->sched_data;
1021  	struct bfq_service_tree *st = bfq_entity_service_tree(entity);
1022  
1023  	if (entity == sd->in_service_entity) {
1024  		/*
1025  		 * We are requeueing the current in-service entity,
1026  		 * which may have to be done for one of the following
1027  		 * reasons:
1028  		 * - entity represents the in-service queue, and the
1029  		 *   in-service queue is being requeued after an
1030  		 *   expiration;
1031  		 * - entity represents a group, and its budget has
1032  		 *   changed because one of its child entities has
1033  		 *   just been either activated or requeued for some
1034  		 *   reason; the timestamps of the entity need then to
1035  		 *   be updated, and the entity needs to be enqueued
1036  		 *   or repositioned accordingly.
1037  		 *
1038  		 * In particular, before requeueing, the start time of
1039  		 * the entity must be moved forward to account for the
1040  		 * service that the entity has received while in
1041  		 * service. This is done by the next instructions. The
1042  		 * finish time will then be updated according to this
1043  		 * new value of the start time, and to the budget of
1044  		 * the entity.
1045  		 */
1046  		bfq_calc_finish(entity, entity->service);
1047  		entity->start = entity->finish;
1048  		/*
1049  		 * In addition, if the entity had more than one child
1050  		 * when set in service, then it was not extracted from
1051  		 * the active tree. This implies that the position of
1052  		 * the entity in the active tree may need to be
1053  		 * changed now, because we have just updated the start
1054  		 * time of the entity, and we will update its finish
1055  		 * time in a moment (the requeueing is then, more
1056  		 * precisely, a repositioning in this case). To
1057  		 * implement this repositioning, we: 1) dequeue the
1058  		 * entity here, 2) update the finish time and requeue
1059  		 * the entity according to the new timestamps below.
1060  		 */
1061  		if (entity->tree)
1062  			bfq_active_extract(st, entity);
1063  	} else { /* The entity is already active, and not in service */
1064  		/*
1065  		 * In this case, this function gets called only if the
1066  		 * next_in_service entity below this entity has
1067  		 * changed, and this change has caused the budget of
1068  		 * this entity to change, which, finally implies that
1069  		 * the finish time of this entity must be
1070  		 * updated. Such an update may cause the scheduling,
1071  		 * i.e., the position in the active tree, of this
1072  		 * entity to change. We handle this change by: 1)
1073  		 * dequeueing the entity here, 2) updating the finish
1074  		 * time and requeueing the entity according to the new
1075  		 * timestamps below. This is the same approach as the
1076  		 * non-extracted-entity sub-case above.
1077  		 */
1078  		bfq_active_extract(st, entity);
1079  	}
1080  
1081  	bfq_update_fin_time_enqueue(entity, st, false);
1082  }
1083  
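/*
 * Activate, requeue or reposition a single entity: requeue or reposition
 * it if it is in service or already queued on its active tree, otherwise
 * perform a true activation.
 */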
1084  static void __bfq_activate_requeue_entity(struct bfq_entity *entity,
1085  					  struct bfq_sched_data *sd,
1086  					  bool non_blocking_wait_rq)
1087  {
1088  	struct bfq_service_tree *st = bfq_entity_service_tree(entity);
1089  
1090  	if (sd->in_service_entity == entity || entity->tree == &st->active)
1091  		 /*
1092  		  * in service or already queued on the active tree,
1093  		  * requeue or reposition
1094  		  */
1095  		__bfq_requeue_entity(entity);
1096  	else
1097  		/*
1098  		 * Not in service and not queued on its active tree:
1099  		 * the entity is idle and this is a true activation.
1100  		 */
1101  		__bfq_activate_entity(entity, non_blocking_wait_rq);
1102  }
1103  
1104  
1105  /**
1106   * bfq_activate_requeue_entity - activate or requeue an entity representing a
1107   *				 bfq_queue, and activate, requeue or reposition
1108   *				 all ancestors for which such an update becomes
1109   *				 necessary.
1110   * @entity: the entity to activate.
1111   * @non_blocking_wait_rq: true if this entity was waiting for a request
1112   * @requeue: true if this is a requeue, which implies that bfqq is
1113   *	     being expired; thus ALL its ancestors stop being served and must
1114   *	     therefore be requeued
1115   * @expiration: true if this function is being invoked in the expiration path
1116   *             of the in-service queue
1117   */
1118  static void bfq_activate_requeue_entity(struct bfq_entity *entity,
1119  					bool non_blocking_wait_rq,
1120  					bool requeue, bool expiration)
1121  {
1122  	struct bfq_sched_data *sd;
1123  
1124  	for_each_entity(entity) {
1125  		sd = entity->sched_data;
1126  		__bfq_activate_requeue_entity(entity, sd, non_blocking_wait_rq);
1127  
1128  		if (!bfq_update_next_in_service(sd, entity, expiration) &&
1129  		    !requeue)
1130  			break;
1131  	}
1132  }
1133  
1134  /**
1135   * __bfq_deactivate_entity - update sched_data and service trees for
1136   * entity, so as to represent entity as inactive
1137   * @entity: the entity being deactivated.
1138   * @ins_into_idle_tree: if false, the entity will not be put into the
1139   *			idle tree.
1140   *
1141   * If necessary and allowed, puts entity into the idle tree. NOTE:
1142   * entity may be on no tree if in service.
1143   */
1144  bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
1145  {
1146  	struct bfq_sched_data *sd = entity->sched_data;
1147  	struct bfq_service_tree *st;
1148  	bool is_in_service;
1149  
1150  	if (!entity->on_st_or_in_serv) /*
1151  					* entity never activated, or
1152  					* already inactive
1153  					*/
1154  		return false;
1155  
1156  	/*
1157  	 * If we get here, then entity is active, which implies that
1158  	 * bfq_group_set_parent has already been invoked for the group
1159  	 * represented by entity. Therefore, the field
1160  	 * entity->sched_data has been set, and we can safely use it.
1161  	 */
1162  	st = bfq_entity_service_tree(entity);
1163  	is_in_service = entity == sd->in_service_entity;
1164  
1165  	bfq_calc_finish(entity, entity->service);
1166  
1167  	if (is_in_service)
1168  		sd->in_service_entity = NULL;
1169  	else
1170  		/*
1171  		 * Non in-service entity: nobody will take care of
1172  		 * resetting its service counter on expiration. Do it
1173  		 * now.
1174  		 */
1175  		entity->service = 0;
1176  
1177  	if (entity->tree == &st->active)
1178  		bfq_active_extract(st, entity);
1179  	else if (!is_in_service && entity->tree == &st->idle)
1180  		bfq_idle_extract(st, entity);
1181  
1182  	if (!ins_into_idle_tree || !bfq_gt(entity->finish, st->vtime))
1183  		bfq_forget_entity(st, entity, is_in_service);
1184  	else
1185  		bfq_idle_insert(st, entity);
1186  
1187  	return true;
1188  }
1189  
1190  /**
1191   * bfq_deactivate_entity - deactivate an entity representing a bfq_queue.
1192   * @entity: the entity to deactivate.
1193   * @ins_into_idle_tree: true if the entity can be put into the idle tree
1194   * @expiration: true if this function is being invoked in the expiration path
1195   *             of the in-service queue
1196   */
1197  static void bfq_deactivate_entity(struct bfq_entity *entity,
1198  				  bool ins_into_idle_tree,
1199  				  bool expiration)
1200  {
1201  	struct bfq_sched_data *sd;
1202  	struct bfq_entity *parent = NULL;
1203  
1204  	for_each_entity_safe(entity, parent) {
1205  		sd = entity->sched_data;
1206  
1207  		if (!__bfq_deactivate_entity(entity, ins_into_idle_tree)) {
1208  			/*
1209  			 * entity is not in any tree any more, so
1210  			 * this deactivation is a no-op, and there is
1211  			 * nothing to change for upper-level entities
1212  			 * (in case of expiration, this can never
1213  			 * happen).
1214  			 */
1215  			return;
1216  		}
1217  
1218  		if (sd->next_in_service == entity)
1219  			/*
1220  			 * entity was the next_in_service entity,
1221  			 * then, since entity has just been
1222  			 * deactivated, a new one must be found.
1223  			 */
1224  			bfq_update_next_in_service(sd, NULL, expiration);
1225  
1226  		if (sd->next_in_service || sd->in_service_entity) {
1227  			/*
1228  			 * The parent entity is still active, because
1229  			 * either next_in_service or in_service_entity
1230  			 * is not NULL. So, no further upwards
1231  			 * deactivation must be performed.  Yet,
1232  			 * next_in_service has changed.	Then the
1233  			 * schedule does need to be updated upwards.
1234  			 *
1235  			 * NOTE If in_service_entity is not NULL, then
1236  			 * next_in_service may happen to be NULL,
1237  			 * although the parent entity is evidently
1238  			 * active. This happens if 1) the entity
1239  			 * pointed by in_service_entity is the only
1240  			 * active entity in the parent entity, and 2)
1241  			 * according to the definition of
1242  			 * next_in_service, the in_service_entity
1243  			 * cannot be considered as
1244  			 * next_in_service. See the comments on the
1245  			 * definition of next_in_service for details.
1246  			 */
1247  			break;
1248  		}
1249  
1250  		/*
1251  		 * If we get here, then the parent is no more
1252  		 * backlogged and we need to propagate the
1253  		 * deactivation upwards. Thus let the loop go on.
1254  		 */
1255  
1256  		/*
1257  		 * Also let parent be queued into the idle tree on
1258  		 * deactivation, to preserve service guarantees, and
1259  		 * assuming that the caller of this function does not
1260  		 * need parent entities to be removed completely as well.
1261  		 */
1262  		ins_into_idle_tree = true;
1263  	}
1264  
1265  	/*
1266  	 * If the deactivation loop is fully executed, then there are
1267  	 * no more entities to touch and the next loop is not executed at
1268  	 * all. Otherwise, requeue remaining entities if they are
1269  	 * about to stop receiving service, or reposition them if this
1270  	 * is not the case.
1271  	 */
1272  	entity = parent;
1273  	for_each_entity(entity) {
1274  		/*
1275  		 * Invoke __bfq_requeue_entity on entity, even if
1276  		 * already active, to requeue/reposition it in the
1277  		 * active tree (because sd->next_in_service has
1278  		 * changed)
1279  		 */
1280  		__bfq_requeue_entity(entity);
1281  
1282  		sd = entity->sched_data;
1283  		if (!bfq_update_next_in_service(sd, entity, expiration) &&
1284  		    !expiration)
1285  			/*
1286  			 * next_in_service unchanged or not causing
1287  			 * any change in entity->parent->sd, and no
1288  			 * requeueing needed for expiration: stop
1289  			 * here.
1290  			 */
1291  			break;
1292  	}
1293  }
1294  
1295  /**
1296   * bfq_calc_vtime_jump - compute the value to which the vtime should jump,
1297   *                       if needed, to have at least one entity eligible.
1298   * @st: the service tree to act upon.
1299   *
1300   * Assumes that st is not empty.
1301   */
1302  static u64 bfq_calc_vtime_jump(struct bfq_service_tree *st)
1303  {
1304  	struct bfq_entity *root_entity = bfq_root_active_entity(&st->active);
1305  
1306  	if (bfq_gt(root_entity->min_start, st->vtime))
1307  		return root_entity->min_start;
1308  
1309  	return st->vtime;
1310  }
1311  
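/*
 * Push the virtual time of the service tree forward to new_value, if the
 * latter is higher, and let bfq_forget_idle() prune the idle tree
 * accordingly.
 */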
1312  static void bfq_update_vtime(struct bfq_service_tree *st, u64 new_value)
1313  {
1314  	if (new_value > st->vtime) {
1315  		st->vtime = new_value;
1316  		bfq_forget_idle(st);
1317  	}
1318  }
1319  
1320  /**
1321   * bfq_first_active_entity - find the eligible entity with
1322   *                           the smallest finish time
1323   * @st: the service tree to select from.
1324   * @vtime: the system virtual time to use as a reference for eligibility
1325   *
1326   * This function searches the first schedulable entity, starting from the
1327   * root of the tree and going on the left every time on this side there is
1328   * a subtree with at least one eligible (start <= vtime) entity. The path on
1329   * the right is followed only if a) the left subtree contains no eligible
1330   * entities and b) no eligible entity has been found yet.
1331   */
1332  static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st,
1333  						  u64 vtime)
1334  {
1335  	struct bfq_entity *entry, *first = NULL;
1336  	struct rb_node *node = st->active.rb_node;
1337  
1338  	while (node) {
1339  		entry = rb_entry(node, struct bfq_entity, rb_node);
1340  left:
1341  		if (!bfq_gt(entry->start, vtime))
1342  			first = entry;
1343  
1344  		if (node->rb_left) {
1345  			entry = rb_entry(node->rb_left,
1346  					 struct bfq_entity, rb_node);
1347  			if (!bfq_gt(entry->min_start, vtime)) {
1348  				node = node->rb_left;
1349  				goto left;
1350  			}
1351  		}
1352  		if (first)
1353  			break;
1354  		node = node->rb_right;
1355  	}
1356  
1357  	return first;
1358  }
1359  
1360  /**
1361   * __bfq_lookup_next_entity - return the first eligible entity in @st.
1362   * @st: the service tree.
1363   * @in_service: whether or not there is an in-service entity for the sched_data
1364   *	this active tree belongs to.
1365   *
1366   * If there is no in-service entity for the sched_data st belongs to,
1367   * then return the entity that will be set in service if:
1368   * 1) the parent entity this st belongs to is set in service;
1369   * 2) no entity belonging to such parent entity undergoes a state change
1370   * that would influence the timestamps of the entity (e.g., becomes idle,
1371   * becomes backlogged, changes its budget, ...).
1372   *
1373   * In this first case, update the virtual time in @st too (see the
1374   * comments on this update inside the function).
1375   *
1376   * In contrast, if there is an in-service entity, then return the
1377   * entity that would be set in service if not only the above
1378   * conditions, but also the next one held true: the currently
1379   * in-service entity, on expiration,
1380   * 1) gets a finish time equal to the current one, or
1381   * 2) is not eligible any more, or
1382   * 3) is idle.
1383   */
1384  static struct bfq_entity *
1385  __bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service)
1386  {
1387  	struct bfq_entity *entity;
1388  	u64 new_vtime;
1389  
1390  	if (RB_EMPTY_ROOT(&st->active))
1391  		return NULL;
1392  
1393  	/*
1394  	 * Get the value of the system virtual time for which at
1395  	 * least one entity is eligible.
1396  	 */
1397  	new_vtime = bfq_calc_vtime_jump(st);
1398  
1399  	/*
1400  	 * If there is no in-service entity for the sched_data this
1401  	 * active tree belongs to, then push the system virtual time
1402  	 * up to the value that guarantees that at least one entity is
1403  	 * eligible. If, instead, there is an in-service entity, then
1404  	 * do not make any such update, because there is already an
1405  	 * eligible entity, namely the in-service one (even if the
1406  	 * entity is not on st, because it was extracted when set in
1407  	 * service).
1408  	 */
1409  	if (!in_service)
1410  		bfq_update_vtime(st, new_vtime);
1411  
1412  	entity = bfq_first_active_entity(st, new_vtime);
1413  
1414  	return entity;
1415  }
1416  
1417  /**
1418   * bfq_lookup_next_entity - return the first eligible entity in @sd.
1419   * @sd: the sched_data.
1420   * @expiration: true if we are on the expiration path of the in-service queue
1421   *
1422   * This function is invoked when there has been a change in the trees
1423   * for sd, and we need to know what is the new next entity to serve
1424   * after this change.
1425   */
1426  static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
1427  						 bool expiration)
1428  {
1429  	struct bfq_service_tree *st = sd->service_tree;
1430  	struct bfq_service_tree *idle_class_st = st + (BFQ_IOPRIO_CLASSES - 1);
1431  	struct bfq_entity *entity = NULL;
1432  	int class_idx = 0;
1433  
1434  	/*
1435  	 * Choose from idle class, if needed to guarantee a minimum
1436  	 * bandwidth to this class (and if there is some active entity
1437  	 * in idle class). This should also mitigate
1438  	 * priority-inversion problems in case a low priority task is
1439  	 * holding file system resources.
1440  	 */
1441  	if (time_is_before_jiffies(sd->bfq_class_idle_last_service +
1442  				   BFQ_CL_IDLE_TIMEOUT)) {
1443  		if (!RB_EMPTY_ROOT(&idle_class_st->active))
1444  			class_idx = BFQ_IOPRIO_CLASSES - 1;
1445  		/* About to be served if backlogged, or not yet backlogged */
1446  		sd->bfq_class_idle_last_service = jiffies;
1447  	}
1448  
1449  	/*
1450  	 * Find the next entity to serve for the highest-priority
1451  	 * class, unless the idle class needs to be served.
1452  	 */
1453  	for (; class_idx < BFQ_IOPRIO_CLASSES; class_idx++) {
1454  		/*
1455  		 * If expiration is true, then bfq_lookup_next_entity
1456  		 * is being invoked as a part of the expiration path
1457  		 * of the in-service queue. In this case, even if
1458  		 * sd->in_service_entity is not NULL,
1459  		 * sd->in_service_entity at this point is actually not
1460  		 * in service any more, and, if needed, has already
1461  		 * been properly queued or requeued into the right
1462  		 * tree. The reason why sd->in_service_entity is still
1463  		 * not NULL here, even if expiration is true, is that
1464  		 * sd->in_service_entity is reset as a last step in the
1465  		 * expiration path. So, if expiration is true, tell
1466  		 * __bfq_lookup_next_entity that there is no
1467  		 * sd->in_service_entity.
1468  		 */
1469  		entity = __bfq_lookup_next_entity(st + class_idx,
1470  						  sd->in_service_entity &&
1471  						  !expiration);
1472  
1473  		if (entity)
1474  			break;
1475  	}
1476  
1477  	return entity;
1478  }
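
/*
 * Illustrative sketch (hypothetical helper, not in the upstream file):
 * the anti-starvation rule for CLASS_IDLE applied above boils down to a
 * single check on how long ago the idle class was last served.
 */
static inline bool bfq_idle_class_needs_service_example(struct bfq_sched_data *sd)
{
	/* true once more than BFQ_CL_IDLE_TIMEOUT jiffies have elapsed */
	return time_is_before_jiffies(sd->bfq_class_idle_last_service +
				      BFQ_CL_IDLE_TIMEOUT);
}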
1479  
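/*
 * Return true if, at the root level, the entity cached as next in
 * service differs from the entity currently in service, i.e., if the
 * next dispatch may preempt the in-service queue.
 */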
1480  bool next_queue_may_preempt(struct bfq_data *bfqd)
1481  {
1482  	struct bfq_sched_data *sd = &bfqd->root_group->sched_data;
1483  
1484  	return sd->next_in_service != sd->in_service_entity;
1485  }
1486  
1487  /*
1488   * Get next queue for service.
1489   */
1490  struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
1491  {
1492  	struct bfq_entity *entity = NULL;
1493  	struct bfq_sched_data *sd;
1494  	struct bfq_queue *bfqq;
1495  
1496  	if (bfq_tot_busy_queues(bfqd) == 0)
1497  		return NULL;
1498  
1499  	/*
1500  	 * Traverse the path from the root to the leaf entity to
1501  	 * serve. Set in service all the entities visited along the
1502  	 * way.
1503  	 */
1504  	sd = &bfqd->root_group->sched_data;
1505  	for (; sd ; sd = entity->my_sched_data) {
1506  		/*
1507  		 * WARNING. We are about to set the in-service entity
1508  		 * to sd->next_in_service, i.e., to the (cached) value
1509  		 * returned by bfq_lookup_next_entity(sd) the last
1510  		 * time it was invoked, i.e., the last time when the
1511  		 * service order in sd changed as a consequence of the
1512  		 * activation or deactivation of an entity. In this
1513  		 * respect, if we execute bfq_lookup_next_entity(sd)
1514  		 * in this very moment, it may, although with low
1515  		 * probability, yield a different entity than that
1516  		 * pointed to by sd->next_in_service. This rare event
1517  		 * happens in case there was no CLASS_IDLE entity to
1518  		 * serve for sd when bfq_lookup_next_entity(sd) was
1519  		 * invoked for the last time, while there is now one
1520  		 * such entity.
1521  		 *
1522  		 * If the above event happens, then the scheduling of
1523  		 * such entity in CLASS_IDLE is postponed until the
1524  		 * service of the sd->next_in_service entity
1525  		 * finishes. In fact, when the latter is expired,
1526  		 * bfq_lookup_next_entity(sd) gets called again,
1527  		 * exactly to update sd->next_in_service.
1528  		 */
1529  
1530  		/* Make next_in_service entity become in_service_entity */
1531  		entity = sd->next_in_service;
1532  		sd->in_service_entity = entity;
1533  
1534  		/*
1535  		 * If entity is no longer a candidate for next
1536  		 * service, then it must be extracted from its active
1537  		 * tree, so as to make sure that it won't be
1538  		 * considered when computing next_in_service. See the
1539  		 * comments on the function
1540  		 * bfq_no_longer_next_in_service() for details.
1541  		 */
1542  		if (bfq_no_longer_next_in_service(entity))
1543  			bfq_active_extract(bfq_entity_service_tree(entity),
1544  					   entity);
1545  
1546  		/*
1547  		 * Even if entity is not to be extracted according to
1548  		 * the above check, a descendant entity may get
1549  		 * extracted in one of the next iterations of this
1550  		 * loop. Such an event could cause a change in
1551  		 * next_in_service for the level of the descendant
1552  		 * entity, and thus possibly back to this level.
1553  		 *
1554  		 * However, we cannot perform the resulting needed
1555  		 * update of next_in_service for this level before the
1556  		 * end of the whole loop, because, to know which is
1557  		 * the correct next-to-serve candidate entity for each
1558  		 * level, we need first to find the leaf entity to set
1559  		 * in service. In fact, only after we know which is
1560  		 * the next-to-serve leaf entity, we can discover
1561  		 * whether the parent entity of the leaf entity
1562  		 * becomes the next-to-serve, and so on.
1563  		 */
1564  	}
1565  
1566  	bfqq = bfq_entity_to_bfqq(entity);
1567  
1568  	/*
1569  	 * We can finally update all next-to-serve entities along the
1570  	 * path from the leaf entity just set in service to the root.
1571  	 */
1572  	for_each_entity(entity) {
1573  		struct bfq_sched_data *sd = entity->sched_data;
1574  
1575  		if (!bfq_update_next_in_service(sd, NULL, false))
1576  			break;
1577  	}
1578  
1579  	return bfqq;
1580  }
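
/*
 * Illustrative sketch (hypothetical and heavily simplified, not in the
 * upstream file): the traversal above is a downward pass that commits
 * the cached next_in_service at every level, followed by an upward pass
 * that refreshes next_in_service from the leaf back towards the root,
 * stopping as soon as a level's choice is unaffected. The extraction of
 * entities that are no longer candidates is omitted here.
 */
static struct bfq_entity *example_traversal_shape(struct bfq_sched_data *root_sd)
{
	struct bfq_entity *entity = NULL;
	struct bfq_sched_data *sd;

	/*
	 * Downward pass: commit the cached choice at each level
	 * (assumes at least one busy queue, as in bfq_get_next_queue()).
	 */
	for (sd = root_sd; sd; sd = entity->my_sched_data)
		entity = sd->in_service_entity = sd->next_in_service;

	/* Upward pass: propagate the new situation towards the root. */
	for_each_entity(entity)
		if (!bfq_update_next_in_service(entity->sched_data, NULL, false))
			break;

	return entity;
}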
1581  
1582  /* returns true if the in-service queue gets freed */
1583  bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
1584  {
1585  	struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue;
1586  	struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity;
1587  	struct bfq_entity *entity = in_serv_entity;
1588  
1589  	bfq_clear_bfqq_wait_request(in_serv_bfqq);
1590  	hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
1591  	bfqd->in_service_queue = NULL;
1592  
1593  	/*
1594  	 * When this function is called, all in-service entities have
1595  	 * been properly deactivated or requeued, so we can safely
1596  	 * execute the final step: reset in_service_entity along the
1597  	 * path from entity to the root.
1598  	 */
1599  	for_each_entity(entity)
1600  		entity->sched_data->in_service_entity = NULL;
1601  
1602  	/*
1603  	 * in_serv_entity is no longer in service, so, if it is in no
1604  	 * service tree either, then release the service reference to
1605  	 * the queue it represents (taken with bfq_get_entity).
1606  	 */
1607  	if (!in_serv_entity->on_st_or_in_serv) {
1608  		/*
1609  		 * If no process is referencing in_serv_bfqq any
1610  		 * longer, then the service reference may be the only
1611  		 * reference to the queue. If this is the case, then
1612  		 * bfqq gets freed here.
1613  		 */
1614  		int ref = in_serv_bfqq->ref;
1615  		bfq_put_queue(in_serv_bfqq);
1616  		if (ref == 1)
1617  			return true;
1618  	}
1619  
1620  	return false;
1621  }
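
/*
 * Illustrative usage sketch (hypothetical caller, not in the upstream
 * file): the return value tells the caller whether the in-service queue
 * has just been freed, so that the queue pointer must not be
 * dereferenced afterwards.
 */
static void example_reset_in_service(struct bfq_data *bfqd,
				     struct bfq_queue *bfqq)
{
	if (__bfq_bfqd_reset_in_service(bfqd))
		return;	/* last reference dropped: bfqq has been freed */

	/* bfqq is still valid here and may be used for further bookkeeping */
	bfq_log_bfqq(bfqd, bfqq, "reset in service, queue still referenced");
}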
1622  
1623  void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1624  			 bool ins_into_idle_tree, bool expiration)
1625  {
1626  	struct bfq_entity *entity = &bfqq->entity;
1627  
1628  	bfq_deactivate_entity(entity, ins_into_idle_tree, expiration);
1629  }
1630  
1631  void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
1632  {
1633  	struct bfq_entity *entity = &bfqq->entity;
1634  
1635  	bfq_activate_requeue_entity(entity, bfq_bfqq_non_blocking_wait_rq(bfqq),
1636  				    false, false);
1637  	bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
1638  }
1639  
1640  void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1641  		      bool expiration)
1642  {
1643  	struct bfq_entity *entity = &bfqq->entity;
1644  
1645  	bfq_activate_requeue_entity(entity, false,
1646  				    bfqq == bfqd->in_service_queue, expiration);
1647  }
1648  
1649  /*
1650   * Called when the bfqq no longer has requests pending, remove it from
1651   * the service tree. As a special case, it can be invoked during an
1652   * expiration.
1653   */
1654  void bfq_del_bfqq_busy(struct bfq_queue *bfqq, bool expiration)
1655  {
1656  	struct bfq_data *bfqd = bfqq->bfqd;
1657  
1658  	bfq_log_bfqq(bfqd, bfqq, "del from busy");
1659  
1660  	bfq_clear_bfqq_busy(bfqq);
1661  
1662  	bfqd->busy_queues[bfqq->ioprio_class - 1]--;
1663  
1664  	if (bfqq->wr_coeff > 1)
1665  		bfqd->wr_busy_queues--;
1666  
1667  	bfqg_stats_update_dequeue(bfqq_group(bfqq));
1668  
1669  	bfq_deactivate_bfqq(bfqd, bfqq, true, expiration);
1670  
1671  	if (!bfqq->dispatched)
1672  		bfq_weights_tree_remove(bfqd, bfqq);
1673  }
1674  
1675  /*
1676   * Called when an inactive queue receives a new request.
1677   */
1678  void bfq_add_bfqq_busy(struct bfq_queue *bfqq)
1679  {
1680  	struct bfq_data *bfqd = bfqq->bfqd;
1681  
1682  	bfq_log_bfqq(bfqd, bfqq, "add to busy");
1683  
1684  	bfq_activate_bfqq(bfqd, bfqq);
1685  
1686  	bfq_mark_bfqq_busy(bfqq);
1687  	bfqd->busy_queues[bfqq->ioprio_class - 1]++;
1688  
1689  	if (!bfqq->dispatched)
1690  		if (bfqq->wr_coeff == 1)
1691  			bfq_weights_tree_add(bfqd, bfqq,
1692  					     &bfqd->queue_weights_tree);
1693  
1694  	if (bfqq->wr_coeff > 1)
1695  		bfqd->wr_busy_queues++;
1696  
1697  	/* Move bfqq to the head of the woken list of its waker */
1698  	if (!hlist_unhashed(&bfqq->woken_list_node) &&
1699  	    &bfqq->woken_list_node != bfqq->waker_bfqq->woken_list.first) {
1700  		hlist_del_init(&bfqq->woken_list_node);
1701  		hlist_add_head(&bfqq->woken_list_node,
1702  			       &bfqq->waker_bfqq->woken_list);
1703  	}
1704  }
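
/*
 * Illustrative sketch (hypothetical helper, not in the upstream file):
 * the "move to the head of the woken list" step above is the standard
 * hlist idiom from <linux/list.h>: unlink the node and re-insert it at
 * the head, skipped when the node is unhashed or already first.
 */
static inline void example_hlist_move_to_head(struct hlist_node *node,
					      struct hlist_head *head)
{
	if (!hlist_unhashed(node) && node != head->first) {
		hlist_del_init(node);
		hlist_add_head(node, head);
	}
}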
1705