// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * cgroups support for the BFQ I/O scheduler.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/elevator.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "bfq-iosched.h"

#ifdef CONFIG_BFQ_CGROUP_DEBUG
static int bfq_stat_init(struct bfq_stat *stat, gfp_t gfp)
{
	int ret;

	ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
	if (ret)
		return ret;

	atomic64_set(&stat->aux_cnt, 0);
	return 0;
}

static void bfq_stat_exit(struct bfq_stat *stat)
{
	percpu_counter_destroy(&stat->cpu_cnt);
}

/**
 * bfq_stat_add - add a value to a bfq_stat
 * @stat: target bfq_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller must ensure that IRQs on the same CPU
 * don't re-enter this function for the same counter.
 */
static inline void bfq_stat_add(struct bfq_stat *stat, uint64_t val)
{
	percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * bfq_stat_read - read the current value of a bfq_stat
 * @stat: bfq_stat to read
 */
static inline uint64_t bfq_stat_read(struct bfq_stat *stat)
{
	return percpu_counter_sum_positive(&stat->cpu_cnt);
}

/**
 * bfq_stat_reset - reset a bfq_stat
 * @stat: bfq_stat to reset
 */
static inline void bfq_stat_reset(struct bfq_stat *stat)
{
	percpu_counter_set(&stat->cpu_cnt, 0);
	atomic64_set(&stat->aux_cnt, 0);
}

/**
 * bfq_stat_add_aux - add a bfq_stat into another's aux count
 * @to: the destination bfq_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void bfq_stat_add_aux(struct bfq_stat *to,
				     struct bfq_stat *from)
{
	atomic64_add(bfq_stat_read(from) + atomic64_read(&from->aux_cnt),
		     &to->aux_cnt);
}
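
/*
 * Note added for clarity (not in the original source): a bfq_stat is a
 * two-level counter. Live updates go to the percpu counter cpu_cnt,
 * while aux_cnt accumulates the totals of groups that have gone
 * offline (see bfqg_stats_xfer_dead() below), so that recursive stats
 * keep accounting for dead descendants.
 */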

/**
 * blkg_prfill_stat - prfill callback for bfq_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the bfq_stat in @pd
 *
 * prfill callback for printing a bfq_stat.
 */
static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
		int off)
{
	return __blkg_prfill_u64(sf, pd, bfq_stat_read((void *)pd + off));
}

/* bfqg stats flags */
enum bfqg_stats_flags {
	BFQG_stats_waiting = 0,
	BFQG_stats_idling,
	BFQG_stats_empty,
};

#define BFQG_FLAG_FNS(name)						\
static void bfqg_stats_mark_##name(struct bfqg_stats *stats)	\
{									\
	stats->flags |= (1 << BFQG_stats_##name);			\
}									\
static void bfqg_stats_clear_##name(struct bfqg_stats *stats)	\
{									\
	stats->flags &= ~(1 << BFQG_stats_##name);			\
}									\
static int bfqg_stats_##name(struct bfqg_stats *stats)		\
{									\
	return (stats->flags & (1 << BFQG_stats_##name)) != 0;		\
}									\

BFQG_FLAG_FNS(waiting)
BFQG_FLAG_FNS(idling)
BFQG_FLAG_FNS(empty)
#undef BFQG_FLAG_FNS
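
/*
 * For illustration (comment added here, not in the original source):
 * each BFQG_FLAG_FNS(name) invocation above expands to three tiny
 * helpers; e.g. BFQG_FLAG_FNS(waiting) yields
 *
 *	static void bfqg_stats_mark_waiting(struct bfqg_stats *stats)
 *	{ stats->flags |= (1 << BFQG_stats_waiting); }
 *	static void bfqg_stats_clear_waiting(struct bfqg_stats *stats)
 *	{ stats->flags &= ~(1 << BFQG_stats_waiting); }
 *	static int bfqg_stats_waiting(struct bfqg_stats *stats)
 *	{ return (stats->flags & (1 << BFQG_stats_waiting)) != 0; }
 */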

/* This should be called with the scheduler lock held. */
static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
{
	u64 now;

	if (!bfqg_stats_waiting(stats))
		return;

	now = ktime_get_ns();
	if (now > stats->start_group_wait_time)
		bfq_stat_add(&stats->group_wait_time,
			      now - stats->start_group_wait_time);
	bfqg_stats_clear_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
						 struct bfq_group *curr_bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_waiting(stats))
		return;
	if (bfqg == curr_bfqg)
		return;
	stats->start_group_wait_time = ktime_get_ns();
	bfqg_stats_mark_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
{
	u64 now;

	if (!bfqg_stats_empty(stats))
		return;

	now = ktime_get_ns();
	if (now > stats->start_empty_time)
		bfq_stat_add(&stats->empty_time,
			      now - stats->start_empty_time);
	bfqg_stats_clear_empty(stats);
}

void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
{
	bfq_stat_add(&bfqg->stats.dequeue, 1);
}

void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (blkg_rwstat_total(&stats->queued))
		return;

	/*
	 * The group is already marked empty. This can happen if bfqq got a
	 * new request in its parent group and moved to this group while
	 * being added to the service tree. Just ignore the event and move
	 * on.
	 */
	if (bfqg_stats_empty(stats))
		return;

	stats->start_empty_time = ktime_get_ns();
	bfqg_stats_mark_empty(stats);
}

void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_idling(stats)) {
		u64 now = ktime_get_ns();

		if (now > stats->start_idle_time)
			bfq_stat_add(&stats->idle_time,
				      now - stats->start_idle_time);
		bfqg_stats_clear_idling(stats);
	}
}

void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	stats->start_idle_time = ktime_get_ns();
	bfqg_stats_mark_idling(stats);
}

void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	bfq_stat_add(&stats->avg_queue_size_sum,
		      blkg_rwstat_total(&stats->queued));
	bfq_stat_add(&stats->avg_queue_size_samples, 1);
	bfqg_stats_update_group_wait_time(stats);
}

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.queued, op, 1);
	bfqg_stats_end_empty_time(&bfqg->stats);
	if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
		bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
}

void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.queued, op, -1);
}

void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.merged, op, 1);
}

void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, unsigned int op)
{
	struct bfqg_stats *stats = &bfqg->stats;
	u64 now = ktime_get_ns();

	if (now > io_start_time_ns)
		blkg_rwstat_add(&stats->service_time, op,
				now - io_start_time_ns);
	if (io_start_time_ns > start_time_ns)
		blkg_rwstat_add(&stats->wait_time, op,
				io_start_time_ns - start_time_ns);
}

#else /* CONFIG_BFQ_CGROUP_DEBUG */

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      unsigned int op) { }
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, unsigned int op) { }
void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }

#endif /* CONFIG_BFQ_CGROUP_DEBUG */

#ifdef CONFIG_BFQ_GROUP_IOSCHED

/*
 * blk-cgroup policy-related handlers
 * The following functions help in converting between blk-cgroup
 * internal structures and BFQ-specific structures.
 */

static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct bfq_group, pd) : NULL;
}

struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
{
	return pd_to_blkg(&bfqg->pd);
}

static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
{
	return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
}

/*
 * bfq_group handlers
 * The following functions help in navigating the bfq_group hierarchy
 * by allowing one to find the parent of a bfq_group or the bfq_group
 * associated with a bfq_queue.
 */

static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
{
	struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;

	return pblkg ? blkg_to_bfqg(pblkg) : NULL;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	struct bfq_entity *group_entity = bfqq->entity.parent;

	return group_entity ? container_of(group_entity, struct bfq_group,
					   entity) :
			      bfqq->bfqd->root_group;
}

/*
 * The following two functions handle get and put of a bfq_group by
 * wrapping the related blk-cgroup hooks.
 */

static void bfqg_get(struct bfq_group *bfqg)
{
	bfqg->ref++;
}

static void bfqg_put(struct bfq_group *bfqg)
{
	bfqg->ref--;

	if (bfqg->ref == 0)
		kfree(bfqg);
}

static void bfqg_and_blkg_get(struct bfq_group *bfqg)
{
	/* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
	bfqg_get(bfqg);

	blkg_get(bfqg_to_blkg(bfqg));
}

void bfqg_and_blkg_put(struct bfq_group *bfqg)
{
	blkg_put(bfqg_to_blkg(bfqg));

	bfqg_put(bfqg);
}

/* @stats = 0 */
static void bfqg_stats_reset(struct bfqg_stats *stats)
{
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	/* queued stats shouldn't be cleared */
	blkg_rwstat_reset(&stats->merged);
	blkg_rwstat_reset(&stats->service_time);
	blkg_rwstat_reset(&stats->wait_time);
	bfq_stat_reset(&stats->time);
	bfq_stat_reset(&stats->avg_queue_size_sum);
	bfq_stat_reset(&stats->avg_queue_size_samples);
	bfq_stat_reset(&stats->dequeue);
	bfq_stat_reset(&stats->group_wait_time);
	bfq_stat_reset(&stats->idle_time);
	bfq_stat_reset(&stats->empty_time);
#endif
}

/* @to += @from */
static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
{
	if (!to || !from)
		return;

#ifdef CONFIG_BFQ_CGROUP_DEBUG
	/* queued stats shouldn't be cleared */
	blkg_rwstat_add_aux(&to->merged, &from->merged);
	blkg_rwstat_add_aux(&to->service_time, &from->service_time);
	blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
	bfq_stat_add_aux(&to->time, &from->time);
	bfq_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
	bfq_stat_add_aux(&to->avg_queue_size_samples,
			  &from->avg_queue_size_samples);
	bfq_stat_add_aux(&to->dequeue, &from->dequeue);
	bfq_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
	bfq_stat_add_aux(&to->idle_time, &from->idle_time);
	bfq_stat_add_aux(&to->empty_time, &from->empty_time);
#endif
}

/*
 * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
 * recursive stats can still account for the amount used by this bfqg after
 * it's gone.
 */
static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
{
	struct bfq_group *parent;

	if (!bfqg) /* root_group */
		return;

	parent = bfqg_parent(bfqg);

	lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);

	if (unlikely(!parent))
		return;

	bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
	bfqg_stats_reset(&bfqg->stats);
}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
		/*
		 * Make sure that bfqg and its associated blkg do not
		 * disappear before entity.
		 */
		bfqg_and_blkg_get(bfqg);
	}
	entity->parent = bfqg->my_entity; /* NULL for root group */
	entity->sched_data = &bfqg->sched_data;
}

static void bfqg_stats_exit(struct bfqg_stats *stats)
{
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	blkg_rwstat_exit(&stats->merged);
	blkg_rwstat_exit(&stats->service_time);
	blkg_rwstat_exit(&stats->wait_time);
	blkg_rwstat_exit(&stats->queued);
	bfq_stat_exit(&stats->time);
	bfq_stat_exit(&stats->avg_queue_size_sum);
	bfq_stat_exit(&stats->avg_queue_size_samples);
	bfq_stat_exit(&stats->dequeue);
	bfq_stat_exit(&stats->group_wait_time);
	bfq_stat_exit(&stats->idle_time);
	bfq_stat_exit(&stats->empty_time);
#endif
}

static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
{
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	if (blkg_rwstat_init(&stats->merged, gfp) ||
	    blkg_rwstat_init(&stats->service_time, gfp) ||
	    blkg_rwstat_init(&stats->wait_time, gfp) ||
	    blkg_rwstat_init(&stats->queued, gfp) ||
	    bfq_stat_init(&stats->time, gfp) ||
	    bfq_stat_init(&stats->avg_queue_size_sum, gfp) ||
	    bfq_stat_init(&stats->avg_queue_size_samples, gfp) ||
	    bfq_stat_init(&stats->dequeue, gfp) ||
	    bfq_stat_init(&stats->group_wait_time, gfp) ||
	    bfq_stat_init(&stats->idle_time, gfp) ||
	    bfq_stat_init(&stats->empty_time, gfp)) {
		bfqg_stats_exit(stats);
		return -ENOMEM;
	}
#endif

	return 0;
}

static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
{
	return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
}

static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
{
	return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
}

static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
{
	struct bfq_group_data *bgd;

	bgd = kzalloc(sizeof(*bgd), gfp);
	if (!bgd)
		return NULL;
	return &bgd->pd;
}

static void bfq_cpd_init(struct blkcg_policy_data *cpd)
{
	struct bfq_group_data *d = cpd_to_bfqgd(cpd);

	d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
		CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL;
}

static void bfq_cpd_free(struct blkcg_policy_data *cpd)
{
	kfree(cpd_to_bfqgd(cpd));
}

static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, struct request_queue *q,
					     struct blkcg *blkcg)
{
	struct bfq_group *bfqg;

	bfqg = kzalloc_node(sizeof(*bfqg), gfp, q->node);
	if (!bfqg)
		return NULL;

	if (bfqg_stats_init(&bfqg->stats, gfp)) {
		kfree(bfqg);
		return NULL;
	}

	/* see comments in bfq_bic_update_cgroup for why refcounting */
	bfqg_get(bfqg);
	return &bfqg->pd;
}

static void bfq_pd_init(struct blkg_policy_data *pd)
{
	struct blkcg_gq *blkg = pd_to_blkg(pd);
	struct bfq_group *bfqg = blkg_to_bfqg(blkg);
	struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
	struct bfq_entity *entity = &bfqg->entity;
	struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);

	entity->orig_weight = entity->weight = entity->new_weight = d->weight;
	entity->my_sched_data = &bfqg->sched_data;
	bfqg->my_entity = entity; /*
				   * the root_group's will be set to NULL
				   * in bfq_init_queue()
				   */
	bfqg->bfqd = bfqd;
	bfqg->active_entities = 0;
	bfqg->online = true;
	bfqg->rq_pos_tree = RB_ROOT;
}

static void bfq_pd_free(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_exit(&bfqg->stats);
	bfqg_put(bfqg);
}

static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_reset(&bfqg->stats);
}

static void bfq_group_set_parent(struct bfq_group *bfqg,
					struct bfq_group *parent)
{
	struct bfq_entity *entity;

	entity = &bfqg->entity;
	entity->parent = parent->my_entity;
	entity->sched_data = &parent->sched_data;
}

static void bfq_link_bfqg(struct bfq_data *bfqd, struct bfq_group *bfqg)
{
	struct bfq_group *parent;
	struct bfq_entity *entity;

	/*
	 * Update chain of bfq_groups as we might be handling a leaf group
	 * which, along with some of its relatives, has not been hooked yet
	 * to the private hierarchy of BFQ.
	 */
	entity = &bfqg->entity;
	for_each_entity(entity) {
		struct bfq_group *curr_bfqg = container_of(entity,
						struct bfq_group, entity);
		if (curr_bfqg != bfqd->root_group) {
			parent = bfqg_parent(curr_bfqg);
			if (!parent)
				parent = bfqd->root_group;
			bfq_group_set_parent(curr_bfqg, parent);
		}
	}
}

struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
{
	struct blkcg_gq *blkg = bio->bi_blkg;
	struct bfq_group *bfqg;

	while (blkg) {
		if (!blkg->online) {
			blkg = blkg->parent;
			continue;
		}
		bfqg = blkg_to_bfqg(blkg);
		if (bfqg->online) {
			bio_associate_blkg_from_css(bio, &blkg->blkcg->css);
			return bfqg;
		}
		blkg = blkg->parent;
	}
	bio_associate_blkg_from_css(bio,
				&bfqg_to_blkg(bfqd->root_group)->blkcg->css);
	return bfqd->root_group;
}

/**
 * bfq_bfqq_move - migrate @bfqq to @bfqg.
 * @bfqd: queue descriptor.
 * @bfqq: the queue to move.
 * @bfqg: the group to move to.
 *
 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
 * it on the new one.  Avoid putting the entity on the old group idle tree.
 *
 * Must be called under the scheduler lock, to make sure that the blkg
 * owning @bfqg does not disappear (see comments in
 * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
 * objects).
 */
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg)
{
	struct bfq_entity *entity = &bfqq->entity;

	/*
	 * oom_bfqq is not allowed to move; it holds a reference to
	 * root_group until elevator exit.
	 */
	if (bfqq == &bfqd->oom_bfqq)
		return;
	/*
	 * Get an extra reference to prevent bfqq from being freed by the
	 * next possible expiration or deactivation.
	 */
	bfqq->ref++;

	/* If bfqq is empty, then bfq_bfqq_expire also invokes
	 * bfq_del_bfqq_busy, thereby removing bfqq and its entity
	 * from data structures related to the current group. Otherwise we
	 * need to remove bfqq explicitly with bfq_deactivate_bfqq, as
	 * we do below.
	 */
	if (bfqq == bfqd->in_service_queue)
		bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
				false, BFQQE_PREEMPTED);

	if (bfq_bfqq_busy(bfqq))
		bfq_deactivate_bfqq(bfqd, bfqq, false, false);
	else if (entity->on_st)
		bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
	bfqg_and_blkg_put(bfqq_group(bfqq));

	entity->parent = bfqg->my_entity;
	entity->sched_data = &bfqg->sched_data;
	/* pin down bfqg and its associated blkg */
	bfqg_and_blkg_get(bfqg);

	if (bfq_bfqq_busy(bfqq)) {
		if (unlikely(!bfqd->nonrot_with_queueing))
			bfq_pos_tree_add_move(bfqd, bfqq);
		bfq_activate_bfqq(bfqd, bfqq);
	}

	if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
		bfq_schedule_dispatch(bfqd);
	/* release the extra ref taken above; bfqq may happen to be freed now */
	bfq_put_queue(bfqq);
}

/**
 * __bfq_bic_change_cgroup - move @bic to @bfqg.
 * @bfqd: the queue descriptor.
 * @bic: the bic to move.
 * @bfqg: the group to move to.
 *
 * Move bic to bfqg, assuming that bfqd->lock is held, which makes
 * sure that the reference to the cgroup is valid across the call (see
 * comments in bfq_bic_update_cgroup on this issue).
 */
static void *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
				     struct bfq_io_cq *bic,
				     struct bfq_group *bfqg)
{
	struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
	struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
	struct bfq_entity *entity;

	if (async_bfqq) {
		entity = &async_bfqq->entity;

		if (entity->sched_data != &bfqg->sched_data) {
			bic_set_bfqq(bic, NULL, 0);
			bfq_release_process_ref(bfqd, async_bfqq);
		}
	}

	if (sync_bfqq) {
		if (!sync_bfqq->new_bfqq && !bfq_bfqq_coop(sync_bfqq)) {
			/* We are the only user of this bfqq, just move it */
			if (sync_bfqq->entity.sched_data != &bfqg->sched_data)
				bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
		} else {
			struct bfq_queue *bfqq;

			/*
			 * The queue was merged to a different queue. Check
			 * that the merge chain still belongs to the same
			 * cgroup.
			 */
			for (bfqq = sync_bfqq; bfqq; bfqq = bfqq->new_bfqq)
				if (bfqq->entity.sched_data !=
				    &bfqg->sched_data)
					break;
			if (bfqq) {
				/*
				 * Some queue changed cgroup so the merge is
				 * not valid anymore. We cannot easily just
				 * cancel the merge (by clearing new_bfqq) as
				 * there may be other processes using this
				 * queue and holding refs to all queues below
				 * sync_bfqq->new_bfqq. Similarly if the merge
				 * already happened, we need to detach from
				 * bfqq now so that we cannot merge bio to a
				 * request from the old cgroup.
				 */
				bfq_put_cooperator(sync_bfqq);
				bfq_release_process_ref(bfqd, sync_bfqq);
				bic_set_bfqq(bic, NULL, 1);
			}
		}
	}

	return bfqg;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
{
	struct bfq_data *bfqd = bic_to_bfqd(bic);
	struct bfq_group *bfqg = bfq_bio_bfqg(bfqd, bio);
	uint64_t serial_nr;

	serial_nr = bfqg_to_blkg(bfqg)->blkcg->css.serial_nr;

	/*
	 * Check whether blkcg has changed.  The condition may trigger
	 * spuriously on a newly created cic but there's no harm.
	 */
	if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
		return;

	/*
	 * New cgroup for this process. Make sure it is linked to bfq internal
	 * cgroup hierarchy.
	 */
	bfq_link_bfqg(bfqd, bfqg);
	__bfq_bic_change_cgroup(bfqd, bic, bfqg);
	/*
	 * Update blkg_path for bfq_log_* functions. We cache this
	 * path, and update it here, for the following
	 * reasons. Operations on blkg objects in blk-cgroup are
	 * protected with the request_queue lock, and not with the
	 * lock that protects the instances of this scheduler
	 * (bfqd->lock). This exposes BFQ to the following sort of
	 * race.
	 *
	 * The blkg_lookup performed in bfq_get_queue, protected
	 * through rcu, may happen to return the address of a copy of
	 * the original blkg. If this is the case, then the
	 * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
	 * the blkg, is useless: it does not prevent blk-cgroup code
	 * from destroying both the original blkg and all objects
	 * directly or indirectly referred by the copy of the
	 * blkg.
	 *
	 * On the bright side, destroy operations on a blkg invoke, as
	 * a first step, hooks of the scheduler associated with the
	 * blkg. And these hooks are executed with bfqd->lock held for
	 * BFQ. As a consequence, for any blkg associated with the
	 * request queue this instance of the scheduler is attached
	 * to, we are guaranteed that such a blkg is not destroyed, and
	 * that all the pointers it contains are consistent, while we
	 * are holding bfqd->lock. A blkg_lookup performed with
	 * bfqd->lock held then returns a fully consistent blkg, which
	 * remains consistent as long as this lock is held.
	 *
	 * Thanks to the last fact, and to the fact that: (1) bfqg has
	 * been obtained through a blkg_lookup in the above
	 * assignment, and (2) bfqd->lock is being held, here we can
	 * safely use the policy data for the involved blkg (i.e., the
	 * field bfqg->pd) to get to the blkg associated with bfqg,
	 * and then we can safely use any field of blkg. After we
	 * release bfqd->lock, even just getting blkg through this
	 * bfqg may cause dangling references to be traversed, as
	 * bfqg->pd may not exist any more.
	 *
	 * In view of the above facts, here we cache, in the bfqg, any
	 * blkg data we may need for this bic, and for its associated
	 * bfq_queue. As of now, we need to cache only the path of the
	 * blkg, which is used in the bfq_log_* functions.
	 *
	 * Finally, note that bfqg itself needs to be protected from
	 * destruction on the blkg_free of the original blkg (which
	 * invokes bfq_pd_free). We use an additional private
	 * refcounter for bfqg, to let it disappear only after no
	 * bfq_queue refers to it any longer.
	 */
	blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
	bic->blkcg_serial_nr = serial_nr;
}

/**
 * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
 * @st: the service tree being flushed.
 */
static void bfq_flush_idle_tree(struct bfq_service_tree *st)
{
	struct bfq_entity *entity = st->first_idle;

	for (; entity ; entity = st->first_idle)
		__bfq_deactivate_entity(entity, false);
}

/**
 * bfq_reparent_leaf_entity - move leaf entity to the root_group.
 * @bfqd: the device data structure with the root group.
 * @entity: the entity to move, if entity is a leaf; or the parent entity
 *	    of an active leaf entity to move, if entity is not a leaf.
 * @ioprio_class: I/O priority class of the service trees to walk.
 */
static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
				     struct bfq_entity *entity,
				     int ioprio_class)
{
	struct bfq_queue *bfqq;
	struct bfq_entity *child_entity = entity;

	while (child_entity->my_sched_data) { /* leaf not reached yet */
		struct bfq_sched_data *child_sd = child_entity->my_sched_data;
		struct bfq_service_tree *child_st = child_sd->service_tree +
			ioprio_class;
		struct rb_root *child_active = &child_st->active;

		child_entity = bfq_entity_of(rb_first(child_active));

		if (!child_entity)
			child_entity = child_sd->in_service_entity;
	}

	bfqq = bfq_entity_to_bfqq(child_entity);
	bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
}

/**
 * bfq_reparent_active_queues - move to the root group all active queues.
 * @bfqd: the device data structure with the root group.
 * @bfqg: the group to move from.
 * @st: the service tree to start the search from.
 * @ioprio_class: I/O priority class of @st.
 */
static void bfq_reparent_active_queues(struct bfq_data *bfqd,
				       struct bfq_group *bfqg,
				       struct bfq_service_tree *st,
				       int ioprio_class)
{
	struct rb_root *active = &st->active;
	struct bfq_entity *entity;

	while ((entity = bfq_entity_of(rb_first(active))))
		bfq_reparent_leaf_entity(bfqd, entity, ioprio_class);

	if (bfqg->sched_data.in_service_entity)
		bfq_reparent_leaf_entity(bfqd,
					 bfqg->sched_data.in_service_entity,
					 ioprio_class);
}

/**
 * bfq_pd_offline - deactivate the entity associated with @pd,
 *		    and reparent its children entities.
 * @pd: descriptor of the policy going offline.
 *
 * blkio already grabs the queue_lock for us, so no need to use
 * RCU-based magic.
 */
static void bfq_pd_offline(struct blkg_policy_data *pd)
{
	struct bfq_service_tree *st;
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	struct bfq_data *bfqd = bfqg->bfqd;
	struct bfq_entity *entity = bfqg->my_entity;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&bfqd->lock, flags);

	if (!entity) /* root group */
		goto put_async_queues;

	/*
	 * Empty all service_trees belonging to this group before
	 * deactivating the group itself.
	 */
	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
		st = bfqg->sched_data.service_tree + i;

		/*
		 * It may happen that some queues are still active
		 * (busy) upon group destruction (if the corresponding
		 * processes have been forced to terminate). We move
		 * all the leaf entities corresponding to these queues
		 * to the root_group.
		 * Also, it may happen that the group has an entity
		 * in service, which is disconnected from the active
		 * tree: it must be moved, too.
		 * There is no need to put the sync queues, as the
		 * scheduler has taken no reference.
		 */
		bfq_reparent_active_queues(bfqd, bfqg, st, i);

		/*
		 * The idle tree may still contain bfq_queues
		 * belonging to exited tasks because they never
		 * migrated to a different cgroup from the one being
		 * destroyed now. In addition, even
		 * bfq_reparent_active_queues() may happen to add some
		 * entities to the idle tree. It happens if, in some
		 * of the calls to bfq_bfqq_move() performed by
		 * bfq_reparent_active_queues(), the queue to move is
		 * empty and gets expired.
		 */
		bfq_flush_idle_tree(st);
	}

	__bfq_deactivate_entity(entity, false);

put_async_queues:
	bfq_put_async_queues(bfqd, bfqg);
	bfqg->online = false;

	spin_unlock_irqrestore(&bfqd->lock, flags);
	/*
	 * @blkg is going offline and will be ignored by
	 * blkg_[rw]stat_recursive_sum().  Transfer stats to the parent so
	 * that they don't get lost.  If IOs complete after this point, the
	 * stats for them will be lost.  Oh well...
	 */
	bfqg_stats_xfer_dead(bfqg);
}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
	struct blkcg_gq *blkg;

	list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		bfq_end_wr_async_queues(bfqd, bfqg);
	}
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

static int bfq_io_show_weight_legacy(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	unsigned int val = 0;

	if (bfqgd)
		val = bfqgd->weight;

	seq_printf(sf, "%u\n", val);

	return 0;
}

static u64 bfqg_prfill_weight_device(struct seq_file *sf,
				     struct blkg_policy_data *pd, int off)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	if (!bfqg->entity.dev_weight)
		return 0;
	return __blkg_prfill_u64(sf, pd, bfqg->entity.dev_weight);
}

static int bfq_io_show_weight(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);

	seq_printf(sf, "default %u\n", bfqgd->weight);
	blkcg_print_blkgs(sf, blkcg, bfqg_prfill_weight_device,
			  &blkcg_policy_bfq, 0, false);
	return 0;
}

static void bfq_group_set_weight(struct bfq_group *bfqg, u64 weight, u64 dev_weight)
{
	weight = dev_weight ?: weight;

	bfqg->entity.dev_weight = dev_weight;
	/*
	 * Setting the prio_changed flag of the entity
	 * to 1 with new_weight == weight would re-set
	 * the value of the weight to its ioprio mapping.
	 * Set the flag only if necessary.
	 */
	if ((unsigned short)weight != bfqg->entity.new_weight) {
		bfqg->entity.new_weight = (unsigned short)weight;
		/*
		 * Make sure that the above new value has been
		 * stored in bfqg->entity.new_weight before
		 * setting the prio_changed flag. In fact,
		 * this flag may be read asynchronously (in
		 * critical sections protected by a different
		 * lock than that held here), and finding this
		 * flag set may cause the execution of the code
		 * for updating parameters whose value may
		 * depend also on bfqg->entity.new_weight (in
		 * __bfq_entity_update_weight_prio).
		 * This barrier makes sure that the new value
		 * of bfqg->entity.new_weight is correctly
		 * seen in that code.
		 */
		smp_wmb();
		bfqg->entity.prio_changed = 1;
	}
}
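
/*
 * Note added for clarity (not in the original source): the smp_wmb()
 * above is the write side of a write/read barrier pair. The reader of
 * prio_changed and new_weight (__bfq_entity_update_weight_prio(), in
 * bfq-wf2q.c) is expected to issue the matching read-side ordering, so
 * that a reader that observes prio_changed == 1 also observes the new
 * weight.
 */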

static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
				    struct cftype *cftype,
				    u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	struct blkcg_gq *blkg;
	int ret = -ERANGE;

	if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
		return ret;

	ret = 0;
	spin_lock_irq(&blkcg->lock);
	bfqgd->weight = (unsigned short)val;
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		if (bfqg)
			bfq_group_set_weight(bfqg, val, 0);
	}
	spin_unlock_irq(&blkcg->lock);

	return ret;
}
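
/*
 * Usage sketch (illustrative comment, not in the original source): on
 * the legacy (cgroup v1) hierarchy this handler backs the blkio
 * controller's "bfq.weight" file, so a group's default weight can be
 * set with something like
 *
 *	echo 500 > /sys/fs/cgroup/blkio/<group>/blkio.bfq.weight
 *
 * Values outside [BFQ_MIN_WEIGHT, BFQ_MAX_WEIGHT] are rejected with
 * -ERANGE, as the range check above shows.
 */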

static ssize_t bfq_io_set_device_weight(struct kernfs_open_file *of,
					char *buf, size_t nbytes,
					loff_t off)
{
	int ret;
	struct blkg_conf_ctx ctx;
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct bfq_group *bfqg;
	u64 v;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_bfq, buf, &ctx);
	if (ret)
		return ret;

	if (sscanf(ctx.body, "%llu", &v) == 1) {
		/* require "default" on dfl */
		ret = -ERANGE;
		if (!v)
			goto out;
	} else if (!strcmp(strim(ctx.body), "default")) {
		v = 0;
	} else {
		ret = -EINVAL;
		goto out;
	}

	bfqg = blkg_to_bfqg(ctx.blkg);

	ret = -ERANGE;
	if (!v || (v >= BFQ_MIN_WEIGHT && v <= BFQ_MAX_WEIGHT)) {
		bfq_group_set_weight(bfqg, bfqg->entity.weight, v);
		ret = 0;
	}
out:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}

static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
				 char *buf, size_t nbytes,
				 loff_t off)
{
	char *endp;
	int ret;
	u64 v;

	buf = strim(buf);

	/* "WEIGHT" or "default WEIGHT" sets the default weight */
	v = simple_strtoull(buf, &endp, 0);
	if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) {
		ret = bfq_io_set_weight_legacy(of_css(of), NULL, v);
		return ret ?: nbytes;
	}

	return bfq_io_set_device_weight(of, buf, nbytes, off);
}
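
/*
 * Usage sketch (illustrative comment, not in the original source):
 * with the parser above, writes to the unified-hierarchy "bfq.weight"
 * file accept a bare or "default"-prefixed weight, and anything else
 * falls through to the per-device form handled via blkg_conf_prep():
 *
 *	echo 200 > io.bfq.weight		# set the default weight
 *	echo "default 200" > io.bfq.weight	# same effect
 *	echo "8:16 300" > io.bfq.weight		# weight for device 8:16
 *	echo "8:16 default" > io.bfq.weight	# drop the per-device weight
 */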

#ifdef CONFIG_BFQ_CGROUP_DEBUG
static int bfqg_print_stat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
			  &blkcg_policy_bfq, seq_cft(sf)->private, false);
	return 0;
}

static int bfqg_print_rwstat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
			  &blkcg_policy_bfq, seq_cft(sf)->private, true);
	return 0;
}

static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	struct blkcg_gq *blkg = pd_to_blkg(pd);
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	u64 sum = 0;

	lockdep_assert_held(&blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct bfq_stat *stat;

		if (!pos_blkg->online)
			continue;

		stat = (void *)blkg_to_pd(pos_blkg, &blkcg_policy_bfq) + off;
		sum += bfq_stat_read(stat) + atomic64_read(&stat->aux_cnt);
	}
	rcu_read_unlock();

	return __blkg_prfill_u64(sf, pd, sum);
}

static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
					struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat_sample sum;

	blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_bfq, off, &sum);
	return __blkg_prfill_rwstat(sf, pd, &sum);
}

static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
			  seq_cft(sf)->private, false);
	return 0;
}

static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
			  seq_cft(sf)->private, true);
	return 0;
}

static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes);

	return __blkg_prfill_u64(sf, pd, sum >> 9);
}

static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false);
	return 0;
}

static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
					 struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat_sample tmp;

	blkg_rwstat_recursive_sum(pd->blkg, NULL,
			offsetof(struct blkcg_gq, stat_bytes), &tmp);

	return __blkg_prfill_u64(sf, pd,
		(tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE]) >> 9);
}

static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0,
			  false);
	return 0;
}

static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	u64 samples = bfq_stat_read(&bfqg->stats.avg_queue_size_samples);
	u64 v = 0;

	if (samples) {
		v = bfq_stat_read(&bfqg->stats.avg_queue_size_sum);
		v = div64_u64(v, samples);
	}
	__blkg_prfill_u64(sf, pd, v);
	return 0;
}

/* print avg_queue_size */
static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_avg_queue_size, &blkcg_policy_bfq,
			  0, false);
	return 0;
}
#endif /* CONFIG_BFQ_CGROUP_DEBUG */

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	int ret;

	ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq);
	if (ret)
		return NULL;

	return blkg_to_bfqg(bfqd->queue->root_blkg);
}

struct blkcg_policy blkcg_policy_bfq = {
	.dfl_cftypes		= bfq_blkg_files,
	.legacy_cftypes		= bfq_blkcg_legacy_files,

	.cpd_alloc_fn		= bfq_cpd_alloc,
	.cpd_init_fn		= bfq_cpd_init,
	.cpd_bind_fn	        = bfq_cpd_init,
	.cpd_free_fn		= bfq_cpd_free,

	.pd_alloc_fn		= bfq_pd_alloc,
	.pd_init_fn		= bfq_pd_init,
	.pd_offline_fn		= bfq_pd_offline,
	.pd_free_fn		= bfq_pd_free,
	.pd_reset_stats_fn	= bfq_pd_reset_stats,
};

struct cftype bfq_blkcg_legacy_files[] = {
	{
		.name = "bfq.weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight_legacy,
		.write_u64 = bfq_io_set_weight_legacy,
	},
	{
		.name = "bfq.weight_device",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight,
		.write = bfq_io_set_weight,
	},

	/* statistics, covers only the tasks in the bfqg */
	{
		.name = "bfq.io_service_bytes",
		.private = (unsigned long)&blkcg_policy_bfq,
		.seq_show = blkg_print_stat_bytes,
	},
	{
		.name = "bfq.io_serviced",
		.private = (unsigned long)&blkcg_policy_bfq,
		.seq_show = blkg_print_stat_ios,
	},
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	{
		.name = "bfq.time",
		.private = offsetof(struct bfq_group, stats.time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.sectors",
		.seq_show = bfqg_print_stat_sectors,
	},
	{
		.name = "bfq.io_service_time",
		.private = offsetof(struct bfq_group, stats.service_time),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_wait_time",
		.private = offsetof(struct bfq_group, stats.wait_time),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_merged",
		.private = offsetof(struct bfq_group, stats.merged),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_queued",
		.private = offsetof(struct bfq_group, stats.queued),
		.seq_show = bfqg_print_rwstat,
	},
#endif /* CONFIG_BFQ_CGROUP_DEBUG */

	/* the same statistics which cover the bfqg and its descendants */
	{
		.name = "bfq.io_service_bytes_recursive",
		.private = (unsigned long)&blkcg_policy_bfq,
		.seq_show = blkg_print_stat_bytes_recursive,
	},
	{
		.name = "bfq.io_serviced_recursive",
		.private = (unsigned long)&blkcg_policy_bfq,
		.seq_show = blkg_print_stat_ios_recursive,
	},
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	{
		.name = "bfq.time_recursive",
		.private = offsetof(struct bfq_group, stats.time),
		.seq_show = bfqg_print_stat_recursive,
	},
	{
		.name = "bfq.sectors_recursive",
		.seq_show = bfqg_print_stat_sectors_recursive,
	},
	{
		.name = "bfq.io_service_time_recursive",
		.private = offsetof(struct bfq_group, stats.service_time),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_wait_time_recursive",
		.private = offsetof(struct bfq_group, stats.wait_time),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_merged_recursive",
		.private = offsetof(struct bfq_group, stats.merged),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_queued_recursive",
		.private = offsetof(struct bfq_group, stats.queued),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.avg_queue_size",
		.seq_show = bfqg_print_avg_queue_size,
	},
	{
		.name = "bfq.group_wait_time",
		.private = offsetof(struct bfq_group, stats.group_wait_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.idle_time",
		.private = offsetof(struct bfq_group, stats.idle_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.empty_time",
		.private = offsetof(struct bfq_group, stats.empty_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.dequeue",
		.private = offsetof(struct bfq_group, stats.dequeue),
		.seq_show = bfqg_print_stat,
	},
#endif	/* CONFIG_BFQ_CGROUP_DEBUG */
	{ }	/* terminate */
};

struct cftype bfq_blkg_files[] = {
	{
		.name = "bfq.weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight,
		.write = bfq_io_set_weight,
	},
	{} /* terminate */
};

#else	/* CONFIG_BFQ_GROUP_IOSCHED */

void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg) {}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
	}
	entity->sched_data = &bfqg->sched_data;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
{
	return bfqd->root_group;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	return bfqq->bfqd->root_group;
}

void bfqg_and_blkg_get(struct bfq_group *bfqg) {}

void bfqg_and_blkg_put(struct bfq_group *bfqg) {}

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	struct bfq_group *bfqg;
	int i;

	bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
	if (!bfqg)
		return NULL;

	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
		bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;

	return bfqg;
}
#endif	/* CONFIG_BFQ_GROUP_IOSCHED */