// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_mqprio.c
 *
 * Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>

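/* Per-qdisc private state, as used throughout this file:
 *
 * @qdiscs:     per-TX-queue child qdiscs, allocated in mqprio_init() and
 *              handed over to the device queues in mqprio_attach()
 * @mode:       TC_MQPRIO_MODE_DCB or TC_MQPRIO_MODE_CHANNEL
 * @shaper:     TC_MQPRIO_SHAPER_DCB or TC_MQPRIO_SHAPER_BW_RATE
 * @hw_offload: offload level actually accepted by the driver
 * @flags:      TC_MQPRIO_F_* bits recording which attributes were supplied
 * @min_rate:   per-traffic-class minimum rate (bw_rlimit shaper only)
 * @max_rate:   per-traffic-class maximum rate (bw_rlimit shaper only)
 */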
struct mqprio_sched {
	struct Qdisc		**qdiscs;
	u16 mode;
	u16 shaper;
	int hw_offload;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

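/* Tear down the qdisc: release any per-queue child qdiscs that were never
 * attached, then either ask the driver to remove the hardware mapping (by
 * passing a zeroed offload struct) or clear the software traffic-class
 * mapping on the netdev.
 */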
static void mqprio_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	if (priv->qdiscs) {
		for (ntx = 0;
		     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
		     ntx++)
			qdisc_put(priv->qdiscs[ntx]);
		kfree(priv->qdiscs);
	}

	if (priv->hw_offload && dev->netdev_ops->ndo_setup_tc) {
		struct tc_mqprio_qopt_offload mqprio = { { 0 } };

		switch (priv->mode) {
		case TC_MQPRIO_MODE_DCB:
		case TC_MQPRIO_MODE_CHANNEL:
			dev->netdev_ops->ndo_setup_tc(dev,
						      TC_SETUP_QDISC_MQPRIO,
						      &mqprio);
			break;
		default:
			return;
		}
	} else {
		netdev_set_num_tc(dev, 0);
	}
}

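/* Sanity-check the legacy struct tc_mqprio_qopt from user space: the
 * number of traffic classes, the priority-to-TC map, and (in software
 * mode) that the per-TC queue offset/count ranges lie within
 * real_num_tx_queues and do not overlap one another.
 */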
static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
{
	int i, j;

	/* Verify num_tc is not out of max range */
	if (qopt->num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	/* Verify priority mapping uses valid tcs */
	for (i = 0; i < TC_BITMASK + 1; i++) {
		if (qopt->prio_tc_map[i] >= qopt->num_tc)
			return -EINVAL;
	}

	/* Limit qopt->hw to the maximum supported offload value.  Drivers
	 * have the option of overriding this later if they don't support
	 * a given offload type.
	 */
	if (qopt->hw > TC_MQPRIO_HW_OFFLOAD_MAX)
		qopt->hw = TC_MQPRIO_HW_OFFLOAD_MAX;

	/* If hardware offload is requested we will leave it to the device
	 * to either populate the queue counts itself or to validate the
	 * provided queue counts.  If ndo_setup_tc is not present then
	 * hardware doesn't support offload and we should return an error.
	 */
	if (qopt->hw)
		return dev->netdev_ops->ndo_setup_tc ? 0 : -EINVAL;

	for (i = 0; i < qopt->num_tc; i++) {
		unsigned int last = qopt->offset[i] + qopt->count[i];

		/* Verify the queue count is in the TX range; "last" being
		 * equal to real_num_tx_queues indicates the final queue is
		 * in use.
		 */
		if (qopt->offset[i] >= dev->real_num_tx_queues ||
		    !qopt->count[i] ||
		    last > dev->real_num_tx_queues)
			return -EINVAL;

		/* Verify that the offset and counts do not overlap */
		for (j = i + 1; j < qopt->num_tc; j++) {
			if (last > qopt->offset[j])
				return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy mqprio_policy[TCA_MQPRIO_MAX + 1] = {
	[TCA_MQPRIO_MODE]	= { .len = sizeof(u16) },
	[TCA_MQPRIO_SHAPER]	= { .len = sizeof(u16) },
	[TCA_MQPRIO_MIN_RATE64]	= { .type = NLA_NESTED },
	[TCA_MQPRIO_MAX_RATE64]	= { .type = NLA_NESTED },
};

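/* Parse the netlink attributes that follow the fixed-size
 * struct tc_mqprio_qopt inside TCA_OPTIONS.  "len" is the size of that
 * leading struct; anything after its aligned end is treated as a nested
 * attribute stream and validated against "policy".
 */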
static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len >= nla_attr_size(0))
		return nla_parse_deprecated(tb, maxtype,
					    nla_data(nla) + NLA_ALIGN(len),
					    nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

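/* Handle the extended, hardware-mode-only attributes: the offload mode,
 * the shaper type, and the nested per-TC min/max rate lists used by the
 * bw_rlimit shaper.  Rejected outright when hw offload is not requested.
 */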
static int mqprio_parse_nlattr(struct Qdisc *sch, struct tc_mqprio_qopt *qopt,
			       struct nlattr *opt,
			       struct netlink_ext_ack *extack)
{
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct nlattr *tb[TCA_MQPRIO_MAX + 1];
	struct nlattr *attr;
	int i, rem, err;

	err = parse_attr(tb, TCA_MQPRIO_MAX, opt, mqprio_policy,
			 sizeof(*qopt));
	if (err < 0)
		return err;

	if (!qopt->hw) {
		NL_SET_ERR_MSG(extack,
			       "mqprio TCA_OPTIONS can only contain netlink attributes in hardware mode");
		return -EINVAL;
	}

	if (tb[TCA_MQPRIO_MODE]) {
		priv->flags |= TC_MQPRIO_F_MODE;
		priv->mode = *(u16 *)nla_data(tb[TCA_MQPRIO_MODE]);
	}

	if (tb[TCA_MQPRIO_SHAPER]) {
		priv->flags |= TC_MQPRIO_F_SHAPER;
		priv->shaper = *(u16 *)nla_data(tb[TCA_MQPRIO_SHAPER]);
	}

	if (tb[TCA_MQPRIO_MIN_RATE64]) {
		if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
			NL_SET_ERR_MSG_ATTR(extack, tb[TCA_MQPRIO_MIN_RATE64],
					    "min_rate accepted only when shaper is in bw_rlimit mode");
			return -EINVAL;
		}
		i = 0;
		nla_for_each_nested(attr, tb[TCA_MQPRIO_MIN_RATE64],
				    rem) {
			if (nla_type(attr) != TCA_MQPRIO_MIN_RATE64) {
				NL_SET_ERR_MSG_ATTR(extack, attr,
						    "Attribute type expected to be TCA_MQPRIO_MIN_RATE64");
				return -EINVAL;
			}

			if (nla_len(attr) != sizeof(u64)) {
				NL_SET_ERR_MSG_ATTR(extack, attr,
						    "Attribute TCA_MQPRIO_MIN_RATE64 expected to have 8 bytes length");
				return -EINVAL;
			}

			if (i >= qopt->num_tc)
				break;
			priv->min_rate[i] = *(u64 *)nla_data(attr);
			i++;
		}
		priv->flags |= TC_MQPRIO_F_MIN_RATE;
	}

	if (tb[TCA_MQPRIO_MAX_RATE64]) {
		if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
			NL_SET_ERR_MSG_ATTR(extack, tb[TCA_MQPRIO_MAX_RATE64],
					    "max_rate accepted only when shaper is in bw_rlimit mode");
			return -EINVAL;
		}
		i = 0;
		nla_for_each_nested(attr, tb[TCA_MQPRIO_MAX_RATE64],
				    rem) {
			if (nla_type(attr) != TCA_MQPRIO_MAX_RATE64) {
				NL_SET_ERR_MSG_ATTR(extack, attr,
						    "Attribute type expected to be TCA_MQPRIO_MAX_RATE64");
				return -EINVAL;
			}

			if (nla_len(attr) != sizeof(u64)) {
				NL_SET_ERR_MSG_ATTR(extack, attr,
						    "Attribute TCA_MQPRIO_MAX_RATE64 expected to have 8 bytes length");
				return -EINVAL;
			}

			if (i >= qopt->num_tc)
				break;
			priv->max_rate[i] = *(u64 *)nla_data(attr);
			i++;
		}
		priv->flags |= TC_MQPRIO_F_MAX_RATE;
	}

	return 0;
}

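/* Set up the qdisc.  mqprio must be the root qdisc of a multiqueue
 * device.  In software mode the verified queue mapping is programmed
 * into the netdev; in hardware mode (hw > 0) the mapping is handed to
 * the driver via ndo_setup_tc().
 *
 * Illustrative user-space configuration (interface name is only an
 * example):
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 3 \
 *      map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *      queues 1@0 1@1 2@2 hw 0
 */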
static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	int i, err = -EOPNOTSUPP;
	struct tc_mqprio_qopt *qopt = NULL;
	int len;

	BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
	BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* make certain we can allocate enough classids to handle queues */
	if (dev->num_tx_queues >= TC_H_MIN_PRIORITY)
		return -ENOMEM;

	if (!opt || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);
	if (mqprio_parse_opt(dev, qopt))
		return -EINVAL;

	len = nla_len(opt) - NLA_ALIGN(sizeof(*qopt));
	if (len > 0) {
		err = mqprio_parse_nlattr(sch, qopt, opt, extack);
		if (err)
			return err;
	}

	/* pre-allocate qdiscs, attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (!priv->qdiscs)
		return -ENOMEM;

	for (i = 0; i < dev->num_tx_queues; i++) {
		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue,
					  get_default_qdisc_ops(dev, i),
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)), extack);
		if (!qdisc)
			return -ENOMEM;

		priv->qdiscs[i] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}

	/* If the mqprio options indicate that hardware should own the
	 * queue mapping, then run ndo_setup_tc; otherwise use the
	 * supplied and verified mapping.
	 */
	if (qopt->hw) {
		struct tc_mqprio_qopt_offload mqprio = {.qopt = *qopt};

		switch (priv->mode) {
		case TC_MQPRIO_MODE_DCB:
			if (priv->shaper != TC_MQPRIO_SHAPER_DCB)
				return -EINVAL;
			break;
		case TC_MQPRIO_MODE_CHANNEL:
			mqprio.flags = priv->flags;
			if (priv->flags & TC_MQPRIO_F_MODE)
				mqprio.mode = priv->mode;
			if (priv->flags & TC_MQPRIO_F_SHAPER)
				mqprio.shaper = priv->shaper;
			if (priv->flags & TC_MQPRIO_F_MIN_RATE)
				for (i = 0; i < mqprio.qopt.num_tc; i++)
					mqprio.min_rate[i] = priv->min_rate[i];
			if (priv->flags & TC_MQPRIO_F_MAX_RATE)
				for (i = 0; i < mqprio.qopt.num_tc; i++)
					mqprio.max_rate[i] = priv->max_rate[i];
			break;
		default:
			return -EINVAL;
		}
		err = dev->netdev_ops->ndo_setup_tc(dev,
						    TC_SETUP_QDISC_MQPRIO,
						    &mqprio);
		if (err)
			return err;

		priv->hw_offload = mqprio.qopt.hw;
	} else {
		netdev_set_num_tc(dev, qopt->num_tc);
		for (i = 0; i < qopt->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    qopt->count[i], qopt->offset[i]);
	}

	/* Always use supplied priority mappings */
	for (i = 0; i < TC_BITMASK + 1; i++)
		netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);

	sch->flags |= TCQ_F_MQROOT;
	return 0;
}

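/* Attach the pre-allocated child qdiscs to their TX queues.  Called with
 * the device deactivated, so grafting cannot fail; ownership of the
 * qdiscs moves to the queues and the staging array is freed.
 */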
static void mqprio_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	/* Attach underlying qdisc */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_put(old);
		if (ntx < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}

static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}

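/* Replace the child qdisc of one TX queue.  The device is brought down
 * around the graft so the queue is not transmitting while its qdisc is
 * swapped.
 */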
static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
			struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}

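/* Emit the nested per-TC min/max rate attributes, mirroring what
 * mqprio_parse_nlattr() accepted on input.
 */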
static int dump_rates(struct mqprio_sched *priv,
		      struct tc_mqprio_qopt *opt, struct sk_buff *skb)
{
	struct nlattr *nest;
	int i;

	if (priv->flags & TC_MQPRIO_F_MIN_RATE) {
		nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MIN_RATE64);
		if (!nest)
			goto nla_put_failure;

		for (i = 0; i < opt->num_tc; i++) {
			if (nla_put(skb, TCA_MQPRIO_MIN_RATE64,
				    sizeof(priv->min_rate[i]),
				    &priv->min_rate[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, nest);
	}

	if (priv->flags & TC_MQPRIO_F_MAX_RATE) {
		nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MAX_RATE64);
		if (!nest)
			goto nla_put_failure;

		for (i = 0; i < opt->num_tc; i++) {
			if (nla_put(skb, TCA_MQPRIO_MAX_RATE64,
				    sizeof(priv->max_rate[i]),
				    &priv->max_rate[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, nest);
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

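/* Dump configuration and aggregate statistics.  Stats are summed over
 * all child qdiscs, taking the per-cpu path for lockless children and
 * the qdisc lock otherwise.
 */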
static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *)skb_tail_pointer(skb);
	struct tc_mqprio_qopt opt = { 0 };
	struct Qdisc *qdisc;
	unsigned int ntx, tc;

	sch->q.qlen = 0;
	memset(&sch->bstats, 0, sizeof(sch->bstats));
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	/* MQ supports lockless qdiscs. However, statistics accounting needs
	 * to account for all, none, or a mix of locked and unlocked child
	 * qdiscs. Percpu stats are added to counters in-band and locking
	 * qdisc totals are added at end.
	 */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
		spin_lock_bh(qdisc_lock(qdisc));

		if (qdisc_is_percpu_stats(qdisc)) {
			__u32 qlen = qdisc_qlen_sum(qdisc);

			__gnet_stats_copy_basic(NULL, &sch->bstats,
						qdisc->cpu_bstats,
						&qdisc->bstats);
			__gnet_stats_copy_queue(&sch->qstats,
						qdisc->cpu_qstats,
						&qdisc->qstats, qlen);
			sch->q.qlen		+= qlen;
		} else {
			sch->q.qlen		+= qdisc->q.qlen;
			sch->bstats.bytes	+= qdisc->bstats.bytes;
			sch->bstats.packets	+= qdisc->bstats.packets;
			sch->qstats.backlog	+= qdisc->qstats.backlog;
			sch->qstats.drops	+= qdisc->qstats.drops;
			sch->qstats.requeues	+= qdisc->qstats.requeues;
			sch->qstats.overlimits	+= qdisc->qstats.overlimits;
		}

		spin_unlock_bh(qdisc_lock(qdisc));
	}

	opt.num_tc = netdev_get_num_tc(dev);
	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
	opt.hw = priv->hw_offload;

	for (tc = 0; tc < netdev_get_num_tc(dev); tc++) {
		opt.count[tc] = dev->tc_to_txq[tc].count;
		opt.offset[tc] = dev->tc_to_txq[tc].offset;
	}

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_MODE) &&
	    nla_put_u16(skb, TCA_MQPRIO_MODE, priv->mode))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_SHAPER) &&
	    nla_put_u16(skb, TCA_MQPRIO_SHAPER, priv->shaper))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_MIN_RATE ||
	     priv->flags & TC_MQPRIO_F_MAX_RATE) &&
	    (dump_rates(priv, &opt, skb) != 0))
		goto nla_put_failure;

	return nla_nest_end(skb, nla);
nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return NULL;

	return dev_queue->qdisc_sleeping;
}

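/* Map a classid to a class handle.  Minor numbers 1 through
 * num_tx_queues name the per-queue classes backed by real qdiscs;
 * TC_H_MIN_PRIORITY onward name the virtual traffic-class (TC)
 * classes.  Returns 0 for anything else.
 */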
static unsigned long mqprio_find(struct Qdisc *sch, u32 classid)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx = TC_H_MIN(classid);

	/* There are essentially two regions here that have valid classid
	 * values. The first region will have a classid value of 1 through
	 * num_tx_queues. All of these are backed by actual Qdiscs.
	 */
	if (ntx < TC_H_MIN_PRIORITY)
		return (ntx <= dev->num_tx_queues) ? ntx : 0;

	/* The second region represents the hardware traffic classes. These
	 * are represented by classid values of TC_H_MIN_PRIORITY through
	 * TC_H_MIN_PRIORITY + netdev_get_num_tc - 1.
	 */
	return ((ntx - TC_H_MIN_PRIORITY) < netdev_get_num_tc(dev)) ? ntx : 0;
}

static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	if (cl < TC_H_MIN_PRIORITY) {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
		struct net_device *dev = qdisc_dev(sch);
		int tc = netdev_txq_to_tc(dev, cl - 1);

		tcm->tcm_parent = (tc < 0) ? 0 :
			TC_H_MAKE(TC_H_MAJ(sch->handle),
				  TC_H_MIN(tc + TC_H_MIN_PRIORITY));
		tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	} else {
		tcm->tcm_parent = TC_H_ROOT;
		tcm->tcm_info = 0;
	}
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

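/* Dump statistics for one class.  For a traffic-class class the stats
 * are summed over all TX queues belonging to that TC; for a per-queue
 * class they come from the queue's sleeping qdisc.
 */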
static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{
	if (cl >= TC_H_MIN_PRIORITY) {
		int i;
		__u32 qlen = 0;
		struct gnet_stats_queue qstats = {0};
		struct gnet_stats_basic_packed bstats = {0};
		struct net_device *dev = qdisc_dev(sch);
		struct netdev_tc_txq tc = dev->tc_to_txq[cl & TC_BITMASK];

		/* Drop the lock here; it will be reacquired before touching
		 * statistics. This is required because the d->lock we hold
		 * here is the lock on dev_queue->qdisc_sleeping, which is
		 * also acquired below.
		 */
		if (d->lock)
			spin_unlock_bh(d->lock);

		for (i = tc.offset; i < tc.offset + tc.count; i++) {
			struct netdev_queue *q = netdev_get_tx_queue(dev, i);
			struct Qdisc *qdisc = rtnl_dereference(q->qdisc);

			spin_lock_bh(qdisc_lock(qdisc));

			if (qdisc_is_percpu_stats(qdisc)) {
				qlen = qdisc_qlen_sum(qdisc);

				__gnet_stats_copy_basic(NULL, &bstats,
							qdisc->cpu_bstats,
							&qdisc->bstats);
				__gnet_stats_copy_queue(&qstats,
							qdisc->cpu_qstats,
							&qdisc->qstats,
							qlen);
			} else {
				qlen		+= qdisc->q.qlen;
				bstats.bytes	+= qdisc->bstats.bytes;
				bstats.packets	+= qdisc->bstats.packets;
				qstats.backlog	+= qdisc->qstats.backlog;
				qstats.drops	+= qdisc->qstats.drops;
				qstats.requeues	+= qdisc->qstats.requeues;
				qstats.overlimits += qdisc->qstats.overlimits;
			}
			spin_unlock_bh(qdisc_lock(qdisc));
		}

		/* Reclaim root sleeping lock before completing stats */
		if (d->lock)
			spin_lock_bh(d->lock);
		if (gnet_stats_copy_basic(NULL, d, NULL, &bstats) < 0 ||
		    gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
			return -1;
	} else {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

		sch = dev_queue->qdisc_sleeping;
		if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d,
					  sch->cpu_bstats, &sch->bstats) < 0 ||
		    qdisc_qstats_copy(d, sch) < 0)
			return -1;
	}
	return 0;
}

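/* Walk all classes: first the virtual traffic-class classes (offset by
 * TC_H_MIN_PRIORITY), then the per-queue classes, keeping arg->count
 * consistent so skip/stop work across both ranges.
 */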
static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	/* Walk hierarchy with a virtual class per tc */
	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < netdev_get_num_tc(dev); ntx++) {
		if (arg->fn(sch, ntx + TC_H_MIN_PRIORITY, arg) < 0) {
			arg->stop = 1;
			return;
		}
		arg->count++;
	}

	/* Pad the values and skip over unused traffic classes */
	if (ntx < TC_MAX_QUEUE) {
		arg->count = TC_MAX_QUEUE;
		ntx = TC_MAX_QUEUE;
	}

	/* Reset offset, sort out remaining per-queue qdiscs */
	for (ntx -= TC_MAX_QUEUE; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			return;
		}
		arg->count++;
	}
}

static struct netdev_queue *mqprio_select_queue(struct Qdisc *sch,
						struct tcmsg *tcm)
{
	return mqprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}

static const struct Qdisc_class_ops mqprio_class_ops = {
	.graft		= mqprio_graft,
	.leaf		= mqprio_leaf,
	.find		= mqprio_find,
	.walk		= mqprio_walk,
	.dump		= mqprio_dump_class,
	.dump_stats	= mqprio_dump_class_stats,
	.select_queue	= mqprio_select_queue,
};

static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
	.cl_ops		= &mqprio_class_ops,
	.id		= "mqprio",
	.priv_size	= sizeof(struct mqprio_sched),
	.init		= mqprio_init,
	.destroy	= mqprio_destroy,
	.attach		= mqprio_attach,
	.dump		= mqprio_dump,
	.owner		= THIS_MODULE,
};

static int __init mqprio_module_init(void)
{
	return register_qdisc(&mqprio_qdisc_ops);
}

static void __exit mqprio_module_exit(void)
{
	unregister_qdisc(&mqprio_qdisc_ops);
}

module_init(mqprio_module_init);
module_exit(mqprio_module_exit);

MODULE_LICENSE("GPL");