/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>
#include <linux/hashtable.h>
#include <linux/android_kabi.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
#include <net/flow_offload.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;
struct bpf_flow_keys;

struct qdisc_rate_table {
	struct tc_ratespec rate;
	u32		data[256];
	struct qdisc_rate_table *next;
	int		refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
	__QDISC_STATE_MISSED,
	__QDISC_STATE_DRAINING,
};

enum qdisc_state2_t {
	/* Only for !TCQ_F_NOLOCK qdisc. Never access it directly.
	 * Use qdisc_run_begin/end() or qdisc_is_running() instead.
	 */
	__QDISC_STATE2_RUNNING,
};

#define QDISC_STATE_MISSED	BIT(__QDISC_STATE_MISSED)
#define QDISC_STATE_DRAINING	BIT(__QDISC_STATE_DRAINING)

#define QDISC_STATE_NON_EMPTY	(QDISC_STATE_MISSED | \
					QDISC_STATE_DRAINING)

struct qdisc_size_table {
	struct rcu_head		rcu;
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

/* similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
	struct sk_buff	*head;
	struct sk_buff	*tail;
	__u32		qlen;
	spinlock_t	lock;
};

struct Qdisc {
	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *sch);
	unsigned int		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are for
				      * q->dev_queue : It can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeueing next packet.
				      * It's true for MQ/MQPRIO slaves, or
				      * non-multiqueue devices.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy :
				      * qdisc_tree_decrease_qlen() should stop.
				      */
#define TCQ_F_INVISIBLE		0x80 /* invisible by default in dump */
#define TCQ_F_NOLOCK		0x100 /* qdisc does not require locking */
#define TCQ_F_OFFLOADED		0x200 /* qdisc is offloaded to HW */
	u32			limit;
	const struct Qdisc_ops	*ops;
	struct qdisc_size_table	__rcu *stab;
	struct hlist_node       hash;
	u32			handle;
	u32			parent;

	struct netdev_queue	*dev_queue;

	struct net_rate_estimator __rcu *rate_est;
	struct gnet_stats_basic_sync __percpu *cpu_bstats;
	struct gnet_stats_queue	__percpu *cpu_qstats;
	int			pad;
	refcount_t		refcnt;

	/*
	 * For performance's sake on SMP, we put highly modified fields at the end
	 */
	struct sk_buff_head	gso_skb ____cacheline_aligned_in_smp;
	struct qdisc_skb_head	q;
	struct gnet_stats_basic_sync bstats;
	struct gnet_stats_queue	qstats;
	unsigned long		state;
	unsigned long		state2; /* must be written under qdisc spinlock */
	struct Qdisc            *next_sched;
	struct sk_buff_head	skb_bad_txq;

	spinlock_t		busylock ____cacheline_aligned_in_smp;
	spinlock_t		seqlock;

	struct rcu_head		rcu;
	netdevice_tracker	dev_tracker;

	ANDROID_KABI_RESERVE(1);

	/* private data */
	long privdata[] ____cacheline_aligned;
};

static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return;
	refcount_inc(&qdisc->refcnt);
}

static inline bool qdisc_refcount_dec_if_one(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return true;
	return refcount_dec_if_one(&qdisc->refcnt);
}

/* Intended to be used by unlocked users, when concurrent qdisc release is
 * possible.
 */

static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return qdisc;
	if (refcount_inc_not_zero(&qdisc->refcnt))
		return qdisc;
	return NULL;
}
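
/* Illustrative sketch (assumption, not kernel code): an unlocked reader
 * taking a temporary reference under RCU; use_qdisc() is hypothetical,
 * qdisc_put_unlocked() is declared later in this header.
 *
 *	rcu_read_lock();
 *	q = qdisc_refcount_inc_nz(rcu_dereference(txq->qdisc));
 *	rcu_read_unlock();
 *	if (q) {
 *		use_qdisc(q);
 *		qdisc_put_unlocked(q);
 *	}
 */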

/* For !TCQ_F_NOLOCK qdisc: callers must either call this within a qdisc
 * root_lock section, or provide their own memory barriers -- ordering
 * against qdisc_run_begin/end() atomic bit operations.
 */
static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK)
		return spin_is_locked(&qdisc->seqlock);
	return test_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}

static inline bool nolock_qdisc_is_empty(const struct Qdisc *qdisc)
{
	return !(READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY);
}

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
{
	if (qdisc_is_percpu_stats(qdisc))
		return nolock_qdisc_is_empty(qdisc);
	return !READ_ONCE(qdisc->q.qlen);
}

/* For !TCQ_F_NOLOCK qdisc, qdisc_run_begin/end() must be invoked with
 * the qdisc root lock acquired.
 */
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK) {
		if (spin_trylock(&qdisc->seqlock))
			return true;

		/* No need to insist if the MISSED flag was already set.
		 * Note that test_and_set_bit() also gives us memory ordering
		 * guarantees wrt potential earlier enqueue() and below
		 * spin_trylock(), both of which are necessary to prevent races
		 */
		if (test_and_set_bit(__QDISC_STATE_MISSED, &qdisc->state))
			return false;

		/* Try to take the lock again to make sure that we will either
		 * grab it or the CPU that still has it will see MISSED set
		 * when testing it in qdisc_run_end()
		 */
		return spin_trylock(&qdisc->seqlock);
	}
	return !__test_and_set_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK) {
		spin_unlock(&qdisc->seqlock);

		/* spin_unlock() only has store-release semantics. The unlock
		 * and test_bit() ordering is a store-load ordering, so a full
		 * memory barrier is needed here.
		 */
		smp_mb();

		if (unlikely(test_bit(__QDISC_STATE_MISSED,
				      &qdisc->state)))
			__netif_schedule(qdisc);
	} else {
		__clear_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
	}
}
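
/* Illustrative sketch (assumption): the caller pattern described above,
 * modeled on qdisc_run() in include/net/pkt_sched.h; for !TCQ_F_NOLOCK
 * qdiscs the caller holds the qdisc root lock around this sequence.
 *
 *	if (qdisc_run_begin(q)) {
 *		__qdisc_run(q);		// dequeue/transmit loop
 *		qdisc_run_end(q);
 *	}
 */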

static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Non-BQL migrated drivers will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}
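
/* Illustrative sketch (assumption): how a dequeue path can combine the two
 * helpers above to bound bulk dequeue for one txq, loosely following
 * try_bulk_dequeue_skb() in net/sched/sch_generic.c.
 *
 *	if (qdisc_may_bulk(q)) {
 *		int bytelimit = qdisc_avail_bulklimit(txq);
 *
 *		while (bytelimit > 0 && (skb = q->dequeue(q)) != NULL)
 *			bytelimit -= qdisc_pkt_len(skb);	// chain skb for xmit
 *	}
 */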

struct Qdisc_class_ops {
	unsigned int		flags;
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					struct Qdisc *, struct Qdisc **,
					struct netlink_ext_ack *extack);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*find)(struct Qdisc *, u32 classid);
	int			(*change)(struct Qdisc *, u32, u32,
					struct nlattr **, unsigned long *,
					struct netlink_ext_ack *);
	int			(*delete)(struct Qdisc *, unsigned long,
					  struct netlink_ext_ack *);
	void			(*walk)(struct Qdisc *, struct qdisc_walker * arg);

	/* Filter manipulation */
	struct tcf_block *	(*tcf_block)(struct Qdisc *sch,
					     unsigned long arg,
					     struct netlink_ext_ack *extack);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg*);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					struct gnet_dump *);

	ANDROID_KABI_RESERVE(1);
};

/* Qdisc_class_ops flag values */

/* Implements API that doesn't require rtnl lock */
enum qdisc_class_ops_flags {
	QDISC_CLASS_OPS_DOIT_UNLOCKED = 1,
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;
	unsigned int		static_flags;

	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);

	int			(*init)(struct Qdisc *sch, struct nlattr *arg,
					struct netlink_ext_ack *extack);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *sch,
					  struct nlattr *arg,
					  struct netlink_ext_ack *extack);
	void			(*attach)(struct Qdisc *sch);
	int			(*change_tx_queue_len)(struct Qdisc *, unsigned int);
	void			(*change_real_num_tx)(struct Qdisc *sch,
						      unsigned int new_real_tx);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	void			(*ingress_block_set)(struct Qdisc *sch,
						     u32 block_index);
	void			(*egress_block_set)(struct Qdisc *sch,
						    u32 block_index);
	u32			(*ingress_block_get)(struct Qdisc *sch);
	u32			(*egress_block_get)(struct Qdisc *sch);

	struct module		*owner;

	ANDROID_KABI_RESERVE(1);
};


struct tcf_result {
	union {
		struct {
			unsigned long	class;
			u32		classid;
		};
		const struct tcf_proto *goto_tp;

	};
};

struct tcf_chain;

struct tcf_proto_ops {
	struct list_head	head;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	int			(*init)(struct tcf_proto*);
	void			(*destroy)(struct tcf_proto *tp, bool rtnl_held,
					   struct netlink_ext_ack *extack);

	void*			(*get)(struct tcf_proto*, u32 handle);
	void			(*put)(struct tcf_proto *tp, void *f);
	int			(*change)(struct net *net, struct sk_buff *,
					struct tcf_proto*, unsigned long,
					u32 handle, struct nlattr **,
					void **, u32,
					struct netlink_ext_ack *);
	int			(*delete)(struct tcf_proto *tp, void *arg,
					  bool *last, bool rtnl_held,
					  struct netlink_ext_ack *);
	bool			(*delete_empty)(struct tcf_proto *tp);
	void			(*walk)(struct tcf_proto *tp,
					struct tcf_walker *arg, bool rtnl_held);
	int			(*reoffload)(struct tcf_proto *tp, bool add,
					     flow_setup_cb_t *cb, void *cb_priv,
					     struct netlink_ext_ack *extack);
	void			(*hw_add)(struct tcf_proto *tp,
					  void *type_data);
	void			(*hw_del)(struct tcf_proto *tp,
					  void *type_data);
	void			(*bind_class)(void *, u32, unsigned long,
					      void *, unsigned long);
	void *			(*tmplt_create)(struct net *net,
						struct tcf_chain *chain,
						struct nlattr **tca,
						struct netlink_ext_ack *extack);
	void			(*tmplt_destroy)(void *tmplt_priv);
	void			(*tmplt_reoffload)(struct tcf_chain *chain,
						   bool add,
						   flow_setup_cb_t *cb,
						   void *cb_priv);
	struct tcf_exts *	(*get_exts)(const struct tcf_proto *tp,
					    u32 handle);

	/* rtnetlink specific */
	int			(*dump)(struct net*, struct tcf_proto*, void *,
					struct sk_buff *skb, struct tcmsg*,
					bool);
	int			(*terse_dump)(struct net *net,
					      struct tcf_proto *tp, void *fh,
					      struct sk_buff *skb,
					      struct tcmsg *t, bool rtnl_held);
	int			(*tmplt_dump)(struct sk_buff *skb,
					      struct net *net,
					      void *tmplt_priv);

	struct module		*owner;
	int			flags;
};

/* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags
 * are expected to implement tcf_proto_ops->delete_empty(), otherwise race
 * conditions can occur when filters are inserted/deleted simultaneously.
 */
enum tcf_proto_ops_flags {
	TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
};
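
/* Illustrative sketch (assumption): the shape of an unlocked classifier's
 * ops table as required by the comment above; the "foo" names are
 * hypothetical.
 *
 *	static struct tcf_proto_ops cls_foo_ops = {
 *		.kind		= "foo",
 *		.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
 *		.delete_empty	= foo_delete_empty,	// mandatory here
 *		...
 *	};
 */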

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu	*next;
	void __rcu		*root;

	/* called under RCU BH lock */
	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	void			*data;
	const struct tcf_proto_ops	*ops;
	struct tcf_chain	*chain;
	/* Lock protects tcf_proto shared state and can be used by unlocked
	 * classifiers to protect their private data.
	 */
	spinlock_t		lock;
	bool			deleting;
	refcount_t		refcnt;
	struct rcu_head		rcu;
	struct hlist_node	destroy_ht_node;
};

struct qdisc_skb_cb {
	struct {
		unsigned int		pkt_len;
		u16			slave_dev_queue_mapping;
		u16			tc_classid;
	};
#define QDISC_CB_PRIV_LEN 20
	unsigned char		data[QDISC_CB_PRIV_LEN];
};

typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);

struct tcf_chain {
	/* Protects filter_chain. */
	struct mutex filter_chain_lock;
	struct tcf_proto __rcu *filter_chain;
	struct list_head list;
	struct tcf_block *block;
	u32 index; /* chain index */
	unsigned int refcnt;
	unsigned int action_refcnt;
	bool explicitly_created;
	bool flushing;
	const struct tcf_proto_ops *tmplt_ops;
	void *tmplt_priv;
	struct rcu_head rcu;
};

struct tcf_block {
	/* Lock protects tcf_block and lifetime-management data of chains
	 * attached to the block (refcnt, action_refcnt, explicitly_created).
	 */
	struct mutex lock;
	struct list_head chain_list;
	u32 index; /* block index for shared blocks */
	u32 classid; /* which class this block belongs to */
	refcount_t refcnt;
	struct net *net;
	struct Qdisc *q;
	struct rw_semaphore cb_lock; /* protects cb_list and offload counters */
	struct flow_block flow_block;
	struct list_head owner_list;
	bool keep_dst;
	atomic_t offloadcnt; /* Number of offloaded filters */
	unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
	unsigned int lockeddevcnt; /* Number of devs that require rtnl lock. */
	struct {
		struct tcf_chain *chain;
		struct list_head filter_chain_list;
	} chain0;
	struct rcu_head rcu;
	DECLARE_HASHTABLE(proto_destroy_ht, 7);
	struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */
};

static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
{
	return lockdep_is_held(&chain->filter_chain_lock);
}

static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
{
	return lockdep_is_held(&tp->lock);
}

#define tcf_chain_dereference(p, chain)					\
	rcu_dereference_protected(p, lockdep_tcf_chain_is_locked(chain))

#define tcf_proto_dereference(p, tp)					\
	rcu_dereference_protected(p, lockdep_tcf_proto_is_locked(tp))
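
/* Illustrative sketch (assumption): walking a chain's filter list under
 * filter_chain_lock with the helper macro above, similar to code in
 * net/sched/cls_api.c; inspect() is a hypothetical visitor.
 *
 *	mutex_lock(&chain->filter_chain_lock);
 *	for (tp = tcf_chain_dereference(chain->filter_chain, chain);
 *	     tp; tp = tcf_chain_dereference(tp->next, chain))
 *		inspect(tp);
 *	mutex_unlock(&chain->filter_chain_lock);
 */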

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*qcb));
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}

static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline int qdisc_qlen_sum(const struct Qdisc *q)
{
	__u32 qlen = q->qstats.qlen;
	int i;

	if (qdisc_is_percpu_stats(q)) {
		for_each_possible_cpu(i)
			qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
	} else {
		qlen += q->q.qlen;
	}

	return qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc)
{
	return rcu_dereference_bh(qdisc->dev_queue->qdisc);
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return rcu_dereference_rtnl(qdisc->dev_queue->qdisc_sleeping);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(struct Qdisc *q)
{
	if (q->flags & TCQ_F_MQROOT)
		spin_lock_bh(qdisc_lock(q));
	else
		spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(struct Qdisc *q)
{
	if (q->flags & TCQ_F_MQROOT)
		spin_unlock_bh(qdisc_lock(q));
	else
		spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
	return ntx < dev->real_num_tx_queues ?
			default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
	u32			classid;
	unsigned int		filter_cnt;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	if (!id)
		return NULL;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}

static inline bool qdisc_class_in_use(const struct Qdisc_class_common *cl)
{
	return cl->filter_cnt > 0;
}

static inline void qdisc_class_get(struct Qdisc_class_common *cl)
{
	unsigned int res;

	if (check_add_overflow(cl->filter_cnt, 1, &res))
		WARN(1, "Qdisc class overflow");

	cl->filter_cnt = res;
}

static inline void qdisc_class_put(struct Qdisc_class_common *cl)
{
	unsigned int res;

	if (check_sub_overflow(cl->filter_cnt, 1, &res))
		WARN(1, "Qdisc class underflow");

	cl->filter_cnt = res;
}

static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
{
	u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY;

	return (hwtc < netdev_get_num_tc(dev)) ? hwtc : -EINVAL;
}

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

int dev_qdisc_change_tx_queue_len(struct net_device *dev);
void dev_qdisc_change_real_num_tx(struct net_device *dev,
				  unsigned int new_real_tx);
void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_put(struct Qdisc *qdisc);
void qdisc_put_unlocked(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);
#ifdef CONFIG_NET_SCHED
int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
			      void *type_data);
void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
				struct Qdisc *new, struct Qdisc *old,
				enum tc_setup_type type, void *type_data,
				struct netlink_ext_ack *extack);
#else
static inline int
qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
			  void *type_data)
{
	q->flags &= ~TCQ_F_OFFLOADED;
	return 0;
}

static inline void
qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
			   struct Qdisc *new, struct Qdisc *old,
			   enum tc_setup_type type, void *type_data,
			   struct netlink_ext_ack *extack)
{
}
#endif
void qdisc_offload_query_caps(struct net_device *dev,
			      enum tc_setup_type type,
			      void *caps, size_t caps_len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops,
			  struct netlink_ext_ack *extack);
void qdisc_free(struct Qdisc *qdisc);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid,
				struct netlink_ext_ack *extack);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_XGRESS
	return skb->tc_at_ingress;
#else
	return false;
#endif
}

static inline bool skb_skip_tc_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_skip_classify) {
		skb->tc_skip_classify = 0;
		return true;
	}
#endif
	return false;
}
/* Reset all TX qdiscs at or above queue index i of a device.  */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

/* Are all TX queues of the device empty?  */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (!qdisc_is_empty(q)) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing?  */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (rcu_access_pointer(txq->qdisc) !=
		    rcu_access_pointer(txq->qdisc_sleeping))
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues?  */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch, to_free);
}

static inline void _bstats_update(struct gnet_stats_basic_sync *bstats,
				  __u64 bytes, __u32 packets)
{
	u64_stats_update_begin(&bstats->syncp);
	u64_stats_add(&bstats->bytes, bytes);
	u64_stats_add(&bstats->packets, packets);
	u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_update(struct gnet_stats_basic_sync *bstats,
				 const struct sk_buff *skb)
{
	_bstats_update(bstats,
		       qdisc_pkt_len(skb),
		       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
					   const struct sk_buff *skb)
{
	bstats_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
{
	this_cpu_dec(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->requeues);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
	sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
	qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
	qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
	qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->drops);
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
	sch->qstats.overlimits++;
}

static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
{
	__u32 qlen = qdisc_qlen_sum(sch);

	return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
}

static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch,  __u32 *qlen,
					     __u32 *backlog)
{
	struct gnet_stats_queue qstats = { 0 };

	gnet_stats_add_queue(&qstats, sch->cpu_qstats, &sch->qstats);
	*qlen = qstats.qlen + qdisc_qlen(sch);
	*backlog = qstats.backlog;
}

static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
{
	__u32 qlen, backlog;

	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
	qdisc_tree_reduce_backlog(sch, qlen, backlog);
}

static inline void qdisc_purge_queue(struct Qdisc *sch)
{
	__u32 qlen, backlog;

	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
	qdisc_reset(sch);
	qdisc_tree_reduce_backlog(sch, qlen, backlog);
}

static inline void __qdisc_enqueue_tail(struct sk_buff *skb,
					struct qdisc_skb_head *qh)
{
	struct sk_buff *last = qh->tail;

	if (last) {
		skb->next = NULL;
		last->next = skb;
		qh->tail = skb;
	} else {
		qh->tail = skb;
		qh->head = skb;
	}
	qh->qlen++;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	__qdisc_enqueue_tail(skb, &sch->q);
	qdisc_qstats_backlog_inc(sch, skb);
	return NET_XMIT_SUCCESS;
}

static inline void __qdisc_enqueue_head(struct sk_buff *skb,
					struct qdisc_skb_head *qh)
{
	skb->next = qh->head;

	if (!qh->head)
		qh->tail = skb;
	qh->head = skb;
	qh->qlen++;
}

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
	struct sk_buff *skb = qh->head;

	if (likely(skb != NULL)) {
		qh->head = skb->next;
		qh->qlen--;
		if (qh->head == NULL)
			qh->tail = NULL;
		skb->next = NULL;
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (likely(skb != NULL)) {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}

/* Instead of calling kfree_skb() while root qdisc lock is held,
 * queue the skb for future freeing at end of __dev_xmit_skb()
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
	skb->next = *to_free;
	*to_free = skb;
}

static inline void __qdisc_drop_all(struct sk_buff *skb,
				    struct sk_buff **to_free)
{
	if (skb->prev)
		skb->prev->next = *to_free;
	else
		skb->next = *to_free;
	*to_free = skb;
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct qdisc_skb_head *qh,
						   struct sk_buff **to_free)
{
	struct sk_buff *skb = __qdisc_dequeue_head(qh);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		qdisc_qstats_backlog_dec(sch, skb);
		__qdisc_drop(skb, to_free);
		return len;
	}

	return 0;
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	const struct qdisc_skb_head *qh = &sch->q;

	return qh->head;
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!skb) {
		skb = sch->dequeue(sch);

		if (skb) {
			__skb_queue_head(&sch->gso_skb, skb);
			/* it's still part of the queue */
			qdisc_qstats_backlog_inc(sch, skb);
			sch->q.qlen++;
		}
	}

	return skb;
}

static inline void qdisc_update_stats_at_dequeue(struct Qdisc *sch,
						 struct sk_buff *skb)
{
	if (qdisc_is_percpu_stats(sch)) {
		qdisc_qstats_cpu_backlog_dec(sch, skb);
		qdisc_bstats_cpu_update(sch, skb);
		qdisc_qstats_cpu_qlen_dec(sch);
	} else {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
		sch->q.qlen--;
	}
}

static inline void qdisc_update_stats_at_enqueue(struct Qdisc *sch,
						 unsigned int pkt_len)
{
	if (qdisc_is_percpu_stats(sch)) {
		qdisc_qstats_cpu_qlen_inc(sch);
		this_cpu_add(sch->cpu_qstats->backlog, pkt_len);
	} else {
		sch->qstats.backlog += pkt_len;
		sch->q.qlen++;
	}
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	if (skb) {
		skb = __skb_dequeue(&sch->gso_skb);
		if (qdisc_is_percpu_stats(sch)) {
			qdisc_qstats_cpu_backlog_dec(sch, skb);
			qdisc_qstats_cpu_qlen_dec(sch);
		} else {
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
		}
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}
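
/* Illustrative sketch (assumption): wiring the pseudo peek/dequeue pair
 * into a non-work-conserving qdisc's ops, as shapers such as TBF do;
 * the "foo" names are hypothetical.
 *
 *	static struct Qdisc_ops foo_qdisc_ops = {
 *		.peek		= qdisc_peek_dequeued,
 *		.dequeue	= foo_dequeue,	// must use qdisc_dequeue_peeked()
 *		...
 *	};
 */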

static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
	/*
	 * We do not know the backlog in bytes of this list; it
	 * is up to the caller to correct it.
	 */
	ASSERT_RTNL();
	if (qh->qlen) {
		rtnl_kfree_skbs(qh->head, qh->tail);

		qh->head = NULL;
		qh->tail = NULL;
		qh->qlen = 0;
	}
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(&sch->q);
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
					  struct Qdisc **pold)
{
	struct Qdisc *old;

	sch_tree_lock(sch);
	old = *pold;
	*pold = new;
	if (old != NULL)
		qdisc_purge_queue(old);
	sch_tree_unlock(sch);

	return old;
}

static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	rtnl_kfree_skbs(skb, skb);
	qdisc_qstats_drop(sch);
}

static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch,
				 struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_cpu_drop(sch);

	return NET_XMIT_DROP;
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}
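
/* Illustrative sketch (assumption): a minimal tail-drop enqueue built from
 * the helpers in this header, in the spirit of sch_fifo; the function name
 * is hypothetical and sch->limit is the generic limit field above.
 *
 *	static int fifo_sketch_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *				       struct sk_buff **to_free)
 *	{
 *		if (likely(sch->q.qlen < sch->limit))
 *			return qdisc_enqueue_tail(skb, sch);
 *		return qdisc_drop(skb, sch, to_free);	// deferred kfree_skb()
 *	}
 */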

static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
				 struct sk_buff **to_free)
{
	__qdisc_drop_all(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

struct psched_ratecfg {
	u64	rate_bytes_ps; /* bytes per second */
	u32	mult;
	u16	overhead;
	u16	mpu;
	u8	linklayer;
	u8	shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	len += r->overhead;

	if (len < r->mpu)
		len = r->mpu;

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
		return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift;

	return ((u64)len * r->mult) >> r->shift;
}
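
/* Illustrative sketch (assumption): converting a configured rate into a
 * per-packet transmit time, the way shapers such as TBF use these helpers;
 * qopt and rate64 are hypothetical configuration inputs.
 *
 *	struct psched_ratecfg rate;
 *	u64 t_ns;
 *
 *	psched_ratecfg_precompute(&rate, &qopt->rate, rate64);
 *	t_ns = psched_l2t_ns(&rate, qdisc_pkt_len(skb));
 *	// t_ns: nanoseconds needed to send skb at the configured rate
 */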

void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
					  const struct psched_ratecfg *r)
{
	memset(res, 0, sizeof(*res));

	/* legacy struct tc_ratespec has a 32bit @rate field;
	 * Qdiscs using a 64bit rate should add new attributes
	 * in order to maintain compatibility.
	 */
	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

	res->overhead = r->overhead;
	res->mpu = r->mpu;
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

struct psched_pktrate {
	u64	rate_pkts_ps; /* packets per second */
	u32	mult;
	u8	shift;
};

static inline u64 psched_pkt2t_ns(const struct psched_pktrate *r,
				  unsigned int pkt_num)
{
	return ((u64)pkt_num * r->mult) >> r->shift;
}

void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64);

/* Mini Qdisc serves for specific needs of ingress/clsact Qdisc.
 * The fast path only needs to access filter list and to update stats
 */
struct mini_Qdisc {
	struct tcf_proto *filter_list;
	struct tcf_block *block;
	struct gnet_stats_basic_sync __percpu *cpu_bstats;
	struct gnet_stats_queue	__percpu *cpu_qstats;
	unsigned long rcu_state;
};

static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
						const struct sk_buff *skb)
{
	bstats_update(this_cpu_ptr(miniq->cpu_bstats), skb);
}

static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
{
	this_cpu_inc(miniq->cpu_qstats->drops);
}

struct mini_Qdisc_pair {
	struct mini_Qdisc miniq1;
	struct mini_Qdisc miniq2;
	struct mini_Qdisc __rcu **p_miniq;
};

void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
			  struct tcf_proto *tp_head);
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
			  struct mini_Qdisc __rcu **p_miniq);
void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
				struct tcf_block *block);

void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx);

int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb));

/* Make sure qdisc is no longer in SCHED state. */
static inline void qdisc_synchronize(const struct Qdisc *q)
{
	while (test_bit(__QDISC_STATE_SCHED, &q->state))
		msleep(1);
}

#endif