1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __NET_SCHED_GENERIC_H
3 #define __NET_SCHED_GENERIC_H
4
5 #include <linux/netdevice.h>
6 #include <linux/types.h>
7 #include <linux/rcupdate.h>
8 #include <linux/pkt_sched.h>
9 #include <linux/pkt_cls.h>
10 #include <linux/percpu.h>
11 #include <linux/dynamic_queue_limits.h>
12 #include <linux/list.h>
13 #include <linux/refcount.h>
14 #include <linux/workqueue.h>
15 #include <linux/mutex.h>
16 #include <linux/rwsem.h>
17 #include <linux/atomic.h>
18 #include <linux/hashtable.h>
19 #include <linux/android_kabi.h>
20 #include <net/gen_stats.h>
21 #include <net/rtnetlink.h>
22 #include <net/flow_offload.h>
23
24 struct Qdisc_ops;
25 struct qdisc_walker;
26 struct tcf_walker;
27 struct module;
28 struct bpf_flow_keys;
29
30 struct qdisc_rate_table {
31 struct tc_ratespec rate;
32 u32 data[256];
33 struct qdisc_rate_table *next;
34 int refcnt;
35 };
36
37 enum qdisc_state_t {
38 __QDISC_STATE_SCHED,
39 __QDISC_STATE_DEACTIVATED,
40 __QDISC_STATE_MISSED,
41 __QDISC_STATE_DRAINING,
42 };
43
44 #define QDISC_STATE_MISSED BIT(__QDISC_STATE_MISSED)
45 #define QDISC_STATE_DRAINING BIT(__QDISC_STATE_DRAINING)
46
47 #define QDISC_STATE_NON_EMPTY (QDISC_STATE_MISSED | \
48 QDISC_STATE_DRAINING)
49
50 struct qdisc_size_table {
51 struct rcu_head rcu;
52 struct list_head list;
53 struct tc_sizespec szopts;
54 int refcnt;
55 u16 data[];
56 };
57
58 /* similar to sk_buff_head, but skb->prev pointer is undefined. */
59 struct qdisc_skb_head {
60 struct sk_buff *head;
61 struct sk_buff *tail;
62 __u32 qlen;
63 spinlock_t lock;
64 };
65
66 struct Qdisc {
67 int (*enqueue)(struct sk_buff *skb,
68 struct Qdisc *sch,
69 struct sk_buff **to_free);
70 struct sk_buff * (*dequeue)(struct Qdisc *sch);
71 unsigned int flags;
72 #define TCQ_F_BUILTIN 1
73 #define TCQ_F_INGRESS 2
74 #define TCQ_F_CAN_BYPASS 4
75 #define TCQ_F_MQROOT 8
76 #define TCQ_F_ONETXQUEUE 0x10 /* dequeue_skb() can assume all skbs are for
77 * q->dev_queue: it can test
78 * netif_xmit_frozen_or_stopped() before
79 * dequeuing the next packet.
80 * This is true for MQ/MQPRIO slaves, or for a
81 * non-multiqueue device.
82 */
83 #define TCQ_F_WARN_NONWC (1 << 16)
84 #define TCQ_F_CPUSTATS 0x20 /* run using percpu statistics */
85 #define TCQ_F_NOPARENT 0x40 /* root of its hierarchy :
86 * qdisc_tree_decrease_qlen() should stop.
87 */
88 #define TCQ_F_INVISIBLE 0x80 /* invisible by default in dump */
89 #define TCQ_F_NOLOCK 0x100 /* qdisc does not require locking */
90 #define TCQ_F_OFFLOADED 0x200 /* qdisc is offloaded to HW */
91 u32 limit;
92 const struct Qdisc_ops *ops;
93 struct qdisc_size_table __rcu *stab;
94 struct hlist_node hash;
95 u32 handle;
96 u32 parent;
97
98 struct netdev_queue *dev_queue;
99
100 struct net_rate_estimator __rcu *rate_est;
101 struct gnet_stats_basic_cpu __percpu *cpu_bstats;
102 struct gnet_stats_queue __percpu *cpu_qstats;
103 int pad;
104 refcount_t refcnt;
105
106 /*
107 * For performance's sake on SMP, we put the most frequently modified fields at the end
108 */
109 struct sk_buff_head gso_skb ____cacheline_aligned_in_smp;
110 struct qdisc_skb_head q;
111 struct gnet_stats_basic_packed bstats;
112 seqcount_t running;
113 struct gnet_stats_queue qstats;
114 unsigned long state;
115 struct Qdisc *next_sched;
116 struct sk_buff_head skb_bad_txq;
117
118 spinlock_t busylock ____cacheline_aligned_in_smp;
119 spinlock_t seqlock;
120
121 struct rcu_head rcu;
122
123 ANDROID_KABI_RESERVE(1);
124
125 /* private data */
126 long privdata[] ____cacheline_aligned;
127 };
128
129 static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
130 {
131 if (qdisc->flags & TCQ_F_BUILTIN)
132 return;
133 refcount_inc(&qdisc->refcnt);
134 }
135
136 /* Intended to be used by unlocked users, when concurrent qdisc release is
137 * possible.
138 */
139
140 static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc)
141 {
142 if (qdisc->flags & TCQ_F_BUILTIN)
143 return qdisc;
144 if (refcount_inc_not_zero(&qdisc->refcnt))
145 return qdisc;
146 return NULL;
147 }
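/* Illustrative sketch only (not part of this header): an unlocked reader
 * would typically take the reference inside an RCU read-side section and,
 * once done, drop it with qdisc_put_unlocked(). The "txq" source of the
 * qdisc pointer below is an assumption made for the example.
 *
 *	rcu_read_lock();
 *	q = qdisc_refcount_inc_nz(rcu_dereference(txq->qdisc));
 *	rcu_read_unlock();
 *	if (q) {
 *		... use q ...
 *		qdisc_put_unlocked(q);
 *	}
 */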
148
149 static inline bool qdisc_is_running(struct Qdisc *qdisc)
150 {
151 if (qdisc->flags & TCQ_F_NOLOCK)
152 return spin_is_locked(&qdisc->seqlock);
153 return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
154 }
155
156 static inline bool nolock_qdisc_is_empty(const struct Qdisc *qdisc)
157 {
158 return !(READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY);
159 }
160
161 static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
162 {
163 return q->flags & TCQ_F_CPUSTATS;
164 }
165
166 static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
167 {
168 if (qdisc_is_percpu_stats(qdisc))
169 return nolock_qdisc_is_empty(qdisc);
170 return !READ_ONCE(qdisc->q.qlen);
171 }
172
173 static inline bool qdisc_run_begin(struct Qdisc *qdisc)
174 {
175 if (qdisc->flags & TCQ_F_NOLOCK) {
176 if (spin_trylock(&qdisc->seqlock))
177 return true;
178
179 /* No need to insist if the MISSED flag was already set.
180 * Note that test_and_set_bit() also gives us memory ordering
181 * guarantees wrt potential earlier enqueue() and below
182 * spin_trylock(), both of which are necessary to prevent races
183 */
184 if (test_and_set_bit(__QDISC_STATE_MISSED, &qdisc->state))
185 return false;
186
187 /* Try to take the lock again to make sure that we will either
188 * grab it or the CPU that still has it will see MISSED set
189 * when testing it in qdisc_run_end()
190 */
191 return spin_trylock(&qdisc->seqlock);
192 } else if (qdisc_is_running(qdisc)) {
193 return false;
194 }
195 /* Variant of write_seqcount_begin() telling lockdep a trylock
196 * was attempted.
197 */
198 raw_write_seqcount_begin(&qdisc->running);
199 seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
200 return true;
201 }
202
203 static inline void qdisc_run_end(struct Qdisc *qdisc)
204 {
205 if (qdisc->flags & TCQ_F_NOLOCK) {
206 spin_unlock(&qdisc->seqlock);
207
208 /* spin_unlock() only has store-release semantic. The unlock
209 * and test_bit() ordering is a store-load ordering, so a full
210 * memory barrier is needed here.
211 */
212 smp_mb();
213
214 if (unlikely(test_bit(__QDISC_STATE_MISSED,
215 &qdisc->state)))
216 __netif_schedule(qdisc);
217 } else {
218 write_seqcount_end(&qdisc->running);
219 }
220 }
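/* Illustrative sketch: callers are expected to bracket the dequeue loop
 * with these helpers, roughly as qdisc_run() in net/pkt_sched.h does:
 *
 *	if (qdisc_run_begin(q)) {
 *		__qdisc_run(q);
 *		qdisc_run_end(q);
 *	}
 */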
221
222 static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
223 {
224 return qdisc->flags & TCQ_F_ONETXQUEUE;
225 }
226
227 static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
228 {
229 #ifdef CONFIG_BQL
230 /* Non-BQL migrated drivers will return 0, too. */
231 return dql_avail(&txq->dql);
232 #else
233 return 0;
234 #endif
235 }
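/* Illustrative sketch (assumed caller, not defined here): the core bulk
 * dequeue path is expected to use the remaining BQL budget as a byte limit
 * when pulling additional skbs for a TCQ_F_ONETXQUEUE qdisc, along the
 * lines of:
 *
 *	if (qdisc_may_bulk(q)) {
 *		int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;
 *
 *		while (bytelimit > 0) {
 *			struct sk_buff *nskb = q->dequeue(q);
 *
 *			if (!nskb)
 *				break;
 *			bytelimit -= nskb->len;
 *			... chain nskb after skb ...
 *		}
 *	}
 */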
236
237 struct Qdisc_class_ops {
238 unsigned int flags;
239 /* Child qdisc manipulation */
240 struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *);
241 int (*graft)(struct Qdisc *, unsigned long cl,
242 struct Qdisc *, struct Qdisc **,
243 struct netlink_ext_ack *extack);
244 struct Qdisc * (*leaf)(struct Qdisc *, unsigned long cl);
245 void (*qlen_notify)(struct Qdisc *, unsigned long);
246
247 /* Class manipulation routines */
248 unsigned long (*find)(struct Qdisc *, u32 classid);
249 int (*change)(struct Qdisc *, u32, u32,
250 struct nlattr **, unsigned long *,
251 struct netlink_ext_ack *);
252 int (*delete)(struct Qdisc *, unsigned long,
253 struct netlink_ext_ack *);
254 void (*walk)(struct Qdisc *, struct qdisc_walker * arg);
255
256 /* Filter manipulation */
257 struct tcf_block * (*tcf_block)(struct Qdisc *sch,
258 unsigned long arg,
259 struct netlink_ext_ack *extack);
260 unsigned long (*bind_tcf)(struct Qdisc *, unsigned long,
261 u32 classid);
262 void (*unbind_tcf)(struct Qdisc *, unsigned long);
263
264 /* rtnetlink specific */
265 int (*dump)(struct Qdisc *, unsigned long,
266 struct sk_buff *skb, struct tcmsg*);
267 int (*dump_stats)(struct Qdisc *, unsigned long,
268 struct gnet_dump *);
269
270 ANDROID_KABI_RESERVE(1);
271 };
272
273 /* Qdisc_class_ops flag values */
274
275 /* Implements API that doesn't require rtnl lock */
276 enum qdisc_class_ops_flags {
277 QDISC_CLASS_OPS_DOIT_UNLOCKED = 1,
278 };
279
280 struct Qdisc_ops {
281 struct Qdisc_ops *next;
282 const struct Qdisc_class_ops *cl_ops;
283 char id[IFNAMSIZ];
284 int priv_size;
285 unsigned int static_flags;
286
287 int (*enqueue)(struct sk_buff *skb,
288 struct Qdisc *sch,
289 struct sk_buff **to_free);
290 struct sk_buff * (*dequeue)(struct Qdisc *);
291 struct sk_buff * (*peek)(struct Qdisc *);
292
293 int (*init)(struct Qdisc *sch, struct nlattr *arg,
294 struct netlink_ext_ack *extack);
295 void (*reset)(struct Qdisc *);
296 void (*destroy)(struct Qdisc *);
297 int (*change)(struct Qdisc *sch,
298 struct nlattr *arg,
299 struct netlink_ext_ack *extack);
300 void (*attach)(struct Qdisc *sch);
301 int (*change_tx_queue_len)(struct Qdisc *, unsigned int);
302 void (*change_real_num_tx)(struct Qdisc *sch,
303 unsigned int new_real_tx);
304
305 int (*dump)(struct Qdisc *, struct sk_buff *);
306 int (*dump_stats)(struct Qdisc *, struct gnet_dump *);
307
308 void (*ingress_block_set)(struct Qdisc *sch,
309 u32 block_index);
310 void (*egress_block_set)(struct Qdisc *sch,
311 u32 block_index);
312 u32 (*ingress_block_get)(struct Qdisc *sch);
313 u32 (*egress_block_get)(struct Qdisc *sch);
314
315 struct module *owner;
316
317 ANDROID_KABI_RESERVE(1);
318 };
319
320
321 struct tcf_result {
322 union {
323 struct {
324 unsigned long class;
325 u32 classid;
326 };
327 const struct tcf_proto *goto_tp;
328
329 /* used in the skb_tc_reinsert function */
330 struct {
331 bool ingress;
332 struct gnet_stats_queue *qstats;
333 };
334 };
335 };
336
337 struct tcf_chain;
338
339 struct tcf_proto_ops {
340 struct list_head head;
341 char kind[IFNAMSIZ];
342
343 int (*classify)(struct sk_buff *,
344 const struct tcf_proto *,
345 struct tcf_result *);
346 int (*init)(struct tcf_proto*);
347 void (*destroy)(struct tcf_proto *tp, bool rtnl_held,
348 struct netlink_ext_ack *extack);
349
350 void* (*get)(struct tcf_proto*, u32 handle);
351 void (*put)(struct tcf_proto *tp, void *f);
352 int (*change)(struct net *net, struct sk_buff *,
353 struct tcf_proto*, unsigned long,
354 u32 handle, struct nlattr **,
355 void **, u32,
356 struct netlink_ext_ack *);
357 int (*delete)(struct tcf_proto *tp, void *arg,
358 bool *last, bool rtnl_held,
359 struct netlink_ext_ack *);
360 bool (*delete_empty)(struct tcf_proto *tp);
361 void (*walk)(struct tcf_proto *tp,
362 struct tcf_walker *arg, bool rtnl_held);
363 int (*reoffload)(struct tcf_proto *tp, bool add,
364 flow_setup_cb_t *cb, void *cb_priv,
365 struct netlink_ext_ack *extack);
366 void (*hw_add)(struct tcf_proto *tp,
367 void *type_data);
368 void (*hw_del)(struct tcf_proto *tp,
369 void *type_data);
370 void (*bind_class)(void *, u32, unsigned long,
371 void *, unsigned long);
372 void * (*tmplt_create)(struct net *net,
373 struct tcf_chain *chain,
374 struct nlattr **tca,
375 struct netlink_ext_ack *extack);
376 void (*tmplt_destroy)(void *tmplt_priv);
377
378 /* rtnetlink specific */
379 int (*dump)(struct net*, struct tcf_proto*, void *,
380 struct sk_buff *skb, struct tcmsg*,
381 bool);
382 int (*terse_dump)(struct net *net,
383 struct tcf_proto *tp, void *fh,
384 struct sk_buff *skb,
385 struct tcmsg *t, bool rtnl_held);
386 int (*tmplt_dump)(struct sk_buff *skb,
387 struct net *net,
388 void *tmplt_priv);
389
390 struct module *owner;
391 int flags;
392 };
393
394 /* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags
395 * are expected to implement tcf_proto_ops->delete_empty(), otherwise race
396 * conditions can occur when filters are inserted/deleted simultaneously.
397 */
398 enum tcf_proto_ops_flags {
399 TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
400 };
401
402 struct tcf_proto {
403 /* Fast access part */
404 struct tcf_proto __rcu *next;
405 void __rcu *root;
406
407 /* called under RCU BH lock */
408 int (*classify)(struct sk_buff *,
409 const struct tcf_proto *,
410 struct tcf_result *);
411 __be16 protocol;
412
413 /* All the rest */
414 u32 prio;
415 void *data;
416 const struct tcf_proto_ops *ops;
417 struct tcf_chain *chain;
418 /* Lock protects tcf_proto shared state and can be used by unlocked
419 * classifiers to protect their private data.
420 */
421 spinlock_t lock;
422 bool deleting;
423 refcount_t refcnt;
424 struct rcu_head rcu;
425 struct hlist_node destroy_ht_node;
426 };
427
428 struct qdisc_skb_cb {
429 struct {
430 unsigned int pkt_len;
431 u16 slave_dev_queue_mapping;
432 u16 tc_classid;
433 };
434 #define QDISC_CB_PRIV_LEN 20
435 unsigned char data[QDISC_CB_PRIV_LEN];
436 };
437
438 typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);
439
440 struct tcf_chain {
441 /* Protects filter_chain. */
442 struct mutex filter_chain_lock;
443 struct tcf_proto __rcu *filter_chain;
444 struct list_head list;
445 struct tcf_block *block;
446 u32 index; /* chain index */
447 unsigned int refcnt;
448 unsigned int action_refcnt;
449 bool explicitly_created;
450 bool flushing;
451 const struct tcf_proto_ops *tmplt_ops;
452 void *tmplt_priv;
453 struct rcu_head rcu;
454 };
455
456 struct tcf_block {
457 /* Lock protects tcf_block and lifetime-management data of chains
458 * attached to the block (refcnt, action_refcnt, explicitly_created).
459 */
460 struct mutex lock;
461 struct list_head chain_list;
462 u32 index; /* block index for shared blocks */
463 u32 classid; /* which class this block belongs to */
464 refcount_t refcnt;
465 struct net *net;
466 struct Qdisc *q;
467 struct rw_semaphore cb_lock; /* protects cb_list and offload counters */
468 struct flow_block flow_block;
469 struct list_head owner_list;
470 bool keep_dst;
471 atomic_t offloadcnt; /* Number of offloaded filters */
472 unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
473 unsigned int lockeddevcnt; /* Number of devs that require rtnl lock. */
474 struct {
475 struct tcf_chain *chain;
476 struct list_head filter_chain_list;
477 } chain0;
478 struct rcu_head rcu;
479 DECLARE_HASHTABLE(proto_destroy_ht, 7);
480 struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */
481 };
482
483 static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
484 {
485 return lockdep_is_held(&chain->filter_chain_lock);
486 }
487
488 static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
489 {
490 return lockdep_is_held(&tp->lock);
491 }
492
493 #define tcf_chain_dereference(p, chain) \
494 rcu_dereference_protected(p, lockdep_tcf_chain_is_locked(chain))
495
496 #define tcf_proto_dereference(p, tp) \
497 rcu_dereference_protected(p, lockdep_tcf_proto_is_locked(tp))
498
499 static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
500 {
501 struct qdisc_skb_cb *qcb;
502
503 BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*qcb));
504 BUILD_BUG_ON(sizeof(qcb->data) < sz);
505 }
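/* Illustrative sketch: a qdisc keeping per-packet state in the cb[] area
 * would typically validate its private struct once and then overlay it on
 * qdisc_skb_cb(skb)->data. The struct and helper names are hypothetical:
 *
 *	struct my_skb_cb {
 *		u64 time_to_send;
 *	};
 *
 *	static struct my_skb_cb *my_skb_cb(struct sk_buff *skb)
 *	{
 *		qdisc_cb_private_validate(skb, sizeof(struct my_skb_cb));
 *		return (struct my_skb_cb *)qdisc_skb_cb(skb)->data;
 *	}
 */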
506
507 static inline int qdisc_qlen_cpu(const struct Qdisc *q)
508 {
509 return this_cpu_ptr(q->cpu_qstats)->qlen;
510 }
511
512 static inline int qdisc_qlen(const struct Qdisc *q)
513 {
514 return q->q.qlen;
515 }
516
517 static inline int qdisc_qlen_sum(const struct Qdisc *q)
518 {
519 __u32 qlen = q->qstats.qlen;
520 int i;
521
522 if (qdisc_is_percpu_stats(q)) {
523 for_each_possible_cpu(i)
524 qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
525 } else {
526 qlen += q->q.qlen;
527 }
528
529 return qlen;
530 }
531
532 static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
533 {
534 return (struct qdisc_skb_cb *)skb->cb;
535 }
536
537 static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
538 {
539 return &qdisc->q.lock;
540 }
541
542 static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
543 {
544 struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);
545
546 return q;
547 }
548
549 static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc)
550 {
551 return rcu_dereference_bh(qdisc->dev_queue->qdisc);
552 }
553
554 static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
555 {
556 return qdisc->dev_queue->qdisc_sleeping;
557 }
558
559 /* The qdisc root lock is a mechanism by which the top level
560 * of a qdisc tree can be locked from any qdisc node in the
561 * forest. This allows changing the configuration of some
562 * aspect of the qdisc tree while blocking out asynchronous
563 * qdisc access in the packet processing paths.
564 *
565 * It is only legal to do this when the root will not change
566 * on us. Otherwise we'll potentially lock the wrong qdisc
567 * root. This is enforced by holding the RTNL semaphore, which
568 * all users of this lock accessor must do.
569 */
570 static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
571 {
572 struct Qdisc *root = qdisc_root(qdisc);
573
574 ASSERT_RTNL();
575 return qdisc_lock(root);
576 }
577
578 static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
579 {
580 struct Qdisc *root = qdisc_root_sleeping(qdisc);
581
582 ASSERT_RTNL();
583 return qdisc_lock(root);
584 }
585
586 static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
587 {
588 struct Qdisc *root = qdisc_root_sleeping(qdisc);
589
590 ASSERT_RTNL();
591 return &root->running;
592 }
593
594 static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
595 {
596 return qdisc->dev_queue->dev;
597 }
598
599 static inline void sch_tree_lock(struct Qdisc *q)
600 {
601 if (q->flags & TCQ_F_MQROOT)
602 spin_lock_bh(qdisc_lock(q));
603 else
604 spin_lock_bh(qdisc_root_sleeping_lock(q));
605 }
606
607 static inline void sch_tree_unlock(struct Qdisc *q)
608 {
609 if (q->flags & TCQ_F_MQROOT)
610 spin_unlock_bh(qdisc_lock(q));
611 else
612 spin_unlock_bh(qdisc_root_sleeping_lock(q));
613 }
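/* Illustrative sketch: a qdisc ->change() handler would normally take the
 * tree lock around updates of fields that the dequeue path reads, e.g. a
 * (hypothetical) limit update:
 *
 *	sch_tree_lock(sch);
 *	sch->limit = new_limit;
 *	sch_tree_unlock(sch);
 */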
614
615 extern struct Qdisc noop_qdisc;
616 extern struct Qdisc_ops noop_qdisc_ops;
617 extern struct Qdisc_ops pfifo_fast_ops;
618 extern struct Qdisc_ops mq_qdisc_ops;
619 extern struct Qdisc_ops noqueue_qdisc_ops;
620 extern const struct Qdisc_ops *default_qdisc_ops;
621 static inline const struct Qdisc_ops *
622 get_default_qdisc_ops(const struct net_device *dev, int ntx)
623 {
624 return ntx < dev->real_num_tx_queues ?
625 default_qdisc_ops : &pfifo_fast_ops;
626 }
627
628 struct Qdisc_class_common {
629 u32 classid;
630 struct hlist_node hnode;
631 };
632
633 struct Qdisc_class_hash {
634 struct hlist_head *hash;
635 unsigned int hashsize;
636 unsigned int hashmask;
637 unsigned int hashelems;
638 };
639
640 static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
641 {
642 id ^= id >> 8;
643 id ^= id >> 4;
644 return id & mask;
645 }
646
647 static inline struct Qdisc_class_common *
648 qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
649 {
650 struct Qdisc_class_common *cl;
651 unsigned int h;
652
653 if (!id)
654 return NULL;
655
656 h = qdisc_class_hash(id, hash->hashmask);
657 hlist_for_each_entry(cl, &hash->hash[h], hnode) {
658 if (cl->classid == id)
659 return cl;
660 }
661 return NULL;
662 }
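/* Illustrative sketch: classful qdiscs usually embed a Qdisc_class_common
 * in their per-class struct and resolve classids with qdisc_class_find(),
 * roughly as below (all names are hypothetical):
 *
 *	struct my_class {
 *		struct Qdisc_class_common common;
 *		...
 *	};
 *
 *	static struct my_class *my_find(struct Qdisc_class_hash *clhash, u32 handle)
 *	{
 *		struct Qdisc_class_common *clc;
 *
 *		clc = qdisc_class_find(clhash, handle);
 *		if (!clc)
 *			return NULL;
 *		return container_of(clc, struct my_class, common);
 *	}
 */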
663
664 static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
665 {
666 u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY;
667
668 return (hwtc < netdev_get_num_tc(dev)) ? hwtc : -EINVAL;
669 }
670
671 int qdisc_class_hash_init(struct Qdisc_class_hash *);
672 void qdisc_class_hash_insert(struct Qdisc_class_hash *,
673 struct Qdisc_class_common *);
674 void qdisc_class_hash_remove(struct Qdisc_class_hash *,
675 struct Qdisc_class_common *);
676 void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
677 void qdisc_class_hash_destroy(struct Qdisc_class_hash *);
678
679 int dev_qdisc_change_tx_queue_len(struct net_device *dev);
680 void dev_qdisc_change_real_num_tx(struct net_device *dev,
681 unsigned int new_real_tx);
682 void dev_init_scheduler(struct net_device *dev);
683 void dev_shutdown(struct net_device *dev);
684 void dev_activate(struct net_device *dev);
685 void dev_deactivate(struct net_device *dev);
686 void dev_deactivate_many(struct list_head *head);
687 struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
688 struct Qdisc *qdisc);
689 void qdisc_reset(struct Qdisc *qdisc);
690 void qdisc_put(struct Qdisc *qdisc);
691 void qdisc_put_unlocked(struct Qdisc *qdisc);
692 void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);
693 #ifdef CONFIG_NET_SCHED
694 int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
695 void *type_data);
696 void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
697 struct Qdisc *new, struct Qdisc *old,
698 enum tc_setup_type type, void *type_data,
699 struct netlink_ext_ack *extack);
700 #else
701 static inline int
702 qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
703 void *type_data)
704 {
705 q->flags &= ~TCQ_F_OFFLOADED;
706 return 0;
707 }
708
709 static inline void
710 qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
711 struct Qdisc *new, struct Qdisc *old,
712 enum tc_setup_type type, void *type_data,
713 struct netlink_ext_ack *extack)
714 {
715 }
716 #endif
717 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
718 const struct Qdisc_ops *ops,
719 struct netlink_ext_ack *extack);
720 void qdisc_free(struct Qdisc *qdisc);
721 struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
722 const struct Qdisc_ops *ops, u32 parentid,
723 struct netlink_ext_ack *extack);
724 void __qdisc_calculate_pkt_len(struct sk_buff *skb,
725 const struct qdisc_size_table *stab);
726 int skb_do_redirect(struct sk_buff *);
727
728 static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
729 {
730 #ifdef CONFIG_NET_CLS_ACT
731 return skb->tc_at_ingress;
732 #else
733 return false;
734 #endif
735 }
736
737 static inline bool skb_skip_tc_classify(struct sk_buff *skb)
738 {
739 #ifdef CONFIG_NET_CLS_ACT
740 if (skb->tc_skip_classify) {
741 skb->tc_skip_classify = 0;
742 return true;
743 }
744 #endif
745 return false;
746 }
747
748 /* Reset all TX qdiscs of a device, starting at the given queue index. */
749 static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
750 {
751 struct Qdisc *qdisc;
752
753 for (; i < dev->num_tx_queues; i++) {
754 qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
755 if (qdisc) {
756 spin_lock_bh(qdisc_lock(qdisc));
757 qdisc_reset(qdisc);
758 spin_unlock_bh(qdisc_lock(qdisc));
759 }
760 }
761 }
762
763 /* Are all TX queues of the device empty? */
764 static inline bool qdisc_all_tx_empty(const struct net_device *dev)
765 {
766 unsigned int i;
767
768 rcu_read_lock();
769 for (i = 0; i < dev->num_tx_queues; i++) {
770 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
771 const struct Qdisc *q = rcu_dereference(txq->qdisc);
772
773 if (!qdisc_is_empty(q)) {
774 rcu_read_unlock();
775 return false;
776 }
777 }
778 rcu_read_unlock();
779 return true;
780 }
781
782 /* Are any of the TX qdiscs changing? */
783 static inline bool qdisc_tx_changing(const struct net_device *dev)
784 {
785 unsigned int i;
786
787 for (i = 0; i < dev->num_tx_queues; i++) {
788 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
789 if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
790 return true;
791 }
792 return false;
793 }
794
795 /* Is the device using the noop qdisc on all queues? */
796 static inline bool qdisc_tx_is_noop(const struct net_device *dev)
797 {
798 unsigned int i;
799
800 for (i = 0; i < dev->num_tx_queues; i++) {
801 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
802 if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
803 return false;
804 }
805 return true;
806 }
807
808 static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
809 {
810 return qdisc_skb_cb(skb)->pkt_len;
811 }
812
813 /* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
814 enum net_xmit_qdisc_t {
815 __NET_XMIT_STOLEN = 0x00010000,
816 __NET_XMIT_BYPASS = 0x00020000,
817 };
818
819 #ifdef CONFIG_NET_CLS_ACT
820 #define net_xmit_drop_count(e) ((e) & __NET_XMIT_STOLEN ? 0 : 1)
821 #else
822 #define net_xmit_drop_count(e) (1)
823 #endif
824
825 static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
826 const struct Qdisc *sch)
827 {
828 #ifdef CONFIG_NET_SCHED
829 struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);
830
831 if (stab)
832 __qdisc_calculate_pkt_len(skb, stab);
833 #endif
834 }
835
836 static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
837 struct sk_buff **to_free)
838 {
839 qdisc_calculate_pkt_len(skb, sch);
840 return sch->enqueue(skb, sch, to_free);
841 }
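/* Illustrative sketch: a classful qdisc enqueueing to a child typically
 * propagates the verdict and only counts a drop when the child did not
 * "steal" the packet (see net_xmit_drop_count() above):
 *
 *	ret = qdisc_enqueue(skb, child, to_free);
 *	if (ret != NET_XMIT_SUCCESS) {
 *		if (net_xmit_drop_count(ret))
 *			qdisc_qstats_drop(sch);
 *		return ret;
 *	}
 */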
842
843 static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
844 __u64 bytes, __u32 packets)
845 {
846 bstats->bytes += bytes;
847 bstats->packets += packets;
848 }
849
850 static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
851 const struct sk_buff *skb)
852 {
853 _bstats_update(bstats,
854 qdisc_pkt_len(skb),
855 skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
856 }
857
858 static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
859 __u64 bytes, __u32 packets)
860 {
861 u64_stats_update_begin(&bstats->syncp);
862 _bstats_update(&bstats->bstats, bytes, packets);
863 u64_stats_update_end(&bstats->syncp);
864 }
865
866 static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
867 const struct sk_buff *skb)
868 {
869 u64_stats_update_begin(&bstats->syncp);
870 bstats_update(&bstats->bstats, skb);
871 u64_stats_update_end(&bstats->syncp);
872 }
873
874 static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
875 const struct sk_buff *skb)
876 {
877 bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
878 }
879
880 static inline void qdisc_bstats_update(struct Qdisc *sch,
881 const struct sk_buff *skb)
882 {
883 bstats_update(&sch->bstats, skb);
884 }
885
886 static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
887 const struct sk_buff *skb)
888 {
889 sch->qstats.backlog -= qdisc_pkt_len(skb);
890 }
891
892 static inline void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch,
893 const struct sk_buff *skb)
894 {
895 this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
896 }
897
898 static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
899 const struct sk_buff *skb)
900 {
901 sch->qstats.backlog += qdisc_pkt_len(skb);
902 }
903
904 static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
905 const struct sk_buff *skb)
906 {
907 this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
908 }
909
910 static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
911 {
912 this_cpu_inc(sch->cpu_qstats->qlen);
913 }
914
915 static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
916 {
917 this_cpu_dec(sch->cpu_qstats->qlen);
918 }
919
920 static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
921 {
922 this_cpu_inc(sch->cpu_qstats->requeues);
923 }
924
925 static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
926 {
927 sch->qstats.drops += count;
928 }
929
930 static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
931 {
932 qstats->drops++;
933 }
934
935 static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
936 {
937 qstats->overlimits++;
938 }
939
940 static inline void qdisc_qstats_drop(struct Qdisc *sch)
941 {
942 qstats_drop_inc(&sch->qstats);
943 }
944
945 static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
946 {
947 this_cpu_inc(sch->cpu_qstats->drops);
948 }
949
950 static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
951 {
952 sch->qstats.overlimits++;
953 }
954
955 static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
956 {
957 __u32 qlen = qdisc_qlen_sum(sch);
958
959 return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
960 }
961
962 static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
963 __u32 *backlog)
964 {
965 struct gnet_stats_queue qstats = { 0 };
966 __u32 len = qdisc_qlen_sum(sch);
967
968 __gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
969 *qlen = qstats.qlen;
970 *backlog = qstats.backlog;
971 }
972
973 static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
974 {
975 __u32 qlen, backlog;
976
977 qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
978 qdisc_tree_reduce_backlog(sch, qlen, backlog);
979 }
980
981 static inline void qdisc_purge_queue(struct Qdisc *sch)
982 {
983 __u32 qlen, backlog;
984
985 qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
986 qdisc_reset(sch);
987 qdisc_tree_reduce_backlog(sch, qlen, backlog);
988 }
989
990 static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
991 {
992 qh->head = NULL;
993 qh->tail = NULL;
994 qh->qlen = 0;
995 }
996
997 static inline void __qdisc_enqueue_tail(struct sk_buff *skb,
998 struct qdisc_skb_head *qh)
999 {
1000 struct sk_buff *last = qh->tail;
1001
1002 if (last) {
1003 skb->next = NULL;
1004 last->next = skb;
1005 qh->tail = skb;
1006 } else {
1007 qh->tail = skb;
1008 qh->head = skb;
1009 }
1010 qh->qlen++;
1011 }
1012
1013 static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
1014 {
1015 __qdisc_enqueue_tail(skb, &sch->q);
1016 qdisc_qstats_backlog_inc(sch, skb);
1017 return NET_XMIT_SUCCESS;
1018 }
1019
1020 static inline void __qdisc_enqueue_head(struct sk_buff *skb,
1021 struct qdisc_skb_head *qh)
1022 {
1023 skb->next = qh->head;
1024
1025 if (!qh->head)
1026 qh->tail = skb;
1027 qh->head = skb;
1028 qh->qlen++;
1029 }
1030
1031 static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
1032 {
1033 struct sk_buff *skb = qh->head;
1034
1035 if (likely(skb != NULL)) {
1036 qh->head = skb->next;
1037 qh->qlen--;
1038 if (qh->head == NULL)
1039 qh->tail = NULL;
1040 skb->next = NULL;
1041 }
1042
1043 return skb;
1044 }
1045
1046 static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
1047 {
1048 struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
1049
1050 if (likely(skb != NULL)) {
1051 qdisc_qstats_backlog_dec(sch, skb);
1052 qdisc_bstats_update(sch, skb);
1053 }
1054
1055 return skb;
1056 }
1057
1058 /* Instead of calling kfree_skb() while root qdisc lock is held,
1059 * queue the skb for future freeing at end of __dev_xmit_skb()
1060 */
1061 static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
1062 {
1063 skb->next = *to_free;
1064 *to_free = skb;
1065 }
1066
1067 static inline void __qdisc_drop_all(struct sk_buff *skb,
1068 struct sk_buff **to_free)
1069 {
1070 if (skb->prev)
1071 skb->prev->next = *to_free;
1072 else
1073 skb->next = *to_free;
1074 *to_free = skb;
1075 }
1076
1077 static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
1078 struct qdisc_skb_head *qh,
1079 struct sk_buff **to_free)
1080 {
1081 struct sk_buff *skb = __qdisc_dequeue_head(qh);
1082
1083 if (likely(skb != NULL)) {
1084 unsigned int len = qdisc_pkt_len(skb);
1085
1086 qdisc_qstats_backlog_dec(sch, skb);
1087 __qdisc_drop(skb, to_free);
1088 return len;
1089 }
1090
1091 return 0;
1092 }
1093
1094 static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
1095 {
1096 const struct qdisc_skb_head *qh = &sch->q;
1097
1098 return qh->head;
1099 }
1100
1101 /* generic pseudo peek method for non-work-conserving qdisc */
1102 static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
1103 {
1104 struct sk_buff *skb = skb_peek(&sch->gso_skb);
1105
1106 /* we can reuse ->gso_skb because peek isn't called for root qdiscs */
1107 if (!skb) {
1108 skb = sch->dequeue(sch);
1109
1110 if (skb) {
1111 __skb_queue_head(&sch->gso_skb, skb);
1112 /* it's still part of the queue */
1113 qdisc_qstats_backlog_inc(sch, skb);
1114 sch->q.qlen++;
1115 }
1116 }
1117
1118 return skb;
1119 }
1120
1121 static inline void qdisc_update_stats_at_dequeue(struct Qdisc *sch,
1122 struct sk_buff *skb)
1123 {
1124 if (qdisc_is_percpu_stats(sch)) {
1125 qdisc_qstats_cpu_backlog_dec(sch, skb);
1126 qdisc_bstats_cpu_update(sch, skb);
1127 qdisc_qstats_cpu_qlen_dec(sch);
1128 } else {
1129 qdisc_qstats_backlog_dec(sch, skb);
1130 qdisc_bstats_update(sch, skb);
1131 sch->q.qlen--;
1132 }
1133 }
1134
1135 static inline void qdisc_update_stats_at_enqueue(struct Qdisc *sch,
1136 unsigned int pkt_len)
1137 {
1138 if (qdisc_is_percpu_stats(sch)) {
1139 qdisc_qstats_cpu_qlen_inc(sch);
1140 this_cpu_add(sch->cpu_qstats->backlog, pkt_len);
1141 } else {
1142 sch->qstats.backlog += pkt_len;
1143 sch->q.qlen++;
1144 }
1145 }
1146
1147 /* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
1148 static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
1149 {
1150 struct sk_buff *skb = skb_peek(&sch->gso_skb);
1151
1152 if (skb) {
1153 skb = __skb_dequeue(&sch->gso_skb);
1154 if (qdisc_is_percpu_stats(sch)) {
1155 qdisc_qstats_cpu_backlog_dec(sch, skb);
1156 qdisc_qstats_cpu_qlen_dec(sch);
1157 } else {
1158 qdisc_qstats_backlog_dec(sch, skb);
1159 sch->q.qlen--;
1160 }
1161 } else {
1162 skb = sch->dequeue(sch);
1163 }
1164
1165 return skb;
1166 }
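/* Illustrative sketch: a shaping qdisc typically peeks at its child first
 * and only commits the dequeue once it knows the packet may be sent now
 * ("can_send_now" is a hypothetical check):
 *
 *	skb = child->ops->peek(child);
 *	if (skb) {
 *		if (!can_send_now(q, skb))
 *			return NULL;
 *		skb = qdisc_dequeue_peeked(child);
 *		if (unlikely(!skb))
 *			return NULL;
 *		...
 *	}
 */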
1167
1168 static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
1169 {
1170 /*
1171 * We do not know the backlog in bytes of this list; it
1172 * is up to the caller to correct it.
1173 */
1174 ASSERT_RTNL();
1175 if (qh->qlen) {
1176 rtnl_kfree_skbs(qh->head, qh->tail);
1177
1178 qh->head = NULL;
1179 qh->tail = NULL;
1180 qh->qlen = 0;
1181 }
1182 }
1183
1184 static inline void qdisc_reset_queue(struct Qdisc *sch)
1185 {
1186 __qdisc_reset_queue(&sch->q);
1187 }
1188
1189 static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
1190 struct Qdisc **pold)
1191 {
1192 struct Qdisc *old;
1193
1194 sch_tree_lock(sch);
1195 old = *pold;
1196 *pold = new;
1197 if (old != NULL)
1198 qdisc_purge_queue(old);
1199 sch_tree_unlock(sch);
1200
1201 return old;
1202 }
1203
1204 static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
1205 {
1206 rtnl_kfree_skbs(skb, skb);
1207 qdisc_qstats_drop(sch);
1208 }
1209
1210 static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch,
1211 struct sk_buff **to_free)
1212 {
1213 __qdisc_drop(skb, to_free);
1214 qdisc_qstats_cpu_drop(sch);
1215
1216 return NET_XMIT_DROP;
1217 }
1218
1219 static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
1220 struct sk_buff **to_free)
1221 {
1222 __qdisc_drop(skb, to_free);
1223 qdisc_qstats_drop(sch);
1224
1225 return NET_XMIT_DROP;
1226 }
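/* Illustrative sketch: an ->enqueue() implementation over its hard limit
 * would typically defer the actual freeing through the to_free list, e.g.:
 *
 *	if (unlikely(sch->q.qlen >= sch->limit))
 *		return qdisc_drop(skb, sch, to_free);
 */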
1227
1228 static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
1229 struct sk_buff **to_free)
1230 {
1231 __qdisc_drop_all(skb, to_free);
1232 qdisc_qstats_drop(sch);
1233
1234 return NET_XMIT_DROP;
1235 }
1236
1237 /* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
1238 long it will take to send a packet given its size.
1239 */
1240 static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen)
1241 {
1242 int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;
1243 if (slot < 0)
1244 slot = 0;
1245 slot >>= rtab->rate.cell_log;
1246 if (slot > 255)
1247 return rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF];
1248 return rtab->data[slot];
1249 }
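/* Worked example (assuming cell_align and overhead are 0): with
 * rate.cell_log == 3, a 1000 byte packet maps to slot 1000 >> 3 == 125,
 * so its transmit time is rtab->data[125]; a size whose slot exceeds 255
 * is costed as rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF].
 */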
1250
1251 struct psched_ratecfg {
1252 u64 rate_bytes_ps; /* bytes per second */
1253 u32 mult;
1254 u16 overhead;
1255 u16 mpu;
1256 u8 linklayer;
1257 u8 shift;
1258 };
1259
1260 static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
1261 unsigned int len)
1262 {
1263 len += r->overhead;
1264
1265 if (len < r->mpu)
1266 len = r->mpu;
1267
1268 if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
1269 return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift;
1270
1271 return ((u64)len * r->mult) >> r->shift;
1272 }
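/* Worked example: psched_ratecfg_precompute() is expected to choose mult
 * and shift so that (len * mult) >> shift approximates
 * len * NSEC_PER_SEC / rate_bytes_ps. At 125,000,000 bytes/s (1 Gbit/s) a
 * 1500 byte packet therefore costs about 1500 * 1e9 / 125e6 = 12,000 ns,
 * ignoring the ATM cell rounding branch above.
 */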
1273
1274 void psched_ratecfg_precompute(struct psched_ratecfg *r,
1275 const struct tc_ratespec *conf,
1276 u64 rate64);
1277
1278 static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
1279 const struct psched_ratecfg *r)
1280 {
1281 memset(res, 0, sizeof(*res));
1282
1283 /* The legacy struct tc_ratespec has a 32bit @rate field;
1284 * a Qdisc using a 64bit rate should add new attributes
1285 * in order to maintain compatibility.
1286 */
1287 res->rate = min_t(u64, r->rate_bytes_ps, ~0U);
1288
1289 res->overhead = r->overhead;
1290 res->mpu = r->mpu;
1291 res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
1292 }
1293
1294 struct psched_pktrate {
1295 u64 rate_pkts_ps; /* packets per second */
1296 u32 mult;
1297 u8 shift;
1298 };
1299
1300 static inline u64 psched_pkt2t_ns(const struct psched_pktrate *r,
1301 unsigned int pkt_num)
1302 {
1303 return ((u64)pkt_num * r->mult) >> r->shift;
1304 }
1305
1306 void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64);
1307
1308 /* Mini Qdisc serves the specific needs of the ingress/clsact Qdisc.
1309 * The fast path only needs to access the filter list and to update stats.
1310 */
1311 struct mini_Qdisc {
1312 struct tcf_proto *filter_list;
1313 struct tcf_block *block;
1314 struct gnet_stats_basic_cpu __percpu *cpu_bstats;
1315 struct gnet_stats_queue __percpu *cpu_qstats;
1316 struct rcu_head rcu;
1317 };
1318
1319 static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
1320 const struct sk_buff *skb)
1321 {
1322 bstats_cpu_update(this_cpu_ptr(miniq->cpu_bstats), skb);
1323 }
1324
1325 static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
1326 {
1327 this_cpu_inc(miniq->cpu_qstats->drops);
1328 }
1329
1330 struct mini_Qdisc_pair {
1331 struct mini_Qdisc miniq1;
1332 struct mini_Qdisc miniq2;
1333 struct mini_Qdisc __rcu **p_miniq;
1334 };
1335
1336 void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
1337 struct tcf_proto *tp_head);
1338 void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
1339 struct mini_Qdisc __rcu **p_miniq);
1340 void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
1341 struct tcf_block *block);
1342
1343 int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb));
1344
1345 /* Make sure qdisc is no longer in SCHED state. */
1346 static inline void qdisc_synchronize(const struct Qdisc *q)
1347 {
1348 while (test_bit(__QDISC_STATE_SCHED, &q->state))
1349 msleep(1);
1350 }
1351
1352 #endif
1353