/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_NET_QUEUES_H
#define _LINUX_NET_QUEUES_H

#include <linux/netdevice.h>

/* See the netdev.yaml spec for definition of each statistic */
struct netdev_queue_stats_rx {
	u64 bytes;
	u64 packets;
	u64 alloc_fail;

	u64 hw_drops;
	u64 hw_drop_overruns;

	u64 csum_unnecessary;
	u64 csum_none;
	u64 csum_bad;

	u64 hw_gro_packets;
	u64 hw_gro_bytes;
	u64 hw_gro_wire_packets;
	u64 hw_gro_wire_bytes;

	u64 hw_drop_ratelimits;
};

struct netdev_queue_stats_tx {
	u64 bytes;
	u64 packets;

	u64 hw_drops;
	u64 hw_drop_errors;

	u64 csum_none;
	u64 needs_csum;

	u64 hw_gso_packets;
	u64 hw_gso_bytes;
	u64 hw_gso_wire_packets;
	u64 hw_gso_wire_bytes;

	u64 hw_drop_ratelimits;

	u64 stop;
	u64 wake;
};

/**
 * struct netdev_stat_ops - netdev ops for fine grained stats
 * @get_queue_stats_rx: get stats for a given Rx queue
 * @get_queue_stats_tx: get stats for a given Tx queue
 * @get_base_stats: get base stats (not belonging to any live instance)
 *
 * Query stats for a given object. The values of the statistics are undefined
 * on entry (specifically they are *not* zero-initialized). Drivers should
 * assign values only to the statistics they collect. Statistics which are not
 * collected must be left undefined.
 *
 * Queue objects are not necessarily persistent, and only currently active
 * queues are queried by the per-queue callbacks. This means that per-queue
 * statistics will not generally add up to the total number of events for
 * the device. The @get_base_stats callback allows filling in the delta
 * between events for currently live queues and overall device history.
 * @get_base_stats can also be used to report any miscellaneous packets
 * transferred outside of the main set of queues used by the networking stack.
 * When the statistics for the entire device are queried, first @get_base_stats
 * is issued to collect the delta, and then a series of per-queue callbacks.
 * Only statistics which are set in @get_base_stats will be reported
 * at the device level, meaning that unlike in queue callbacks, setting
 * a statistic to zero in @get_base_stats is a legitimate thing to do.
 * This is because @get_base_stats has a second function of designating which
 * statistics are in fact correct for the entire device (e.g. when history
 * for some of the events is not maintained, and a reliable "total" cannot
 * be provided).
 *
 * Device drivers can assume that when collecting total device stats,
 * the @get_base_stats and subsequent per-queue calls are performed
 * "atomically" (without releasing the rtnl_lock).
 *
 * Device drivers are encouraged to reset the per-queue statistics when
 * the number of queues changes. This is because the primary use case for
 * per-queue statistics is currently to detect traffic imbalance.
 */
struct netdev_stat_ops {
	void (*get_queue_stats_rx)(struct net_device *dev, int idx,
				   struct netdev_queue_stats_rx *stats);
	void (*get_queue_stats_tx)(struct net_device *dev, int idx,
				   struct netdev_queue_stats_tx *stats);
	void (*get_base_stats)(struct net_device *dev,
			       struct netdev_queue_stats_rx *rx,
			       struct netdev_queue_stats_tx *tx);
};

void netdev_stat_queue_sum(struct net_device *netdev,
			   int rx_start, int rx_end,
			   struct netdev_queue_stats_rx *rx_sum,
			   int tx_start, int tx_end,
			   struct netdev_queue_stats_tx *tx_sum);
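
/* Example: a minimal sketch of how a hypothetical "foo" driver might
 * implement these callbacks. All foo_* names and the private-state layout
 * are illustrative only, not part of this API; the Tx and base callbacks
 * are elided. Note that only the statistics the driver collects are
 * assigned; the rest stay undefined, as required above.
 *
 *	static void foo_get_queue_stats_rx(struct net_device *dev, int idx,
 *					   struct netdev_queue_stats_rx *stats)
 *	{
 *		struct foo_priv *fp = netdev_priv(dev);
 *
 *		stats->packets = fp->rx_ring[idx].packets;
 *		stats->bytes = fp->rx_ring[idx].bytes;
 *		stats->alloc_fail = fp->rx_ring[idx].alloc_fail;
 *	}
 *
 *	static const struct netdev_stat_ops foo_stat_ops = {
 *		.get_queue_stats_rx = foo_get_queue_stats_rx,
 *		.get_queue_stats_tx = foo_get_queue_stats_tx,
 *		.get_base_stats = foo_get_base_stats,
 *	};
 *
 * The ops are typically hooked up once at probe time, before registration,
 * e.g. "dev->stat_ops = &foo_stat_ops;".
 */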

/**
 * struct netdev_queue_mgmt_ops - netdev ops for queue management
 *
 * @ndo_queue_mem_size: Size of the struct that describes a queue's memory.
 *
 * @ndo_queue_mem_alloc: Allocate memory for an RX queue at the specified index.
 *			 The new memory is written at the specified address.
 *
 * @ndo_queue_mem_free: Free memory from an RX queue.
 *
 * @ndo_queue_start: Start an RX queue with the specified memory and at the
 *		     specified index.
 *
 * @ndo_queue_stop: Stop the RX queue at the specified index. The stopped
 *		    queue's memory is written at the specified address.
 */
struct netdev_queue_mgmt_ops {
	size_t ndo_queue_mem_size;
	int (*ndo_queue_mem_alloc)(struct net_device *dev,
				   void *per_queue_mem,
				   int idx);
	void (*ndo_queue_mem_free)(struct net_device *dev,
				   void *per_queue_mem);
	int (*ndo_queue_start)(struct net_device *dev,
			       void *per_queue_mem,
			       int idx);
	int (*ndo_queue_stop)(struct net_device *dev,
			      void *per_queue_mem,
			      int idx);
};
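
/* Example: a sketch of the restart sequence these ops are meant to support
 * when the memory of RX queue @idx is replaced (error handling and locking
 * elided; "ops", "new_mem" and "old_mem" are illustrative locals, not part
 * of this API). @ndo_queue_stop writes the outgoing queue's state into the
 * buffer passed to it, so the old memory can be freed after the new queue
 * has started:
 *
 *	new_mem = kzalloc(ops->ndo_queue_mem_size, GFP_KERNEL);
 *	old_mem = kzalloc(ops->ndo_queue_mem_size, GFP_KERNEL);
 *
 *	ops->ndo_queue_mem_alloc(dev, new_mem, idx);
 *	ops->ndo_queue_stop(dev, old_mem, idx);
 *	ops->ndo_queue_start(dev, new_mem, idx);
 *	ops->ndo_queue_mem_free(dev, old_mem);
 */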

/**
 * DOC: Lockless queue stopping / waking helpers.
 *
 * The netif_txq_maybe_stop() and __netif_txq_completed_wake()
 * macros are designed to safely implement stopping
 * and waking netdev queues without full lock protection.
 *
 * We assume that there can be no concurrent stop attempts and no concurrent
 * wake attempts. The try-stop should happen from the xmit handler,
 * while wake up should be triggered from NAPI poll context.
 * The two may run concurrently (single producer, single consumer).
 *
 * The try-stop side is expected to run from the xmit handler and therefore
 * it does not reschedule Tx (netif_tx_start_queue() instead of
 * netif_tx_wake_queue()). Uses of the ``stop`` macros outside of the xmit
 * handler may lead to the xmit queue being enabled but not run.
 * The waking side does not have similar context restrictions.
 *
 * The macros guarantee that rings will not remain stopped if there's
 * space available, but they do *not* prevent false wakeups when
 * the ring is full! Drivers should check for ring full at the start
 * of the xmit handler.
 *
 * All descriptor ring indexes (and other relevant shared state) must
 * be updated before invoking the macros.
 */

#define netif_txq_try_stop(txq, get_desc, start_thrs)			\
	({								\
		int _res;						\
									\
		netif_tx_stop_queue(txq);				\
		/* Producer index and stop bit must be visible		\
		 * to the consumer before we recheck.			\
		 * Pairs with a barrier in __netif_txq_completed_wake(). \
		 */							\
		smp_mb__after_atomic();					\
									\
		/* We need to check again in case another		\
		 * CPU has just made room available.			\
		 */							\
		_res = 0;						\
		if (unlikely(get_desc >= start_thrs)) {			\
			netif_tx_start_queue(txq);			\
			_res = -1;					\
		}							\
		_res;							\
	})

/**
 * netif_txq_maybe_stop() - locklessly stop a Tx queue, if needed
 * @txq: struct netdev_queue to stop/start
 * @get_desc: get current number of free descriptors (see requirements below!)
 * @stop_thrs: minimal number of available descriptors for queue to be left
 *	       enabled
 * @start_thrs: minimal number of descriptors to re-enable the queue, can be
 *		equal to @stop_thrs or higher to avoid frequent waking
 *
 * All arguments may be evaluated multiple times; beware of side effects.
 * @get_desc must be a formula or a function call; it must always
 * return up-to-date information when evaluated!
 * Expected to be used from ndo_start_xmit; see the comment at the top
 * of the file.
 *
 * Returns:
 *	 0 if the queue was stopped
 *	 1 if the queue was left enabled
 *	-1 if the queue was re-enabled (raced with waking)
 */
#define netif_txq_maybe_stop(txq, get_desc, stop_thrs, start_thrs)	\
	({								\
		int _res;						\
									\
		_res = 1;						\
		if (unlikely(get_desc < stop_thrs))			\
			_res = netif_txq_try_stop(txq, get_desc, start_thrs); \
		_res;							\
	})
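
/* Example: typical shape of an ndo_start_xmit handler using the macro
 * above. This is a sketch only: foo_free_descs() and FOO_DESCS_PER_SKB
 * are hypothetical stand-ins for the driver's own ring accounting.
 * The ring-full check comes first because false wakeups are possible
 * (see the DOC comment above):
 *
 *	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
 *					  struct net_device *dev)
 *	{
 *		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
 *
 *		if (unlikely(foo_free_descs(dev) < FOO_DESCS_PER_SKB))
 *			return NETDEV_TX_BUSY;
 *
 *		... post descriptors, update the producer index ...
 *
 *		netif_txq_maybe_stop(txq, foo_free_descs(dev),
 *				     FOO_DESCS_PER_SKB,
 *				     2 * FOO_DESCS_PER_SKB);
 *		return NETDEV_TX_OK;
 *	}
 */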

/* Variant of netdev_tx_completed_queue() which guarantees smp_mb() if
 * @bytes != 0, regardless of kernel config.
 */
static inline void
netdev_txq_completed_mb(struct netdev_queue *dev_queue,
			unsigned int pkts, unsigned int bytes)
{
	if (IS_ENABLED(CONFIG_BQL))
		netdev_tx_completed_queue(dev_queue, pkts, bytes);
	else if (bytes)
		smp_mb();
}

/**
 * __netif_txq_completed_wake() - locklessly wake a Tx queue, if needed
 * @txq: struct netdev_queue to stop/start
 * @pkts: number of packets completed
 * @bytes: number of bytes completed
 * @get_desc: get current number of free descriptors (see requirements below!)
 * @start_thrs: minimal number of descriptors to re-enable the queue
 * @down_cond: down condition, predicate indicating that the queue should
 *	       not be woken up even if descriptors are available
 *
 * All arguments may be evaluated multiple times.
 * @get_desc must be a formula or a function call; it must always
 * return up-to-date information when evaluated!
 * Reports completed pkts/bytes to BQL.
 *
 * Returns:
 *	 0 if the queue was woken up
 *	 1 if the queue was already enabled (or disabled but @down_cond is true)
 *	-1 if the queue was left unchanged (@start_thrs not reached)
 */
#define __netif_txq_completed_wake(txq, pkts, bytes,			\
				   get_desc, start_thrs, down_cond)	\
	({								\
		int _res;						\
									\
		/* Report to BQL and piggy back on its barrier.		\
		 * Barrier makes sure that anybody stopping the queue	\
		 * after this point sees the new consumer index.	\
		 * Pairs with barrier in netif_txq_try_stop().		\
		 */							\
		netdev_txq_completed_mb(txq, pkts, bytes);		\
									\
		_res = -1;						\
		if (pkts && likely(get_desc >= start_thrs)) {		\
			_res = 1;					\
			if (unlikely(netif_tx_queue_stopped(txq)) &&	\
			    !(down_cond)) {				\
				netif_tx_wake_queue(txq);		\
				_res = 0;				\
			}						\
		}							\
		_res;							\
	})

#define netif_txq_completed_wake(txq, pkts, bytes, get_desc, start_thrs) \
	__netif_txq_completed_wake(txq, pkts, bytes, get_desc, start_thrs, false)
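
/* Example: the matching completion-path usage from NAPI poll, continuing
 * the hypothetical "foo" driver sketch above. After reclaiming done_pkts
 * packets / done_bytes bytes worth of descriptors and updating the
 * consumer index:
 *
 *	netif_txq_completed_wake(txq, done_pkts, done_bytes,
 *				 foo_free_descs(dev),
 *				 2 * FOO_DESCS_PER_SKB);
 */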

/* subqueue variants follow */

#define netif_subqueue_try_stop(dev, idx, get_desc, start_thrs)	\
	({								\
		struct netdev_queue *txq;				\
									\
		txq = netdev_get_tx_queue(dev, idx);			\
		netif_txq_try_stop(txq, get_desc, start_thrs);		\
	})

#define netif_subqueue_maybe_stop(dev, idx, get_desc, stop_thrs, start_thrs) \
	({								\
		struct netdev_queue *txq;				\
									\
		txq = netdev_get_tx_queue(dev, idx);			\
		netif_txq_maybe_stop(txq, get_desc, stop_thrs, start_thrs); \
	})

#define netif_subqueue_completed_wake(dev, idx, pkts, bytes,		\
				      get_desc, start_thrs)		\
	({								\
		struct netdev_queue *txq;				\
									\
		txq = netdev_get_tx_queue(dev, idx);			\
		netif_txq_completed_wake(txq, pkts, bytes,		\
					 get_desc, start_thrs);		\
	})
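
/* Example: multi-queue drivers usually know only the queue index in the
 * completion path, in which case the subqueue variants save the explicit
 * netdev_get_tx_queue() lookup (same hypothetical helpers as above):
 *
 *	netif_subqueue_completed_wake(dev, ring->idx, done_pkts, done_bytes,
 *				      foo_free_descs(dev),
 *				      2 * FOO_DESCS_PER_SKB);
 */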

#endif