/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __MLX5_EN_STATS_H__
#define __MLX5_EN_STATS_H__

#define MLX5E_READ_CTR64_CPU(ptr, dsc, i) \
	(*(u64 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR64_BE(ptr, dsc, i) \
	be64_to_cpu(*(__be64 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \
	(*(u32 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR32_BE(ptr, dsc, i) \
	be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
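/*
 * Each helper above reads one counter out of an arbitrary stats struct:
 * it adds dsc[i].offset bytes to ptr, dereferences the result at the
 * stated width, and byte-swaps big-endian hardware counters to CPU
 * order. A worked example follows the counter_desc definition below.
 */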

#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_XDPSQ_STAT(type, fld) "tx%d_xdp_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_RQ_XDPSQ_STAT(type, fld) "rx%d_xdp_tx_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_XSKRQ_STAT(type, fld) "rx%d_xsk_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_XSKSQ_STAT(type, fld) "tx%d_xsk_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld)
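/*
 * The MLX5E_DECLARE_*_STAT() helpers expand to the two initializers of a
 * struct counter_desc: an ethtool name (the %d is filled in later with
 * the channel index for per-queue stats) and the field's byte offset
 * inside the given stats type.
 */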

struct counter_desc {
	char		format[ETH_GSTRING_LEN];
	size_t		offset; /* Byte offset */
};
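/*
 * Putting the pieces together -- an illustrative sketch only; the names
 * sw_desc and read_sw below are hypothetical, not part of the driver:
 *
 *	static const struct counter_desc sw_desc[] = {
 *		{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
 *		{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
 *	};
 *
 *	static u64 read_sw(struct mlx5e_sw_stats *s, int i)
 *	{
 *		return MLX5E_READ_CTR64_CPU(s, sw_desc, i);
 *	}
 */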

enum {
	MLX5E_NDO_UPDATE_STATS = BIT(0x1),
};
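/*
 * Groups whose update_stats_mask carries this bit are also refreshed on
 * the ndo_get_stats64 path (see mlx5e_stats_update_ndo_stats() below),
 * not only on explicit ethtool queries.
 */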

struct mlx5e_priv;
struct mlx5e_stats_grp {
	u16 update_stats_mask;
	int (*get_num_stats)(struct mlx5e_priv *priv);
	int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
	int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
	void (*update_stats)(struct mlx5e_priv *priv);
};
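/*
 * Each statistics group plugs into ethtool through this ops table:
 * get_num_stats() returns how many counters the group exposes,
 * fill_strings()/fill_stats() append the counter names/values starting
 * at slot idx and return the next free index, and update_stats(), when
 * provided, refreshes the group's cached counters.
 */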

typedef const struct mlx5e_stats_grp *const mlx5e_stats_grp_t;

#define MLX5E_STATS_GRP_OP(grp, name) mlx5e_stats_grp_ ## grp ## _ ## name

#define MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(grp) \
	int MLX5E_STATS_GRP_OP(grp, num_stats)(struct mlx5e_priv *priv)

#define MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(grp) \
	void MLX5E_STATS_GRP_OP(grp, update_stats)(struct mlx5e_priv *priv)

#define MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(grp) \
	int MLX5E_STATS_GRP_OP(grp, fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx)

#define MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(grp) \
	int MLX5E_STATS_GRP_OP(grp, fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx)

#define MLX5E_STATS_GRP(grp) mlx5e_stats_grp_ ## grp

#define MLX5E_DECLARE_STATS_GRP(grp) \
	const struct mlx5e_stats_grp MLX5E_STATS_GRP(grp)

#define MLX5E_DEFINE_STATS_GRP(grp, mask) \
MLX5E_DECLARE_STATS_GRP(grp) = { \
	.get_num_stats = MLX5E_STATS_GRP_OP(grp, num_stats), \
	.fill_stats    = MLX5E_STATS_GRP_OP(grp, fill_stats), \
	.fill_strings  = MLX5E_STATS_GRP_OP(grp, fill_strings), \
	.update_stats  = MLX5E_STATS_GRP_OP(grp, update_stats), \
	.update_stats_mask = mask, \
}
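/*
 * Usage sketch for the group machinery above ("foo" is a hypothetical
 * group name; the real groups live in en_stats.c):
 *
 *	static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(foo) { return 1; }
 *	static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(foo)
 *	{
 *		strcpy(data + (idx++) * ETH_GSTRING_LEN, "foo_counter");
 *		return idx;
 *	}
 *	static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(foo)
 *	{ data[idx++] = 0; return idx; }
 *	static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(foo) {}
 *
 *	MLX5E_DEFINE_STATS_GRP(foo, 0);
 *
 * The last line emits "const struct mlx5e_stats_grp mlx5e_stats_grp_foo"
 * wired to the four mlx5e_stats_grp_foo_* handlers.
 */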

unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv);
void mlx5e_stats_update(struct mlx5e_priv *priv);
void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx);
void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data);
void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv);

void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
			   struct ethtool_pause_stats *pause_stats);

/* Concrete NIC Stats */

struct mlx5e_sw_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	u64 tx_tso_packets;
	u64 tx_tso_bytes;
	u64 tx_tso_inner_packets;
	u64 tx_tso_inner_bytes;
	u64 tx_added_vlan_packets;
	u64 tx_nop;
	u64 tx_mpwqe_blks;
	u64 tx_mpwqe_pkts;
	u64 rx_lro_packets;
	u64 rx_lro_bytes;
	u64 rx_mcast_packets;
	u64 rx_ecn_mark;
	u64 rx_removed_vlan_packets;
	u64 rx_csum_unnecessary;
	u64 rx_csum_none;
	u64 rx_csum_complete;
	u64 rx_csum_complete_tail;
	u64 rx_csum_complete_tail_slow;
	u64 rx_csum_unnecessary_inner;
	u64 rx_xdp_drop;
	u64 rx_xdp_redirect;
	u64 rx_xdp_tx_xmit;
	u64 rx_xdp_tx_mpwqe;
	u64 rx_xdp_tx_inlnw;
	u64 rx_xdp_tx_nops;
	u64 rx_xdp_tx_full;
	u64 rx_xdp_tx_err;
	u64 rx_xdp_tx_cqe;
	u64 tx_csum_none;
	u64 tx_csum_partial;
	u64 tx_csum_partial_inner;
	u64 tx_queue_stopped;
	u64 tx_queue_dropped;
	u64 tx_xmit_more;
	u64 tx_recover;
	u64 tx_cqes;
	u64 tx_queue_wake;
	u64 tx_cqe_err;
	u64 tx_xdp_xmit;
	u64 tx_xdp_mpwqe;
	u64 tx_xdp_inlnw;
	u64 tx_xdp_nops;
	u64 tx_xdp_full;
	u64 tx_xdp_err;
	u64 tx_xdp_cqes;
	u64 rx_wqe_err;
	u64 rx_mpwqe_filler_cqes;
	u64 rx_mpwqe_filler_strides;
	u64 rx_oversize_pkts_sw_drop;
	u64 rx_buff_alloc_err;
	u64 rx_cqe_compress_blks;
	u64 rx_cqe_compress_pkts;
	u64 rx_cache_reuse;
	u64 rx_cache_full;
	u64 rx_cache_empty;
	u64 rx_cache_busy;
	u64 rx_cache_waive;
	u64 rx_congst_umr;
	u64 rx_arfs_err;
	u64 rx_recover;
	u64 ch_events;
	u64 ch_poll;
	u64 ch_arm;
	u64 ch_aff_change;
	u64 ch_force_irq;
	u64 ch_eq_rearm;

#ifdef CONFIG_MLX5_EN_TLS
	u64 tx_tls_encrypted_packets;
	u64 tx_tls_encrypted_bytes;
	u64 tx_tls_ctx;
	u64 tx_tls_ooo;
	u64 tx_tls_dump_packets;
	u64 tx_tls_dump_bytes;
	u64 tx_tls_resync_bytes;
	u64 tx_tls_skip_no_sync_data;
	u64 tx_tls_drop_no_sync_data;
	u64 tx_tls_drop_bypass_req;

	u64 rx_tls_decrypted_packets;
	u64 rx_tls_decrypted_bytes;
	u64 rx_tls_ctx;
	u64 rx_tls_del;
	u64 rx_tls_resync_req_pkt;
	u64 rx_tls_resync_req_start;
	u64 rx_tls_resync_req_end;
	u64 rx_tls_resync_req_skip;
	u64 rx_tls_resync_res_ok;
	u64 rx_tls_resync_res_skip;
	u64 rx_tls_err;
#endif

	u64 rx_xsk_packets;
	u64 rx_xsk_bytes;
	u64 rx_xsk_csum_complete;
	u64 rx_xsk_csum_unnecessary;
	u64 rx_xsk_csum_unnecessary_inner;
	u64 rx_xsk_csum_none;
	u64 rx_xsk_ecn_mark;
	u64 rx_xsk_removed_vlan_packets;
	u64 rx_xsk_xdp_drop;
	u64 rx_xsk_xdp_redirect;
	u64 rx_xsk_wqe_err;
	u64 rx_xsk_mpwqe_filler_cqes;
	u64 rx_xsk_mpwqe_filler_strides;
	u64 rx_xsk_oversize_pkts_sw_drop;
	u64 rx_xsk_buff_alloc_err;
	u64 rx_xsk_cqe_compress_blks;
	u64 rx_xsk_cqe_compress_pkts;
	u64 rx_xsk_congst_umr;
	u64 rx_xsk_arfs_err;
	u64 tx_xsk_xmit;
	u64 tx_xsk_mpwqe;
	u64 tx_xsk_inlnw;
	u64 tx_xsk_full;
	u64 tx_xsk_err;
	u64 tx_xsk_cqes;
};

struct mlx5e_qcounter_stats {
	u32 rx_out_of_buffer;
	u32 rx_if_down_packets;
};

struct mlx5e_vnic_env_stats {
	__be64 query_vnic_env_out[MLX5_ST_SZ_QW(query_vnic_env_out)];
};

#define VPORT_COUNTER_GET(vstats, c) MLX5_GET64(query_vport_counter_out, \
						(vstats)->query_vport_out, c)
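/*
 * MLX5_GET64() pulls one named 64-bit field out of the raw firmware
 * response by the query_vport_counter_out layout, e.g.
 * VPORT_COUNTER_GET(vstats, received_eth_unicast.packets) (field path
 * shown for illustration; see mlx5_ifc.h for the actual layout).
 */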

struct mlx5e_vport_stats {
	__be64 query_vport_out[MLX5_ST_SZ_QW(query_vport_counter_out)];
};

#define PPORT_802_3_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, (pstats)->IEEE_802_3_counters, \
		   counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
#define PPORT_2863_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, (pstats)->RFC_2863_counters, \
		   counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
#define PPORT_2819_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, (pstats)->RFC_2819_counters, \
		   counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
#define PPORT_PHY_STATISTICAL_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, (pstats)->phy_statistical_counters, \
		   counter_set.phys_layer_statistical_cntrs.c##_high)
#define PPORT_PER_PRIO_GET(pstats, prio, c) \
	MLX5_GET64(ppcnt_reg, (pstats)->per_prio_counters[prio], \
		   counter_set.eth_per_prio_grp_data_layout.c##_high)
#define NUM_PPORT_PRIO				8
#define PPORT_ETH_EXT_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, (pstats)->eth_ext_counters, \
		   counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
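/*
 * PPCNT counters are reported as 32-bit _high/_low register pairs; the
 * c##_high token pasting points MLX5_GET64() at the _high word so the
 * full big-endian 64-bit value is read in one go.
 */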

struct mlx5e_pport_stats {
	__be64 IEEE_802_3_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 RFC_2863_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 RFC_2819_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 phy_statistical_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 eth_ext_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 per_tc_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 per_tc_congest_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
};

#define PCIE_PERF_GET(pcie_stats, c) \
	MLX5_GET(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
		 counter_set.pcie_perf_cntrs_grp_data_layout.c)

#define PCIE_PERF_GET64(pcie_stats, c) \
	MLX5_GET64(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
		   counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
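/*
 * As with the PPCNT helpers above: plain 32-bit fields are fetched with
 * MLX5_GET(), while split 64-bit counters are read through their _high
 * word with MLX5_GET64().
 */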

struct mlx5e_pcie_stats {
	__be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
};

struct mlx5e_rq_stats {
	u64 packets;
	u64 bytes;
	u64 csum_complete;
	u64 csum_complete_tail;
	u64 csum_complete_tail_slow;
	u64 csum_unnecessary;
	u64 csum_unnecessary_inner;
	u64 csum_none;
	u64 lro_packets;
	u64 lro_bytes;
	u64 mcast_packets;
	u64 ecn_mark;
	u64 removed_vlan_packets;
	u64 xdp_drop;
	u64 xdp_redirect;
	u64 wqe_err;
	u64 mpwqe_filler_cqes;
	u64 mpwqe_filler_strides;
	u64 oversize_pkts_sw_drop;
	u64 buff_alloc_err;
	u64 cqe_compress_blks;
	u64 cqe_compress_pkts;
	u64 cache_reuse;
	u64 cache_full;
	u64 cache_empty;
	u64 cache_busy;
	u64 cache_waive;
	u64 congst_umr;
	u64 arfs_err;
	u64 recover;
#ifdef CONFIG_MLX5_EN_TLS
	u64 tls_decrypted_packets;
	u64 tls_decrypted_bytes;
	u64 tls_ctx;
	u64 tls_del;
	u64 tls_resync_req_pkt;
	u64 tls_resync_req_start;
	u64 tls_resync_req_end;
	u64 tls_resync_req_skip;
	u64 tls_resync_res_ok;
	u64 tls_resync_res_skip;
	u64 tls_err;
#endif
};

struct mlx5e_sq_stats {
	/* commonly accessed in data path */
	u64 packets;
	u64 bytes;
	u64 xmit_more;
	u64 tso_packets;
	u64 tso_bytes;
	u64 tso_inner_packets;
	u64 tso_inner_bytes;
	u64 csum_partial;
	u64 csum_partial_inner;
	u64 added_vlan_packets;
	u64 nop;
	u64 mpwqe_blks;
	u64 mpwqe_pkts;
#ifdef CONFIG_MLX5_EN_TLS
	u64 tls_encrypted_packets;
	u64 tls_encrypted_bytes;
	u64 tls_ctx;
	u64 tls_ooo;
	u64 tls_dump_packets;
	u64 tls_dump_bytes;
	u64 tls_resync_bytes;
	u64 tls_skip_no_sync_data;
	u64 tls_drop_no_sync_data;
	u64 tls_drop_bypass_req;
#endif
	/* less likely accessed in data path */
	u64 csum_none;
	u64 stopped;
	u64 dropped;
	u64 recover;
	/* dirtied @completion */
	u64 cqes ____cacheline_aligned_in_smp;
	u64 wake;
	u64 cqe_err;
};
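/*
 * The ____cacheline_aligned_in_smp marker starts the completion-path
 * counters (cqes, wake, cqe_err) on a fresh cache line, so writes from
 * CQ polling do not false-share with the xmit-path counters above.
 */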

struct mlx5e_xdpsq_stats {
	u64 xmit;
	u64 mpwqe;
	u64 inlnw;
	u64 nops;
	u64 full;
	u64 err;
	/* dirtied @completion */
	u64 cqes ____cacheline_aligned_in_smp;
};

struct mlx5e_ch_stats {
	u64 events;
	u64 poll;
	u64 arm;
	u64 aff_change;
	u64 force_irq;
	u64 eq_rearm;
};

struct mlx5e_stats {
	struct mlx5e_sw_stats sw;
	struct mlx5e_qcounter_stats qcnt;
	struct mlx5e_vnic_env_stats vnic;
	struct mlx5e_vport_stats vport;
	struct mlx5e_pport_stats pport;
	struct rtnl_link_stats64 vf_vport;
	struct mlx5e_pcie_stats pcie;
};
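/*
 * Aggregate storage for the device-wide groups. The per-ring
 * mlx5e_rq_stats/mlx5e_sq_stats/mlx5e_ch_stats above are kept with
 * their channels and only folded into the sw group on update.
 */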

extern mlx5e_stats_grp_t mlx5e_nic_stats_grps[];
unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv);

extern MLX5E_DECLARE_STATS_GRP(sw);
extern MLX5E_DECLARE_STATS_GRP(qcnt);
extern MLX5E_DECLARE_STATS_GRP(vnic_env);
extern MLX5E_DECLARE_STATS_GRP(vport);
extern MLX5E_DECLARE_STATS_GRP(802_3);
extern MLX5E_DECLARE_STATS_GRP(2863);
extern MLX5E_DECLARE_STATS_GRP(2819);
extern MLX5E_DECLARE_STATS_GRP(phy);
extern MLX5E_DECLARE_STATS_GRP(eth_ext);
extern MLX5E_DECLARE_STATS_GRP(pcie);
extern MLX5E_DECLARE_STATS_GRP(per_prio);
extern MLX5E_DECLARE_STATS_GRP(pme);
extern MLX5E_DECLARE_STATS_GRP(channels);
extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest);
extern MLX5E_DECLARE_STATS_GRP(ipsec_hw);
extern MLX5E_DECLARE_STATS_GRP(ipsec_sw);

#endif /* __MLX5_EN_STATS_H__ */