/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __MLX5_EN_STATS_H__
#define __MLX5_EN_STATS_H__

#define MLX5E_READ_CTR64_CPU(ptr, dsc, i) \
	(*(u64 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR64_BE(ptr, dsc, i) \
	be64_to_cpu(*(__be64 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \
	(*(u32 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR32_BE(ptr, dsc, i) \
	be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))

#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)

struct counter_desc {
	char		format[ETH_GSTRING_LEN];
	size_t		offset; /* Byte offset */
};

struct mlx5e_sw_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	u64 tx_tso_packets;
	u64 tx_tso_bytes;
	u64 tx_tso_inner_packets;
	u64 tx_tso_inner_bytes;
	u64 rx_lro_packets;
	u64 rx_lro_bytes;
	u64 rx_csum_unnecessary;
	u64 rx_csum_none;
	u64 rx_csum_complete;
	u64 rx_csum_unnecessary_inner;
	u64 rx_xdp_drop;
	u64 rx_xdp_tx;
	u64 rx_xdp_tx_full;
	u64 tx_csum_none;
	u64 tx_csum_partial;
	u64 tx_csum_partial_inner;
	u64 tx_queue_stopped;
	u64 tx_queue_wake;
	u64 tx_queue_dropped;
	u64 tx_xmit_more;
	u64 rx_wqe_err;
	u64 rx_mpwqe_filler;
	u64 rx_buff_alloc_err;
	u64 rx_cqe_compress_blks;
	u64 rx_cqe_compress_pkts;
	u64 rx_page_reuse;
	u64 rx_cache_reuse;
	u64 rx_cache_full;
	u64 rx_cache_empty;
	u64 rx_cache_busy;
	u64 rx_cache_waive;

	/* Special handling counters */
	u64 link_down_events_phy;
};

static const struct counter_desc sw_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_page_reuse) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
};
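
/*
 * Illustrative sketch, not part of the original file: each descriptor
 * above pairs an ethtool format string with a byte offset into the stats
 * struct, so one generic loop can copy a whole counter group into an
 * ethtool data buffer. The helper name and the flat "data" layout are
 * assumptions made for this example.
 */
static inline int mlx5e_example_copy_sw_stats(struct mlx5e_sw_stats *sw,
					      u64 *data)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sw_stats_desc); i++)
		data[i] = MLX5E_READ_CTR64_CPU(sw, sw_stats_desc, i);

	return i; /* number of counters written */
}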
struct mlx5e_qcounter_stats {
	u32 rx_out_of_buffer;
};

static const struct counter_desc q_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};

#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
#define VPORT_COUNTER_GET(vstats, c) MLX5_GET64(query_vport_counter_out, \
						vstats->query_vport_out, c)

struct mlx5e_vport_stats {
	__be64 query_vport_out[MLX5_ST_SZ_QW(query_vport_counter_out)];
};

static const struct counter_desc vport_stats_desc[] = {
	{ "rx_vport_unicast_packets",
		VPORT_COUNTER_OFF(received_eth_unicast.packets) },
	{ "rx_vport_unicast_bytes",
		VPORT_COUNTER_OFF(received_eth_unicast.octets) },
	{ "tx_vport_unicast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
	{ "tx_vport_unicast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
	{ "rx_vport_multicast_packets",
		VPORT_COUNTER_OFF(received_eth_multicast.packets) },
	{ "rx_vport_multicast_bytes",
		VPORT_COUNTER_OFF(received_eth_multicast.octets) },
	{ "tx_vport_multicast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
	{ "tx_vport_multicast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
	{ "rx_vport_broadcast_packets",
		VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
	{ "rx_vport_broadcast_bytes",
		VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
	{ "tx_vport_broadcast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
	{ "tx_vport_broadcast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
	{ "rx_vport_rdma_unicast_packets",
		VPORT_COUNTER_OFF(received_ib_unicast.packets) },
	{ "rx_vport_rdma_unicast_bytes",
		VPORT_COUNTER_OFF(received_ib_unicast.octets) },
	{ "tx_vport_rdma_unicast_packets",
		VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
	{ "tx_vport_rdma_unicast_bytes",
		VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
	{ "rx_vport_rdma_multicast_packets",
		VPORT_COUNTER_OFF(received_ib_multicast.packets) },
	{ "rx_vport_rdma_multicast_bytes",
		VPORT_COUNTER_OFF(received_ib_multicast.octets) },
	{ "tx_vport_rdma_multicast_packets",
		VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
	{ "tx_vport_rdma_multicast_bytes",
		VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
};
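
/*
 * Illustrative sketch, not part of the original file: vport counters are
 * kept as the raw (big-endian) firmware query reply, so values are
 * extracted with MLX5_GET64 via VPORT_COUNTER_GET rather than read
 * directly from memory. The helper name is an assumption made for this
 * example.
 */
static inline u64 mlx5e_example_vport_rx_unicast_pkts(struct mlx5e_vport_stats *vstats)
{
	return VPORT_COUNTER_GET(vstats, received_eth_unicast.packets);
}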
#define PPORT_802_3_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
#define PPORT_802_3_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, pstats->IEEE_802_3_counters, \
		   counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
#define PPORT_2863_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
#define PPORT_2863_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, pstats->RFC_2863_counters, \
		   counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
#define PPORT_2819_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
#define PPORT_2819_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, pstats->RFC_2819_counters, \
		   counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
#define PPORT_PHY_STATISTICAL_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.phys_layer_statistical_cntrs.c##_high)
#define PPORT_PHY_STATISTICAL_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, (pstats)->phy_statistical_counters, \
		   counter_set.phys_layer_statistical_cntrs.c##_high)
#define PPORT_PER_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_prio_grp_data_layout.c##_high)
#define PPORT_PER_PRIO_GET(pstats, prio, c) \
	MLX5_GET64(ppcnt_reg, pstats->per_prio_counters[prio], \
		   counter_set.eth_per_prio_grp_data_layout.c##_high)
#define NUM_PPORT_PRIO	8
#define PPORT_ETH_EXT_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
#define PPORT_ETH_EXT_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, (pstats)->eth_ext_counters, \
		   counter_set.eth_extended_cntrs_grp_data_layout.c##_high)

struct mlx5e_pport_stats {
	__be64 IEEE_802_3_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 RFC_2863_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 RFC_2819_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 phy_statistical_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 eth_ext_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
};

static const struct counter_desc pport_802_3_stats_desc[] = {
	{ "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
	{ "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
	{ "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
	{ "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
	{ "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
	{ "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
	{ "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
	{ "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
	{ "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
	{ "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
	{ "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
	{ "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
	{ "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
	{ "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
	{ "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
	{ "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
	{ "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
	{ "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
};

static const struct counter_desc pport_2863_stats_desc[] = {
	{ "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
	{ "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
	{ "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
};

static const struct counter_desc pport_2819_stats_desc[] = {
	{ "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
	{ "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
	{ "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
	{ "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
	{ "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
	{ "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
	{ "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
	{ "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
	{ "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
	{ "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
	{ "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
	{ "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
	{ "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};

static const struct counter_desc pport_phy_statistical_stats_desc[] = {
	{ "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
	{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
};

static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
	{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
	{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
	{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
	{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
};

static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
	/* %s is "global" or "prio{i}" */
	{ "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
	{ "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
	{ "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
	{ "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
	{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};

static const struct counter_desc pport_eth_ext_stats_desc[] = {
	{ "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
};

#define PCIE_PERF_OFF(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
#define PCIE_PERF_GET(pcie_stats, c) \
	MLX5_GET(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
		 counter_set.pcie_perf_cntrs_grp_data_layout.c)

#define PCIE_PERF_OFF64(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
#define PCIE_PERF_GET64(pcie_stats, c) \
	MLX5_GET64(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
		   counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)

struct mlx5e_pcie_stats {
	__be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
};

static const struct counter_desc pcie_perf_stats_desc[] = {
	{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
	{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
};

static const struct counter_desc pcie_perf_stats_desc64[] = {
	{ "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
};

static const struct counter_desc pcie_perf_stall_stats_desc[] = {
	{ "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
	{ "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
	{ "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
	{ "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
};
struct mlx5e_rq_stats {
	u64 packets;
	u64 bytes;
	u64 csum_complete;
	u64 csum_unnecessary;
	u64 csum_unnecessary_inner;
	u64 csum_none;
	u64 lro_packets;
	u64 lro_bytes;
	u64 xdp_drop;
	u64 xdp_tx;
	u64 xdp_tx_full;
	u64 wqe_err;
	u64 mpwqe_filler;
	u64 buff_alloc_err;
	u64 cqe_compress_blks;
	u64 cqe_compress_pkts;
	u64 page_reuse;
	u64 cache_reuse;
	u64 cache_full;
	u64 cache_empty;
	u64 cache_busy;
	u64 cache_waive;
};

static const struct counter_desc rq_stats_desc[] = {
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, page_reuse) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
};

struct mlx5e_sq_stats {
	/* commonly accessed in data path */
	u64 packets;
	u64 bytes;
	u64 xmit_more;
	u64 tso_packets;
	u64 tso_bytes;
	u64 tso_inner_packets;
	u64 tso_inner_bytes;
	u64 csum_partial;
	u64 csum_partial_inner;
	u64 nop;
	/* less likely accessed in data path */
	u64 csum_none;
	u64 stopped;
	u64 wake;
	u64 dropped;
};

static const struct counter_desc sq_stats_desc[] = {
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
};

#define NUM_SW_COUNTERS			ARRAY_SIZE(sw_stats_desc)
#define NUM_Q_COUNTERS			ARRAY_SIZE(q_stats_desc)
#define NUM_VPORT_COUNTERS		ARRAY_SIZE(vport_stats_desc)
#define NUM_PPORT_802_3_COUNTERS	ARRAY_SIZE(pport_802_3_stats_desc)
#define NUM_PPORT_2863_COUNTERS		ARRAY_SIZE(pport_2863_stats_desc)
#define NUM_PPORT_2819_COUNTERS		ARRAY_SIZE(pport_2819_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) \
	(ARRAY_SIZE(pport_phy_statistical_stats_desc) * \
	 MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
#define NUM_PCIE_PERF_COUNTERS(priv) \
	(ARRAY_SIZE(pcie_perf_stats_desc) * \
	 MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
#define NUM_PCIE_PERF_COUNTERS64(priv) \
	(ARRAY_SIZE(pcie_perf_stats_desc64) * \
	 MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
#define NUM_PCIE_PERF_STALL_COUNTERS(priv) \
	(ARRAY_SIZE(pcie_perf_stall_stats_desc) * \
	 MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \
	ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
#define NUM_PPORT_PER_PRIO_PFC_COUNTERS \
	ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
#define NUM_PPORT_ETH_EXT_COUNTERS(priv) \
	(ARRAY_SIZE(pport_eth_ext_stats_desc) * \
	 MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
#define NUM_PPORT_COUNTERS(priv)	(NUM_PPORT_802_3_COUNTERS + \
					 NUM_PPORT_2863_COUNTERS + \
					 NUM_PPORT_2819_COUNTERS + \
					 NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) + \
					 NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \
					 NUM_PPORT_PRIO + \
					 NUM_PPORT_ETH_EXT_COUNTERS(priv))
#define NUM_PCIE_COUNTERS(priv)		(NUM_PCIE_PERF_COUNTERS(priv) + \
					 NUM_PCIE_PERF_COUNTERS64(priv) + \
					 NUM_PCIE_PERF_STALL_COUNTERS(priv))
#define NUM_RQ_STATS			ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS			ARRAY_SIZE(sq_stats_desc)

struct mlx5e_stats {
	struct mlx5e_sw_stats sw;
	struct mlx5e_qcounter_stats qcnt;
	struct mlx5e_vport_stats vport;
	struct mlx5e_pport_stats pport;
	struct rtnl_link_stats64 vf_vport;
	struct mlx5e_pcie_stats pcie;
};

static const struct counter_desc mlx5e_pme_status_desc[] = {
	{ "module_unplug", 8 },
};

static const struct counter_desc mlx5e_pme_error_desc[] = {
	{ "module_bus_stuck", 16 },	/* bus stuck (I2C or data shorted) */
	{ "module_high_temp", 48 },	/* high temperature */
	{ "module_bad_shorted", 56 },	/* bad or shorted cable/module */
};
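
/*
 * Illustrative sketch, not part of the original file: the PME descriptors
 * above carry hard-coded byte offsets into the driver's port-module-event
 * counter arrays (plain u64 arrays kept elsewhere in the driver), so the
 * generic MLX5E_READ_CTR64_CPU accessor works for them as well. The
 * counters pointer and helper name are assumptions made for this example.
 */
static inline u64 mlx5e_example_pme_status(u64 *status_counters, int i)
{
	return MLX5E_READ_CTR64_CPU(status_counters, mlx5e_pme_status_desc, i);
}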
#endif /* __MLX5_EN_STATS_H__ */