// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
 */

#include "counters.h"
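
/*
 * mlx5_ib_read_counters() reads the hardware counters bound to an
 * ib_counters object and scatters each value into the user buffer at
 * the index given by its description/index pair.
 */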
static int mlx5_ib_read_counters(struct ib_counters *counters,
				 struct ib_counters_read_attr *read_attr,
				 struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
	...
	mutex_lock(&mcounters->mcntrs_mutex);
	if (mcounters->cntrs_max_index > read_attr->ncounters) {
		ret = -EINVAL;
		...
	}
	mread_attr.out = kcalloc(mcounters->counters_num, sizeof(u64),
				 GFP_KERNEL);
	if (!mread_attr.out) {
		ret = -ENOMEM;
		...
	}
	mread_attr.hw_cntrs_hndl = mcounters->hw_cntrs_hndl;
	mread_attr.flags = read_attr->flags;
	ret = mcounters->read_counters(counters->device, &mread_attr);
	...
	/* do the pass over the counters data array to assign according to the
	 * description and index pairs
	 */
	desc = mcounters->counters_data;
	for (i = 0; i < mcounters->ncounters; i++)
		read_attr->counters_buff[desc[i].index] += mread_attr.out[desc[i].description];
	...
	mutex_unlock(&mcounters->mcntrs_mutex);
	...
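
/*
 * Destroy path: drop the cached counter descriptions and, if a hardware
 * flow counter handle was created, release it back to the core driver.
 */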
static int mlx5_ib_destroy_counters(struct ib_counters *counters)
{
	struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);

	mlx5_ib_counters_clear_description(counters);
	if (mcounters->hw_cntrs_hndl)
		mlx5_fc_destroy(to_mdev(counters->device)->mdev,
				mcounters->hw_cntrs_hndl);
	return 0;
}
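
/* Creating the ib_counters object only initializes the mutex that
 * guards its description data.
 */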
static int mlx5_ib_create_counters(struct ib_counters *counters,
				   struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);

	mutex_init(&mcounters->mcntrs_mutex);
	return 0;
}
/* in is_mdev_switchdev_mode() */
	       mlx5_ib_eswitch_mode(mdev->priv.eswitch) == ...
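
/* In switchdev mode a single counter set (port 0) is shared by all
 * ports; otherwise each port carries its own mlx5_ib_counters.
 */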
/* in get_counters() */
	return is_mdev_switchdev_mode(dev->mdev) ? &dev->port[0].cnts :
						   &dev->port[port_num].cnts;
/**
 * mlx5_ib_get_counters_id - Returns counters id to use for device+port
 *
 * mlx5_ib_get_counters_id() Returns counters set id to use for given
 * device and port combination.
 */
	return cnts->set_id;
/* in mlx5_ib_alloc_hw_stats() */
	bool is_switchdev = is_mdev_switchdev_mode(dev->mdev);
	...
	cnts = get_counters(dev, port_num - 1);

	return rdma_alloc_hw_stats_struct(cnts->names,
					   cnts->num_q_counters +
					   cnts->num_cong_counters +
					   cnts->num_ext_ppcnt_counters,
					   RDMA_HW_STATS_DEFAULT_LIFESPAN);
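
/* Q-counter values come back as 32-bit big-endian fields at the
 * per-counter offsets and are widened to u64 for rdma_hw_stats.
 */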
/* in mlx5_ib_query_q_counters() */
	for (i = 0; i < cnts->num_q_counters; i++) {
		val = *(__be32 *)((void *)out + cnts->offsets[i]);
		stats->value[i] = (u64)be32_to_cpu(val);
	}
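
/* Extended PPCNT counters are read through the PPCNT access register
 * and land after the Q and congestion counters in the stats array.
 */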
/* in mlx5_ib_query_ext_ppcnt_counters() */
	int offset = cnts->num_q_counters + cnts->num_cong_counters;
	...
		return -ENOMEM;
	...
	ret = mlx5_core_access_reg(dev->mdev, in, sz, out, sz, MLX5_REG_PPCNT,
				   0, 0);
	...
	for (i = 0; i < cnts->num_ext_ppcnt_counters; i++)
		stats->value[i + offset] =
			be64_to_cpup((__be64 *)(out +
				     cnts->offsets[i + offset]));
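
/*
 * One rdma_hw_stats buffer is filled from up to three sources: the Q
 * counters, the extended PPCNT counters (when the PCAM feature is
 * present) and the congestion counters (when cc_query_allowed is set).
 */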
/* in mlx5_ib_get_hw_stats() */
	const struct mlx5_ib_counters *cnts = get_counters(dev, port_num - 1);
	...
		return -EINVAL;

	num_counters = cnts->num_q_counters +
		       cnts->num_cong_counters +
		       cnts->num_ext_ppcnt_counters;
	...
	ret = mlx5_ib_query_q_counters(dev->mdev, cnts, stats, cnts->set_id);
	...
	if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
		...
	}

	if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
		...
		/* If the port is not affiliated yet, it is in the down state,
		 * which doesn't have any counters yet, so it would be
		 * zero.
		 */
		...
		ret = mlx5_lag_query_cong_counters(dev->mdev,
						   stats->value +
						   cnts->num_q_counters,
						   cnts->num_cong_counters,
						   cnts->offsets +
						   cnts->num_q_counters);
		...
	}
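
/*
 * The rdma_counter ops below back per-QP statistics binding: allocate
 * a stats structure, refresh it from the Q counters, and allocate or
 * release the underlying queue counter set id.
 */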
/* in mlx5_ib_counter_alloc_stats() */
	struct mlx5_ib_dev *dev = to_mdev(counter->device);
	const struct mlx5_ib_counters *cnts =
		get_counters(dev, counter->port - 1);

	return rdma_alloc_hw_stats_struct(cnts->names,
					   cnts->num_q_counters +
					   cnts->num_cong_counters +
					   cnts->num_ext_ppcnt_counters,
					   RDMA_HW_STATS_DEFAULT_LIFESPAN);
/* in mlx5_ib_counter_update_stats() */
	struct mlx5_ib_dev *dev = to_mdev(counter->device);
	const struct mlx5_ib_counters *cnts =
		get_counters(dev, counter->port - 1);

	return mlx5_ib_query_q_counters(dev->mdev, cnts,
					counter->stats, counter->id);
/* in mlx5_ib_counter_dealloc() */
	struct mlx5_ib_dev *dev = to_mdev(counter->device);
	...
	if (!counter->id)
		return 0;
	...
	MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter->id);
	return mlx5_cmd_exec_in(dev->mdev, dealloc_q_counter, in);
/* in mlx5_ib_counter_bind_qp() */
	struct mlx5_ib_dev *dev = to_mdev(qp->device);

	if (!counter->id) {
		...
		err = mlx5_cmd_exec_inout(dev->mdev, alloc_q_counter, in, out);
		...
		counter->id =
			MLX5_GET(alloc_q_counter_out, out, counter_set_id);
	}
	...
	counter->id = 0;
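
/* Counter names and offsets are filled under the same capability
 * checks used to size the arrays in __mlx5_ib_alloc_counters().
 */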
/* in mlx5_ib_fill_counters() */
	if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) { ... }
	if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) { ... }
	if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) { ... }
	if (MLX5_CAP_GEN(dev->mdev, roce_accl)) { ... }
	if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) { ... }
	if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) { ... }
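
/* The allocator first counts the optional counter groups the device
 * supports, then allocates matching name and offset arrays.
 */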
/* in __mlx5_ib_alloc_counters() */
	if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt))
		...
	if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters))
		...
	if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters))
		...
	if (MLX5_CAP_GEN(dev->mdev, roce_accl))
		...
	cnts->num_q_counters = num_counters;

	if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
		cnts->num_cong_counters = ARRAY_SIZE(cong_cnts);
		...
	}
	if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
		cnts->num_ext_ppcnt_counters = ARRAY_SIZE(ext_ppcnt_cnts);
		...
	}
	cnts->names = kcalloc(num_counters, sizeof(*cnts->names), GFP_KERNEL);
	if (!cnts->names)
		return -ENOMEM;

	cnts->offsets = kcalloc(num_counters,
				sizeof(*cnts->offsets), GFP_KERNEL);
	if (!cnts->offsets)
		...
	...
	kfree(cnts->names);
	cnts->names = NULL;
	return -ENOMEM;
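
/* Teardown walks every counter port, issues DEALLOC_Q_COUNTER for each
 * allocated set id and frees the name/offset arrays.
 */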
/* in mlx5_ib_dealloc_counters() */
	num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;
	...
	for (i = 0; i < num_cnt_ports; i++) {
		if (dev->port[i].cnts.set_id) {
			MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
				 dev->port[i].cnts.set_id);
			mlx5_cmd_exec_in(dev->mdev, dealloc_q_counter, in);
		}
		kfree(dev->port[i].cnts.names);
		kfree(dev->port[i].cnts.offsets);
	}
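
/* Per-port setup: each counter port gets its tables filled and a queue
 * counter set id allocated via the ALLOC_Q_COUNTER command.
 */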
/* in mlx5_ib_alloc_counters() */
	is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0;
	num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;
	for (i = 0; i < num_cnt_ports; i++) {
		err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts);
		...
		mlx5_ib_fill_counters(dev, dev->port[i].cnts.names,
				      dev->port[i].cnts.offsets);
		...
		err = mlx5_cmd_exec_inout(dev->mdev, alloc_q_counter, in, out);
		...
		dev->port[i].cnts.set_id =
			MLX5_GET(alloc_q_counter_out, out, counter_set_id);
	}
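
/* Flow-counter backed objects read their values from the mlx5_fc
 * handle as a packet/byte pair.
 */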
/* in read_flow_counters() */
	struct mlx5_fc *fc = read_attr->hw_cntrs_hndl;
	...
	return mlx5_fc_query(dev->mdev, fc,
			     &read_attr->out[IB_COUNTER_PACKETS],
			     &read_attr->out[IB_COUNTER_BYTES]);
/* flow counters currently expose two counters: packets and bytes */
static int counters_set_description(
	struct ib_counters *counters, enum mlx5_ib_counters_type counters_type,
	struct mlx5_ib_flow_counters_desc *desc_data, u32 ncounters)
{
	struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
	...
		return -EINVAL;

	mcounters->type = counters_type;
	mcounters->read_counters = read_flow_counters;
	mcounters->counters_num = FLOW_COUNTERS_NUM;
	mcounters->ncounters = ncounters;
	...
		return -EINVAL;
	...
	mutex_lock(&mcounters->mcntrs_mutex);
	mcounters->counters_data = desc_data;
	mcounters->cntrs_max_index = cntrs_max_index;
	mutex_unlock(&mcounters->mcntrs_mutex);
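
/*
 * Setting counter data copies the user's description/index pairs,
 * creates the hardware flow counter handle on first use, and records
 * the description on the ib_counters object.
 */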
/* in mlx5_ib_flow_counters_set_data() */
	if (ucmd && ucmd->ncounters_data != 0) {
		cntrs_data = ucmd->data;
		if (cntrs_data->ncounters > MAX_COUNTERS_NUM)
			return -EINVAL;

		desc_data = kcalloc(cntrs_data->ncounters,
				    sizeof(*desc_data), GFP_KERNEL);
		if (!desc_data)
			return -ENOMEM;

		if (copy_from_user(desc_data,
				   u64_to_user_ptr(cntrs_data->counters_data),
				   sizeof(*desc_data) * cntrs_data->ncounters)) {
			ret = -EFAULT;
			...
		}
	}

	if (!mcounters->hw_cntrs_hndl) {
		mcounters->hw_cntrs_hndl = mlx5_fc_create(
			to_mdev(ibcounters->device)->mdev, false);
		if (IS_ERR(mcounters->hw_cntrs_hndl)) {
			ret = PTR_ERR(mcounters->hw_cntrs_hndl);
			...
		}
		...
	}
	...
		/* counters already bound to at least one flow */
		if (mcounters->cntrs_max_index) {
			ret = -EINVAL;
			...
		}

		ret = counters_set_description(ibcounters, ...,
					       cntrs_data->ncounters);
		...
	} else if (!mcounters->cntrs_max_index) {
		/* counters not bound yet, must have udata passed */
		ret = -EINVAL;
		...
	}
	...
		mlx5_fc_destroy(to_mdev(ibcounters->device)->mdev,
				mcounters->hw_cntrs_hndl);
		mcounters->hw_cntrs_hndl = NULL;
	...
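
/* Clear the cached description only while a single reference to the
 * ib_counters object remains.
 */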
void mlx5_ib_counters_clear_description(struct ib_counters *counters)
{
	struct mlx5_ib_mcounters *mcounters;

	if (!counters || atomic_read(&counters->usecnt) != 1)
		return;

	mcounters = to_mcounters(counters);

	mutex_lock(&mcounters->mcntrs_mutex);
	kfree(mcounters->counters_data);
	mcounters->counters_data = NULL;
	mcounters->cntrs_max_index = 0;
	mutex_unlock(&mcounters->mcntrs_mutex);
}
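
/* Device wiring: the counter verbs are always registered, while the HW
 * stats ops are only registered when the device exposes Q counters
 * (max_qp_cnt).
 */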
/* in mlx5_ib_counters_init() */
	ib_set_device_ops(&dev->ib_dev, &counters_ops);
	if (!MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
		return;
	ib_set_device_ops(&dev->ib_dev, &hw_stats_ops);
/* in mlx5_ib_counters_cleanup() */
	if (!MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
		return;