// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
 */

#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
#include "mlx5_ib.h"

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

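/*
 * MLX5_IB_METHOD_PD_QUERY: return the device PD number (pdn) backing the
 * given uverbs PD handle in the response attribute.
 */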
static int UVERBS_HANDLER(MLX5_IB_METHOD_PD_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_pd *pd =
		uverbs_attr_get_obj(attrs, MLX5_IB_ATTR_QUERY_PD_HANDLE);
	struct mlx5_ib_pd *mpd = to_mpd(pd);

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_PD_RESP_PDN,
			      &mpd->pdn, sizeof(mpd->pdn));
}

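/*
 * Fill the SW-steering ICM RX/TX addresses for @vport.  Uplink addresses
 * come from the eswitch flow-table caps; other vports are queried with
 * QUERY_ESW_VPORT_CONTEXT.  An address is only reported when the FDB is
 * SW-owned and the value is non-zero.
 */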
static int fill_vport_icm_addr(struct mlx5_core_dev *mdev, u16 vport,
			       struct mlx5_ib_uapi_query_port *info)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
	bool sw_owner_supp;
	u64 icm_rx;
	u64 icm_tx;
	int err;

	sw_owner_supp = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner) ||
			MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner_v2);

	if (vport == MLX5_VPORT_UPLINK) {
		icm_rx = MLX5_CAP64_ESW_FLOWTABLE(mdev,
			sw_steering_uplink_icm_address_rx);
		icm_tx = MLX5_CAP64_ESW_FLOWTABLE(mdev,
			sw_steering_uplink_icm_address_tx);
	} else {
		MLX5_SET(query_esw_vport_context_in, in, opcode,
			 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
		MLX5_SET(query_esw_vport_context_in, in, vport_number, vport);
		MLX5_SET(query_esw_vport_context_in, in, other_vport, true);

		err = mlx5_cmd_exec_inout(mdev, query_esw_vport_context, in,
					  out);

		if (err)
			return err;

		icm_rx = MLX5_GET64(
			query_esw_vport_context_out, out,
			esw_vport_context.sw_steering_vport_icm_address_rx);

		icm_tx = MLX5_GET64(
			query_esw_vport_context_out, out,
			esw_vport_context.sw_steering_vport_icm_address_tx);
	}

	if (sw_owner_supp && icm_rx) {
		info->vport_steering_icm_rx = icm_rx;
		info->flags |=
			MLX5_IB_UAPI_QUERY_PORT_VPORT_STEERING_ICM_RX;
	}

	if (sw_owner_supp && icm_tx) {
		info->vport_steering_icm_tx = icm_tx;
		info->flags |=
			MLX5_IB_UAPI_QUERY_PORT_VPORT_STEERING_ICM_TX;
	}

	return 0;
}

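/*
 * Query the vhca_id of @vport via QUERY_HCA_CAP (general device caps for
 * another function) and report it in @info.
 */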
static int fill_vport_vhca_id(struct mlx5_core_dev *mdev, u16 vport,
			      struct mlx5_ib_uapi_query_port *info)
{
	size_t out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
	void *out;
	int err;

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, other_function, true);
	MLX5_SET(query_hca_cap_in, in, function_id, vport);
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
		 HCA_CAP_OPMOD_GET_CUR);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	info->vport_vhca_id = MLX5_GET(query_hca_cap_out, out,
				       capability.cmd_hca_cap.vhca_id);

	info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT_VHCA_ID;
out:
	kfree(out);
	return err;
}

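/*
 * Fill the switchdev part of the query_port response for the representor
 * bound to @port_num: vport number, vport vhca_id, eswitch owner vhca_id,
 * SW-steering ICM addresses and, when metadata matching is enabled, the
 * reg_c0 value/mask used to match traffic from this vport.
 */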
static int fill_switchdev_info(struct mlx5_ib_dev *dev, u32 port_num,
			       struct mlx5_ib_uapi_query_port *info)
{
	struct mlx5_eswitch_rep *rep;
	struct mlx5_core_dev *mdev;
	int err;

	rep = dev->port[port_num - 1].rep;
	if (!rep)
		return -EOPNOTSUPP;

	mdev = mlx5_eswitch_get_core_dev(rep->esw);
	if (!mdev)
		return -EINVAL;

	info->vport = rep->vport;
	info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT;

	if (rep->vport != MLX5_VPORT_UPLINK) {
		err = fill_vport_vhca_id(mdev, rep->vport, info);
		if (err)
			return err;
	}

	info->esw_owner_vhca_id = MLX5_CAP_GEN(mdev, vhca_id);
	info->flags |= MLX5_IB_UAPI_QUERY_PORT_ESW_OWNER_VHCA_ID;

	err = fill_vport_icm_addr(mdev, rep->vport, info);
	if (err)
		return err;

	if (mlx5_eswitch_vport_match_metadata_enabled(rep->esw)) {
		info->reg_c0.value = mlx5_eswitch_get_vport_metadata_for_match(
			rep->esw, rep->vport);
		info->reg_c0.mask = mlx5_eswitch_get_vport_metadata_mask();
		info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT_REG_C0;
	}

	return 0;
}

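/*
 * MLX5_IB_METHOD_QUERY_PORT: copy in the requested port number, validate it,
 * and return an mlx5_ib_uapi_query_port structure.  Switchdev details are
 * filled only when the eswitch is in offloads mode; otherwise the structure
 * is returned with flags == 0.
 */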
static int UVERBS_HANDLER(MLX5_IB_METHOD_QUERY_PORT)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_uapi_query_port info = {};
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	u32 port_num;
	int ret;

	if (uverbs_copy_from(&port_num, attrs,
			     MLX5_IB_ATTR_QUERY_PORT_PORT_NUM))
		return -EFAULT;

	c = to_mucontext(ib_uverbs_get_ucontext(attrs));
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	if (!rdma_is_port_valid(&dev->ib_dev, port_num))
		return -EINVAL;

	if (mlx5_eswitch_mode(dev->mdev) == MLX5_ESWITCH_OFFLOADS) {
		ret = fill_switchdev_info(dev, port_num, &info);
		if (ret)
			return ret;
	}

	return uverbs_copy_to_struct_or_zero(attrs, MLX5_IB_ATTR_QUERY_PORT, &info,
					     sizeof(info));
}

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_QUERY_PORT,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_QUERY_PORT_PORT_NUM,
			   UVERBS_ATTR_TYPE(u32), UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_QUERY_PORT,
		UVERBS_ATTR_STRUCT(struct mlx5_ib_uapi_query_port,
				   reg_c0),
		UA_MANDATORY));

ADD_UVERBS_METHODS(mlx5_ib_device,
		   UVERBS_OBJECT_DEVICE,
		   &UVERBS_METHOD(MLX5_IB_METHOD_QUERY_PORT));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_PD_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_QUERY_PD_HANDLE,
			UVERBS_OBJECT_PD,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_PD_RESP_PDN,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

ADD_UVERBS_METHODS(mlx5_ib_pd,
		   UVERBS_OBJECT_PD,
		   &UVERBS_METHOD(MLX5_IB_METHOD_PD_QUERY));

const struct uapi_definition mlx5_ib_std_types_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE(
		UVERBS_OBJECT_PD,
		&mlx5_ib_pd),
	UAPI_DEF_CHAIN_OBJ_TREE(
		UVERBS_OBJECT_DEVICE,
		&mlx5_ib_device),
	{},
};