// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */

/* Inter-Driver Communication */
#include "ice.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"

/**
 * ice_get_auxiliary_drv - retrieve iidc_auxiliary_drv struct
 * @pf: pointer to PF struct
 *
 * This function has to be called with a device_lock on the
 * pf->adev.dev to avoid race conditions.
 */
static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf)
{
	struct auxiliary_device *adev;

	adev = pf->adev;
	if (!adev || !adev->dev.driver)
		return NULL;

	return container_of(adev->dev.driver, struct iidc_auxiliary_drv,
			    adrv.driver);
}

/**
 * ice_send_event_to_aux - send event to RDMA AUX driver
 * @pf: pointer to PF struct
 * @event: event struct
 */
void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event)
{
	struct iidc_auxiliary_drv *iadrv;

	if (WARN_ON_ONCE(!in_task()))
		return;

	mutex_lock(&pf->adev_mutex);
	if (!pf->adev)
		goto finish;

	device_lock(&pf->adev->dev);
	iadrv = ice_get_auxiliary_drv(pf);
	if (iadrv && iadrv->event_handler)
		iadrv->event_handler(pf, event);
	device_unlock(&pf->adev->dev);
finish:
	mutex_unlock(&pf->adev_mutex);
}
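
/* Illustrative sketch, not part of this driver: the receiving side of this
 * dispatch path is an RDMA auxiliary driver that wraps its auxiliary_driver
 * in a struct iidc_auxiliary_drv and provides the event_handler invoked
 * above. Roughly (all "my_" names are hypothetical; the adrv and
 * event_handler members are inferred from the container_of() and call
 * sites in this file):
 *
 *	static void my_event_handler(struct ice_pf *pf,
 *				     struct iidc_event *event)
 *	{
 *		// handle the IIDC event delivered by the PF driver
 *	}
 *
 *	static struct iidc_auxiliary_drv my_drv = {
 *		.adrv = {
 *			.probe = my_probe,
 *			.remove = my_remove,
 *		},
 *		.event_handler = my_event_handler,
 *	};
 */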

/**
 * ice_find_vsi - Find the VSI from VSI ID
 * @pf: The PF pointer to search in
 * @vsi_num: The VSI ID to search for
 */
static struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)
{
	int i;

	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num)
			return pf->vsi[i];
	return NULL;
}

/**
 * ice_add_rdma_qset - Add Leaf Node for RDMA Qset
 * @pf: PF struct
 * @qset: Resource to be allocated
 */
int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
{
	u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS];
	struct ice_vsi *vsi;
	struct device *dev;
	u32 qset_teid;
	u16 qs_handle;
	int status;
	int i;

	if (WARN_ON(!pf || !qset))
		return -EINVAL;

	dev = ice_pf_to_dev(pf);

	if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
		return -EINVAL;

	vsi = ice_get_main_vsi(pf);
	if (!vsi) {
		dev_err(dev, "RDMA QSet invalid VSI\n");
		return -EINVAL;
	}

	ice_for_each_traffic_class(i)
		max_rdmaqs[i] = 0;

	max_rdmaqs[qset->tc]++;
	qs_handle = qset->qs_handle;

	status = ice_cfg_vsi_rdma(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
				  max_rdmaqs);
	if (status) {
		dev_err(dev, "Failed VSI RDMA Qset config\n");
		return status;
	}

	status = ice_ena_vsi_rdma_qset(vsi->port_info, vsi->idx, qset->tc,
				       &qs_handle, 1, &qset_teid);
	if (status) {
		dev_err(dev, "Failed VSI RDMA Qset enable\n");
		return status;
	}
	vsi->qset_handle[qset->tc] = qset->qs_handle;
	qset->teid = qset_teid;

	return 0;
}
EXPORT_SYMBOL_GPL(ice_add_rdma_qset);
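
/* Illustrative usage sketch (hypothetical caller, not part of this file):
 * the RDMA auxiliary driver would fill in the qset parameters consumed
 * above and read back the TEID that ice_add_rdma_qset() reports. Only the
 * tc, qs_handle, vport_id and teid members are assumed here, since they
 * are what this file reads or writes; the "my_" names are placeholders:
 *
 *	struct iidc_rdma_qset_params qset = {};
 *	int err;
 *
 *	qset.tc = 0;			// traffic class to place the Qset in
 *	qset.qs_handle = my_qs_handle;	// RDMA queue set handle
 *	qset.vport_id = my_vsi_num;	// VSI number, used on deletion
 *	err = ice_add_rdma_qset(pf, &qset);
 *	if (!err)
 *		my_teid = qset.teid;	// scheduler node TEID for teardown
 *
 * The matching ice_del_rdma_qset() call below takes the same structure
 * with teid and vport_id still populated.
 */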

/**
 * ice_del_rdma_qset - Delete leaf node for RDMA Qset
 * @pf: PF struct
 * @qset: Resource to be freed
 */
int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
{
	struct ice_vsi *vsi;
	u32 teid;
	u16 q_id;

	if (WARN_ON(!pf || !qset))
		return -EINVAL;

	vsi = ice_find_vsi(pf, qset->vport_id);
	if (!vsi) {
		dev_err(ice_pf_to_dev(pf), "RDMA Invalid VSI\n");
		return -EINVAL;
	}

	q_id = qset->qs_handle;
	teid = qset->teid;

	vsi->qset_handle[qset->tc] = 0;

	return ice_dis_vsi_rdma_qset(vsi->port_info, 1, &teid, &q_id);
}
EXPORT_SYMBOL_GPL(ice_del_rdma_qset);

/**
 * ice_rdma_request_reset - accept request from RDMA to perform a reset
 * @pf: struct for PF
 * @reset_type: type of reset
 */
int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type)
{
	enum ice_reset_req reset;

	if (WARN_ON(!pf))
		return -EINVAL;

	switch (reset_type) {
	case IIDC_PFR:
		reset = ICE_RESET_PFR;
		break;
	case IIDC_CORER:
		reset = ICE_RESET_CORER;
		break;
	case IIDC_GLOBR:
		reset = ICE_RESET_GLOBR;
		break;
	default:
		dev_err(ice_pf_to_dev(pf), "incorrect reset request\n");
		return -EINVAL;
	}

	return ice_schedule_reset(pf, reset);
}
EXPORT_SYMBOL_GPL(ice_rdma_request_reset);
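
/* Illustrative sketch (hypothetical caller): when the auxiliary driver hits
 * an unrecoverable error it can ask the PF driver to schedule one of the
 * resets mapped above, e.g.:
 *
 *	if (ice_rdma_request_reset(pf, IIDC_PFR))
 *		my_handle_reset_failure();	// hypothetical fallback path
 */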

/**
 * ice_rdma_update_vsi_filter - update main VSI filters for RDMA
 * @pf: pointer to struct for PF
 * @vsi_id: VSI HW idx to update filter on
 * @enable: bool whether to enable or disable filters
 */
int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable)
{
	struct ice_vsi *vsi;
	int status;

	if (WARN_ON(!pf))
		return -EINVAL;

	vsi = ice_find_vsi(pf, vsi_id);
	if (!vsi)
		return -EINVAL;

	status = ice_cfg_rdma_fltr(&pf->hw, vsi->idx, enable);
	if (status) {
		dev_err(ice_pf_to_dev(pf), "Failed to %sable RDMA filtering\n",
			enable ? "en" : "dis");
	} else {
		if (enable)
			vsi->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
		else
			vsi->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
	}

	return status;
}
EXPORT_SYMBOL_GPL(ice_rdma_update_vsi_filter);

/**
 * ice_get_qos_params - parse QoS params for RDMA consumption
 * @pf: pointer to PF struct
 * @qos: set of QoS values
 */
void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos)
{
	struct ice_dcbx_cfg *dcbx_cfg;
	unsigned int i;
	u32 up2tc;

	dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
	up2tc = rd32(&pf->hw, PRTDCB_TUP2TC);

	qos->num_tc = ice_dcb_get_num_tc(dcbx_cfg);
	for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
		qos->up2tc[i] = (up2tc >> (i * 3)) & 0x7;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		qos->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i];
}
EXPORT_SYMBOL_GPL(ice_get_qos_params);
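
/* Illustrative usage sketch (hypothetical caller): the auxiliary driver can
 * snapshot the current DCB state through this helper. Only num_tc, up2tc[]
 * and tc_info[].rel_bw are assumed, as those are the members populated
 * above; my_map_priority() is a placeholder:
 *
 *	struct iidc_qos_params qos = {};
 *	unsigned int up;
 *
 *	ice_get_qos_params(pf, &qos);
 *	for (up = 0; up < IIDC_MAX_USER_PRIORITY; up++)
 *		my_map_priority(up, qos.up2tc[up]);
 */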

/**
 * ice_reserve_rdma_qvector - Reserve vector resources for RDMA driver
 * @pf: board private structure to initialize
 */
static int ice_reserve_rdma_qvector(struct ice_pf *pf)
{
	if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
		int index;

		index = ice_get_res(pf, pf->irq_tracker, pf->num_rdma_msix,
				    ICE_RES_RDMA_VEC_ID);
		if (index < 0)
			return index;
		pf->num_avail_sw_msix -= pf->num_rdma_msix;
		pf->rdma_base_vector = (u16)index;
	}
	return 0;
}

/**
 * ice_adev_release - function to be mapped to AUX dev's release op
 * @dev: pointer to device to free
 */
static void ice_adev_release(struct device *dev)
{
	struct iidc_auxiliary_dev *iadev;

	iadev = container_of(dev, struct iidc_auxiliary_dev, adev.dev);
	kfree(iadev);
}

/**
 * ice_plug_aux_dev - allocate and register AUX device
 * @pf: pointer to pf struct
 */
int ice_plug_aux_dev(struct ice_pf *pf)
{
	struct iidc_auxiliary_dev *iadev;
	struct auxiliary_device *adev;
	int ret;

	/* if this PF doesn't support a technology that requires auxiliary
	 * devices, then gracefully exit
	 */
	if (!ice_is_aux_ena(pf))
		return 0;

	iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
	if (!iadev)
		return -ENOMEM;

	adev = &iadev->adev;
	iadev->pf = pf;

	adev->id = pf->aux_idx;
	adev->dev.release = ice_adev_release;
	adev->dev.parent = &pf->pdev->dev;
	adev->name = IIDC_RDMA_ROCE_NAME;

	ret = auxiliary_device_init(adev);
	if (ret) {
		kfree(iadev);
		return ret;
	}

	ret = auxiliary_device_add(adev);
	if (ret) {
		auxiliary_device_uninit(adev);
		return ret;
	}

	mutex_lock(&pf->adev_mutex);
	pf->adev = adev;
	mutex_unlock(&pf->adev_mutex);

	return 0;
}
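
/* Illustrative sketch, not part of this file: the auxiliary device plugged
 * in above is matched on the auxiliary bus by "<module>.<name>", so the
 * consuming driver's id_table would contain roughly the following (only
 * IIDC_RDMA_ROCE_NAME comes from this file; the "ice." module prefix and
 * my_id_table are assumptions):
 *
 *	static const struct auxiliary_device_id my_id_table[] = {
 *		{ .name = "ice." IIDC_RDMA_ROCE_NAME },
 *		{}
 *	};
 *	MODULE_DEVICE_TABLE(auxiliary, my_id_table);
 *
 * with the table referenced from the adrv.id_table member of the
 * iidc_auxiliary_drv sketched earlier and registered through
 * auxiliary_driver_register().
 */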

/**
 * ice_unplug_aux_dev - unregister and free AUX device
 * @pf: pointer to pf struct
 */
void ice_unplug_aux_dev(struct ice_pf *pf)
{
	struct auxiliary_device *adev;

	mutex_lock(&pf->adev_mutex);
	adev = pf->adev;
	pf->adev = NULL;
	mutex_unlock(&pf->adev_mutex);

	if (adev) {
		auxiliary_device_delete(adev);
		auxiliary_device_uninit(adev);
	}
}

/**
 * ice_init_rdma - initializes PF for RDMA use
 * @pf: ptr to ice_pf
 */
int ice_init_rdma(struct ice_pf *pf)
{
	struct device *dev = &pf->pdev->dev;
	int ret;

	/* Reserve vector resources */
	ret = ice_reserve_rdma_qvector(pf);
	if (ret < 0) {
		dev_err(dev, "failed to reserve vectors for RDMA\n");
		return ret;
	}

	return ice_plug_aux_dev(pf);
}