/* Intel Ethernet Switch Host Interface Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

#include "fm10k.h"
#include "fm10k_vf.h"
#include "fm10k_pf.h"

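/**
 * fm10k_iov_msg_error - Handler for unknown mailbox messages from a VF
 * @hw: pointer to hardware structure
 * @results: parsed TLV message results
 * @mbx: VF mailbox the message arrived on
 *
 * Log the unrecognized message ID along with the VF index, then defer to
 * the generic TLV error handler.
 **/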
static s32 fm10k_iov_msg_error(struct fm10k_hw *hw, u32 **results,
			       struct fm10k_mbx_info *mbx)
{
	struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
	struct fm10k_intfc *interface = hw->back;
	struct pci_dev *pdev = interface->pdev;

	dev_err(&pdev->dev, "Unknown message ID %u on VF %d\n",
		**results & FM10K_TLV_ID_MASK, vf_info->vf_idx);

	return fm10k_tlv_msg_error(hw, results, mbx);
}

static const struct fm10k_msg_data iov_mbx_data[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_mac_vlan_pf),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_iov_msg_error),
};

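/**
 * fm10k_iov_event - Handle VF reset and mailbox interrupt events
 * @interface: board private structure
 *
 * If a VF level reset (VFLR) is indicated, reset the resources of each
 * affected VF and reconnect its mailbox.  Then walk the MBICR bits and
 * process the mailbox of each VF requesting attention, recording the
 * position in next_vf_mbx if the walk stops early because the PF mailbox
 * has run out of transmit space.
 **/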
s32 fm10k_iov_event(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data;
	s64 mbicr, vflre;
	int i;

	/* if there is no iov_data then there are no mailboxes to process */
	if (!ACCESS_ONCE(interface->iov_data))
		return 0;

	rcu_read_lock();

	iov_data = interface->iov_data;

	/* check again now that we are in the RCU block */
	if (!iov_data)
		goto read_unlock;

	if (!(fm10k_read_reg(hw, FM10K_EICR) & FM10K_EICR_VFLR))
		goto process_mbx;

	/* read VFLRE to determine if any VFs have been reset */
	do {
		vflre = fm10k_read_reg(hw, FM10K_PFVFLRE(0));
		vflre <<= 32;
		vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(1));
		vflre = (vflre << 32) | (vflre >> 32);
		vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(0));

		i = iov_data->num_vfs;

		/* walk the VFLRE bits MSB-first; a negative value means the
		 * VF at the current index has been reset
		 */
		for (vflre <<= 64 - i; vflre && i--; vflre += vflre) {
			struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

			if (vflre >= 0)
				continue;

			hw->iov.ops.reset_resources(hw, vf_info);
			vf_info->mbx.ops.connect(hw, &vf_info->mbx);
		}
	} while (i != iov_data->num_vfs);

process_mbx:
	/* read MBICR to determine which VFs require attention */
	mbicr = fm10k_read_reg(hw, FM10K_MBICR(1));
	mbicr <<= 32;
	mbicr |= fm10k_read_reg(hw, FM10K_MBICR(0));

	i = iov_data->next_vf_mbx ? : iov_data->num_vfs;

	/* walk the MBICR bits MSB-first; a negative value means the VF at
	 * the current index requires attention
	 */
	for (mbicr <<= 64 - i; i--; mbicr += mbicr) {
		struct fm10k_mbx_info *mbx = &iov_data->vf_info[i].mbx;

		if (mbicr >= 0)
			continue;

		if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU))
			break;

		mbx->ops.process(hw, mbx);
	}

	if (i >= 0) {
		iov_data->next_vf_mbx = i + 1;
	} else if (iov_data->next_vf_mbx) {
		iov_data->next_vf_mbx = 0;
		goto process_mbx;
	}
read_unlock:
	rcu_read_unlock();

	return 0;
}

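/**
 * fm10k_iov_mbx - Service the VF mailboxes
 * @interface: board private structure
 *
 * Walk all VF mailboxes under the mailbox lock: reset ports with invalid
 * mappings, reset VFs whose mailbox has timed out, and process any
 * mailbox with pending transmit or receive work.  As in fm10k_iov_event,
 * the position is saved in next_vf_mbx if the SM mailbox runs out of
 * transmit space so the walk can resume later.
 **/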
s32 fm10k_iov_mbx(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data;
	int i;

	/* if there is no iov_data then there are no mailboxes to process */
	if (!ACCESS_ONCE(interface->iov_data))
		return 0;

	rcu_read_lock();

	iov_data = interface->iov_data;

	/* check again now that we are in the RCU block */
	if (!iov_data)
		goto read_unlock;

	/* lock the mailbox for transmit and receive */
	fm10k_mbx_lock(interface);

process_mbx:
	for (i = iov_data->next_vf_mbx ? : iov_data->num_vfs; i--;) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
		struct fm10k_mbx_info *mbx = &vf_info->mbx;
		u16 glort = vf_info->glort;

		/* process the SM mailbox first to drain outgoing messages */
		hw->mbx.ops.process(hw, &hw->mbx);

		/* verify the port mapping is valid; if not, reset the port */
		if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort))
			hw->iov.ops.reset_lport(hw, vf_info);

		/* reset any VF whose mailbox has timed out */
		if (!mbx->timeout) {
			hw->iov.ops.reset_resources(hw, vf_info);
			mbx->ops.connect(hw, mbx);
		}

		/* if no work is pending, just continue */
		if (mbx->ops.tx_complete(mbx) && !mbx->ops.rx_ready(mbx))
			continue;

		/* guarantee we have free space in the SM mailbox */
		if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU))
			break;

		/* clean up the mailbox and process received messages */
		mbx->ops.process(hw, mbx);
	}

	if (i >= 0) {
		iov_data->next_vf_mbx = i + 1;
	} else if (iov_data->next_vf_mbx) {
		iov_data->next_vf_mbx = 0;
		goto process_mbx;
	}

	/* free the lock */
	fm10k_mbx_unlock(interface);

read_unlock:
	rcu_read_unlock();

	return 0;
}

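/**
 * fm10k_iov_suspend - Stop all VFs and release their hardware resources
 * @pdev: PCI device whose VFs are being suspended
 *
 * Clears the VF RSS DGLORT mapping, then resets the resources and logical
 * port of every active VF.
 **/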
void fm10k_iov_suspend(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	int num_vfs, i;

	/* pull out num_vfs from iov_data */
	num_vfs = iov_data ? iov_data->num_vfs : 0;

	/* shut down queue mapping for VFs */
	fm10k_write_reg(hw, FM10K_DGLORTMAP(fm10k_dglort_vf_rss),
			FM10K_DGLORTMAP_NONE);

	/* Stop any active VFs and reset their resources */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		hw->iov.ops.reset_resources(hw, vf_info);
		hw->iov.ops.reset_lport(hw, vf_info);
	}
}

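/**
 * fm10k_iov_resume - Restore hardware resources for all VFs
 * @pdev: PCI device whose VFs are being resumed
 *
 * Reassigns hardware resources to the VFs, reprograms the VF RSS DGLORT
 * mapping, and for each VF restores its logical port, default VLAN, and
 * default MAC/VLAN assignment before reconnecting its mailbox.
 **/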
int fm10k_iov_resume(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_dglort_cfg dglort = { 0 };
	struct fm10k_hw *hw = &interface->hw;
	int num_vfs, i;

	/* pull out num_vfs from iov_data */
	num_vfs = iov_data ? iov_data->num_vfs : 0;

	/* return error if iov_data is not already populated */
	if (!iov_data)
		return -ENOMEM;

	/* allocate hardware resources for the VFs */
	hw->iov.ops.assign_resources(hw, num_vfs, num_vfs);

	/* configure DGLORT mapping for RSS */
	dglort.glort = hw->mac.dglort_map & FM10K_DGLORTMAP_NONE;
	dglort.idx = fm10k_dglort_vf_rss;
	dglort.inner_rss = 1;
	dglort.rss_l = fls(fm10k_queues_per_pool(hw) - 1);
	dglort.queue_b = fm10k_vf_queue_index(hw, 0);
	dglort.vsi_l = fls(hw->iov.total_vfs - 1);
	dglort.vsi_b = 1;

	hw->mac.ops.configure_dglort_map(hw, &dglort);

	/* assign resources to the device */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		/* allocate all but the last GLORT to the VFs */
		if (i == ((~hw->mac.dglort_map) >> FM10K_DGLORTMAP_MASK_SHIFT))
			break;

		/* assign GLORT to VF, and restrict it to multicast */
		hw->iov.ops.set_lport(hw, vf_info, i,
				      FM10K_VF_FLAG_MULTI_CAPABLE);

		/* assign our default vid to the VF following reset */
		vf_info->sw_vid = hw->mac.default_vid;

		/* mailbox is disconnected so we don't send a message */
		hw->iov.ops.assign_default_mac_vlan(hw, vf_info);

		/* now we are ready so we can connect */
		vf_info->mbx.ops.connect(hw, &vf_info->mbx);
	}

	return 0;
}

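/**
 * fm10k_iov_update_pvid - Record a port VLAN update reported for a glort
 * @interface: board private structure
 * @glort: glort the update applies to
 * @pvid: new port VLAN ID
 *
 * Maps the glort back to a VF index and, if the new pvid differs from the
 * VF's current sw_vid, stores it and pushes the default MAC/VLAN to the
 * VF.
 **/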
s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid)
{
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_vf_info *vf_info;
	u16 vf_idx = (glort - hw->mac.dglort_map) & FM10K_DGLORTMAP_NONE;

	/* no IOV support, not our message to process */
	if (!iov_data)
		return FM10K_ERR_PARAM;

	/* glort outside our range, not our message to process */
	if (vf_idx >= iov_data->num_vfs)
		return FM10K_ERR_PARAM;

	/* determine if an update has occurred and if so notify the VF */
	vf_info = &iov_data->vf_info[vf_idx];
	if (vf_info->sw_vid != pvid) {
		vf_info->sw_vid = pvid;
		hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
	}

	return 0;
}

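/**
 * fm10k_iov_free_data - Tear down the per-VF state for an interface
 * @pdev: PCI device whose iov_data is being released
 *
 * Suspends the VFs to reclaim hardware resources, then frees iov_data via
 * RCU and clears the interface pointer.
 **/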
static void fm10k_iov_free_data(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);

	if (!interface->iov_data)
		return;

	/* reclaim hardware resources */
	fm10k_iov_suspend(pdev);

	/* drop iov_data from interface */
	kfree_rcu(interface->iov_data, rcu);
	interface->iov_data = NULL;
}

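/**
 * fm10k_iov_alloc_data - Allocate and initialize per-VF state
 * @pdev: PCI device requesting VFs
 * @num_vfs: number of VFs to allocate state for
 *
 * Allocates the iov_data structure, records the VSI and index of each VF,
 * initializes the per-VF mailboxes, and then resumes the VFs so hardware
 * resources are assigned.
 **/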
static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	size_t size;
	int i, err;

	/* return error if iov_data is already populated */
	if (iov_data)
		return -EBUSY;

	/* The PF should always be able to assign resources */
	if (!hw->iov.ops.assign_resources)
		return -ENODEV;

	/* nothing to do if no VFs are requested */
	if (!num_vfs)
		return 0;

	/* allocate memory for VF storage */
	size = offsetof(struct fm10k_iov_data, vf_info[num_vfs]);
	iov_data = kzalloc(size, GFP_KERNEL);
	if (!iov_data)
		return -ENOMEM;

	/* record number of VFs */
	iov_data->num_vfs = num_vfs;

	/* loop through vf_info structures initializing each entry */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		/* Record VF VSI value */
		vf_info->vsi = i + 1;
		vf_info->vf_idx = i;

		/* initialize mailbox memory */
		err = fm10k_pfvf_mbx_init(hw, &vf_info->mbx, iov_mbx_data, i);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to initialize SR-IOV mailbox\n");
			kfree(iov_data);
			return err;
		}
	}

	/* assign iov_data to interface */
	interface->iov_data = iov_data;

	/* allocate hardware resources for the VFs */
	fm10k_iov_resume(pdev);

	return 0;
}

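/**
 * fm10k_iov_disable - Disable SR-IOV on a device
 * @pdev: PCI device to disable SR-IOV on
 *
 * Disables SR-IOV unless VFs are still assigned to guests, and releases
 * the per-VF state either way.
 **/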
void fm10k_iov_disable(struct pci_dev *pdev)
{
	if (pci_num_vf(pdev) && pci_vfs_assigned(pdev))
		dev_err(&pdev->dev,
			"Cannot disable SR-IOV while VFs are assigned\n");
	else
		pci_disable_sriov(pdev);

	fm10k_iov_free_data(pdev);
}

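/**
 * fm10k_disable_aer_comp_abort - Mask completer abort in AER severity
 * @pdev: PCI device to update
 *
 * Clears the completer abort bit in the AER uncorrectable error severity
 * register so the error is no longer treated as fatal.
 **/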
static void fm10k_disable_aer_comp_abort(struct pci_dev *pdev)
{
	u32 err_sev;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;

	pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &err_sev);
	err_sev &= ~PCI_ERR_UNC_COMP_ABORT;
	pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, err_sev);
}

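/**
 * fm10k_iov_configure - Configure SR-IOV to the requested number of VFs
 * @pdev: PCI device to configure
 * @num_vfs: number of VFs requested
 *
 * Tears down any existing, unassigned VFs, allocates fresh per-VF state,
 * and enables SR-IOV for the requested number of VFs.  Returns the number
 * of VFs in place, or a negative error code.
 **/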
int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
{
	int current_vfs = pci_num_vf(pdev);
	int err = 0;

	if (current_vfs && pci_vfs_assigned(pdev)) {
		dev_err(&pdev->dev,
			"Cannot modify SR-IOV while VFs are assigned\n");
		num_vfs = current_vfs;
	} else {
		pci_disable_sriov(pdev);
		fm10k_iov_free_data(pdev);
	}

	/* allocate resources for the VFs */
	err = fm10k_iov_alloc_data(pdev, num_vfs);
	if (err)
		return err;

	/* allocate VFs if not already allocated */
	if (num_vfs && (num_vfs != current_vfs)) {
		/* Disable completer abort error reporting as
		 * the VFs can trigger this any time they read a queue
		 * that they don't own.
		 */
		fm10k_disable_aer_comp_abort(pdev);

		err = pci_enable_sriov(pdev, num_vfs);
		if (err) {
			dev_err(&pdev->dev,
				"Enable PCI SR-IOV failed: %d\n", err);
			return err;
		}
	}

	return num_vfs;
}

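/**
 * fm10k_ndo_set_vf_mac - Set the administrative MAC address for a VF
 * @netdev: network interface device structure
 * @vf_idx: index of the VF
 * @mac: new MAC address, or the zero address to clear it
 *
 * Validates the VF index and address, records the MAC, and pushes the
 * default MAC/VLAN to the VF under the mailbox lock.
 **/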
int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_vf_info *vf_info;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	/* verify MAC addr is valid */
	if (!is_zero_ether_addr(mac) && !is_valid_ether_addr(mac))
		return -EINVAL;

	/* record new MAC address */
	vf_info = &iov_data->vf_info[vf_idx];
	ether_addr_copy(vf_info->mac, mac);

	/* assigning the MAC will send a mailbox message so lock is needed */
	fm10k_mbx_lock(interface);

	/* assign MAC address to VF */
	hw->iov.ops.assign_default_mac_vlan(hw, vf_info);

	fm10k_mbx_unlock(interface);

	return 0;
}

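/**
 * fm10k_ndo_set_vf_vlan - Set the port VLAN for a VF
 * @netdev: network interface device structure
 * @vf_idx: index of the VF
 * @vid: VLAN ID to assign (0-4094)
 * @qos: priority, which this driver does not support (must be 0)
 *
 * Records the new pf_vid, clears the VF's VLAN table, and pushes the
 * updated default MAC/VLAN to the VF under the mailbox lock.
 **/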
int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid,
			  u8 qos)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_vf_info *vf_info;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	/* QOS is unsupported and VLAN IDs are accepted in the range 0-4094 */
	if (qos || (vid > (VLAN_VID_MASK - 1)))
		return -EINVAL;

	vf_info = &iov_data->vf_info[vf_idx];

	/* exit if there is nothing to do */
	if (vf_info->pf_vid == vid)
		return 0;

	/* record default VLAN ID for VF */
	vf_info->pf_vid = vid;

	/* assigning the VLAN will send a mailbox message so lock is needed */
	fm10k_mbx_lock(interface);

	/* Clear the VLAN table for the VF */
	hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, vf_info->vsi, false);

	/* Update VF assignment and trigger reset */
	hw->iov.ops.assign_default_mac_vlan(hw, vf_info);

	fm10k_mbx_unlock(interface);

	return 0;
}

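/**
 * fm10k_ndo_set_vf_bw - Set a transmit rate limit for a VF
 * @netdev: network interface device structure
 * @vf_idx: index of the VF
 * @unused: minimum rate argument, not used by this driver
 * @rate: maximum transmit rate in Mb/s
 *
 * Validates the requested rate against FM10K_VF_TC_MIN/MAX, stores it,
 * and programs the VF's traffic class in hardware.
 **/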
int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, int unused,
			int rate)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	/* rate limit cannot be less than 10 Mb/s or greater than link speed */
	if (rate && ((rate < FM10K_VF_TC_MIN) || rate > FM10K_VF_TC_MAX))
		return -EINVAL;

	/* store values */
	iov_data->vf_info[vf_idx].rate = rate;

	/* update hardware configuration */
	hw->iov.ops.configure_tc(hw, vf_idx, rate);

	return 0;
}

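/**
 * fm10k_ndo_get_vf_config - Report a VF's current configuration
 * @netdev: network interface device structure
 * @vf_idx: index of the VF
 * @ivi: structure to fill with the VF's MAC, VLAN, and rate settings
 *
 * Copies the recorded MAC address, port VLAN, and transmit rate for the
 * VF into @ivi.
 **/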
int fm10k_ndo_get_vf_config(struct net_device *netdev,
			    int vf_idx, struct ifla_vf_info *ivi)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_vf_info *vf_info;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	vf_info = &iov_data->vf_info[vf_idx];

	ivi->vf = vf_idx;
	ivi->max_tx_rate = vf_info->rate;
	ivi->min_tx_rate = 0;
	ether_addr_copy(ivi->mac, vf_info->mac);
	ivi->vlan = vf_info->pf_vid;
	ivi->qos = 0;

	return 0;
}