// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic FCoE Offload Driver
 * Copyright (c) 2016-2018 Cavium Inc.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crc32.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/kthread.h>
#include <linux/phylink.h>
#include <scsi/libfc.h>
#include <scsi/scsi_host.h>
#include <scsi/fc_frame.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include "qedf.h"
#include "qedf_dbg.h"
#include <uapi/linux/pci_regs.h>

const struct qed_fcoe_ops *qed_ops;

static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void qedf_remove(struct pci_dev *pdev);
static void qedf_shutdown(struct pci_dev *pdev);
static void qedf_schedule_recovery_handler(void *dev);
static void qedf_recovery_handler(struct work_struct *work);

/*
 * Driver module parameters.
 */
static unsigned int qedf_dev_loss_tmo = 60;
module_param_named(dev_loss_tmo, qedf_dev_loss_tmo, int, S_IRUGO);
MODULE_PARM_DESC(dev_loss_tmo, " dev_loss_tmo setting for attached "
	"remote ports (default 60)");

uint qedf_debug = QEDF_LOG_INFO;
module_param_named(debug, qedf_debug, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(debug, " Debug mask. Pass '1' to enable default debugging"
	" mask");

static uint qedf_fipvlan_retries = 60;
module_param_named(fipvlan_retries, qedf_fipvlan_retries, int, S_IRUGO);
MODULE_PARM_DESC(fipvlan_retries, " Number of FIP VLAN requests to attempt "
	"before giving up (default 60)");

static uint qedf_fallback_vlan = QEDF_FALLBACK_VLAN;
module_param_named(fallback_vlan, qedf_fallback_vlan, int, S_IRUGO);
MODULE_PARM_DESC(fallback_vlan, " VLAN ID to try if the FIP VLAN request fails "
	"(default 1002).");

static int qedf_default_prio = -1;
module_param_named(default_prio, qedf_default_prio, int, S_IRUGO);
MODULE_PARM_DESC(default_prio, " Override 802.1q priority for FIP and FCoE"
	" traffic (value between 0 and 7, default 3).");

uint qedf_dump_frames;
module_param_named(dump_frames, qedf_dump_frames, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dump_frames, " Print the skb data of FIP and FCoE frames "
	"(default off)");

static uint qedf_queue_depth;
module_param_named(queue_depth, qedf_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(queue_depth, " Sets the queue depth for all LUNs discovered "
	"by the qedf driver. Default is 0 (use OS default).");

uint qedf_io_tracing;
module_param_named(io_tracing, qedf_io_tracing, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(io_tracing, " Enable logging of SCSI requests/completions "
	"into trace buffer. (default off).");

static uint qedf_max_lun = MAX_FIBRE_LUNS;
module_param_named(max_lun, qedf_max_lun, int, S_IRUGO);
MODULE_PARM_DESC(max_lun, " Sets the maximum LUNs per target that the driver "
	"supports. (default 0xffffffff)");

uint qedf_link_down_tmo;
module_param_named(link_down_tmo, qedf_link_down_tmo, int, S_IRUGO);
MODULE_PARM_DESC(link_down_tmo, " Delays informing the fcoe transport that the "
	"link is down by N seconds.");

bool qedf_retry_delay;
module_param_named(retry_delay, qedf_retry_delay, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(retry_delay, " Enable/disable handling of the FCP_RSP IU "
	"retry delay (default off).");

static bool qedf_dcbx_no_wait;
module_param_named(dcbx_no_wait, qedf_dcbx_no_wait, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dcbx_no_wait, " Do not wait for DCBX convergence to start "
	"sending FIP VLAN requests on link up (Default: off).");

static uint qedf_dp_module;
module_param_named(dp_module, qedf_dp_module, uint, S_IRUGO);
MODULE_PARM_DESC(dp_module, " Bit flags controlling verbose printks passed to "
	"the qed module during probe.");

static uint qedf_dp_level = QED_LEVEL_NOTICE;
module_param_named(dp_level, qedf_dp_level, uint, S_IRUGO);
MODULE_PARM_DESC(dp_level, " printk verbosity control passed to qed module "
	"during probe (0-3: 0 more verbose).");

static bool qedf_enable_recovery = true;
module_param_named(enable_recovery, qedf_enable_recovery,
		   bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(enable_recovery, "Enable/disable recovery on driver/firmware "
	"interface level errors. 0 = Disabled, 1 = Enabled (Default: 1).");

struct workqueue_struct *qedf_io_wq;

static struct fcoe_percpu_s qedf_global;
static DEFINE_SPINLOCK(qedf_global_lock);

static struct kmem_cache *qedf_io_work_cache;
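
/*
 * Illustrative usage of the parameters above (values are hypothetical,
 * not recommendations): load-time settings are passed via modprobe, e.g.
 * "modprobe qedf debug=0x1 dump_frames=1 fipvlan_retries=30"; parameters
 * whose permissions include S_IWUSR (e.g. debug, dump_frames) can also be
 * changed at runtime through /sys/module/qedf/parameters/<name>.
 */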

void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id)
{
	int vlan_id_tmp = 0;

	vlan_id_tmp = vlan_id | (qedf->prio << VLAN_PRIO_SHIFT);
	qedf->vlan_id = vlan_id_tmp;
	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
		  "Setting vlan_id=0x%04x prio=%d.\n",
		  vlan_id_tmp, qedf->prio);
}
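
/*
 * Worked example (illustrative): with the fallback VLAN 1002 (0x3ea) and
 * the default priority 3, VLAN_PRIO_SHIFT is 13, so
 * vlan_id_tmp = 0x03ea | (3 << 13) = 0x63ea -- i.e. the stored value is a
 * full 802.1Q TCI (PCP + VID), not just the VLAN ID.
 */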

/* Returns true if we have a valid vlan, false otherwise */
static bool qedf_initiate_fipvlan_req(struct qedf_ctx *qedf)
{
	while (qedf->fipvlan_retries--) {
		/* This is to catch if link goes down during fipvlan retries */
		if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
			QEDF_ERR(&qedf->dbg_ctx, "Link not up.\n");
			return false;
		}

		if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
			QEDF_ERR(&qedf->dbg_ctx, "Driver unloading.\n");
			return false;
		}

		if (qedf->vlan_id > 0) {
			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
				  "vlan = 0x%x already set, calling ctlr_link_up.\n",
				  qedf->vlan_id);
			if (atomic_read(&qedf->link_state) == QEDF_LINK_UP)
				fcoe_ctlr_link_up(&qedf->ctlr);
			return true;
		}

		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
			  "Retry %d.\n", qedf->fipvlan_retries);
		init_completion(&qedf->fipvlan_compl);
		qedf_fcoe_send_vlan_req(qedf);
		wait_for_completion_timeout(&qedf->fipvlan_compl, 1 * HZ);
	}

	return false;
}

static void qedf_handle_link_update(struct work_struct *work)
{
	struct qedf_ctx *qedf =
	    container_of(work, struct qedf_ctx, link_update.work);
	int rc;

	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Entered. link_state=%d.\n",
		  atomic_read(&qedf->link_state));

	if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
		rc = qedf_initiate_fipvlan_req(qedf);
		if (rc)
			return;

		if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
				  "Link is down, resetting vlan_id.\n");
			qedf->vlan_id = 0;
			return;
		}

		/*
		 * If we get here then we never received a response to our
		 * FIP VLAN request, so set the vlan_id to the default and
		 * tell FCoE that the link is up.
		 */
		QEDF_WARN(&(qedf->dbg_ctx), "Did not receive FIP VLAN "
			  "response, falling back to default VLAN %d.\n",
			  qedf_fallback_vlan);
		qedf_set_vlan_id(qedf, qedf_fallback_vlan);

		/*
		 * Zero out data_src_addr so we'll update it with the new
		 * lport port_id.
		 */
		eth_zero_addr(qedf->data_src_addr);
		fcoe_ctlr_link_up(&qedf->ctlr);
	} else if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
		/*
		 * If we hit here and link_down_tmo_valid is still 1 it means
		 * that link_down_tmo timed out, so set it to 0 to make sure
		 * any other readers have accurate state.
		 */
		atomic_set(&qedf->link_down_tmo_valid, 0);
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
			  "Calling fcoe_ctlr_link_down().\n");
		fcoe_ctlr_link_down(&qedf->ctlr);
		if (qedf_wait_for_upload(qedf) == false)
			QEDF_ERR(&qedf->dbg_ctx,
				 "Could not upload all sessions.\n");
		/* Reset the number of FIP VLAN retries */
		qedf->fipvlan_retries = qedf_fipvlan_retries;
	}
}

#define QEDF_FCOE_MAC_METHOD_GRANTED_MAC	1
#define QEDF_FCOE_MAC_METHOD_FCF_MAP		2
#define QEDF_FCOE_MAC_METHOD_FCOE_SET_MAC	3
static void qedf_set_data_src_addr(struct qedf_ctx *qedf, struct fc_frame *fp)
{
	u8 *granted_mac;
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	u8 fc_map[3];
	int method = 0;

	/* Get granted MAC address from FIP FLOGI payload */
	granted_mac = fr_cb(fp)->granted_mac;

	/*
	 * We set the source MAC for FCoE traffic based on the Granted MAC
	 * address from the switch.
	 *
	 * If granted_mac is non-zero, we use that.
	 * If granted_mac is zeroed out, create the FCoE MAC based on
	 * the sel_fcf->fc_map and the d_id of the FLOGI frame.
	 * If sel_fcf->fc_map is 0 then we use the default FCF-MAC plus the
	 * d_id of the FLOGI frame.
	 */
	if (!is_zero_ether_addr(granted_mac)) {
		ether_addr_copy(qedf->data_src_addr, granted_mac);
		method = QEDF_FCOE_MAC_METHOD_GRANTED_MAC;
	} else if (qedf->ctlr.sel_fcf->fc_map != 0) {
		hton24(fc_map, qedf->ctlr.sel_fcf->fc_map);
		qedf->data_src_addr[0] = fc_map[0];
		qedf->data_src_addr[1] = fc_map[1];
		qedf->data_src_addr[2] = fc_map[2];
		qedf->data_src_addr[3] = fh->fh_d_id[0];
		qedf->data_src_addr[4] = fh->fh_d_id[1];
		qedf->data_src_addr[5] = fh->fh_d_id[2];
		method = QEDF_FCOE_MAC_METHOD_FCF_MAP;
	} else {
		fc_fcoe_set_mac(qedf->data_src_addr, fh->fh_d_id);
		method = QEDF_FCOE_MAC_METHOD_FCOE_SET_MAC;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
		  "QEDF data_src_mac=%pM method=%d.\n", qedf->data_src_addr,
		  method);
}
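
/*
 * Worked example (illustrative) of the FCF_MAP method above: with the
 * default FC-MAP 0x0efc00 and a FLOGI d_id of 0xfffffe (the fabric
 * F_Port), the derived source MAC is 0e:fc:00:ff:ff:fe -- the upper three
 * octets come from the FC-MAP and the lower three from the FC_ID.
 */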

static void qedf_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
	void *arg)
{
	struct fc_exch *exch = fc_seq_exch(seq);
	struct fc_lport *lport = exch->lp;
	struct qedf_ctx *qedf = lport_priv(lport);

	if (!qedf) {
		QEDF_ERR(NULL, "qedf is NULL.\n");
		return;
	}

	/*
	 * If ERR_PTR is set then don't try to stat anything as it will cause
	 * a crash when we access fp.
	 */
	if (IS_ERR(fp)) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			  "fp has IS_ERR() set.\n");
		goto skip_stat;
	}

	/* Log stats for FLOGI reject */
	if (fc_frame_payload_op(fp) == ELS_LS_RJT)
		qedf->flogi_failed++;
	else if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
		/* Set the source MAC we will use for FCoE traffic */
		qedf_set_data_src_addr(qedf, fp);
		qedf->flogi_pending = 0;
	}

	/* Complete flogi_compl so we can proceed to sending ADISCs */
	complete(&qedf->flogi_compl);

skip_stat:
	/* Report response to libfc */
	fc_lport_flogi_resp(seq, fp, lport);
}

static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did,
	struct fc_frame *fp, unsigned int op,
	void (*resp)(struct fc_seq *,
	struct fc_frame *,
	void *),
	void *arg, u32 timeout)
{
	struct qedf_ctx *qedf = lport_priv(lport);

	/*
	 * Intercept FLOGI for statistics purposes. Note we use the resp
	 * callback to tell if this is really a FLOGI.
	 */
	if (resp == fc_lport_flogi_resp) {
		qedf->flogi_cnt++;
		if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) {
			schedule_delayed_work(&qedf->stag_work, 2);
			return NULL;
		}
		qedf->flogi_pending++;
		return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp,
				     arg, timeout);
	}

	return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
}

int qedf_send_flogi(struct qedf_ctx *qedf)
{
	struct fc_lport *lport;
	struct fc_frame *fp;

	lport = qedf->lport;

	if (!lport->tt.elsct_send) {
		QEDF_ERR(&qedf->dbg_ctx, "tt.elsct_send not set.\n");
		return -EINVAL;
	}

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx), "fc_frame_alloc failed.\n");
		return -ENOMEM;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		  "Sending FLOGI to reestablish session with switch.\n");
	lport->tt.elsct_send(lport, FC_FID_FLOGI, fp,
	    ELS_FLOGI, qedf_flogi_resp, lport, lport->r_a_tov);

	init_completion(&qedf->flogi_compl);

	return 0;
}

/*
 * This function is called if link_down_tmo is in use. If we get a link up and
 * link_down_tmo has not expired then use just FLOGI/ADISC to recover our
 * sessions with targets. Otherwise, just call fcoe_ctlr_link_up().
 */
static void qedf_link_recovery(struct work_struct *work)
{
	struct qedf_ctx *qedf =
	    container_of(work, struct qedf_ctx, link_recovery.work);
	struct fc_lport *lport = qedf->lport;
	struct fc_rport_priv *rdata;
	bool rc;
	int retries = 30;
	int rval, i;
	struct list_head rdata_login_list;

	INIT_LIST_HEAD(&rdata_login_list);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
		  "Link down tmo did not expire.\n");

	/*
	 * Essentially reset the fcoe_ctlr here without affecting the state
	 * of the libfc structs.
	 */
	qedf->ctlr.state = FIP_ST_LINK_WAIT;
	fcoe_ctlr_link_down(&qedf->ctlr);

	/*
	 * Bring the link up before we send the fipvlan request so libfcoe
	 * can select a new fcf in parallel.
	 */
	fcoe_ctlr_link_up(&qedf->ctlr);

	/* Since the link went down and came back up, verify which VLAN we're on */
	qedf->fipvlan_retries = qedf_fipvlan_retries;
	rc = qedf_initiate_fipvlan_req(qedf);
	/* If getting the VLAN fails, set the VLAN to the fallback one */
	if (!rc)
		qedf_set_vlan_id(qedf, qedf_fallback_vlan);

	/*
	 * We need to wait for an FCF to be selected due to the
	 * fcoe_ctlr_link_up; otherwise the FLOGI will be rejected.
	 */
	while (retries > 0) {
		if (qedf->ctlr.sel_fcf) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
				  "FCF reselected, proceeding with FLOGI.\n");
			break;
		}
		msleep(500);
		retries--;
	}

	if (retries < 1) {
		QEDF_ERR(&(qedf->dbg_ctx), "Exhausted retries waiting for "
			 "FCF selection.\n");
		return;
	}

	rval = qedf_send_flogi(qedf);
	if (rval)
		return;

	/* Wait for FLOGI completion before proceeding with sending ADISCs */
	i = wait_for_completion_timeout(&qedf->flogi_compl,
					qedf->lport->r_a_tov);
	if (i == 0) {
		QEDF_ERR(&(qedf->dbg_ctx), "FLOGI timed out.\n");
		return;
	}

	/*
	 * Call lport->tt.rport_login which will cause libfc to send an
	 * ADISC since the rport is in state ready.
	 */
	mutex_lock(&lport->disc.disc_mutex);
	list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
		if (kref_get_unless_zero(&rdata->kref)) {
			fc_rport_login(rdata);
			kref_put(&rdata->kref, fc_rport_destroy);
		}
	}
	mutex_unlock(&lport->disc.disc_mutex);
}

static void qedf_update_link_speed(struct qedf_ctx *qedf,
	struct qed_link_output *link)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(sup_caps);
	struct fc_lport *lport = qedf->lport;

	lport->link_speed = FC_PORTSPEED_UNKNOWN;
	lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;

	/* Set fc_host link speed */
	switch (link->speed) {
	case 10000:
		lport->link_speed = FC_PORTSPEED_10GBIT;
		break;
	case 25000:
		lport->link_speed = FC_PORTSPEED_25GBIT;
		break;
	case 40000:
		lport->link_speed = FC_PORTSPEED_40GBIT;
		break;
	case 50000:
		lport->link_speed = FC_PORTSPEED_50GBIT;
		break;
	case 100000:
		lport->link_speed = FC_PORTSPEED_100GBIT;
		break;
	case 20000:
		lport->link_speed = FC_PORTSPEED_20GBIT;
		break;
	default:
		lport->link_speed = FC_PORTSPEED_UNKNOWN;
		break;
	}

	/*
	 * Set supported link speed by querying the supported
	 * capabilities of the link.
	 */

	phylink_zero(sup_caps);
	phylink_set(sup_caps, 10000baseT_Full);
	phylink_set(sup_caps, 10000baseKX4_Full);
	phylink_set(sup_caps, 10000baseR_FEC);
	phylink_set(sup_caps, 10000baseCR_Full);
	phylink_set(sup_caps, 10000baseSR_Full);
	phylink_set(sup_caps, 10000baseLR_Full);
	phylink_set(sup_caps, 10000baseLRM_Full);
	phylink_set(sup_caps, 10000baseKR_Full);

	if (linkmode_intersects(link->supported_caps, sup_caps))
		lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;

	phylink_zero(sup_caps);
	phylink_set(sup_caps, 25000baseKR_Full);
	phylink_set(sup_caps, 25000baseCR_Full);
	phylink_set(sup_caps, 25000baseSR_Full);

	if (linkmode_intersects(link->supported_caps, sup_caps))
		lport->link_supported_speeds |= FC_PORTSPEED_25GBIT;

	phylink_zero(sup_caps);
	phylink_set(sup_caps, 40000baseLR4_Full);
	phylink_set(sup_caps, 40000baseKR4_Full);
	phylink_set(sup_caps, 40000baseCR4_Full);
	phylink_set(sup_caps, 40000baseSR4_Full);

	if (linkmode_intersects(link->supported_caps, sup_caps))
		lport->link_supported_speeds |= FC_PORTSPEED_40GBIT;

	phylink_zero(sup_caps);
	phylink_set(sup_caps, 50000baseKR2_Full);
	phylink_set(sup_caps, 50000baseCR2_Full);
	phylink_set(sup_caps, 50000baseSR2_Full);

	if (linkmode_intersects(link->supported_caps, sup_caps))
		lport->link_supported_speeds |= FC_PORTSPEED_50GBIT;

	phylink_zero(sup_caps);
	phylink_set(sup_caps, 100000baseKR4_Full);
	phylink_set(sup_caps, 100000baseSR4_Full);
	phylink_set(sup_caps, 100000baseCR4_Full);
	phylink_set(sup_caps, 100000baseLR4_ER4_Full);

	if (linkmode_intersects(link->supported_caps, sup_caps))
		lport->link_supported_speeds |= FC_PORTSPEED_100GBIT;

	phylink_zero(sup_caps);
	phylink_set(sup_caps, 20000baseKR2_Full);

	if (linkmode_intersects(link->supported_caps, sup_caps))
		lport->link_supported_speeds |= FC_PORTSPEED_20GBIT;

	if (lport->host && lport->host->shost_data)
		fc_host_supported_speeds(lport->host) =
			lport->link_supported_speeds;
}

static void qedf_bw_update(void *dev)
{
	struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
	struct qed_link_output link;

	/* Get the latest status of the link */
	qed_ops->common->get_link(qedf->cdev, &link);

	if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Ignoring link update, driver is unloading.\n");
		return;
	}

	if (link.link_up) {
		if (atomic_read(&qedf->link_state) == QEDF_LINK_UP)
			qedf_update_link_speed(qedf, &link);
		else
			QEDF_ERR(&qedf->dbg_ctx,
				 "Ignoring bw update, link is down.\n");

	} else {
		QEDF_ERR(&qedf->dbg_ctx, "link_up is not set.\n");
	}
}

static void qedf_link_update(void *dev, struct qed_link_output *link)
{
	struct qedf_ctx *qedf = (struct qedf_ctx *)dev;

	/*
	 * Prevent a race where we're removing the module and we get a link
	 * update from qed.
	 */
	if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Ignoring link update, driver is unloading.\n");
		return;
	}

	if (link->link_up) {
		if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
			QEDF_INFO((&qedf->dbg_ctx), QEDF_LOG_DISC,
				  "Ignoring link up event as link is already up.\n");
			return;
		}
		QEDF_ERR(&(qedf->dbg_ctx), "LINK UP (%d GB/s).\n",
			 link->speed / 1000);

		/* Cancel any pending link down work */
		cancel_delayed_work(&qedf->link_update);

		atomic_set(&qedf->link_state, QEDF_LINK_UP);
		qedf_update_link_speed(qedf, link);

		if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE ||
		    qedf_dcbx_no_wait) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
				  "DCBx done.\n");
			if (atomic_read(&qedf->link_down_tmo_valid) > 0)
				queue_delayed_work(qedf->link_update_wq,
						   &qedf->link_recovery, 0);
			else
				queue_delayed_work(qedf->link_update_wq,
						   &qedf->link_update, 0);
			atomic_set(&qedf->link_down_tmo_valid, 0);
		}

	} else {
		QEDF_ERR(&(qedf->dbg_ctx), "LINK DOWN.\n");

		atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
		atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
		/*
		 * Flag that we're waiting for the link to come back up before
		 * informing the fcoe layer of the event.
		 */
		if (qedf_link_down_tmo > 0) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
				  "Starting link down tmo.\n");
			atomic_set(&qedf->link_down_tmo_valid, 1);
		}
		qedf->vlan_id = 0;
		qedf_update_link_speed(qedf, link);
		queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
				   qedf_link_down_tmo * HZ);
	}
}

static void qedf_dcbx_handler(void *dev, struct qed_dcbx_get *get, u32 mib_type)
{
	struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
	u8 tmp_prio;

	QEDF_ERR(&(qedf->dbg_ctx), "DCBx event valid=%d enabled=%d fcoe "
		 "prio=%d.\n", get->operational.valid, get->operational.enabled,
		 get->operational.app_prio.fcoe);

	if (get->operational.enabled && get->operational.valid) {
		/* If DCBX was already negotiated on link up then just exit */
		if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
				  "DCBX already set on link up.\n");
			return;
		}

		atomic_set(&qedf->dcbx, QEDF_DCBX_DONE);

		/*
		 * Set the 802.1q priority in the following manner:
		 *
		 * 1. If a modparam is set, use that.
		 * 2. If the value is not between 0..7, use the default.
		 * 3. Otherwise, use the priority we get from the DCBX app tag.
		 */
		tmp_prio = get->operational.app_prio.fcoe;
		if (qedf_default_prio > -1)
			qedf->prio = qedf_default_prio;
		else if (tmp_prio > 7) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
				  "FIP/FCoE prio %d out of range, setting to %d.\n",
				  tmp_prio, QEDF_DEFAULT_PRIO);
			qedf->prio = QEDF_DEFAULT_PRIO;
		} else
			qedf->prio = tmp_prio;

		if (atomic_read(&qedf->link_state) == QEDF_LINK_UP &&
		    !qedf_dcbx_no_wait) {
			if (atomic_read(&qedf->link_down_tmo_valid) > 0)
				queue_delayed_work(qedf->link_update_wq,
						   &qedf->link_recovery, 0);
			else
				queue_delayed_work(qedf->link_update_wq,
						   &qedf->link_update, 0);
			atomic_set(&qedf->link_down_tmo_valid, 0);
		}
	}
}

static u32 qedf_get_login_failures(void *cookie)
{
	struct qedf_ctx *qedf;

	qedf = (struct qedf_ctx *)cookie;
	return qedf->flogi_failed;
}

static struct qed_fcoe_cb_ops qedf_cb_ops = {
	{
		.link_update = qedf_link_update,
		.bw_update = qedf_bw_update,
		.schedule_recovery_handler = qedf_schedule_recovery_handler,
		.dcbx_aen = qedf_dcbx_handler,
		.get_generic_tlv_data = qedf_get_generic_tlv_data,
		.get_protocol_tlv_data = qedf_get_protocol_tlv_data,
		.schedule_hw_err_handler = qedf_schedule_hw_err_handler,
	}
};

/*
 * Various transport templates.
 */

static struct scsi_transport_template *qedf_fc_transport_template;
static struct scsi_transport_template *qedf_fc_vport_transport_template;

/*
 * SCSI EH handlers
 */
static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_lport *lport;
	struct qedf_ctx *qedf;
	struct qedf_ioreq *io_req;
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct fc_rport_priv *rdata;
	struct qedf_rport *fcport = NULL;
	int rc = FAILED;
	int wait_count = 100;
	int refcount = 0;
	int rval;
	int got_ref = 0;

	lport = shost_priv(sc_cmd->device->host);
	qedf = (struct qedf_ctx *)lport_priv(lport);

	/* rport and tgt are allocated together, so tgt should be non-NULL */
	fcport = (struct qedf_rport *)&rp[1];
	rdata = fcport->rdata;
	if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
		QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd);
		rc = SUCCESS;
		goto out;
	}

	io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
	if (!io_req) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "sc_cmd not queued with lld, sc_cmd=%p op=0x%02x, port_id=%06x\n",
			 sc_cmd, sc_cmd->cmnd[0],
			 rdata->ids.port_id);
		rc = SUCCESS;
		goto drop_rdata_kref;
	}

	rval = kref_get_unless_zero(&io_req->refcount);	/* ID: 005 */
	if (rval)
		got_ref = 1;

	/* If we got a valid io_req, confirm it belongs to this sc_cmd. */
	if (!rval || io_req->sc_cmd != sc_cmd) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Freed/Incorrect io_req, io_req->sc_cmd=%p, sc_cmd=%p, port_id=%06x, bailing out.\n",
			 io_req->sc_cmd, sc_cmd, rdata->ids.port_id);

		goto drop_rdata_kref;
	}

	if (fc_remote_port_chkready(rport)) {
		refcount = kref_read(&io_req->refcount);
		QEDF_ERR(&qedf->dbg_ctx,
			 "rport not ready, io_req=%p, xid=0x%x sc_cmd=%p op=0x%02x, refcount=%d, port_id=%06x\n",
			 io_req, io_req->xid, sc_cmd, sc_cmd->cmnd[0],
			 refcount, rdata->ids.port_id);

		goto drop_rdata_kref;
	}

	rc = fc_block_scsi_eh(sc_cmd);
	if (rc)
		goto drop_rdata_kref;

	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Connection uploading, xid=0x%x, port_id=%06x\n",
			 io_req->xid, rdata->ids.port_id);
		while (io_req->sc_cmd && (wait_count != 0)) {
			msleep(100);
			wait_count--;
		}
		if (wait_count) {
			QEDF_ERR(&qedf->dbg_ctx, "ABTS succeeded\n");
			rc = SUCCESS;
		} else {
			QEDF_ERR(&qedf->dbg_ctx, "ABTS failed\n");
			rc = FAILED;
		}
		goto drop_rdata_kref;
	}

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&qedf->dbg_ctx, "link not ready.\n");
		goto drop_rdata_kref;
	}

	QEDF_ERR(&qedf->dbg_ctx,
		 "Aborting io_req=%p sc_cmd=%p xid=0x%x fp_idx=%d, port_id=%06x.\n",
		 io_req, sc_cmd, io_req->xid, io_req->fp_idx,
		 rdata->ids.port_id);

	if (qedf->stop_io_on_error) {
		qedf_stop_all_io(qedf);
		rc = SUCCESS;
		goto drop_rdata_kref;
	}

	init_completion(&io_req->abts_done);
	rval = qedf_initiate_abts(io_req, true);
	if (rval) {
		QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
		/*
		 * If we fail to queue the ABTS then return this command to
		 * the SCSI layer as it will own and free the xid.
		 */
		rc = SUCCESS;
		qedf_scsi_done(qedf, io_req, DID_ERROR);
		goto drop_rdata_kref;
	}

	wait_for_completion(&io_req->abts_done);

	if (io_req->event == QEDF_IOREQ_EV_ABORT_SUCCESS ||
	    io_req->event == QEDF_IOREQ_EV_ABORT_FAILED ||
	    io_req->event == QEDF_IOREQ_EV_CLEANUP_SUCCESS) {
		/*
		 * If we get a response to the abort this is success from
		 * the perspective that all references to the command have
		 * been removed from the driver and firmware.
		 */
		rc = SUCCESS;
	} else {
		/* If the abort and cleanup failed then return a failure */
		rc = FAILED;
	}

	if (rc == SUCCESS)
		QEDF_ERR(&(qedf->dbg_ctx), "ABTS succeeded, xid=0x%x.\n",
			 io_req->xid);
	else
		QEDF_ERR(&(qedf->dbg_ctx), "ABTS failed, xid=0x%x.\n",
			 io_req->xid);

drop_rdata_kref:
	kref_put(&rdata->kref, fc_rport_destroy);
out:
	if (got_ref)
		kref_put(&io_req->refcount, qedf_release_cmd);
	return rc;
}

static int qedf_eh_target_reset(struct scsi_cmnd *sc_cmd)
{
	QEDF_ERR(NULL, "%d:0:%d:%lld: TARGET RESET Issued...",
		 sc_cmd->device->host->host_no, sc_cmd->device->id,
		 sc_cmd->device->lun);
	return qedf_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
}

static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd)
{
	QEDF_ERR(NULL, "%d:0:%d:%lld: LUN RESET Issued... ",
		 sc_cmd->device->host->host_no, sc_cmd->device->id,
		 sc_cmd->device->lun);
	return qedf_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
}

bool qedf_wait_for_upload(struct qedf_ctx *qedf)
{
	struct qedf_rport *fcport = NULL;
	int wait_cnt = 120;
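	/* 120 iterations of the 500 ms sleep below = up to ~60 s total wait */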

	while (wait_cnt--) {
		if (atomic_read(&qedf->num_offloads))
			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
				  "Waiting for all uploads to complete num_offloads = 0x%x.\n",
				  atomic_read(&qedf->num_offloads));
		else
			return true;
		msleep(500);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
		if (fcport && test_bit(QEDF_RPORT_SESSION_READY,
				       &fcport->flags)) {
			if (fcport->rdata)
				QEDF_ERR(&qedf->dbg_ctx,
					 "Waiting for fcport %p portid=%06x.\n",
					 fcport, fcport->rdata->ids.port_id);
		} else {
			QEDF_ERR(&qedf->dbg_ctx,
				 "Waiting for fcport %p.\n", fcport);
		}
	}
	rcu_read_unlock();
	return false;
}

/* Performs soft reset of qedf_ctx by simulating a link down/up */
void qedf_ctx_soft_reset(struct fc_lport *lport)
{
	struct qedf_ctx *qedf;
	struct qed_link_output if_link;

	if (lport->vport) {
		QEDF_ERR(NULL, "Cannot issue host reset on NPIV port.\n");
		return;
	}

	qedf = lport_priv(lport);

	qedf->flogi_pending = 0;
	/* For host reset, essentially do a soft link up/down */
	atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
		  "Queuing link down work.\n");
	queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
			   0);

	if (qedf_wait_for_upload(qedf) == false) {
		QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n");
		WARN_ON(atomic_read(&qedf->num_offloads));
	}

	/* Before setting link up query physical link state */
	qed_ops->common->get_link(qedf->cdev, &if_link);
	/* Bail if the physical link is not up */
	if (!if_link.link_up) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
			  "Physical link is not up.\n");
		return;
	}
	/* Flush and wait to make sure link down is processed */
	flush_delayed_work(&qedf->link_update);
	msleep(500);

	atomic_set(&qedf->link_state, QEDF_LINK_UP);
	qedf->vlan_id = 0;
	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
		  "Queue link up work.\n");
	queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
			   0);
}

/* Reset the host by gracefully logging out and then logging back in */
static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd)
{
	struct fc_lport *lport;
	struct qedf_ctx *qedf;

	lport = shost_priv(sc_cmd->device->host);
	qedf = lport_priv(lport);

	if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN ||
	    test_bit(QEDF_UNLOADING, &qedf->flags))
		return FAILED;

	QEDF_ERR(&(qedf->dbg_ctx), "HOST RESET Issued...");

	qedf_ctx_soft_reset(lport);

	return SUCCESS;
}

static int qedf_slave_configure(struct scsi_device *sdev)
{
	if (qedf_queue_depth)
		scsi_change_queue_depth(sdev, qedf_queue_depth);

	return 0;
}

static struct scsi_host_template qedf_host_template = {
	.module = THIS_MODULE,
	.name = QEDF_MODULE_NAME,
	.this_id = -1,
	.cmd_per_lun = 32,
	.max_sectors = 0xffff,
	.queuecommand = qedf_queuecommand,
	.shost_attrs = qedf_host_attrs,
	.eh_abort_handler = qedf_eh_abort,
	.eh_device_reset_handler = qedf_eh_device_reset, /* lun reset */
	.eh_target_reset_handler = qedf_eh_target_reset, /* target reset */
	.eh_host_reset_handler = qedf_eh_host_reset,
	.slave_configure = qedf_slave_configure,
	.dma_boundary = QED_HW_DMA_BOUNDARY,
	.sg_tablesize = QEDF_MAX_BDS_PER_CMD,
	.can_queue = FCOE_PARAMS_NUM_TASKS,
	.change_queue_depth = scsi_change_queue_depth,
};

static int qedf_get_paged_crc_eof(struct sk_buff *skb, int tlen)
{
	int rc;

	spin_lock(&qedf_global_lock);
	rc = fcoe_get_paged_crc_eof(skb, tlen, &qedf_global);
	spin_unlock(&qedf_global_lock);

	return rc;
}

static struct qedf_rport *qedf_fcport_lookup(struct qedf_ctx *qedf, u32 port_id)
{
	struct qedf_rport *fcport;
	struct fc_rport_priv *rdata;

	rcu_read_lock();
	list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
		rdata = fcport->rdata;
		if (rdata == NULL)
			continue;
		if (rdata->ids.port_id == port_id) {
			rcu_read_unlock();
			return fcport;
		}
	}
	rcu_read_unlock();

	/* Return NULL to caller to let them know fcport was not found */
	return NULL;
}

/* Transmits an ELS frame over an offloaded session */
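/* Returns 1 if the frame was consumed by the offloaded path, else 0. */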
static int qedf_xmit_l2_frame(struct qedf_rport *fcport, struct fc_frame *fp)
{
	struct fc_frame_header *fh;
	int rc = 0;

	fh = fc_frame_header_get(fp);
	if ((fh->fh_type == FC_TYPE_ELS) &&
	    (fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
		switch (fc_frame_payload_op(fp)) {
		case ELS_ADISC:
			qedf_send_adisc(fcport, fp);
			rc = 1;
			break;
		}
	}

	return rc;
}

/*
 * qedf_xmit - qedf FCoE frame transmit function
 */
static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
{
	struct fc_lport *base_lport;
	struct qedf_ctx *qedf;
	struct ethhdr *eh;
	struct fcoe_crc_eof *cp;
	struct sk_buff *skb;
	struct fc_frame_header *fh;
	struct fcoe_hdr *hp;
	u8 sof, eof;
	u32 crc;
	unsigned int hlen, tlen, elen;
	int wlen;
	struct fc_stats *stats;
	struct fc_lport *tmp_lport;
	struct fc_lport *vn_port = NULL;
	struct qedf_rport *fcport;
	int rc;
	u16 vlan_tci = 0;

	qedf = (struct qedf_ctx *)lport_priv(lport);

	fh = fc_frame_header_get(fp);
	skb = fp_skb(fp);

	/* Filter out traffic to other NPIV ports on the same host */
	if (lport->vport)
		base_lport = shost_priv(vport_to_shost(lport->vport));
	else
		base_lport = lport;

	/* Flag if the destination is the base port */
	if (base_lport->port_id == ntoh24(fh->fh_d_id)) {
		vn_port = base_lport;
	} else {
		/* Go through the list of vports attached to the base_lport
		 * and see if we have a match with the destination address.
		 */
		list_for_each_entry(tmp_lport, &base_lport->vports, list) {
			if (tmp_lport->port_id == ntoh24(fh->fh_d_id)) {
				vn_port = tmp_lport;
				break;
			}
		}
	}
	if (vn_port && ntoh24(fh->fh_d_id) != FC_FID_FLOGI) {
		struct fc_rport_priv *rdata = NULL;

		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
			  "Dropping FCoE frame to %06x.\n", ntoh24(fh->fh_d_id));
		kfree_skb(skb);
		rdata = fc_rport_lookup(lport, ntoh24(fh->fh_d_id));
		if (rdata) {
			rdata->retries = lport->max_rport_retry_count;
			kref_put(&rdata->kref, fc_rport_destroy);
		}
		return -EINVAL;
	}
	/* End NPIV filtering */

	if (!qedf->ctlr.sel_fcf) {
		kfree_skb(skb);
		return 0;
	}

	if (!test_bit(QEDF_LL2_STARTED, &qedf->flags)) {
		QEDF_WARN(&(qedf->dbg_ctx), "LL2 not started\n");
		kfree_skb(skb);
		return 0;
	}

	if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
		QEDF_WARN(&(qedf->dbg_ctx), "qedf link down\n");
		kfree_skb(skb);
		return 0;
	}

	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
		if (fcoe_ctlr_els_send(&qedf->ctlr, lport, skb))
			return 0;
	}

	/* Check to see if this needs to be sent on an offloaded session */
	fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id));

	if (fcport && test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		rc = qedf_xmit_l2_frame(fcport, fp);
		/*
		 * If the frame was successfully sent over the middle path
		 * then do not try to also send it over the LL2 path.
		 */
		if (rc)
			return 0;
	}

	sof = fr_sof(fp);
	eof = fr_eof(fp);

	elen = sizeof(struct ethhdr);
	hlen = sizeof(struct fcoe_hdr);
	tlen = sizeof(struct fcoe_crc_eof);
	wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
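
	/*
	 * On-wire layout being assembled below (illustrative, sizes per the
	 * lengths computed above):
	 *
	 *   [ ethhdr | fcoe_hdr (SOF) | FC frame | fcoe_crc_eof (CRC+EOF) ]
	 *     elen     hlen             payload    tlen
	 */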

	skb->ip_summed = CHECKSUM_NONE;
	crc = fcoe_fc_crc(fp);

	/* copy port crc and eof to the skb buff */
	if (skb_is_nonlinear(skb)) {
		skb_frag_t *frag;

		if (qedf_get_paged_crc_eof(skb, tlen)) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
		cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag);
	} else {
		cp = skb_put(skb, tlen);
	}

	memset(cp, 0, sizeof(*cp));
	cp->fcoe_eof = eof;
	cp->fcoe_crc32 = cpu_to_le32(~crc);
	if (skb_is_nonlinear(skb)) {
		kunmap_atomic(cp);
		cp = NULL;
	}

	/* adjust skb network/transport offsets to match mac/fcoe/port */
	skb_push(skb, elen + hlen);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb->mac_len = elen;
	skb->protocol = htons(ETH_P_FCOE);

	/*
	 * Add VLAN tag to non-offload FCoE frame based on current stored VLAN
	 * for FIP/FCoE traffic.
	 */
	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), qedf->vlan_id);

	/* fill up mac and fcoe headers */
	eh = eth_hdr(skb);
	eh->h_proto = htons(ETH_P_FCOE);
	if (qedf->ctlr.map_dest)
		fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
	else
		/* insert GW address */
		ether_addr_copy(eh->h_dest, qedf->ctlr.dest_addr);

	/* Set the source MAC address */
	ether_addr_copy(eh->h_source, qedf->data_src_addr);

	hp = (struct fcoe_hdr *)(eh + 1);
	memset(hp, 0, sizeof(*hp));
	if (FC_FCOE_VER)
		FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
	hp->fcoe_sof = sof;

	/* update tx stats */
	stats = per_cpu_ptr(lport->stats, get_cpu());
	stats->TxFrames++;
	stats->TxWords += wlen;
	put_cpu();

	/* Get VLAN ID from skb for printing purposes */
	__vlan_hwaccel_get_tag(skb, &vlan_tci);

	/* send down to lld */
	fr_dev(fp) = lport;
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame send: "
		  "src=%06x dest=%06x r_ctl=%x type=%x vlan=%04x.\n",
		  ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl,
		  fh->fh_type, vlan_tci);
	if (qedf_dump_frames)
		print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16,
			       1, skb->data, skb->len, false);
	rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);
	if (rc) {
		QEDF_ERR(&qedf->dbg_ctx, "start_xmit failed rc = %d.\n", rc);
		kfree_skb(skb);
		return rc;
	}

	return 0;
}

static int qedf_alloc_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport)
{
	int rval = 0;
	u32 *pbl;
	dma_addr_t page;
	int num_pages;

	/* Calculate appropriate queue and PBL sizes */
	fcport->sq_mem_size = SQ_NUM_ENTRIES * sizeof(struct fcoe_wqe);
	fcport->sq_mem_size = ALIGN(fcport->sq_mem_size, QEDF_PAGE_SIZE);
	fcport->sq_pbl_size = (fcport->sq_mem_size / QEDF_PAGE_SIZE) *
	    sizeof(void *);
	fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE;

	fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
					&fcport->sq_dma, GFP_KERNEL);
	if (!fcport->sq) {
		QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n");
		rval = 1;
		goto out;
	}

	fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
					    fcport->sq_pbl_size,
					    &fcport->sq_pbl_dma, GFP_KERNEL);
	if (!fcport->sq_pbl) {
		QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n");
		rval = 1;
		goto out_free_sq;
	}

	/* Create PBL */
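	/*
	 * Each PBL entry is one page address split into two 32-bit words,
	 * low word first. Illustrative example: page 0x0000000123456000 is
	 * stored as pbl[0] = 0x23456000, pbl[1] = 0x00000001.
	 */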
	num_pages = fcport->sq_mem_size / QEDF_PAGE_SIZE;
	page = fcport->sq_dma;
	pbl = (u32 *)fcport->sq_pbl;

	while (num_pages--) {
		*pbl = U64_LO(page);
		pbl++;
		*pbl = U64_HI(page);
		pbl++;
		page += QEDF_PAGE_SIZE;
	}

	return rval;

out_free_sq:
	dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size, fcport->sq,
			  fcport->sq_dma);
out:
	return rval;
}

static void qedf_free_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport)
{
	if (fcport->sq_pbl)
		dma_free_coherent(&qedf->pdev->dev, fcport->sq_pbl_size,
				  fcport->sq_pbl, fcport->sq_pbl_dma);
	if (fcport->sq)
		dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
				  fcport->sq, fcport->sq_dma);
}

static int qedf_offload_connection(struct qedf_ctx *qedf,
	struct qedf_rport *fcport)
{
	struct qed_fcoe_params_offload conn_info;
	u32 port_id;
	int rval;
	uint16_t total_sqe = (fcport->sq_mem_size / sizeof(struct fcoe_wqe));

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offloading connection "
		  "portid=%06x.\n", fcport->rdata->ids.port_id);
	rval = qed_ops->acquire_conn(qedf->cdev, &fcport->handle,
	    &fcport->fw_cid, &fcport->p_doorbell);
	if (rval) {
		QEDF_WARN(&(qedf->dbg_ctx), "Could not acquire connection "
			  "for portid=%06x.\n", fcport->rdata->ids.port_id);
		rval = 1; /* For some reason qed returns 0 on failure here */
		goto out;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "portid=%06x "
		  "fw_cid=%08x handle=%d.\n", fcport->rdata->ids.port_id,
		  fcport->fw_cid, fcport->handle);

	memset(&conn_info, 0, sizeof(struct qed_fcoe_params_offload));

	/* Fill in the offload connection info */
	conn_info.sq_pbl_addr = fcport->sq_pbl_dma;

	conn_info.sq_curr_page_addr = (dma_addr_t)(*(u64 *)fcport->sq_pbl);
	conn_info.sq_next_page_addr =
	    (dma_addr_t)(*(u64 *)(fcport->sq_pbl + 8));

	/* Need to use our FCoE MAC for the offload session */
	ether_addr_copy(conn_info.src_mac, qedf->data_src_addr);

	ether_addr_copy(conn_info.dst_mac, qedf->ctlr.dest_addr);

	conn_info.tx_max_fc_pay_len = fcport->rdata->maxframe_size;
	conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov;
	conn_info.rec_tov_timer_val = 3; /* I think this is what E3 was */
	conn_info.rx_max_fc_pay_len = fcport->rdata->maxframe_size;

	/* Set VLAN data */
	conn_info.vlan_tag = qedf->vlan_id <<
	    FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT;
	conn_info.vlan_tag |=
	    qedf->prio << FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT;
	conn_info.flags |= (FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK <<
			    FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT);
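	/*
	 * vlan_tag is composed like the 802.1Q TCI built in
	 * qedf_set_vlan_id() (VID plus priority bits), and B_VLAN_FLAG
	 * presumably marks the tag as valid for the offloaded connection.
	 */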

	/* Set host port source id */
	port_id = fc_host_port_id(qedf->lport->host);
	fcport->sid = port_id;
	conn_info.s_id.addr_hi = (port_id & 0x000000FF);
	conn_info.s_id.addr_mid = (port_id & 0x0000FF00) >> 8;
	conn_info.s_id.addr_lo = (port_id & 0x00FF0000) >> 16;
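	/*
	 * Byte packing example (illustrative): FC_ID 0x112233 yields
	 * addr_hi = 0x33, addr_mid = 0x22, addr_lo = 0x11; the d_id below
	 * is packed the same way.
	 */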

	conn_info.max_conc_seqs_c3 = fcport->rdata->max_seq;

	/* Set remote port destination id */
	port_id = fcport->rdata->rport->port_id;
	conn_info.d_id.addr_hi = (port_id & 0x000000FF);
	conn_info.d_id.addr_mid = (port_id & 0x0000FF00) >> 8;
	conn_info.d_id.addr_lo = (port_id & 0x00FF0000) >> 16;

	conn_info.def_q_idx = 0; /* Default index for send queue? */

	/* Set FC-TAPE specific flags if needed */
	if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN,
			  "Enable CONF, REC for portid=%06x.\n",
			  fcport->rdata->ids.port_id);
		conn_info.flags |= 1 <<
		    FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT;
		conn_info.flags |=
		    ((fcport->rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
		    FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT;
	}

	rval = qed_ops->offload_conn(qedf->cdev, fcport->handle, &conn_info);
	if (rval) {
		QEDF_WARN(&(qedf->dbg_ctx), "Could not offload connection "
			  "for portid=%06x.\n", fcport->rdata->ids.port_id);
		goto out_free_conn;
	} else
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offload "
			  "succeeded portid=%06x total_sqe=%d.\n",
			  fcport->rdata->ids.port_id, total_sqe);

	spin_lock_init(&fcport->rport_lock);
	atomic_set(&fcport->free_sqes, total_sqe);
	return 0;
out_free_conn:
	qed_ops->release_conn(qedf->cdev, fcport->handle);
out:
	return rval;
}

#define QEDF_TERM_BUFF_SIZE	10
static void qedf_upload_connection(struct qedf_ctx *qedf,
	struct qedf_rport *fcport)
{
	void *term_params;
	dma_addr_t term_params_dma;

	/* Term params needs to be a DMA coherent buffer as qed shares the
	 * physical DMA address with the firmware. The buffer may be used in
	 * the receive path, so we may eventually have to move this.
	 */
	term_params = dma_alloc_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE,
					 &term_params_dma, GFP_KERNEL);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Uploading connection "
		  "port_id=%06x.\n", fcport->rdata->ids.port_id);

	qed_ops->destroy_conn(qedf->cdev, fcport->handle, term_params_dma);
	qed_ops->release_conn(qedf->cdev, fcport->handle);

	dma_free_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE, term_params,
			  term_params_dma);
}

static void qedf_cleanup_fcport(struct qedf_ctx *qedf,
	struct qedf_rport *fcport)
{
	struct fc_rport_priv *rdata = fcport->rdata;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Cleaning up portid=%06x.\n",
		  fcport->rdata->ids.port_id);

	/* Flush any remaining i/o's before we upload the connection */
	qedf_flush_active_ios(fcport, -1);

	if (test_and_clear_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))
		qedf_upload_connection(qedf, fcport);
	qedf_free_sq(qedf, fcport);
	fcport->rdata = NULL;
	fcport->qedf = NULL;
	kref_put(&rdata->kref, fc_rport_destroy);
}

/*
 * This event_callback is called after successful completion of libfc
 * initiated target login. qedf can proceed with initiating the session
 * establishment.
 */
static void qedf_rport_event_handler(struct fc_lport *lport,
				struct fc_rport_priv *rdata,
				enum fc_rport_event event)
{
	struct qedf_ctx *qedf = lport_priv(lport);
	struct fc_rport *rport = rdata->rport;
	struct fc_rport_libfc_priv *rp;
	struct qedf_rport *fcport;
	u32 port_id;
	int rval;
	unsigned long flags;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "event = %d, "
		  "port_id = 0x%x\n", event, rdata->ids.port_id);

	switch (event) {
	case RPORT_EV_READY:
		if (!rport) {
			QEDF_WARN(&(qedf->dbg_ctx), "rport is NULL.\n");
			break;
		}

		rp = rport->dd_data;
		fcport = (struct qedf_rport *)&rp[1];
		fcport->qedf = qedf;

		if (atomic_read(&qedf->num_offloads) >= QEDF_MAX_SESSIONS) {
			QEDF_ERR(&(qedf->dbg_ctx), "Not offloading "
				 "portid=0x%x as max number of offloaded sessions "
				 "reached.\n", rdata->ids.port_id);
			return;
		}

		/*
		 * Don't try to offload the session again. Can happen when we
		 * get an ADISC.
		 */
		if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
			QEDF_WARN(&(qedf->dbg_ctx), "Session already "
				  "offloaded, portid=0x%x.\n",
				  rdata->ids.port_id);
			return;
		}

		if (rport->port_id == FC_FID_DIR_SERV) {
			/*
			 * A qedf_rport structure doesn't exist for the
			 * directory server.
			 * We should not come here, as lport will
			 * take care of fabric login.
			 */
			QEDF_WARN(&(qedf->dbg_ctx), "rport struct does not "
				  "exist for dir server port_id=%x\n",
				  rdata->ids.port_id);
			break;
		}

		if (rdata->spp_type != FC_TYPE_FCP) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
				  "Not offloading since spp type isn't FCP\n");
			break;
		}
		if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
				  "Not FCP target so not offloading\n");
			break;
		}

		/* Initial reference held on entry, so this can't fail */
		kref_get(&rdata->kref);
		fcport->rdata = rdata;
		fcport->rport = rport;

		rval = qedf_alloc_sq(qedf, fcport);
		if (rval) {
			qedf_cleanup_fcport(qedf, fcport);
			break;
		}

		/* Set device type */
		if (rdata->flags & FC_RP_FLAGS_RETRY &&
		    rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET &&
		    !(rdata->ids.roles & FC_RPORT_ROLE_FCP_INITIATOR)) {
			fcport->dev_type = QEDF_RPORT_TYPE_TAPE;
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
				  "portid=%06x is a TAPE device.\n",
				  rdata->ids.port_id);
		} else {
			fcport->dev_type = QEDF_RPORT_TYPE_DISK;
		}

		rval = qedf_offload_connection(qedf, fcport);
		if (rval) {
			qedf_cleanup_fcport(qedf, fcport);
			break;
		}

		/* Add fcport to the qedf_ctx list of offloaded ports */
		spin_lock_irqsave(&qedf->hba_lock, flags);
		list_add_rcu(&fcport->peers, &qedf->fcports);
		spin_unlock_irqrestore(&qedf->hba_lock, flags);

		/*
		 * Set the session ready bit to let everyone know that this
		 * connection is ready for I/O.
		 */
		set_bit(QEDF_RPORT_SESSION_READY, &fcport->flags);
		atomic_inc(&qedf->num_offloads);

		break;
	case RPORT_EV_LOGO:
	case RPORT_EV_FAILED:
	case RPORT_EV_STOP:
		port_id = rdata->ids.port_id;
		if (port_id == FC_FID_DIR_SERV)
			break;

		if (rdata->spp_type != FC_TYPE_FCP) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
				  "No action since spp type isn't FCP\n");
			break;
		}
		if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
				  "Not FCP target so no action\n");
			break;
		}

		if (!rport) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
				  "port_id=%x - rport not created yet!\n",
				  port_id);
			break;
		}
		rp = rport->dd_data;
		/*
		 * Perform session upload. Note that rdata->peers is already
		 * removed from disc->rports list before we get this event.
		 */
		fcport = (struct qedf_rport *)&rp[1];

		spin_lock_irqsave(&fcport->rport_lock, flags);
		/* Only free this fcport if it is offloaded already */
		if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) &&
		    !test_bit(QEDF_RPORT_UPLOADING_CONNECTION,
			      &fcport->flags)) {
			set_bit(QEDF_RPORT_UPLOADING_CONNECTION,
				&fcport->flags);
			spin_unlock_irqrestore(&fcport->rport_lock, flags);
			qedf_cleanup_fcport(qedf, fcport);
			/*
			 * Remove fcport from the qedf_ctx list of offloaded
			 * ports.
			 */
			spin_lock_irqsave(&qedf->hba_lock, flags);
			list_del_rcu(&fcport->peers);
			spin_unlock_irqrestore(&qedf->hba_lock, flags);

			clear_bit(QEDF_RPORT_UPLOADING_CONNECTION,
				  &fcport->flags);
			atomic_dec(&qedf->num_offloads);
		} else {
			spin_unlock_irqrestore(&fcport->rport_lock, flags);
		}
		break;

	case RPORT_EV_NONE:
		break;
	}
}

static void qedf_abort_io(struct fc_lport *lport)
{
	/* NO-OP but need to fill in the template */
}

static void qedf_fcp_cleanup(struct fc_lport *lport)
{
	/*
	 * NO-OP but need to fill in template to prevent a NULL
	 * function pointer dereference during link down. I/Os
	 * will be flushed when port is uploaded.
	 */
}

static struct libfc_function_template qedf_lport_template = {
	.frame_send = qedf_xmit,
	.fcp_abort_io = qedf_abort_io,
	.fcp_cleanup = qedf_fcp_cleanup,
	.rport_event_callback = qedf_rport_event_handler,
	.elsct_send = qedf_elsct_send,
};

static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf)
{
	fcoe_ctlr_init(&qedf->ctlr, FIP_MODE_AUTO);

	qedf->ctlr.send = qedf_fip_send;
	qedf->ctlr.get_src_addr = qedf_get_src_mac;
	ether_addr_copy(qedf->ctlr.ctl_src_addr, qedf->mac);
}

static void qedf_setup_fdmi(struct qedf_ctx *qedf)
{
	struct fc_lport *lport = qedf->lport;
	u8 buf[8];
	int pos;
	uint32_t i;

	/*
	 * fdmi_enabled needs to be set for libfc
	 * to execute FDMI registration.
	 */
	lport->fdmi_enabled = 1;

	/*
	 * Set up the fc_host attributes that will be used to fill in the
	 * FDMI information.
	 */

	/* Get the PCI-e Device Serial Number Capability */
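	/*
	 * The DSN extended capability holds an 8-byte serial number after
	 * its 4-byte header; the bytes are read in ascending config-space
	 * order and printed most-significant byte first below.
	 */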
	pos = pci_find_ext_capability(qedf->pdev, PCI_EXT_CAP_ID_DSN);
	if (pos) {
		pos += 4;
		for (i = 0; i < 8; i++)
			pci_read_config_byte(qedf->pdev, pos + i, &buf[i]);

		snprintf(fc_host_serial_number(lport->host),
			 FC_SERIAL_NUMBER_SIZE,
			 "%02X%02X%02X%02X%02X%02X%02X%02X",
			 buf[7], buf[6], buf[5], buf[4],
			 buf[3], buf[2], buf[1], buf[0]);
	} else
		snprintf(fc_host_serial_number(lport->host),
			 FC_SERIAL_NUMBER_SIZE, "Unknown");

	snprintf(fc_host_manufacturer(lport->host),
		 FC_SERIAL_NUMBER_SIZE, "%s", "Marvell Semiconductor Inc.");

	if (qedf->pdev->device == QL45xxx) {
		snprintf(fc_host_model(lport->host),
			 FC_SYMBOLIC_NAME_SIZE, "%s", "QL45xxx");

		snprintf(fc_host_model_description(lport->host),
			 FC_SYMBOLIC_NAME_SIZE, "%s",
			 "Marvell FastLinQ QL45xxx FCoE Adapter");
	}

	if (qedf->pdev->device == QL41xxx) {
		snprintf(fc_host_model(lport->host),
			 FC_SYMBOLIC_NAME_SIZE, "%s", "QL41xxx");

		snprintf(fc_host_model_description(lport->host),
			 FC_SYMBOLIC_NAME_SIZE, "%s",
			 "Marvell FastLinQ QL41xxx FCoE Adapter");
	}

	snprintf(fc_host_hardware_version(lport->host),
		 FC_VERSION_STRING_SIZE, "Rev %d", qedf->pdev->revision);

	snprintf(fc_host_driver_version(lport->host),
		 FC_VERSION_STRING_SIZE, "%s", QEDF_VERSION);

	snprintf(fc_host_firmware_version(lport->host),
		 FC_VERSION_STRING_SIZE, "%d.%d.%d.%d",
		 FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
		 FW_ENGINEERING_VERSION);
}

static int qedf_lport_setup(struct qedf_ctx *qedf)
{
	struct fc_lport *lport = qedf->lport;

	lport->link_up = 0;
	lport->max_retry_count = QEDF_FLOGI_RETRY_CNT;
	lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT;
	lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
	    FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
	lport->boot_time = jiffies;
	lport->e_d_tov = 2 * 1000;
	lport->r_a_tov = 10 * 1000;

	/* Set NPIV support */
	lport->does_npiv = 1;
	fc_host_max_npiv_vports(lport->host) = QEDF_MAX_NPIV;

	fc_set_wwnn(lport, qedf->wwnn);
	fc_set_wwpn(lport, qedf->wwpn);

	if (fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0)) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "fcoe_libfc_config failed.\n");
		return -ENOMEM;
	}

	/* Allocate the exchange manager */
	fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_PARAMS_NUM_TASKS,
			  0xfffe, NULL);
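	/*
	 * The exchange manager is given XIDs from FCOE_PARAMS_NUM_TASKS up
	 * to 0xfffe; presumably the XIDs below that range are reserved for
	 * offloaded firmware tasks.
	 */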

	if (fc_lport_init_stats(lport))
		return -ENOMEM;

	/* Finish lport config */
	fc_lport_config(lport);

	/* Set max frame size */
	fc_set_mfs(lport, QEDF_MFS);
	fc_host_maxframe_size(lport->host) = lport->mfs;

	/* Set default dev_loss_tmo based on module parameter */
	fc_host_dev_loss_tmo(lport->host) = qedf_dev_loss_tmo;

	/* Set symbolic node name */
	if (qedf->pdev->device == QL45xxx)
		snprintf(fc_host_symbolic_name(lport->host), 256,
			 "Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION);

	if (qedf->pdev->device == QL41xxx)
		snprintf(fc_host_symbolic_name(lport->host), 256,
			 "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION);

	qedf_setup_fdmi(qedf);

	return 0;
}
1778
1779 /*
1780 * NPIV functions
1781 */
1782
qedf_vport_libfc_config(struct fc_vport * vport,struct fc_lport * lport)1783 static int qedf_vport_libfc_config(struct fc_vport *vport,
1784 struct fc_lport *lport)
1785 {
1786 lport->link_up = 0;
1787 lport->qfull = 0;
1788 lport->max_retry_count = QEDF_FLOGI_RETRY_CNT;
1789 lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT;
1790 lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
1791 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
1792 lport->boot_time = jiffies;
1793 lport->e_d_tov = 2 * 1000;
1794 lport->r_a_tov = 10 * 1000;
1795 lport->does_npiv = 1; /* Temporary until we add NPIV support */
1796
1797 /* Allocate stats for vport */
1798 if (fc_lport_init_stats(lport))
1799 return -ENOMEM;
1800
1801 /* Finish lport config */
1802 fc_lport_config(lport);
1803
1804 /* offload related configuration */
1805 lport->crc_offload = 0;
1806 lport->seq_offload = 0;
1807 lport->lro_enabled = 0;
1808 lport->lro_xid = 0;
1809 lport->lso_max = 0;
1810
1811 return 0;
1812 }
1813
qedf_vport_create(struct fc_vport * vport,bool disabled)1814 static int qedf_vport_create(struct fc_vport *vport, bool disabled)
1815 {
1816 struct Scsi_Host *shost = vport_to_shost(vport);
1817 struct fc_lport *n_port = shost_priv(shost);
1818 struct fc_lport *vn_port;
1819 struct qedf_ctx *base_qedf = lport_priv(n_port);
1820 struct qedf_ctx *vport_qedf;
1821
1822 char buf[32];
1823 int rc = 0;
1824
1825 rc = fcoe_validate_vport_create(vport);
1826 if (rc) {
1827 fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
1828 QEDF_WARN(&(base_qedf->dbg_ctx), "Failed to create vport, "
1829 "WWPN (0x%s) already exists.\n", buf);
1830 return rc;
1831 }
1832
1833 if (atomic_read(&base_qedf->link_state) != QEDF_LINK_UP) {
1834 QEDF_WARN(&(base_qedf->dbg_ctx), "Cannot create vport "
1835 "because link is not up.\n");
1836 return -EIO;
1837 }
1838
1839 vn_port = libfc_vport_create(vport, sizeof(struct qedf_ctx));
1840 if (!vn_port) {
1841 QEDF_WARN(&(base_qedf->dbg_ctx), "Could not create lport "
1842 "for vport.\n");
1843 return -ENOMEM;
1844 }
1845
1846 fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
1847 QEDF_ERR(&(base_qedf->dbg_ctx), "Creating NPIV port, WWPN=%s.\n",
1848 buf);
1849
1850 /* Copy some fields from base_qedf */
1851 vport_qedf = lport_priv(vn_port);
1852 memcpy(vport_qedf, base_qedf, sizeof(struct qedf_ctx));
1853
1854 /* Set qedf data specific to this vport */
1855 vport_qedf->lport = vn_port;
1856 /* Use same hba_lock as base_qedf */
1857 vport_qedf->hba_lock = base_qedf->hba_lock;
1858 vport_qedf->pdev = base_qedf->pdev;
1859 vport_qedf->cmd_mgr = base_qedf->cmd_mgr;
1860 init_completion(&vport_qedf->flogi_compl);
1861 INIT_LIST_HEAD(&vport_qedf->fcports);
1862 INIT_DELAYED_WORK(&vport_qedf->stag_work, qedf_stag_change_work);
1863
1864 rc = qedf_vport_libfc_config(vport, vn_port);
1865 if (rc) {
1866 QEDF_ERR(&(base_qedf->dbg_ctx), "Could not allocate memory "
1867 "for lport stats.\n");
1868 goto err;
1869 }
1870
1871 fc_set_wwnn(vn_port, vport->node_name);
1872 fc_set_wwpn(vn_port, vport->port_name);
1873 vport_qedf->wwnn = vn_port->wwnn;
1874 vport_qedf->wwpn = vn_port->wwpn;
1875
1876 vn_port->host->transportt = qedf_fc_vport_transport_template;
1877 vn_port->host->can_queue = FCOE_PARAMS_NUM_TASKS;
1878 vn_port->host->max_lun = qedf_max_lun;
1879 vn_port->host->sg_tablesize = QEDF_MAX_BDS_PER_CMD;
1880 vn_port->host->max_cmd_len = QEDF_MAX_CDB_LEN;
1881
1882 rc = scsi_add_host(vn_port->host, &vport->dev);
1883 if (rc) {
1884 QEDF_WARN(&base_qedf->dbg_ctx,
1885 "Error adding Scsi_Host rc=0x%x.\n", rc);
1886 goto err;
1887 }
1888
1889 /* Set default dev_loss_tmo based on module parameter */
1890 fc_host_dev_loss_tmo(vn_port->host) = qedf_dev_loss_tmo;
1891
	/* Initialize libfc function template and internals */
	memcpy(&vn_port->tt, &qedf_lport_template,
		sizeof(qedf_lport_template));
	fc_exch_init(vn_port);
	fc_elsct_init(vn_port);
	fc_lport_init(vn_port);
	fc_disc_init(vn_port);
	fc_disc_config(vn_port, vn_port);

	/* Allocate the exchange manager */
	shost = vport_to_shost(vport);
	n_port = shost_priv(shost);
	fc_exch_mgr_list_clone(n_port, vn_port);

	/* Set max frame size */
	fc_set_mfs(vn_port, QEDF_MFS);

	fc_host_port_type(vn_port->host) = FC_PORTTYPE_UNKNOWN;

	if (disabled) {
		fc_vport_set_state(vport, FC_VPORT_DISABLED);
	} else {
		vn_port->boot_time = jiffies;
		fc_fabric_login(vn_port);
		fc_vport_setlink(vn_port);
	}

	/* Set symbolic node name */
	if (base_qedf->pdev->device == QL45xxx)
		snprintf(fc_host_symbolic_name(vn_port->host), 256,
			 "Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION);

	if (base_qedf->pdev->device == QL41xxx)
		snprintf(fc_host_symbolic_name(vn_port->host), 256,
			 "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION);

	/* Set supported speed */
	fc_host_supported_speeds(vn_port->host) = n_port->link_supported_speeds;

	/* Set speed */
	vn_port->link_speed = n_port->link_speed;

	/* Set port type */
	fc_host_port_type(vn_port->host) = FC_PORTTYPE_NPIV;

	/* Set maxframe size */
	fc_host_maxframe_size(vn_port->host) = n_port->mfs;

	QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_NPIV, "vn_port=%p.\n",
		  vn_port);

	/* Set up debug context for vport */
	vport_qedf->dbg_ctx.host_no = vn_port->host->host_no;
	vport_qedf->dbg_ctx.pdev = base_qedf->pdev;

	return 0;

err:
	scsi_host_put(vn_port->host);
	return rc;
}

static int qedf_vport_destroy(struct fc_vport *vport)
{
	struct Scsi_Host *shost = vport_to_shost(vport);
	struct fc_lport *n_port = shost_priv(shost);
	struct fc_lport *vn_port = vport->dd_data;
	struct qedf_ctx *qedf = lport_priv(vn_port);

	if (!qedf) {
		QEDF_ERR(NULL, "qedf is NULL.\n");
		goto out;
	}

	/* Set unloading bit on vport qedf_ctx to prevent more I/O */
	set_bit(QEDF_UNLOADING, &qedf->flags);

	mutex_lock(&n_port->lp_mutex);
	list_del(&vn_port->list);
	mutex_unlock(&n_port->lp_mutex);

	fc_fabric_logoff(vn_port);
	fc_lport_destroy(vn_port);

	/* Detach from scsi-ml */
	fc_remove_host(vn_port->host);
	scsi_remove_host(vn_port->host);

	/*
	 * Only try to release the exchange manager if the vn_port
	 * configuration is complete.
	 */
	if (vn_port->state == LPORT_ST_READY)
		fc_exch_mgr_free(vn_port);

	/* Free memory used by statistical counters */
	fc_lport_free_stats(vn_port);

	/* Release Scsi_Host */
	scsi_host_put(vn_port->host);

out:
	return 0;
}

static int qedf_vport_disable(struct fc_vport *vport, bool disable)
{
	struct fc_lport *lport = vport->dd_data;

	if (disable) {
		fc_vport_set_state(vport, FC_VPORT_DISABLED);
		fc_fabric_logoff(lport);
	} else {
		lport->boot_time = jiffies;
		fc_fabric_login(lport);
		fc_vport_setlink(lport);
	}
	return 0;
}

/*
 * During removal we need to wait for all the vports associated with a port
 * to be destroyed so we avoid a race condition where libfc is still trying
 * to reap vports while the driver remove function has already reaped the
 * driver contexts associated with the physical port.
 */
static void qedf_wait_for_vport_destroy(struct qedf_ctx *qedf)
{
	struct fc_host_attrs *fc_host = shost_to_fc_host(qedf->lport->host);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV,
		  "Entered.\n");
	while (fc_host->npiv_vports_inuse > 0) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV,
			  "Waiting for all vports to be reaped.\n");
		msleep(1000);
	}
}

/**
 * qedf_fcoe_reset - Resets the fcoe
 *
 * @shost: shost the reset is from
 *
 * Returns: always 0
 */
static int qedf_fcoe_reset(struct Scsi_Host *shost)
{
	struct fc_lport *lport = shost_priv(shost);

	qedf_ctx_soft_reset(lport);
	return 0;
}

static void qedf_get_host_port_id(struct Scsi_Host *shost)
{
	struct fc_lport *lport = shost_priv(shost);

	fc_host_port_id(shost) = lport->port_id;
}

static struct fc_host_statistics *qedf_fc_get_host_stats(struct Scsi_Host
	*shost)
{
	struct fc_host_statistics *qedf_stats;
	struct fc_lport *lport = shost_priv(shost);
	struct qedf_ctx *qedf = lport_priv(lport);
	struct qed_fcoe_stats *fw_fcoe_stats;

	qedf_stats = fc_get_host_stats(shost);

	/* We don't collect offload stats for specific NPIV ports */
	if (lport->vport)
		goto out;

	fw_fcoe_stats = kmalloc(sizeof(struct qed_fcoe_stats), GFP_KERNEL);
	if (!fw_fcoe_stats) {
		QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate memory for "
		    "fw_fcoe_stats.\n");
		goto out;
	}

	mutex_lock(&qedf->stats_mutex);

	/* Query firmware for offload stats */
	qed_ops->get_stats(qedf->cdev, fw_fcoe_stats);

	/*
	 * The expectation is that we add our offload stats to the stats
	 * being maintained by libfc each time the fc_get_host_stats callback
	 * is invoked. The additions are not carried over for each call to
	 * the fc_get_host_stats callback.
	 */
	qedf_stats->tx_frames += fw_fcoe_stats->fcoe_tx_data_pkt_cnt +
	    fw_fcoe_stats->fcoe_tx_xfer_pkt_cnt +
	    fw_fcoe_stats->fcoe_tx_other_pkt_cnt;
	qedf_stats->rx_frames += fw_fcoe_stats->fcoe_rx_data_pkt_cnt +
	    fw_fcoe_stats->fcoe_rx_xfer_pkt_cnt +
	    fw_fcoe_stats->fcoe_rx_other_pkt_cnt;
	/* Take the word counts before do_div() consumes the byte counts */
	qedf_stats->rx_words += fw_fcoe_stats->fcoe_rx_byte_cnt / 4;
	qedf_stats->tx_words += fw_fcoe_stats->fcoe_tx_byte_cnt / 4;
	/*
	 * do_div() divides the dividend in place and returns the remainder,
	 * so use the quotient left behind in the byte counters for the
	 * megabyte totals rather than the remainder returned by the macro.
	 */
	do_div(fw_fcoe_stats->fcoe_rx_byte_cnt, 1000000);
	qedf_stats->fcp_input_megabytes += fw_fcoe_stats->fcoe_rx_byte_cnt;
	do_div(fw_fcoe_stats->fcoe_tx_byte_cnt, 1000000);
	qedf_stats->fcp_output_megabytes += fw_fcoe_stats->fcoe_tx_byte_cnt;
	qedf_stats->invalid_crc_count +=
	    fw_fcoe_stats->fcoe_silent_drop_pkt_crc_error_cnt;
	qedf_stats->dumped_frames =
	    fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt;
	qedf_stats->error_frames +=
	    fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt;
	qedf_stats->fcp_input_requests += qedf->input_requests;
	qedf_stats->fcp_output_requests += qedf->output_requests;
	qedf_stats->fcp_control_requests += qedf->control_requests;
	qedf_stats->fcp_packet_aborts += qedf->packet_aborts;
	qedf_stats->fcp_frame_alloc_failures += qedf->alloc_failures;

	mutex_unlock(&qedf->stats_mutex);
	kfree(fw_fcoe_stats);
out:
	return qedf_stats;
}

static struct fc_function_template qedf_fc_transport_fn = {
	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_supported_fc4s = 1,
	.show_host_active_fc4s = 1,
	.show_host_maxframe_size = 1,

	.get_host_port_id = qedf_get_host_port_id,
	.show_host_port_id = 1,
	.show_host_supported_speeds = 1,
	.get_host_speed = fc_get_host_speed,
	.show_host_speed = 1,
	.show_host_port_type = 1,
	.get_host_port_state = fc_get_host_port_state,
	.show_host_port_state = 1,
	.show_host_symbolic_name = 1,

	/*
	 * Tell FC transport to allocate enough space to store the backpointer
	 * for the associated qedf_rport struct.
	 */
	.dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
				sizeof(struct qedf_rport)),
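	/*
	 * With this sizing the rport private data is laid out as
	 * [fc_rport_libfc_priv][qedf_rport], so the driver struct can
	 * presumably be recovered with something like:
	 *
	 *	struct fc_rport_libfc_priv *rp = rport->dd_data;
	 *	struct qedf_rport *fcport = (struct qedf_rport *)(rp + 1);
	 */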
	.show_rport_maxframe_size = 1,
	.show_rport_supported_classes = 1,
	.show_host_fabric_name = 1,
	.show_starget_node_name = 1,
	.show_starget_port_name = 1,
	.show_starget_port_id = 1,
	.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,
	.get_fc_host_stats = qedf_fc_get_host_stats,
	.issue_fc_host_lip = qedf_fcoe_reset,
	.vport_create = qedf_vport_create,
	.vport_delete = qedf_vport_destroy,
	.vport_disable = qedf_vport_disable,
	.bsg_request = fc_lport_bsg_request,
};

static struct fc_function_template qedf_fc_vport_transport_fn = {
	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_supported_fc4s = 1,
	.show_host_active_fc4s = 1,
	.show_host_maxframe_size = 1,
	.show_host_port_id = 1,
	.show_host_supported_speeds = 1,
	.get_host_speed = fc_get_host_speed,
	.show_host_speed = 1,
	.show_host_port_type = 1,
	.get_host_port_state = fc_get_host_port_state,
	.show_host_port_state = 1,
	.show_host_symbolic_name = 1,
	.dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
				sizeof(struct qedf_rport)),
	.show_rport_maxframe_size = 1,
	.show_rport_supported_classes = 1,
	.show_host_fabric_name = 1,
	.show_starget_node_name = 1,
	.show_starget_port_name = 1,
	.show_starget_port_id = 1,
	.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
	.show_rport_dev_loss_tmo = 1,
	.get_fc_host_stats = fc_get_host_stats,
	.issue_fc_host_lip = qedf_fcoe_reset,
	.bsg_request = fc_lport_bsg_request,
};

static bool qedf_fp_has_work(struct qedf_fastpath *fp)
{
	struct qedf_ctx *qedf = fp->qedf;
	struct global_queue *que;
	struct qed_sb_info *sb_info = fp->sb_info;
	struct status_block_e4 *sb = sb_info->sb_virt;
	u16 prod_idx;

	/* Get the pointer to the global CQ this completion is on */
	que = qedf->global_queues[fp->sb_id];

	/* Be sure all responses have been written to PI */
	rmb();

	/* Get the current firmware producer index */
	prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI];

	return (que->cq_prod_idx != prod_idx);
}

/*
 * Interrupt handler code.
 */

/* Process completion queue and copy CQE contents for deferred processing
 *
 * Return true if we should wake the I/O thread, false if not.
 */
static bool qedf_process_completions(struct qedf_fastpath *fp)
{
	struct qedf_ctx *qedf = fp->qedf;
	struct qed_sb_info *sb_info = fp->sb_info;
	struct status_block_e4 *sb = sb_info->sb_virt;
	struct global_queue *que;
	u16 prod_idx;
	struct fcoe_cqe *cqe;
	struct qedf_io_work *io_work;
	int num_handled = 0;
	unsigned int cpu;
	struct qedf_ioreq *io_req = NULL;
	u16 xid;
	u16 new_cqes;
	u32 comp_type;

	/* Get the current firmware producer index */
	prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI];

	/* Get the pointer to the global CQ this completion is on */
	que = qedf->global_queues[fp->sb_id];

	/* Calculate the amount of new elements since last processing */
	new_cqes = (prod_idx >= que->cq_prod_idx) ?
	    (prod_idx - que->cq_prod_idx) :
	    0x10000 - que->cq_prod_idx + prod_idx;
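	/*
	 * Worked example of the 16-bit wrap handling above: if the saved
	 * copy que->cq_prod_idx is 0xfffe and the firmware producer has
	 * advanced to 0x0001, then new_cqes = 0x10000 - 0xfffe + 0x0001 = 3.
	 */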

	/* Save producer index */
	que->cq_prod_idx = prod_idx;

	while (new_cqes) {
		fp->completions++;
		num_handled++;
		cqe = &que->cq[que->cq_cons_idx];

		comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
		    FCOE_CQE_CQE_TYPE_MASK;

		/*
		 * Process unsolicited CQEs directly in the interrupt handler
		 * since we need the fastpath ID
		 */
		if (comp_type == FCOE_UNSOLIC_CQE_TYPE) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
				  "Unsolicited CQE.\n");
			qedf_process_unsol_compl(qedf, fp->sb_id, cqe);
			/*
			 * Don't add a work list item. Increment the consumer
			 * index and move on.
			 */
			goto inc_idx;
		}

		xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK;
		io_req = &qedf->cmd_mgr->cmds[xid];

		/*
		 * Figure out which percpu thread we should queue this I/O
		 * on.
		 */
		if (!io_req) {
			/* If there is no io_req associated with this CQE
			 * just queue it on CPU 0
			 */
			cpu = 0;
		} else {
			cpu = io_req->cpu;
			io_req->int_cpu = smp_processor_id();
		}

		io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
		if (!io_work) {
			QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
				   "work for I/O completion.\n");
			/*
			 * Advance past this CQE instead of retrying it;
			 * retrying with 'continue' would spin forever here
			 * if the mempool stays exhausted.
			 */
			goto inc_idx;
		}
		memset(io_work, 0, sizeof(struct qedf_io_work));

		INIT_WORK(&io_work->work, qedf_fp_io_handler);

		/* Copy contents of CQE for deferred processing */
		memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));

		io_work->qedf = fp->qedf;
		io_work->fp = NULL; /* Only used for unsolicited frames */

		queue_work_on(cpu, qedf_io_wq, &io_work->work);

inc_idx:
		que->cq_cons_idx++;
		if (que->cq_cons_idx == fp->cq_num_entries)
			que->cq_cons_idx = 0;
		new_cqes--;
	}

	return true;
}

/* MSI-X fastpath handler code */
static irqreturn_t qedf_msix_handler(int irq, void *dev_id)
{
	struct qedf_fastpath *fp = dev_id;

	if (!fp) {
		QEDF_ERR(NULL, "fp is null.\n");
		return IRQ_HANDLED;
	}
	if (!fp->sb_info) {
		QEDF_ERR(NULL, "fp->sb_info is null.\n");
		return IRQ_HANDLED;
	}

	/*
	 * Disable interrupts for this status block while we process new
	 * completions
	 */
	qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
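	/*
	 * The loop below drains completions, updates the SB copy, and
	 * re-checks for work before re-arming with IGU_INT_ENABLE; the
	 * second qedf_fp_has_work() check closes the window where a CQE
	 * lands between the last poll and the re-enable.
	 */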

	while (1) {
		qedf_process_completions(fp);

		if (qedf_fp_has_work(fp) == 0) {
			/* Update the sb information */
			qed_sb_update_sb_idx(fp->sb_info);

			/* Check for more work */
			rmb();

			if (qedf_fp_has_work(fp) == 0) {
				/* Re-enable interrupts */
				qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
				return IRQ_HANDLED;
			}
		}
	}

	/* Do we ever want to break out of above loop? */
	return IRQ_HANDLED;
}

/* simd handler for MSI/INTa */
static void qedf_simd_int_handler(void *cookie)
{
	/* Cookie is qedf_ctx struct */
	struct qedf_ctx *qedf = (struct qedf_ctx *)cookie;

	QEDF_WARN(&(qedf->dbg_ctx), "qedf=%p.\n", qedf);
}

#define QEDF_SIMD_HANDLER_NUM		0
static void qedf_sync_free_irqs(struct qedf_ctx *qedf)
{
	int i;
	u16 vector_idx = 0;
	u32 vector;

	if (qedf->int_info.msix_cnt) {
		for (i = 0; i < qedf->int_info.used_cnt; i++) {
			vector_idx = i * qedf->dev_info.common.num_hwfns +
				qed_ops->common->get_affin_hwfn_idx(qedf->cdev);
			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
				  "Freeing IRQ #%d vector_idx=%d.\n",
				  i, vector_idx);
			vector = qedf->int_info.msix[vector_idx].vector;
			synchronize_irq(vector);
			irq_set_affinity_hint(vector, NULL);
			irq_set_affinity_notifier(vector, NULL);
			free_irq(vector, &qedf->fp_array[i]);
		}
	} else {
		qed_ops->common->simd_handler_clean(qedf->cdev,
						    QEDF_SIMD_HANDLER_NUM);
	}

	qedf->int_info.used_cnt = 0;
	qed_ops->common->set_fp_int(qedf->cdev, 0);
}

static int qedf_request_msix_irq(struct qedf_ctx *qedf)
{
	int i, rc, cpu;
	u16 vector_idx = 0;
	u32 vector;

	cpu = cpumask_first(cpu_online_mask);
	for (i = 0; i < qedf->num_queues; i++) {
		vector_idx = i * qedf->dev_info.common.num_hwfns +
			qed_ops->common->get_affin_hwfn_idx(qedf->cdev);
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
			  "Requesting IRQ #%d vector_idx=%d.\n",
			  i, vector_idx);
		vector = qedf->int_info.msix[vector_idx].vector;
		rc = request_irq(vector, qedf_msix_handler, 0, "qedf",
				 &qedf->fp_array[i]);

		if (rc) {
			QEDF_WARN(&(qedf->dbg_ctx), "request_irq failed.\n");
			qedf_sync_free_irqs(qedf);
			return rc;
		}

		qedf->int_info.used_cnt++;
		rc = irq_set_affinity_hint(vector, get_cpu_mask(cpu));
		cpu = cpumask_next(cpu, cpu_online_mask);
	}

	return 0;
}

static int qedf_setup_int(struct qedf_ctx *qedf)
{
	int rc = 0;

	/*
	 * Learn interrupt configuration
	 */
	rc = qed_ops->common->set_fp_int(qedf->cdev, num_online_cpus());
	if (rc <= 0)
		return -EINVAL;

	rc = qed_ops->common->get_fp_int(qedf->cdev, &qedf->int_info);
	if (rc)
		return -EINVAL;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
		  "msix_cnt = 0x%x, num of cpus = 0x%x\n",
		  qedf->int_info.msix_cnt, num_online_cpus());

	if (qedf->int_info.msix_cnt)
		return qedf_request_msix_irq(qedf);

	qed_ops->common->simd_handler_config(qedf->cdev, &qedf,
	    QEDF_SIMD_HANDLER_NUM, qedf_simd_int_handler);
	qedf->int_info.used_cnt = 1;

	QEDF_ERR(&qedf->dbg_ctx,
		 "Cannot load driver due to a lack of MSI-X vectors.\n");
	return -EINVAL;
}

/* Main function for libfc frame reception */
static void qedf_recv_frame(struct qedf_ctx *qedf,
	struct sk_buff *skb)
{
	u32 fr_len;
	struct fc_lport *lport;
	struct fc_frame_header *fh;
	struct fcoe_crc_eof crc_eof;
	struct fc_frame *fp;
	u8 *mac = NULL;
	u8 *dest_mac = NULL;
	struct fcoe_hdr *hp;
	struct qedf_rport *fcport;
	struct fc_lport *vn_port;
	u32 f_ctl;

	lport = qedf->lport;
	if (lport == NULL || lport->state == LPORT_ST_DISABLED) {
		QEDF_WARN(NULL, "Invalid lport struct or lport disabled.\n");
		kfree_skb(skb);
		return;
	}

	if (skb_is_nonlinear(skb))
		skb_linearize(skb);
	mac = eth_hdr(skb)->h_source;
	dest_mac = eth_hdr(skb)->h_dest;

	/* Pull the header */
	hp = (struct fcoe_hdr *)skb->data;
	fh = (struct fc_frame_header *)skb_transport_header(skb);
	skb_pull(skb, sizeof(struct fcoe_hdr));
	fr_len = skb->len - sizeof(struct fcoe_crc_eof);
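	/*
	 * At this point the skb spans [FC frame][CRC + EOF trailer]: the
	 * FCoE header (struct fcoe_hdr, assumed to be 14 bytes) was just
	 * pulled, so fr_len is the FC frame length without the trailing
	 * struct fcoe_crc_eof that is copied out and trimmed off below.
	 */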

	fp = (struct fc_frame *)skb;
	fc_frame_init(fp);
	fr_dev(fp) = lport;
	fr_sof(fp) = hp->fcoe_sof;
	if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
		QEDF_INFO(NULL, QEDF_LOG_LL2, "skb_copy_bits failed.\n");
		kfree_skb(skb);
		return;
	}
	fr_eof(fp) = crc_eof.fcoe_eof;
	fr_crc(fp) = crc_eof.fcoe_crc32;
	if (pskb_trim(skb, fr_len)) {
		QEDF_INFO(NULL, QEDF_LOG_LL2, "pskb_trim failed.\n");
		kfree_skb(skb);
		return;
	}

	fh = fc_frame_header_get(fp);

	/*
	 * Invalid frame filters.
	 */

	if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
	    fh->fh_type == FC_TYPE_FCP) {
		/* Drop FCP data. We don't handle it in the L2 path */
		kfree_skb(skb);
		return;
	}
	if (fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
	    fh->fh_type == FC_TYPE_ELS) {
		switch (fc_frame_payload_op(fp)) {
		case ELS_LOGO:
			if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
				/* drop non-FIP LOGO */
				kfree_skb(skb);
				return;
			}
			break;
		}
	}

	if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) {
		/* Drop incoming ABTS */
		kfree_skb(skb);
		return;
	}

	if (ntoh24(&dest_mac[3]) != ntoh24(fh->fh_d_id)) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
			  "FC frame d_id mismatch with MAC %pM.\n", dest_mac);
		kfree_skb(skb);
		return;
	}

	if (qedf->ctlr.state) {
		if (!ether_addr_equal(mac, qedf->ctlr.dest_addr)) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
				  "Wrong source address: mac:%pM dest_addr:%pM.\n",
				  mac, qedf->ctlr.dest_addr);
			kfree_skb(skb);
			return;
		}
	}

	vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));

	/*
	 * If the destination ID from the frame header does not match what we
	 * have on record for lport and the search for a NPIV port came up
	 * empty then this is not addressed to our port so simply drop it.
	 */
	if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
			  "Dropping frame due to destination mismatch: lport->port_id=0x%x fh->d_id=0x%x.\n",
			  lport->port_id, ntoh24(fh->fh_d_id));
		kfree_skb(skb);
		return;
	}

	f_ctl = ntoh24(fh->fh_f_ctl);
	if ((fh->fh_type == FC_TYPE_BLS) && (f_ctl & FC_FC_SEQ_CTX) &&
	    (f_ctl & FC_FC_EX_CTX)) {
		/* Drop incoming ABTS response that has both SEQ/EX CTX set */
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
			  "Dropping ABTS response as both SEQ/EX CTX set.\n");
		kfree_skb(skb);
		return;
	}

	/*
	 * If a connection is uploading, drop incoming FCoE frames as there
	 * is a small window where we could try to return a frame while libfc
	 * is trying to clean things up.
	 */

	/* Get fcport associated with d_id if it exists */
	fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id));

	if (fcport && test_bit(QEDF_RPORT_UPLOADING_CONNECTION,
			       &fcport->flags)) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
			  "Connection uploading, dropping fp=%p.\n", fp);
		kfree_skb(skb);
		return;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame receive: "
	    "skb=%p fp=%p src=%06x dest=%06x r_ctl=%x fh_type=%x.\n", skb, fp,
	    ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl,
	    fh->fh_type);
	if (qedf_dump_frames)
		print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16,
		    1, skb->data, skb->len, false);
	fc_exch_recv(lport, fp);
}

static void qedf_ll2_process_skb(struct work_struct *work)
{
	struct qedf_skb_work *skb_work =
		container_of(work, struct qedf_skb_work, work);
	struct qedf_ctx *qedf = skb_work->qedf;
	struct sk_buff *skb = skb_work->skb;
	struct ethhdr *eh;

	if (!qedf) {
		QEDF_ERR(NULL, "qedf is NULL\n");
		goto err_out;
	}

	eh = (struct ethhdr *)skb->data;

	/* Undo VLAN encapsulation */
	if (eh->h_proto == htons(ETH_P_8021Q)) {
		memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
		eh = skb_pull(skb, VLAN_HLEN);
		skb_reset_mac_header(skb);
	}

	/*
	 * Process either a FIP frame or FCoE frame based on the
	 * protocol value. If it's not either just drop the
	 * frame.
	 */
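	/*
	 * FIP frames carry ethertype ETH_P_FIP (0x8914) and FCoE data
	 * frames ETH_P_FCOE (0x8906); anything else falls through to
	 * err_out and is dropped.
	 */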
	if (eh->h_proto == htons(ETH_P_FIP)) {
		qedf_fip_recv(qedf, skb);
		goto out;
	} else if (eh->h_proto == htons(ETH_P_FCOE)) {
		__skb_pull(skb, ETH_HLEN);
		qedf_recv_frame(qedf, skb);
		goto out;
	} else {
		goto err_out;
	}

err_out:
	kfree_skb(skb);
out:
	kfree(skb_work);
	return;
}

static int qedf_ll2_rx(void *cookie, struct sk_buff *skb,
	u32 arg1, u32 arg2)
{
	struct qedf_ctx *qedf = (struct qedf_ctx *)cookie;
	struct qedf_skb_work *skb_work;

	if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
			  "Dropping frame as link state is down.\n");
		kfree_skb(skb);
		return 0;
	}

	skb_work = kzalloc(sizeof(struct qedf_skb_work), GFP_ATOMIC);
	if (!skb_work) {
		QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate skb_work so "
			   "dropping frame.\n");
		kfree_skb(skb);
		return 0;
	}

	INIT_WORK(&skb_work->work, qedf_ll2_process_skb);
	skb_work->skb = skb;
	skb_work->qedf = qedf;
	queue_work(qedf->ll2_recv_wq, &skb_work->work);

	return 0;
}

static struct qed_ll2_cb_ops qedf_ll2_cb_ops = {
	.rx_cb = qedf_ll2_rx,
	.tx_cb = NULL,
};

/* Main thread to process I/O completions */
void qedf_fp_io_handler(struct work_struct *work)
{
	struct qedf_io_work *io_work =
		container_of(work, struct qedf_io_work, work);
	u32 comp_type;

	/*
	 * The deferred part of an unsolicited CQE is to send the
	 * frame to libfc.
	 */
	comp_type = (io_work->cqe.cqe_data >>
	    FCOE_CQE_CQE_TYPE_SHIFT) &
	    FCOE_CQE_CQE_TYPE_MASK;
	if (comp_type == FCOE_UNSOLIC_CQE_TYPE &&
	    io_work->fp)
		fc_exch_recv(io_work->qedf->lport, io_work->fp);
	else
		qedf_process_cqe(io_work->qedf, &io_work->cqe);

	kfree(io_work);
}

static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf,
	struct qed_sb_info *sb_info, u16 sb_id)
{
	struct status_block_e4 *sb_virt;
	dma_addr_t sb_phys;
	int ret;

	sb_virt = dma_alloc_coherent(&qedf->pdev->dev,
	    sizeof(struct status_block_e4), &sb_phys, GFP_KERNEL);

	if (!sb_virt) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Status block allocation failed for id = %d.\n",
			 sb_id);
		return -ENOMEM;
	}

	ret = qed_ops->common->sb_init(qedf->cdev, sb_info, sb_virt, sb_phys,
	    sb_id, QED_SB_TYPE_STORAGE);

	if (ret) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Status block initialization failed (0x%x) for id = %d.\n",
			 ret, sb_id);
		return ret;
	}

	return 0;
}

static void qedf_free_sb(struct qedf_ctx *qedf, struct qed_sb_info *sb_info)
{
	if (sb_info->sb_virt)
		dma_free_coherent(&qedf->pdev->dev, sizeof(*sb_info->sb_virt),
		    (void *)sb_info->sb_virt, sb_info->sb_phys);
}

static void qedf_destroy_sb(struct qedf_ctx *qedf)
{
	int id;
	struct qedf_fastpath *fp = NULL;

	for (id = 0; id < qedf->num_queues; id++) {
		fp = &(qedf->fp_array[id]);
		if (fp->sb_id == QEDF_SB_ID_NULL)
			break;
		qedf_free_sb(qedf, fp->sb_info);
		kfree(fp->sb_info);
	}
	kfree(qedf->fp_array);
}

static int qedf_prepare_sb(struct qedf_ctx *qedf)
{
	int id;
	struct qedf_fastpath *fp;
	int ret;

	qedf->fp_array =
	    kcalloc(qedf->num_queues, sizeof(struct qedf_fastpath),
		GFP_KERNEL);

	if (!qedf->fp_array) {
		QEDF_ERR(&(qedf->dbg_ctx), "fastpath array allocation "
			  "failed.\n");
		return -ENOMEM;
	}

	for (id = 0; id < qedf->num_queues; id++) {
		fp = &(qedf->fp_array[id]);
		fp->sb_id = QEDF_SB_ID_NULL;
		fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
		if (!fp->sb_info) {
			QEDF_ERR(&(qedf->dbg_ctx), "SB info struct "
				  "allocation failed.\n");
			goto err;
		}
		ret = qedf_alloc_and_init_sb(qedf, fp->sb_info, id);
		if (ret) {
			QEDF_ERR(&(qedf->dbg_ctx), "SB allocation and "
				  "initialization failed.\n");
			goto err;
		}
		fp->sb_id = id;
		fp->qedf = qedf;
		fp->cq_num_entries =
		    qedf->global_queues[id]->cq_mem_size /
		    sizeof(struct fcoe_cqe);
	}
	return 0;

err:
	return -ENOMEM;
}

void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe)
{
	u16 xid;
	struct qedf_ioreq *io_req;
	struct qedf_rport *fcport;
	u32 comp_type;

	comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
	    FCOE_CQE_CQE_TYPE_MASK;

	xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK;
	io_req = &qedf->cmd_mgr->cmds[xid];

	/* Completion not for a valid I/O anymore so just return */
	if (!io_req) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "io_req is NULL for xid=0x%x.\n", xid);
		return;
	}

	fcport = io_req->fcport;

	if (fcport == NULL) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "fcport is NULL for xid=0x%x io_req=%p.\n",
			 xid, io_req);
		return;
	}

	/*
	 * Check that fcport is offloaded. If it isn't then the spinlock
	 * isn't valid and shouldn't be taken. We should just return.
	 */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Session not offloaded yet, fcport = %p.\n", fcport);
		return;
	}

	switch (comp_type) {
	case FCOE_GOOD_COMPLETION_CQE_TYPE:
		atomic_inc(&fcport->free_sqes);
		switch (io_req->cmd_type) {
		case QEDF_SCSI_CMD:
			qedf_scsi_completion(qedf, cqe, io_req);
			break;
		case QEDF_ELS:
			qedf_process_els_compl(qedf, cqe, io_req);
			break;
		case QEDF_TASK_MGMT_CMD:
			qedf_process_tmf_compl(qedf, cqe, io_req);
			break;
		case QEDF_SEQ_CLEANUP:
			qedf_process_seq_cleanup_compl(qedf, cqe, io_req);
			break;
		}
		break;
	case FCOE_ERROR_DETECTION_CQE_TYPE:
		atomic_inc(&fcport->free_sqes);
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
			  "Error detect CQE.\n");
		qedf_process_error_detect(qedf, cqe, io_req);
		break;
	case FCOE_EXCH_CLEANUP_CQE_TYPE:
		atomic_inc(&fcport->free_sqes);
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
			  "Cleanup CQE.\n");
		qedf_process_cleanup_compl(qedf, cqe, io_req);
		break;
	case FCOE_ABTS_CQE_TYPE:
		atomic_inc(&fcport->free_sqes);
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
			  "Abort CQE.\n");
		qedf_process_abts_compl(qedf, cqe, io_req);
		break;
	case FCOE_DUMMY_CQE_TYPE:
		atomic_inc(&fcport->free_sqes);
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
			  "Dummy CQE.\n");
		break;
	case FCOE_LOCAL_COMP_CQE_TYPE:
		atomic_inc(&fcport->free_sqes);
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
			  "Local completion CQE.\n");
		break;
	case FCOE_WARNING_CQE_TYPE:
		atomic_inc(&fcport->free_sqes);
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
			  "Warning CQE.\n");
		qedf_process_warning_compl(qedf, cqe, io_req);
		break;
	case MAX_FCOE_CQE_TYPE:
		atomic_inc(&fcport->free_sqes);
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
			  "Max FCoE CQE.\n");
		break;
	default:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
			  "Default CQE.\n");
		break;
	}
}

static void qedf_free_bdq(struct qedf_ctx *qedf)
{
	int i;

	if (qedf->bdq_pbl_list)
		dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
		    qedf->bdq_pbl_list, qedf->bdq_pbl_list_dma);

	if (qedf->bdq_pbl)
		dma_free_coherent(&qedf->pdev->dev, qedf->bdq_pbl_mem_size,
		    qedf->bdq_pbl, qedf->bdq_pbl_dma);

	for (i = 0; i < QEDF_BDQ_SIZE; i++) {
		if (qedf->bdq[i].buf_addr) {
			dma_free_coherent(&qedf->pdev->dev, QEDF_BDQ_BUF_SIZE,
			    qedf->bdq[i].buf_addr, qedf->bdq[i].buf_dma);
		}
	}
}

static void qedf_free_global_queues(struct qedf_ctx *qedf)
{
	int i;
	struct global_queue **gl = qedf->global_queues;

	for (i = 0; i < qedf->num_queues; i++) {
		if (!gl[i])
			continue;

		if (gl[i]->cq)
			dma_free_coherent(&qedf->pdev->dev,
			    gl[i]->cq_mem_size, gl[i]->cq, gl[i]->cq_dma);
		if (gl[i]->cq_pbl)
			dma_free_coherent(&qedf->pdev->dev, gl[i]->cq_pbl_size,
			    gl[i]->cq_pbl, gl[i]->cq_pbl_dma);

		kfree(gl[i]);
	}

	qedf_free_bdq(qedf);
}

static int qedf_alloc_bdq(struct qedf_ctx *qedf)
{
	int i;
	struct scsi_bd *pbl;
	u64 *list;
	dma_addr_t page;

	/* Alloc dma memory for BDQ buffers */
	for (i = 0; i < QEDF_BDQ_SIZE; i++) {
		qedf->bdq[i].buf_addr = dma_alloc_coherent(&qedf->pdev->dev,
		    QEDF_BDQ_BUF_SIZE, &qedf->bdq[i].buf_dma, GFP_KERNEL);
		if (!qedf->bdq[i].buf_addr) {
			QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ "
				  "buffer %d.\n", i);
			return -ENOMEM;
		}
	}

	/* Alloc dma memory for BDQ page buffer list */
	qedf->bdq_pbl_mem_size =
	    QEDF_BDQ_SIZE * sizeof(struct scsi_bd);
	qedf->bdq_pbl_mem_size =
	    ALIGN(qedf->bdq_pbl_mem_size, QEDF_PAGE_SIZE);

	qedf->bdq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
	    qedf->bdq_pbl_mem_size, &qedf->bdq_pbl_dma, GFP_KERNEL);
	if (!qedf->bdq_pbl) {
		QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ PBL.\n");
		return -ENOMEM;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
		  "BDQ PBL addr=0x%p dma=%pad\n",
		  qedf->bdq_pbl, &qedf->bdq_pbl_dma);

	/*
	 * Populate BDQ PBL with physical and virtual address of individual
	 * BDQ buffers
	 */
	pbl = (struct scsi_bd *)qedf->bdq_pbl;
	for (i = 0; i < QEDF_BDQ_SIZE; i++) {
		pbl->address.hi = cpu_to_le32(U64_HI(qedf->bdq[i].buf_dma));
		pbl->address.lo = cpu_to_le32(U64_LO(qedf->bdq[i].buf_dma));
		pbl->opaque.fcoe_opaque.hi = 0;
		/* Opaque lo data is an index into the BDQ array */
		pbl->opaque.fcoe_opaque.lo = cpu_to_le32(i);
		pbl++;
	}
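	/*
	 * For example, PBL entry i now holds the DMA address of
	 * qedf->bdq[i].buf_addr split into hi/lo halves with opaque.lo = i;
	 * the firmware presumably echoes that index back in unsolicited
	 * completions so the driver can locate the matching BDQ buffer.
	 */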

	/* Allocate list of PBL pages */
	qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev,
						QEDF_PAGE_SIZE,
						&qedf->bdq_pbl_list_dma,
						GFP_KERNEL);
	if (!qedf->bdq_pbl_list) {
		QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n");
		return -ENOMEM;
	}

	/*
	 * Now populate PBL list with pages that contain pointers to the
	 * individual buffers.
	 */
	qedf->bdq_pbl_list_num_entries = qedf->bdq_pbl_mem_size /
	    QEDF_PAGE_SIZE;
	list = (u64 *)qedf->bdq_pbl_list;
	page = qedf->bdq_pbl_list_dma;
	for (i = 0; i < qedf->bdq_pbl_list_num_entries; i++) {
		*list = qedf->bdq_pbl_dma;
		list++;
		page += QEDF_PAGE_SIZE;
	}

	return 0;
}

static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
{
	u32 *list;
	int i;
	int status;
	u32 *pbl;
	dma_addr_t page;
	int num_pages;

	/* Allocate and map CQs, RQs */
	/*
	 * Number of global queues (CQ / RQ). This should
	 * be <= number of available MSIX vectors for the PF
	 */
	if (!qedf->num_queues) {
		QEDF_ERR(&(qedf->dbg_ctx), "No MSI-X vectors available!\n");
		return -ENOMEM;
	}

	/*
	 * Make sure we allocated the PBL that will contain the physical
	 * addresses of our queues
	 */
	if (!qedf->p_cpuq) {
		status = -EINVAL;
		QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n");
		goto mem_alloc_failure;
	}

	qedf->global_queues = kzalloc((sizeof(struct global_queue *)
	    * qedf->num_queues), GFP_KERNEL);
	if (!qedf->global_queues) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate global "
			  "queues array ptr memory\n");
		return -ENOMEM;
	}
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
		  "qedf->global_queues=%p.\n", qedf->global_queues);

	/* Allocate DMA coherent buffers for BDQ */
	status = qedf_alloc_bdq(qedf);
	if (status) {
		QEDF_ERR(&qedf->dbg_ctx, "Unable to allocate bdq.\n");
		goto mem_alloc_failure;
	}

	/* Allocate a CQ and an associated PBL for each MSI-X vector */
	for (i = 0; i < qedf->num_queues; i++) {
		qedf->global_queues[i] = kzalloc(sizeof(struct global_queue),
		    GFP_KERNEL);
		if (!qedf->global_queues[i]) {
			QEDF_WARN(&(qedf->dbg_ctx), "Unable to allocate "
				   "global queue %d.\n", i);
			status = -ENOMEM;
			goto mem_alloc_failure;
		}

		qedf->global_queues[i]->cq_mem_size =
		    FCOE_PARAMS_CQ_NUM_ENTRIES * sizeof(struct fcoe_cqe);
		qedf->global_queues[i]->cq_mem_size =
		    ALIGN(qedf->global_queues[i]->cq_mem_size, QEDF_PAGE_SIZE);

		qedf->global_queues[i]->cq_pbl_size =
		    (qedf->global_queues[i]->cq_mem_size /
		    PAGE_SIZE) * sizeof(void *);
		qedf->global_queues[i]->cq_pbl_size =
		    ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE);

		qedf->global_queues[i]->cq =
		    dma_alloc_coherent(&qedf->pdev->dev,
			qedf->global_queues[i]->cq_mem_size,
			&qedf->global_queues[i]->cq_dma,
			GFP_KERNEL);

		if (!qedf->global_queues[i]->cq) {
			QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n");
			status = -ENOMEM;
			goto mem_alloc_failure;
		}

		qedf->global_queues[i]->cq_pbl =
		    dma_alloc_coherent(&qedf->pdev->dev,
			qedf->global_queues[i]->cq_pbl_size,
			&qedf->global_queues[i]->cq_pbl_dma,
			GFP_KERNEL);

		if (!qedf->global_queues[i]->cq_pbl) {
			QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n");
			status = -ENOMEM;
			goto mem_alloc_failure;
		}

		/* Create PBL */
		num_pages = qedf->global_queues[i]->cq_mem_size /
		    QEDF_PAGE_SIZE;
		page = qedf->global_queues[i]->cq_dma;
		pbl = (u32 *)qedf->global_queues[i]->cq_pbl;

		while (num_pages--) {
			*pbl = U64_LO(page);
			pbl++;
			*pbl = U64_HI(page);
			pbl++;
			page += QEDF_PAGE_SIZE;
		}
		/* Set the initial consumer index for cq */
		qedf->global_queues[i]->cq_cons_idx = 0;
	}

	list = (u32 *)qedf->p_cpuq;

	/*
	 * The list is built as follows: CQ#0 PBL pointer, RQ#0 PBL pointer,
	 * CQ#1 PBL pointer, RQ#1 PBL pointer, etc. Each PBL pointer points
	 * to the physical address which contains an array of pointers to
	 * the physical addresses of the specific queue pages.
	 */
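	/*
	 * For example, with two queues the array would be laid out as:
	 *
	 *	list[0..1] = lo/hi halves of the CQ#0 PBL address
	 *	list[2..3] = lo/hi halves of the RQ#0 PBL address (zero here)
	 *	list[4..5] = lo/hi halves of the CQ#1 PBL address
	 *	list[6..7] = lo/hi halves of the RQ#1 PBL address (zero here)
	 */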
	for (i = 0; i < qedf->num_queues; i++) {
		*list = U64_LO(qedf->global_queues[i]->cq_pbl_dma);
		list++;
		*list = U64_HI(qedf->global_queues[i]->cq_pbl_dma);
		list++;
		*list = U64_LO(0);
		list++;
		*list = U64_HI(0);
		list++;
	}

	return 0;

mem_alloc_failure:
	qedf_free_global_queues(qedf);
	return status;
}

static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf)
{
	u8 sq_num_pbl_pages;
	u32 sq_mem_size;
	u32 cq_mem_size;
	u32 cq_num_entries;
	int rval;

	/*
	 * The number of completion queues/fastpath interrupts/status blocks
	 * we allocate is the minimum of:
	 *
	 * Number of CPUs
	 * Number allocated by qed for our PCI function
	 */
	qedf->num_queues = MIN_NUM_CPUS_MSIX(qedf);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n",
		  qedf->num_queues);

	qedf->p_cpuq = dma_alloc_coherent(&qedf->pdev->dev,
	    qedf->num_queues * sizeof(struct qedf_glbl_q_params),
	    &qedf->hw_p_cpuq, GFP_KERNEL);

	if (!qedf->p_cpuq) {
		QEDF_ERR(&(qedf->dbg_ctx), "dma_alloc_coherent failed.\n");
		return 1;
	}

	rval = qedf_alloc_global_queues(qedf);
	if (rval) {
		QEDF_ERR(&(qedf->dbg_ctx), "Global queue allocation "
			  "failed.\n");
		return 1;
	}

	/* Calculate SQ PBL size in the same manner as in qedf_sq_alloc() */
	sq_mem_size = SQ_NUM_ENTRIES * sizeof(struct fcoe_wqe);
	sq_mem_size = ALIGN(sq_mem_size, QEDF_PAGE_SIZE);
	sq_num_pbl_pages = (sq_mem_size / QEDF_PAGE_SIZE);

	/* Calculate CQ num entries */
	cq_mem_size = FCOE_PARAMS_CQ_NUM_ENTRIES * sizeof(struct fcoe_cqe);
	cq_mem_size = ALIGN(cq_mem_size, QEDF_PAGE_SIZE);
	cq_num_entries = cq_mem_size / sizeof(struct fcoe_cqe);
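	/*
	 * Worked example with illustrative values (not taken from the
	 * headers): if FCOE_PARAMS_CQ_NUM_ENTRIES were 128 and
	 * sizeof(struct fcoe_cqe) were 32, cq_mem_size would be 4096,
	 * ALIGN() would keep it at one 4 KB page, and cq_num_entries
	 * would again be 128; the ALIGN() can only grow the entry count
	 * to fill whole pages.
	 */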

	memset(&(qedf->pf_params), 0, sizeof(qedf->pf_params));

	/* Setup the value for fcoe PF */
	qedf->pf_params.fcoe_pf_params.num_cons = QEDF_MAX_SESSIONS;
	qedf->pf_params.fcoe_pf_params.num_tasks = FCOE_PARAMS_NUM_TASKS;
	qedf->pf_params.fcoe_pf_params.glbl_q_params_addr =
	    (u64)qedf->hw_p_cpuq;
	qedf->pf_params.fcoe_pf_params.sq_num_pbl_pages = sq_num_pbl_pages;

	qedf->pf_params.fcoe_pf_params.rq_buffer_log_size = 0;

	qedf->pf_params.fcoe_pf_params.cq_num_entries = cq_num_entries;
	qedf->pf_params.fcoe_pf_params.num_cqs = qedf->num_queues;

	/* log_page_size: 12 for 4KB pages */
	qedf->pf_params.fcoe_pf_params.log_page_size = ilog2(QEDF_PAGE_SIZE);

	qedf->pf_params.fcoe_pf_params.mtu = 9000;
	qedf->pf_params.fcoe_pf_params.gl_rq_pi = QEDF_FCOE_PARAMS_GL_RQ_PI;
	qedf->pf_params.fcoe_pf_params.gl_cmd_pi = QEDF_FCOE_PARAMS_GL_CMD_PI;

	/* BDQ address and size */
	qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0] =
	    qedf->bdq_pbl_list_dma;
	qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0] =
	    qedf->bdq_pbl_list_num_entries;
	qedf->pf_params.fcoe_pf_params.rq_buffer_size = QEDF_BDQ_BUF_SIZE;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
		  "bdq_list=%p bdq_pbl_list_dma=%llx bdq_pbl_list_entries=%d.\n",
		  qedf->bdq_pbl_list,
		  qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0],
		  qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0]);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
		  "cq_num_entries=%d.\n",
		  qedf->pf_params.fcoe_pf_params.cq_num_entries);

	return 0;
}

/* Free DMA coherent memory for array of queue pointers we pass to qed */
static void qedf_free_fcoe_pf_param(struct qedf_ctx *qedf)
{
	size_t size = 0;

	if (qedf->p_cpuq) {
		size = qedf->num_queues * sizeof(struct qedf_glbl_q_params);
		dma_free_coherent(&qedf->pdev->dev, size, qedf->p_cpuq,
		    qedf->hw_p_cpuq);
	}

	qedf_free_global_queues(qedf);

	kfree(qedf->global_queues);
}

/*
 * PCI driver functions
 */

static const struct pci_device_id qedf_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165c) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8080) },
	{0}
};
MODULE_DEVICE_TABLE(pci, qedf_pci_tbl);

static struct pci_driver qedf_pci_driver = {
	.name = QEDF_MODULE_NAME,
	.id_table = qedf_pci_tbl,
	.probe = qedf_probe,
	.remove = qedf_remove,
	.shutdown = qedf_shutdown,
};

static int __qedf_probe(struct pci_dev *pdev, int mode)
{
	int rc = -EINVAL;
	struct fc_lport *lport;
	struct qedf_ctx *qedf = NULL;
	struct Scsi_Host *host;
	bool is_vf = false;
	struct qed_ll2_params params;
	char host_buf[20];
	struct qed_link_params link_params;
	int status;
	void *task_start, *task_end;
	struct qed_slowpath_params slowpath_params;
	struct qed_probe_params qed_params;
	u16 retry_cnt = 10;

	/*
	 * When doing error recovery we didn't reap the lport so don't try
	 * to reallocate it.
	 */
retry_probe:
	if (mode == QEDF_MODE_RECOVERY)
		msleep(2000);

	if (mode != QEDF_MODE_RECOVERY) {
		lport = libfc_host_alloc(&qedf_host_template,
		    sizeof(struct qedf_ctx));

		if (!lport) {
			QEDF_ERR(NULL, "Could not allocate lport.\n");
			rc = -ENOMEM;
			goto err0;
		}

		fc_disc_init(lport);

		/* Initialize qedf_ctx */
		qedf = lport_priv(lport);
		set_bit(QEDF_PROBING, &qedf->flags);
		qedf->lport = lport;
		qedf->ctlr.lp = lport;
		qedf->pdev = pdev;
		qedf->dbg_ctx.pdev = pdev;
		qedf->dbg_ctx.host_no = lport->host->host_no;
		spin_lock_init(&qedf->hba_lock);
		INIT_LIST_HEAD(&qedf->fcports);
		qedf->curr_conn_id = QEDF_MAX_SESSIONS - 1;
		atomic_set(&qedf->num_offloads, 0);
		qedf->stop_io_on_error = false;
		pci_set_drvdata(pdev, qedf);
		init_completion(&qedf->fipvlan_compl);
		mutex_init(&qedf->stats_mutex);
		mutex_init(&qedf->flush_mutex);
		qedf->flogi_pending = 0;

		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
			  "QLogic FastLinQ FCoE Module qedf %s, "
			  "FW %d.%d.%d.%d\n", QEDF_VERSION,
			  FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
			  FW_ENGINEERING_VERSION);
	} else {
		/* Init pointers during recovery */
		qedf = pci_get_drvdata(pdev);
		set_bit(QEDF_PROBING, &qedf->flags);
		lport = qedf->lport;
	}

	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe started.\n");

	host = lport->host;

	/* Allocate mempool for qedf_io_work structs */
	qedf->io_mempool = mempool_create_slab_pool(QEDF_IO_WORK_MIN,
	    qedf_io_work_cache);
	if (qedf->io_mempool == NULL) {
		QEDF_ERR(&(qedf->dbg_ctx), "qedf->io_mempool is NULL.\n");
		goto err1;
	}
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, "qedf->io_mempool=%p.\n",
		  qedf->io_mempool);

	sprintf(host_buf, "qedf_%u_link",
	    qedf->lport->host->host_no);
	qedf->link_update_wq = create_workqueue(host_buf);
	INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update);
	INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery);
	INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump);
	INIT_DELAYED_WORK(&qedf->stag_work, qedf_stag_change_work);
	qedf->fipvlan_retries = qedf_fipvlan_retries;
	/* Set a default prio in case DCBX doesn't converge */
	if (qedf_default_prio > -1) {
		/*
		 * This is the case where we pass a modparam in so we want to
		 * honor it even if dcbx doesn't converge.
		 */
		qedf->prio = qedf_default_prio;
	} else {
		qedf->prio = QEDF_DEFAULT_PRIO;
	}

	/*
	 * Common probe. Takes care of basic hardware init and pci_*
	 * functions.
	 */
	memset(&qed_params, 0, sizeof(qed_params));
	qed_params.protocol = QED_PROTOCOL_FCOE;
	qed_params.dp_module = qedf_dp_module;
	qed_params.dp_level = qedf_dp_level;
	qed_params.is_vf = is_vf;
	qedf->cdev = qed_ops->common->probe(pdev, &qed_params);
	if (!qedf->cdev) {
		if ((mode == QEDF_MODE_RECOVERY) && retry_cnt) {
			QEDF_ERR(&qedf->dbg_ctx,
				 "Retry %d initialize hardware\n", retry_cnt);
			retry_cnt--;
			goto retry_probe;
		}
		QEDF_ERR(&qedf->dbg_ctx, "common probe failed.\n");
		rc = -ENODEV;
		goto err1;
	}

	/* Learn information crucial for qedf to progress */
	rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "Failed to fill dev info.\n");
		goto err1;
	}

	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
		  "dev_info: num_hwfns=%d affin_hwfn_idx=%d.\n",
		  qedf->dev_info.common.num_hwfns,
		  qed_ops->common->get_affin_hwfn_idx(qedf->cdev));

	/* queue allocation code should come here
	 * order should be
	 * slowpath_start
	 * status block allocation
	 * interrupt registration (to get min number of queues)
	 * set_fcoe_pf_param
	 * qed_sp_fcoe_func_start
	 */
	rc = qedf_set_fcoe_pf_param(qedf);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "Cannot set fcoe pf param.\n");
		goto err2;
	}
	qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);

	/* Learn information crucial for qedf to progress */
	rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
	if (rc) {
		QEDF_ERR(&qedf->dbg_ctx, "Failed to fill dev info.\n");
		goto err2;
	}

	/* Record BDQ producer doorbell addresses */
	qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
	qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr;
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
		  "BDQ primary_prod=%p secondary_prod=%p.\n", qedf->bdq_primary_prod,
		  qedf->bdq_secondary_prod);

	qed_ops->register_ops(qedf->cdev, &qedf_cb_ops, qedf);

	rc = qedf_prepare_sb(qedf);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "Cannot prepare status blocks.\n");
		goto err2;
	}

	/* Start the Slowpath-process */
	slowpath_params.int_mode = QED_INT_MODE_MSIX;
	slowpath_params.drv_major = QEDF_DRIVER_MAJOR_VER;
	slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER;
	slowpath_params.drv_rev = QEDF_DRIVER_REV_VER;
	slowpath_params.drv_eng = QEDF_DRIVER_ENG_VER;
	strncpy(slowpath_params.name, "qedf", QED_DRV_VER_STR_SIZE);
	rc = qed_ops->common->slowpath_start(qedf->cdev, &slowpath_params);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n");
		goto err2;
	}

	/*
	 * update_pf_params needs to be called before and after slowpath
	 * start
	 */
	qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);

	/* Setup interrupts */
	rc = qedf_setup_int(qedf);
	if (rc) {
		QEDF_ERR(&qedf->dbg_ctx, "Setup interrupts failed.\n");
		goto err3;
	}

	rc = qed_ops->start(qedf->cdev, &qedf->tasks);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "Cannot start FCoE function.\n");
		goto err4;
	}
	task_start = qedf_get_task_mem(&qedf->tasks, 0);
	task_end = qedf_get_task_mem(&qedf->tasks, MAX_TID_BLOCKS_FCOE - 1);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Task context start=%p, "
		  "end=%p block_size=%u.\n", task_start, task_end,
		  qedf->tasks.size);

	/*
	 * We need to write the number of BDs in the BDQ we've preallocated so
	 * the f/w will do a prefetch and we'll get an unsolicited CQE when a
	 * packet arrives.
	 */
	qedf->bdq_prod_idx = QEDF_BDQ_SIZE;
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
		  "Writing %d to primary and secondary BDQ doorbell registers.\n",
		  qedf->bdq_prod_idx);
	writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
	readw(qedf->bdq_primary_prod);
	writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
	readw(qedf->bdq_secondary_prod);

	qed_ops->common->set_power_state(qedf->cdev, PCI_D0);

	/* Now that the dev_info struct has been filled in set the MAC
	 * address
	 */
	ether_addr_copy(qedf->mac, qedf->dev_info.common.hw_mac);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "MAC address is %pM.\n",
		  qedf->mac);

	/*
	 * Set the WWNN and WWPN in the following way:
	 *
	 * If the info we get from qed is non-zero then use that to set the
	 * WWPN and WWNN. Otherwise fall back to use fcoe_wwn_from_mac() based
	 * on the MAC address.
	 */
	if (qedf->dev_info.wwnn != 0 && qedf->dev_info.wwpn != 0) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
			  "Setting WWPN and WWNN from qed dev_info.\n");
		qedf->wwnn = qedf->dev_info.wwnn;
		qedf->wwpn = qedf->dev_info.wwpn;
	} else {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
			  "Setting WWPN and WWNN using fcoe_wwn_from_mac().\n");
		qedf->wwnn = fcoe_wwn_from_mac(qedf->mac, 1, 0);
		qedf->wwpn = fcoe_wwn_from_mac(qedf->mac, 2, 0);
	}
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "WWNN=%016llx "
		  "WWPN=%016llx.\n", qedf->wwnn, qedf->wwpn);

	sprintf(host_buf, "host_%d", host->host_no);
	qed_ops->common->set_name(qedf->cdev, host_buf);

	/* Allocate cmd mgr */
	qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf);
	if (!qedf->cmd_mgr) {
		QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate cmd mgr.\n");
		rc = -ENOMEM;
		goto err5;
	}

	if (mode != QEDF_MODE_RECOVERY) {
		host->transportt = qedf_fc_transport_template;
		host->max_lun = qedf_max_lun;
		host->max_cmd_len = QEDF_MAX_CDB_LEN;
		host->can_queue = FCOE_PARAMS_NUM_TASKS;
		rc = scsi_add_host(host, &pdev->dev);
		if (rc) {
			QEDF_WARN(&qedf->dbg_ctx,
				  "Error adding Scsi_Host rc=0x%x.\n", rc);
			goto err6;
		}
	}

	memset(&params, 0, sizeof(params));
	params.mtu = QEDF_LL2_BUF_SIZE;
	ether_addr_copy(params.ll2_mac_address, qedf->mac);

	/* Start LL2 processing thread */
	snprintf(host_buf, 20, "qedf_%d_ll2", host->host_no);
	qedf->ll2_recv_wq =
		create_workqueue(host_buf);
	if (!qedf->ll2_recv_wq) {
		QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate LL2 workqueue.\n");
3564 rc = -ENOMEM;
3565 goto err7;
3566 }
3567
3568 #ifdef CONFIG_DEBUG_FS
3569 qedf_dbg_host_init(&(qedf->dbg_ctx), qedf_debugfs_ops,
3570 qedf_dbg_fops);
3571 #endif
3572
3573 /* Start LL2 */
3574 qed_ops->ll2->register_cb_ops(qedf->cdev, &qedf_ll2_cb_ops, qedf);
3575 rc = qed_ops->ll2->start(qedf->cdev, ¶ms);
3576 if (rc) {
3577 QEDF_ERR(&(qedf->dbg_ctx), "Could not start Light L2.\n");
3578 goto err7;
3579 }
3580 set_bit(QEDF_LL2_STARTED, &qedf->flags);
3581
3582 /* Clear the initial FIP/FCoE VLAN; none has been discovered yet */
3583 qedf->vlan_id = 0;
3584
3585 /*
3586 * No need to setup fcoe_ctlr or fc_lport objects during recovery since
3587 * they were not reaped during the unload process.
3588 */
3589 if (mode != QEDF_MODE_RECOVERY) {
3590 /* Set up the embedded FCoE controller */
3591 qedf_fcoe_ctlr_setup(qedf);
3592
3593 /* Setup lport */
3594 rc = qedf_lport_setup(qedf);
3595 if (rc) {
3596 QEDF_ERR(&(qedf->dbg_ctx),
3597 "qedf_lport_setup failed.\n");
3598 goto err7;
3599 }
3600 }
3601
3602 sprintf(host_buf, "qedf_%u_timer", qedf->lport->host->host_no);
3603 qedf->timer_work_queue =
3604 create_workqueue(host_buf);
3605 if (!qedf->timer_work_queue) {
3606 QEDF_ERR(&(qedf->dbg_ctx), "Failed to create timer "
3607 "workqueue.\n");
3608 rc = -ENOMEM;
3609 goto err7;
3610 }
3611
3612 /* DPC workqueue is not reaped during recovery unload */
3613 if (mode != QEDF_MODE_RECOVERY) {
3614 sprintf(host_buf, "qedf_%u_dpc",
3615 qedf->lport->host->host_no);
3616 qedf->dpc_wq = create_workqueue(host_buf);
3617 }
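/* The recovery worker is initialized on both normal and recovery probes. */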
3618 INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler);
3619
3620 /*
3621 * GRC dump and sysfs parameters are not reaped during the recovery
3622 * unload process.
3623 */
3624 if (mode != QEDF_MODE_RECOVERY) {
3625 qedf->grcdump_size =
3626 qed_ops->common->dbg_all_data_size(qedf->cdev);
3627 if (qedf->grcdump_size) {
3628 rc = qedf_alloc_grc_dump_buf(&qedf->grcdump,
3629 qedf->grcdump_size);
3630 if (rc) {
3631 QEDF_ERR(&(qedf->dbg_ctx),
3632 "GRC Dump buffer alloc failed.\n");
3633 qedf->grcdump = NULL;
3634 }
3635
3636 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3637 "grcdump: addr=%p, size=%u.\n",
3638 qedf->grcdump, qedf->grcdump_size);
3639 }
3640 qedf_create_sysfs_ctx_attr(qedf);
3641
3642 /* Initialize I/O tracing for this adapter */
3643 spin_lock_init(&qedf->io_trace_lock);
3644 qedf->io_trace_idx = 0;
3645 }
3646
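/* Completion signalled once the fabric login (FLOGI) exchange finishes. */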
3647 init_completion(&qedf->flogi_compl);
3648
3649 status = qed_ops->common->update_drv_state(qedf->cdev, true);
3650 if (status)
3651 QEDF_ERR(&(qedf->dbg_ctx),
3652 "Failed to send drv state to MFW.\n");
3653
3654 memset(&link_params, 0, sizeof(struct qed_link_params));
3655 link_params.link_up = true;
3656 status = qed_ops->common->set_link(qedf->cdev, &link_params);
3657 if (status)
3658 QEDF_WARN(&(qedf->dbg_ctx), "set_link failed.\n");
3659
3660 /* Start/restart discovery */
3661 if (mode == QEDF_MODE_RECOVERY)
3662 fcoe_ctlr_link_up(&qedf->ctlr);
3663 else
3664 fc_fabric_login(lport);
3665
3666 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
3667
3668 clear_bit(QEDF_PROBING, &qedf->flags);
3669
3670 /* All good */
3671 return 0;
3672
3673 err7:
3674 if (qedf->ll2_recv_wq)
3675 destroy_workqueue(qedf->ll2_recv_wq);
3676 fc_remove_host(qedf->lport->host);
3677 scsi_remove_host(qedf->lport->host);
3678 #ifdef CONFIG_DEBUG_FS
3679 qedf_dbg_host_exit(&(qedf->dbg_ctx));
3680 #endif
3681 err6:
3682 qedf_cmd_mgr_free(qedf->cmd_mgr);
3683 err5:
3684 qed_ops->stop(qedf->cdev);
3685 err4:
3686 qedf_free_fcoe_pf_param(qedf);
3687 qedf_sync_free_irqs(qedf);
3688 err3:
3689 qed_ops->common->slowpath_stop(qedf->cdev);
3690 err2:
3691 qed_ops->common->remove(qedf->cdev);
3692 err1:
3693 scsi_host_put(lport->host);
3694 err0:
3695 return rc;
3696 }
3697
3698 static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3699 {
3700 return __qedf_probe(pdev, QEDF_MODE_NORMAL);
3701 }
3702
3703 static void __qedf_remove(struct pci_dev *pdev, int mode)
3704 {
3705 struct qedf_ctx *qedf;
3706 int rc;
3707
3708 if (!pdev) {
3709 QEDF_ERR(NULL, "pdev is NULL.\n");
3710 return;
3711 }
3712
3713 qedf = pci_get_drvdata(pdev);
3714
3715 /*
3716 * Prevent race where we're in board disable work and then try to
3717 * rmmod the module.
3718 */
3719 if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
3720 QEDF_ERR(&qedf->dbg_ctx, "Already removing PCI function.\n");
3721 return;
3722 }
3723
3724 if (mode != QEDF_MODE_RECOVERY)
3725 set_bit(QEDF_UNLOADING, &qedf->flags);
3726
3727 /* Logoff the fabric to upload all connections */
3728 if (mode == QEDF_MODE_RECOVERY)
3729 fcoe_ctlr_link_down(&qedf->ctlr);
3730 else
3731 fc_fabric_logoff(qedf->lport);
3732
3733 if (!qedf_wait_for_upload(qedf))
3734 QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n");
3735
3736 #ifdef CONFIG_DEBUG_FS
3737 qedf_dbg_host_exit(&(qedf->dbg_ctx));
3738 #endif
3739
3740 /* Stop any link update handling */
3741 cancel_delayed_work_sync(&qedf->link_update);
3742 destroy_workqueue(qedf->link_update_wq);
3743 qedf->link_update_wq = NULL;
3744
3745 if (qedf->timer_work_queue)
3746 destroy_workqueue(qedf->timer_work_queue);
3747
3748 /* Stop Light L2 */
3749 clear_bit(QEDF_LL2_STARTED, &qedf->flags);
3750 qed_ops->ll2->stop(qedf->cdev);
3751 if (qedf->ll2_recv_wq)
3752 destroy_workqueue(qedf->ll2_recv_wq);
3753
3754 /* Stop fastpath */
3755 qedf_sync_free_irqs(qedf);
3756 qedf_destroy_sb(qedf);
3757
3758 /*
3759 * During recovery don't destroy OS constructs that represent the
3760 * physical port.
3761 */
3762 if (mode != QEDF_MODE_RECOVERY) {
3763 qedf_free_grc_dump_buf(&qedf->grcdump);
3764 qedf_remove_sysfs_ctx_attr(qedf);
3765
3766 /* Remove all SCSI/libfc/libfcoe structures */
3767 fcoe_ctlr_destroy(&qedf->ctlr);
3768 fc_lport_destroy(qedf->lport);
3769 fc_remove_host(qedf->lport->host);
3770 scsi_remove_host(qedf->lport->host);
3771 }
3772
3773 qedf_cmd_mgr_free(qedf->cmd_mgr);
3774
3775 if (mode != QEDF_MODE_RECOVERY) {
3776 fc_exch_mgr_free(qedf->lport);
3777 fc_lport_free_stats(qedf->lport);
3778
3779 /* Wait for all vports to be reaped */
3780 qedf_wait_for_vport_destroy(qedf);
3781 }
3782
3783 /*
3784 * Now that all connections have been uploaded we can stop the
3785 * rest of the qed operations.
3786 */
3787 qed_ops->stop(qedf->cdev);
3788
3789 if (mode != QEDF_MODE_RECOVERY) {
3790 if (qedf->dpc_wq) {
3791 /* Stop general DPC handling */
3792 destroy_workqueue(qedf->dpc_wq);
3793 qedf->dpc_wq = NULL;
3794 }
3795 }
3796
3797 /* Final shutdown for the board */
3798 qedf_free_fcoe_pf_param(qedf);
3799 if (mode != QEDF_MODE_RECOVERY) {
3800 qed_ops->common->set_power_state(qedf->cdev, PCI_D0);
3801 pci_set_drvdata(pdev, NULL);
3802 }
3803
3804 rc = qed_ops->common->update_drv_state(qedf->cdev, false);
3805 if (rc)
3806 QEDF_ERR(&(qedf->dbg_ctx),
3807 "Failed to send drv state to MFW.\n");
3808
3809 qed_ops->common->slowpath_stop(qedf->cdev);
3810 qed_ops->common->remove(qedf->cdev);
3811
3812 mempool_destroy(qedf->io_mempool);
3813
3814 /* Only reap the Scsi_Host on a real removal */
3815 if (mode != QEDF_MODE_RECOVERY)
3816 scsi_host_put(qedf->lport->host);
3817 }
3818
3819 static void qedf_remove(struct pci_dev *pdev)
3820 {
3821 /* Check to make sure this function wasn't already disabled */
3822 if (!atomic_read(&pdev->enable_cnt))
3823 return;
3824
3825 __qedf_remove(pdev, QEDF_MODE_NORMAL);
3826 }
3827
3828 void qedf_wq_grcdump(struct work_struct *work)
3829 {
3830 struct qedf_ctx *qedf =
3831 container_of(work, struct qedf_ctx, grcdump_work.work);
3832
3833 QEDF_ERR(&(qedf->dbg_ctx), "Collecting GRC dump.\n");
3834 qedf_capture_grc_dump(qedf);
3835 }
3836
3837 void qedf_schedule_hw_err_handler(void *dev, enum qed_hw_err_type err_type)
3838 {
3839 struct qedf_ctx *qedf = dev;
3840
3841 QEDF_ERR(&(qedf->dbg_ctx),
3842 "Hardware error handler scheduled, event=%d.\n",
3843 err_type);
3844
3845 if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) {
3846 QEDF_ERR(&(qedf->dbg_ctx),
3847 "Already in recovery, not scheduling board disable work.\n");
3848 return;
3849 }
3850
3851 switch (err_type) {
3852 case QED_HW_ERR_FAN_FAIL:
3853 schedule_delayed_work(&qedf->board_disable_work, 0);
3854 break;
3855 case QED_HW_ERR_MFW_RESP_FAIL:
3856 case QED_HW_ERR_HW_ATTN:
3857 case QED_HW_ERR_DMAE_FAIL:
3858 case QED_HW_ERR_FW_ASSERT:
3859 /* Prevent HW attentions from being reasserted */
3860 qed_ops->common->attn_clr_enable(qedf->cdev, true);
3861 break;
3862 case QED_HW_ERR_RAMROD_FAIL:
3863 /* Prevent HW attentions from being reasserted */
3864 qed_ops->common->attn_clr_enable(qedf->cdev, true);
3865
3866 if (qedf_enable_recovery)
3867 qed_ops->common->recovery_process(qedf->cdev);
3868
3869 break;
3870 default:
3871 break;
3872 }
3873 }
3874
3875 /*
3876 * Protocol TLV handler
3877 */
3878 void qedf_get_protocol_tlv_data(void *dev, void *data)
3879 {
3880 struct qedf_ctx *qedf = dev;
3881 struct qed_mfw_tlv_fcoe *fcoe = data;
3882 struct fc_lport *lport;
3883 struct Scsi_Host *host;
3884 struct fc_host_attrs *fc_host;
3885 struct fc_host_statistics *hst;
3886
3887 if (!qedf) {
3888 QEDF_ERR(NULL, "qedf is NULL.\n");
3889 return;
3890 }
3891
3892 if (test_bit(QEDF_PROBING, &qedf->flags)) {
3893 QEDF_ERR(&qedf->dbg_ctx, "Function is still probing.\n");
3894 return;
3895 }
3896
3897 lport = qedf->lport;
3898 host = lport->host;
3899 fc_host = shost_to_fc_host(host);
3900
3901 /* Force a refresh of the fc_host stats including offload stats */
3902 hst = qedf_fc_get_host_stats(host);
3903
3904 fcoe->qos_pri_set = true;
3905 fcoe->qos_pri = 3; /* Hard coded to 3 in driver */
3906
3907 fcoe->ra_tov_set = true;
3908 fcoe->ra_tov = lport->r_a_tov;
3909
3910 fcoe->ed_tov_set = true;
3911 fcoe->ed_tov = lport->e_d_tov;
3912
3913 fcoe->npiv_state_set = true;
3914 fcoe->npiv_state = 1; /* NPIV always enabled */
3915
3916 fcoe->num_npiv_ids_set = true;
3917 fcoe->num_npiv_ids = fc_host->npiv_vports_inuse;
3918
3919 /* Certain attributes we only want to set if we've selected an FCF */
3920 if (qedf->ctlr.sel_fcf) {
3921 fcoe->switch_name_set = true;
3922 u64_to_wwn(qedf->ctlr.sel_fcf->switch_name, fcoe->switch_name);
3923 }
3924
3925 fcoe->port_state_set = true;
3926 /* For qedf the port is either link down or fabric-attached */
3927 if (lport->link_up)
3928 fcoe->port_state = QED_MFW_TLV_PORT_STATE_FABRIC;
3929 else
3930 fcoe->port_state = QED_MFW_TLV_PORT_STATE_OFFLINE;
3931
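/* The TLV counter is only 16 bits wide, so the 64-bit fc_host counter is truncated. */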
3932 fcoe->link_failures_set = true;
3933 fcoe->link_failures = (u16)hst->link_failure_count;
3934
3935 fcoe->fcoe_txq_depth_set = true;
3936 fcoe->fcoe_rxq_depth_set = true;
3937 fcoe->fcoe_rxq_depth = FCOE_PARAMS_NUM_TASKS;
3938 fcoe->fcoe_txq_depth = FCOE_PARAMS_NUM_TASKS;
3939
3940 fcoe->fcoe_rx_frames_set = true;
3941 fcoe->fcoe_rx_frames = hst->rx_frames;
3942
3943 fcoe->fcoe_tx_frames_set = true;
3944 fcoe->fcoe_tx_frames = hst->tx_frames;
3945
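/* fc_host accounts FCP traffic in megabytes; the TLVs report bytes. */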
3946 fcoe->fcoe_rx_bytes_set = true;
3947 fcoe->fcoe_rx_bytes = hst->fcp_input_megabytes * 1000000;
3948
3949 fcoe->fcoe_tx_bytes_set = true;
3950 fcoe->fcoe_tx_bytes = hst->fcp_output_megabytes * 1000000;
3951
3952 fcoe->crc_count_set = true;
3953 fcoe->crc_count = hst->invalid_crc_count;
3954
3955 fcoe->tx_abts_set = true;
3956 fcoe->tx_abts = hst->fcp_packet_aborts;
3957
3958 fcoe->tx_lun_rst_set = true;
3959 fcoe->tx_lun_rst = qedf->lun_resets;
3960
3961 fcoe->abort_task_sets_set = true;
3962 fcoe->abort_task_sets = qedf->packet_aborts;
3963
3964 fcoe->scsi_busy_set = true;
3965 fcoe->scsi_busy = qedf->busy;
3966
3967 fcoe->scsi_tsk_full_set = true;
3968 fcoe->scsi_tsk_full = qedf->task_set_fulls;
3969 }
3970
3971 /* Deferred work function to perform soft context reset on STAG change */
3972 void qedf_stag_change_work(struct work_struct *work)
3973 {
3974 struct qedf_ctx *qedf =
3975 container_of(work, struct qedf_ctx, stag_work.work);
3976
3977 if (!qedf) {
3978 QEDF_ERR(NULL, "qedf is NULL.\n");
3979 return;
3980 }
3981 QEDF_ERR(&qedf->dbg_ctx, "Performing software context reset.\n");
3982 qedf_ctx_soft_reset(qedf->lport);
3983 }
3984
3985 static void qedf_shutdown(struct pci_dev *pdev)
3986 {
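/* A shutdown is handled as a full removal of the PCI function. */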
3987 __qedf_remove(pdev, QEDF_MODE_NORMAL);
3988 }
3989
3990 /*
3991 * Recovery handler code
3992 */
3993 static void qedf_schedule_recovery_handler(void *dev)
3994 {
3995 struct qedf_ctx *qedf = dev;
3996
3997 QEDF_ERR(&qedf->dbg_ctx, "Recovery handler scheduled.\n");
3998 schedule_delayed_work(&qedf->recovery_work, 0);
3999 }
4000
4001 static void qedf_recovery_handler(struct work_struct *work)
4002 {
4003 struct qedf_ctx *qedf =
4004 container_of(work, struct qedf_ctx, recovery_work.work);
4005
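/* Only one recovery pass may be in flight at a time. */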
4006 if (test_and_set_bit(QEDF_IN_RECOVERY, &qedf->flags))
4007 return;
4008
4009 /*
4010 * Call common_ops->recovery_prolog to allow the MFW to quiesce
4011 * any PCI transactions.
4012 */
4013 qed_ops->common->recovery_prolog(qedf->cdev);
4014
4015 QEDF_ERR(&qedf->dbg_ctx, "Recovery work start.\n");
4016 __qedf_remove(qedf->pdev, QEDF_MODE_RECOVERY);
4017 /*
4018 * Reset link and dcbx to down state since we will not get a link down
4019 * event from the MFW but calling __qedf_remove will essentially be a
4020 * link down event.
4021 */
4022 atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
4023 atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
4024 __qedf_probe(qedf->pdev, QEDF_MODE_RECOVERY);
4025 clear_bit(QEDF_IN_RECOVERY, &qedf->flags);
4026 QEDF_ERR(&qedf->dbg_ctx, "Recovery work complete.\n");
4027 }
4028
4029 /* Generic TLV data callback */
4030 void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
4031 {
4032 struct qedf_ctx *qedf;
4033
4034 if (!dev) {
4035 QEDF_INFO(NULL, QEDF_LOG_EVT,
4036 "dev is NULL so ignoring get_generic_tlv_data request.\n");
4037 return;
4038 }
4039 qedf = (struct qedf_ctx *)dev;
4040
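/* Only the primary MAC address is reported in the generic TLV data. */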
4041 memset(data, 0, sizeof(struct qed_generic_tlvs));
4042 ether_addr_copy(data->mac[0], qedf->mac);
4043 }
4044
4045 /*
4046 * Module Init/Remove
4047 */
4048
4049 static int __init qedf_init(void)
4050 {
4051 int ret;
4052
4053 /* If debug=1 passed, set the default log mask */
4054 if (qedf_debug == QEDF_LOG_DEFAULT)
4055 qedf_debug = QEDF_DEFAULT_LOG_MASK;
4056
4057 /*
4058 * Check that default prio for FIP/FCoE traffic is between 0..7 if a
4059 * value has been set
4060 */
4061 if (qedf_default_prio > -1)
4062 if (qedf_default_prio > 7) {
4063 qedf_default_prio = QEDF_DEFAULT_PRIO;
4064 QEDF_ERR(NULL, "FCoE/FIP priority out of range, resetting to %d.\n",
4065 QEDF_DEFAULT_PRIO);
4066 }
4067
4068 /* Print driver banner */
4069 QEDF_INFO(NULL, QEDF_LOG_INFO, "%s v%s.\n", QEDF_DESCR,
4070 QEDF_VERSION);
4071
4072 /* Create kmem_cache for qedf_io_work structs */
4073 qedf_io_work_cache = kmem_cache_create("qedf_io_work_cache",
4074 sizeof(struct qedf_io_work), 0, SLAB_HWCACHE_ALIGN, NULL);
4075 if (!qedf_io_work_cache) {
4076 QEDF_ERR(NULL, "Failed to create qedf_io_work_cache.\n");
4077 goto err1;
4078 }
4079 QEDF_INFO(NULL, QEDF_LOG_DISC, "qedf_io_work_cache=%p.\n",
4080 qedf_io_work_cache);
4081
4082 qed_ops = qed_get_fcoe_ops();
4083 if (!qed_ops) {
4084 QEDF_ERR(NULL, "Failed to get qed fcoe operations\n");
4085 goto err1;
4086 }
4087
4088 #ifdef CONFIG_DEBUG_FS
4089 qedf_dbg_init("qedf");
4090 #endif
4091
4092 qedf_fc_transport_template =
4093 fc_attach_transport(&qedf_fc_transport_fn);
4094 if (!qedf_fc_transport_template) {
4095 QEDF_ERR(NULL, "Could not register with FC transport\n");
4096 goto err2;
4097 }
4098
4099 qedf_fc_vport_transport_template =
4100 fc_attach_transport(&qedf_fc_vport_transport_fn);
4101 if (!qedf_fc_vport_transport_template) {
4102 QEDF_ERR(NULL, "Could not register vport template with FC "
4103 "transport\n");
4104 goto err3;
4105 }
4106
4107 qedf_io_wq = create_workqueue("qedf_io_wq");
4108 if (!qedf_io_wq) {
4109 QEDF_ERR(NULL, "Could not create qedf_io_wq.\n");
4110 goto err4;
4111 }
4112
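/* Export the login failure counter to qed through the callback table. */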
4113 qedf_cb_ops.get_login_failures = qedf_get_login_failures;
4114
4115 ret = pci_register_driver(&qedf_pci_driver);
4116 if (ret) {
4117 QEDF_ERR(NULL, "Failed to register driver\n");
4118 goto err5;
4119 }
4120
4121 return 0;
4122
4123 err5:
4124 destroy_workqueue(qedf_io_wq);
4125 err4:
4126 fc_release_transport(qedf_fc_vport_transport_template);
4127 err3:
4128 fc_release_transport(qedf_fc_transport_template);
4129 err2:
4130 #ifdef CONFIG_DEBUG_FS
4131 qedf_dbg_exit();
4132 #endif
4133 qed_put_fcoe_ops();
4134 err1:
4135 return -EINVAL;
4136 }
4137
4138 static void __exit qedf_cleanup(void)
4139 {
4140 pci_unregister_driver(&qedf_pci_driver);
4141
4142 destroy_workqueue(qedf_io_wq);
4143
4144 fc_release_transport(qedf_fc_vport_transport_template);
4145 fc_release_transport(qedf_fc_transport_template);
4146 #ifdef CONFIG_DEBUG_FS
4147 qedf_dbg_exit();
4148 #endif
4149 qed_put_fcoe_ops();
4150
4151 kmem_cache_destroy(qedf_io_work_cache);
4152 }
4153
4154 MODULE_LICENSE("GPL");
4155 MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx FCoE Module");
4156 MODULE_AUTHOR("QLogic Corporation");
4157 MODULE_VERSION(QEDF_VERSION);
4158 module_init(qedf_init);
4159 module_exit(qedf_cleanup);
4160