1 /*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 */
18 #include <linux/errno.h>
19 #include <linux/pci.h>
20 #include <linux/slab.h>
21 #include <linux/skbuff.h>
22 #include <linux/interrupt.h>
23 #include <linux/spinlock.h>
24 #include <linux/if_ether.h>
25 #include <linux/if_vlan.h>
26 #include <linux/workqueue.h>
27 #include <scsi/fc/fc_fip.h>
28 #include <scsi/fc/fc_els.h>
29 #include <scsi/fc/fc_fcoe.h>
30 #include <scsi/fc_frame.h>
31 #include <scsi/libfc.h>
32 #include "fnic_io.h"
33 #include "fnic.h"
34 #include "fnic_fip.h"
35 #include "cq_enet_desc.h"
36 #include "cq_exch_desc.h"
37
38 static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
39 struct workqueue_struct *fnic_fip_queue;
40 struct workqueue_struct *fnic_event_queue;
41
42 static void fnic_set_eth_mode(struct fnic *);
43 static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
44 static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
45 static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
46 static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
47 static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);
48
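/**
 * fnic_handle_link() - handle link up/down events (link_work handler).
 * @work: the link_work member of the fnic.
 *
 * Reads the current link state and link-down count from the firmware and
 * notifies the FCoE controller of the resulting transition. When the
 * adapter is FIP capable, a link up (or a link bounce) restarts FCoE VLAN
 * discovery instead of reporting link up directly.
 */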
49 void fnic_handle_link(struct work_struct *work)
50 {
51 struct fnic *fnic = container_of(work, struct fnic, link_work);
52 unsigned long flags;
53 int old_link_status;
54 u32 old_link_down_cnt;
55
56 spin_lock_irqsave(&fnic->fnic_lock, flags);
57
58 if (fnic->stop_rx_link_events) {
59 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
60 return;
61 }
62
63 old_link_down_cnt = fnic->link_down_cnt;
64 old_link_status = fnic->link_status;
65 fnic->link_status = vnic_dev_link_status(fnic->vdev);
66 fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);
67
68 if (old_link_status == fnic->link_status) {
69 if (!fnic->link_status)
70 /* DOWN -> DOWN */
71 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
72 else {
73 if (old_link_down_cnt != fnic->link_down_cnt) {
74 /* UP -> DOWN -> UP */
75 fnic->lport->host_stats.link_failure_count++;
76 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
77 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
78 "link down\n");
79 fcoe_ctlr_link_down(&fnic->ctlr);
80 if (fnic->config.flags & VFCF_FIP_CAPABLE) {
81 /* start FCoE VLAN discovery */
82 fnic_fcoe_send_vlan_req(fnic);
83 return;
84 }
85 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
86 "link up\n");
87 fcoe_ctlr_link_up(&fnic->ctlr);
88 } else
89 /* UP -> UP */
90 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
91 }
92 } else if (fnic->link_status) {
93 /* DOWN -> UP */
94 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
95 if (fnic->config.flags & VFCF_FIP_CAPABLE) {
96 /* start FCoE VLAN discovery */
97 fnic_fcoe_send_vlan_req(fnic);
98 return;
99 }
100 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
101 fcoe_ctlr_link_up(&fnic->ctlr);
102 } else {
103 /* UP -> DOWN */
104 fnic->lport->host_stats.link_failure_count++;
105 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
106 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
107 fcoe_ctlr_link_down(&fnic->ctlr);
108 }
109
110 }
111
112 /*
113 * This function passes incoming fabric frames to libFC
114 */
115 void fnic_handle_frame(struct work_struct *work)
116 {
117 struct fnic *fnic = container_of(work, struct fnic, frame_work);
118 struct fc_lport *lp = fnic->lport;
119 unsigned long flags;
120 struct sk_buff *skb;
121 struct fc_frame *fp;
122
123 while ((skb = skb_dequeue(&fnic->frame_queue))) {
124
125 spin_lock_irqsave(&fnic->fnic_lock, flags);
126 if (fnic->stop_rx_link_events) {
127 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
128 dev_kfree_skb(skb);
129 return;
130 }
131 fp = (struct fc_frame *)skb;
132
133 /*
134 * If we're in a transitional state, just re-queue and return.
135 * The queue will be serviced when we get to a stable state.
136 */
137 if (fnic->state != FNIC_IN_FC_MODE &&
138 fnic->state != FNIC_IN_ETH_MODE) {
139 skb_queue_head(&fnic->frame_queue, skb);
140 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
141 return;
142 }
143 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
144
145 fc_exch_recv(lp, fp);
146 }
147 }
148
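/**
 * fnic_fcoe_evlist_free() - free all events queued on the fnic event list.
 * @fnic: fnic instance.
 *
 * Discards pending events without processing them.
 */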
149 void fnic_fcoe_evlist_free(struct fnic *fnic)
150 {
151 struct fnic_event *fevt = NULL;
152 struct fnic_event *next = NULL;
153 unsigned long flags;
154
155 spin_lock_irqsave(&fnic->fnic_lock, flags);
156 if (list_empty(&fnic->evlist)) {
157 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
158 return;
159 }
160
161 list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
162 list_del(&fevt->list);
163 kfree(fevt);
164 }
165 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
166 }
167
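/**
 * fnic_handle_event() - process queued fnic events (event_work handler).
 * @work: the event_work member of the fnic.
 *
 * Dispatches FNIC_EVT_START_VLAN_DISC and FNIC_EVT_START_FCF_DISC events.
 * Events are left on the list while the fnic is in a transitional state,
 * and are discarded once rx/link events have been stopped.
 */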
168 void fnic_handle_event(struct work_struct *work)
169 {
170 struct fnic *fnic = container_of(work, struct fnic, event_work);
171 struct fnic_event *fevt = NULL;
172 struct fnic_event *next = NULL;
173 unsigned long flags;
174
175 spin_lock_irqsave(&fnic->fnic_lock, flags);
176 if (list_empty(&fnic->evlist)) {
177 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
178 return;
179 }
180
181 list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
182 if (fnic->stop_rx_link_events) {
183 list_del(&fevt->list);
184 kfree(fevt);
185 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
186 return;
187 }
188 /*
189 * If we're in a transitional state, just re-queue and return.
190 * The queue will be serviced when we get to a stable state.
191 */
192 if (fnic->state != FNIC_IN_FC_MODE &&
193 fnic->state != FNIC_IN_ETH_MODE) {
194 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
195 return;
196 }
197
198 list_del(&fevt->list);
199 switch (fevt->event) {
200 case FNIC_EVT_START_VLAN_DISC:
201 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
202 fnic_fcoe_send_vlan_req(fnic);
203 spin_lock_irqsave(&fnic->fnic_lock, flags);
204 break;
205 case FNIC_EVT_START_FCF_DISC:
206 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
207 "Start FCF Discovery\n");
208 fnic_fcoe_start_fcf_disc(fnic);
209 break;
210 default:
211 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
212 "Unknown event 0x%x\n", fevt->event);
213 break;
214 }
215 kfree(fevt);
216 }
217 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
218 }
219
220 /**
221 * is_fnic_fip_flogi_reject() - check if the received FIP FLOGI frame is rejected
222 * @fip: The FCoE controller that received the frame
223 * @skb: The received FIP frame
224 *
225 * Returns non-zero if the frame is rejected with an unsupported command
226 * and insufficient resources ELS explanation.
227 */
228 static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
229 struct sk_buff *skb)
230 {
231 struct fc_lport *lport = fip->lp;
232 struct fip_header *fiph;
233 struct fc_frame_header *fh = NULL;
234 struct fip_desc *desc;
235 struct fip_encaps *els;
236 enum fip_desc_type els_dtype = 0;
237 u16 op;
238 u8 els_op;
239 u8 sub;
240
241 size_t els_len = 0;
242 size_t rlen;
243 size_t dlen = 0;
244
245 if (skb_linearize(skb))
246 return 0;
247
248 if (skb->len < sizeof(*fiph))
249 return 0;
250
251 fiph = (struct fip_header *)skb->data;
252 op = ntohs(fiph->fip_op);
253 sub = fiph->fip_subcode;
254
255 if (op != FIP_OP_LS)
256 return 0;
257
258 if (sub != FIP_SC_REP)
259 return 0;
260
261 rlen = ntohs(fiph->fip_dl_len) * 4;
262 if (rlen + sizeof(*fiph) > skb->len)
263 return 0;
264
265 desc = (struct fip_desc *)(fiph + 1);
266 dlen = desc->fip_dlen * FIP_BPW;
267
268 if (desc->fip_dtype == FIP_DT_FLOGI) {
269
270 shost_printk(KERN_DEBUG, lport->host,
271 " FIP TYPE FLOGI: fab name:%llx "
272 "vfid:%d map:%x\n",
273 fip->sel_fcf->fabric_name, fip->sel_fcf->vfid,
274 fip->sel_fcf->fc_map);
275 if (dlen < sizeof(*els) + sizeof(*fh) + 1)
276 return 0;
277
278 els_len = dlen - sizeof(*els);
279 els = (struct fip_encaps *)desc;
280 fh = (struct fc_frame_header *)(els + 1);
281 els_dtype = desc->fip_dtype;
282
283 if (!fh)
284 return 0;
285
286 /*
287 * The ELS command code, reason and explanation should be: reject,
288 * unsupported command and insufficient resources.
289 */
290 els_op = *(u8 *)(fh + 1);
291 if (els_op == ELS_LS_RJT) {
292 shost_printk(KERN_INFO, lport->host,
293 "Flogi Request Rejected by Switch\n");
294 return 1;
295 }
296 shost_printk(KERN_INFO, lport->host,
297 "Flogi Request Accepted by Switch\n");
298 }
299 return 0;
300 }
301
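/*
 * Build and send a FIP VLAN discovery request to the ALL-FCF-MACs address,
 * carrying our MAC address and node name descriptors, then arm the FIP
 * timer so the request is retried if no response arrives.
 */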
302 static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
303 {
304 struct fcoe_ctlr *fip = &fnic->ctlr;
305 struct sk_buff *skb;
306 char *eth_fr;
307 int fr_len;
308 struct fip_vlan *vlan;
309 u64 vlan_tov;
310
311 fnic_fcoe_reset_vlans(fnic);
312 fnic->set_vlan(fnic, 0);
313 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
314 "Sending VLAN request...\n");
315 skb = dev_alloc_skb(sizeof(struct fip_vlan));
316 if (!skb)
317 return;
318
319 fr_len = sizeof(*vlan);
320 eth_fr = (char *)skb->data;
321 vlan = (struct fip_vlan *)eth_fr;
322
323 memset(vlan, 0, sizeof(*vlan));
324 memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
325 memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
326 vlan->eth.h_proto = htons(ETH_P_FIP);
327
328 vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
329 vlan->fip.fip_op = htons(FIP_OP_VLAN);
330 vlan->fip.fip_subcode = FIP_SC_VL_REQ;
331 vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);
332
333 vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
334 vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
335 memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);
336
337 vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
338 vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
339 put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);
340
341 skb_put(skb, sizeof(*vlan));
342 skb->protocol = htons(ETH_P_FIP);
343 skb_reset_mac_header(skb);
344 skb_reset_network_header(skb);
345 fip->send(fip, skb);
346
347 /* set a timer so that we can retry if there is no response */
348 vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
349 mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
350 }
351
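/*
 * Handle a FIP VLAN notification: record every VLAN descriptor on
 * fnic->vlans, program the first discovered VLAN into the hardware and
 * start FIP solicitation on it. If no VLAN descriptor is present, the FIP
 * timer retries VLAN discovery.
 */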
352 static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
353 {
354 struct fcoe_ctlr *fip = &fnic->ctlr;
355 struct fip_header *fiph;
356 struct fip_desc *desc;
357 u16 vid;
358 size_t rlen;
359 size_t dlen;
360 struct fcoe_vlan *vlan;
361 u64 sol_time;
362 unsigned long flags;
363
364 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
365 "Received VLAN response...\n");
366
367 fiph = (struct fip_header *) skb->data;
368
369 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
370 "Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
371 ntohs(fiph->fip_op), fiph->fip_subcode);
372
373 rlen = ntohs(fiph->fip_dl_len) * 4;
374 fnic_fcoe_reset_vlans(fnic);
375 spin_lock_irqsave(&fnic->vlans_lock, flags);
376 desc = (struct fip_desc *)(fiph + 1);
377 while (rlen > 0) {
378 dlen = desc->fip_dlen * FIP_BPW;
379 switch (desc->fip_dtype) {
380 case FIP_DT_VLAN:
381 vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
382 shost_printk(KERN_INFO, fnic->lport->host,
383 "process_vlan_resp: FIP VLAN %d\n", vid);
384 vlan = kmalloc(sizeof(*vlan),
385 GFP_ATOMIC);
386 if (!vlan) {
387 /* retry from timer */
388 spin_unlock_irqrestore(&fnic->vlans_lock,
389 flags);
390 goto out;
391 }
392 memset(vlan, 0, sizeof(struct fcoe_vlan));
393 vlan->vid = vid & 0x0fff;
394 vlan->state = FIP_VLAN_AVAIL;
395 list_add_tail(&vlan->list, &fnic->vlans);
396 break;
397 }
398 desc = (struct fip_desc *)((char *)desc + dlen);
399 rlen -= dlen;
400 }
401
402 /* any VLAN descriptors present? */
403 if (list_empty(&fnic->vlans)) {
404 /* retry from timer */
405 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
406 "No VLAN descriptors in FIP VLAN response\n");
407 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
408 goto out;
409 }
410
411 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
412 fnic->set_vlan(fnic, vlan->vid);
413 vlan->state = FIP_VLAN_SENT; /* sent now */
414 vlan->sol_count++;
415 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
416
417 /* start the solicitation */
418 fcoe_ctlr_link_up(fip);
419
420 sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
421 mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
422 out:
423 return;
424 }
425
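/*
 * Start FCF discovery on the currently selected VLAN: program it into the
 * hardware, mark it as solicited and report link up to the FCoE controller
 * so that FIP solicitations go out.
 */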
426 static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
427 {
428 unsigned long flags;
429 struct fcoe_vlan *vlan;
430 u64 sol_time;
431
432 spin_lock_irqsave(&fnic->vlans_lock, flags);
433 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
434 fnic->set_vlan(fnic, vlan->vid);
435 vlan->state = FIP_VLAN_SENT; /* sent now */
436 vlan->sol_count = 1;
437 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
438
439 /* start the solicitation */
440 fcoe_ctlr_link_up(&fnic->ctlr);
441
442 sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
443 mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
444 }
445
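/*
 * Called when a FIP advertisement is received: if a VLAN solicitation is
 * outstanding, mark the current VLAN as in use. Returns 0 when the current
 * VLAN is usable (already used or just solicited) and -EINVAL otherwise.
 */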
446 static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
447 {
448 unsigned long flags;
449 struct fcoe_vlan *fvlan;
450
451 spin_lock_irqsave(&fnic->vlans_lock, flags);
452 if (list_empty(&fnic->vlans)) {
453 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
454 return -EINVAL;
455 }
456
457 fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
458 if (fvlan->state == FIP_VLAN_USED) {
459 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
460 return 0;
461 }
462
463 if (fvlan->state == FIP_VLAN_SENT) {
464 fvlan->state = FIP_VLAN_USED;
465 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
466 return 0;
467 }
468 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
469 return -EINVAL;
470 }
471
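/*
 * Queue a fnic event (such as FNIC_EVT_START_VLAN_DISC) on fnic->evlist
 * and schedule event_work to process it. The allocation is GFP_ATOMIC so
 * this can be called from atomic context.
 */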
472 static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
473 {
474 struct fnic_event *fevt;
475 unsigned long flags;
476
477 fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
478 if (!fevt)
479 return;
480
481 fevt->fnic = fnic;
482 fevt->event = ev;
483
484 spin_lock_irqsave(&fnic->fnic_lock, flags);
485 list_add_tail(&fevt->list, &fnic->evlist);
486 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
487
488 schedule_work(&fnic->event_work);
489 }
490
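/*
 * Pre-process a received FIP frame. A return value <= 0 tells the caller
 * to free the skb; a positive return means the frame should also be
 * handed to the FCoE controller via fcoe_ctlr_recv().
 */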
491 static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
492 {
493 struct fip_header *fiph;
494 int ret = 1;
495 u16 op;
496 u8 sub;
497
498 if (!skb || !(skb->data))
499 return -1;
500
501 if (skb_linearize(skb))
502 goto drop;
503
504 fiph = (struct fip_header *)skb->data;
505 op = ntohs(fiph->fip_op);
506 sub = fiph->fip_subcode;
507
508 if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
509 goto drop;
510
511 if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
512 goto drop;
513
514 if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
515 if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
516 goto drop;
517 /* pass it on to fcoe */
518 ret = 1;
519 } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_REP) {
520 /* set the vlan as used */
521 fnic_fcoe_process_vlan_resp(fnic, skb);
522 ret = 0;
523 } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
524 /* received CVL request, restart vlan disc */
525 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
526 /* pass it on to fcoe */
527 ret = 1;
528 }
529 drop:
530 return ret;
531 }
532
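/**
 * fnic_handle_fip_frame() - drain the FIP frame queue (fip_frame_work handler).
 * @work: the fip_frame_work member of the fnic.
 *
 * Pre-processes each queued FIP frame, restarts VLAN discovery when a
 * FLOGI reject is seen, and passes the remaining frames to the FCoE
 * controller.
 */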
533 void fnic_handle_fip_frame(struct work_struct *work)
534 {
535 struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
536 unsigned long flags;
537 struct sk_buff *skb;
538 struct ethhdr *eh;
539
540 while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
541 spin_lock_irqsave(&fnic->fnic_lock, flags);
542 if (fnic->stop_rx_link_events) {
543 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
544 dev_kfree_skb(skb);
545 return;
546 }
547 /*
548 * If we're in a transitional state, just re-queue and return.
549 * The queue will be serviced when we get to a stable state.
550 */
551 if (fnic->state != FNIC_IN_FC_MODE &&
552 fnic->state != FNIC_IN_ETH_MODE) {
553 skb_queue_head(&fnic->fip_frame_queue, skb);
554 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
555 return;
556 }
557 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
558 eh = (struct ethhdr *)skb->data;
559 if (eh->h_proto == htons(ETH_P_FIP)) {
560 skb_pull(skb, sizeof(*eh));
561 if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
562 dev_kfree_skb(skb);
563 continue;
564 }
565 /*
566 * If there is a FLOGI reject, clear all
567 * FCFs and restart from scratch.
568 */
569 if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
570 shost_printk(KERN_INFO, fnic->lport->host,
571 "Trigger a Link down - VLAN Disc\n");
572 fcoe_ctlr_link_down(&fnic->ctlr);
573 /* start FCoE VLAN discovery */
574 fnic_fcoe_send_vlan_req(fnic);
575 dev_kfree_skb(skb);
576 continue;
577 }
578 fcoe_ctlr_recv(&fnic->ctlr, skb);
579 continue;
580 }
581 }
582 }
583
584 /**
585 * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
586 * @fnic: fnic instance.
587 * @skb: Ethernet Frame.
588 */
589 static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
590 {
591 struct fc_frame *fp;
592 struct ethhdr *eh;
593 struct fcoe_hdr *fcoe_hdr;
594 struct fcoe_crc_eof *ft;
595
596 /*
597 * Undo VLAN encapsulation if present.
598 */
599 eh = (struct ethhdr *)skb->data;
600 if (eh->h_proto == htons(ETH_P_8021Q)) {
601 memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
602 eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
603 skb_reset_mac_header(skb);
604 }
605 if (eh->h_proto == htons(ETH_P_FIP)) {
606 if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
607 printk(KERN_ERR "Dropped FIP frame, as firmware "
608 "uses non-FIP mode, Enable FIP "
609 "using UCSM\n");
610 goto drop;
611 }
612 skb_queue_tail(&fnic->fip_frame_queue, skb);
613 queue_work(fnic_fip_queue, &fnic->fip_frame_work);
614 return 1; /* let caller know packet was used */
615 }
616 if (eh->h_proto != htons(ETH_P_FCOE))
617 goto drop;
618 skb_set_network_header(skb, sizeof(*eh));
619 skb_pull(skb, sizeof(*eh));
620
621 fcoe_hdr = (struct fcoe_hdr *)skb->data;
622 if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
623 goto drop;
624
625 fp = (struct fc_frame *)skb;
626 fc_frame_init(fp);
627 fr_sof(fp) = fcoe_hdr->fcoe_sof;
628 skb_pull(skb, sizeof(struct fcoe_hdr));
629 skb_reset_transport_header(skb);
630
631 ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
632 fr_eof(fp) = ft->fcoe_eof;
633 skb_trim(skb, skb->len - sizeof(*ft));
634 return 0;
635 drop:
636 dev_kfree_skb_irq(skb);
637 return -1;
638 }
639
640 /**
641 * fnic_update_mac_locked() - set data MAC address and filters.
642 * @fnic: fnic instance.
643 * @new: newly-assigned FCoE MAC address.
644 *
645 * Called with the fnic lock held.
646 */
647 void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
648 {
649 u8 *ctl = fnic->ctlr.ctl_src_addr;
650 u8 *data = fnic->data_src_addr;
651
652 if (is_zero_ether_addr(new))
653 new = ctl;
654 if (!compare_ether_addr(data, new))
655 return;
656 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
657 if (!is_zero_ether_addr(data) && compare_ether_addr(data, ctl))
658 vnic_dev_del_addr(fnic->vdev, data);
659 memcpy(data, new, ETH_ALEN);
660 if (compare_ether_addr(new, ctl))
661 vnic_dev_add_addr(fnic->vdev, new);
662 }
663
664 /**
665 * fnic_update_mac() - set data MAC address and filters.
666 * @lport: local port.
667 * @new: newly-assigned FCoE MAC address.
668 */
669 void fnic_update_mac(struct fc_lport *lport, u8 *new)
670 {
671 struct fnic *fnic = lport_priv(lport);
672
673 spin_lock_irq(&fnic->fnic_lock);
674 fnic_update_mac_locked(fnic, new);
675 spin_unlock_irq(&fnic->fnic_lock);
676 }
677
678 /**
679 * fnic_set_port_id() - set the port_ID after successful FLOGI.
680 * @lport: local port.
681 * @port_id: assigned FC_ID.
682 * @fp: received frame containing the FLOGI accept or NULL.
683 *
684 * This is called from libfc when a new FC_ID has been assigned.
685 * This causes us to reset the firmware to FC_MODE and setup the new MAC
686 * address and FC_ID.
687 *
688 * It is also called with FC_ID 0 when we're logged off.
689 *
690 * If the FC_ID is due to point-to-point, fp may be NULL.
691 */
692 void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
693 {
694 struct fnic *fnic = lport_priv(lport);
695 u8 *mac;
696 int ret;
697
698 FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
699 port_id, fp);
700
701 /*
702 * If we're clearing the FC_ID, change to use the ctl_src_addr.
703 * Set ethernet mode to send FLOGI.
704 */
705 if (!port_id) {
706 fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
707 fnic_set_eth_mode(fnic);
708 return;
709 }
710
711 if (fp) {
712 mac = fr_cb(fp)->granted_mac;
713 if (is_zero_ether_addr(mac)) {
714 /* non-FIP - FLOGI already accepted - ignore return */
715 fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
716 }
717 fnic_update_mac(lport, mac);
718 }
719
720 /* Change state to reflect transition to FC mode */
721 spin_lock_irq(&fnic->fnic_lock);
722 if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
723 fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
724 else {
725 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
726 "Unexpected fnic state %s while"
727 " processing flogi resp\n",
728 fnic_state_to_str(fnic->state));
729 spin_unlock_irq(&fnic->fnic_lock);
730 return;
731 }
732 spin_unlock_irq(&fnic->fnic_lock);
733
734 /*
735 * Send FLOGI registration to firmware to set up FC mode.
736 * The new address will be set up when registration completes.
737 */
738 ret = fnic_flogi_reg_handler(fnic, port_id);
739
740 if (ret < 0) {
741 spin_lock_irq(&fnic->fnic_lock);
742 if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
743 fnic->state = FNIC_IN_ETH_MODE;
744 spin_unlock_irq(&fnic->fnic_lock);
745 }
746 }
747
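/*
 * Completion handler for a received frame. CQ_DESC_TYPE_RQ_FCP completions
 * carry FC frames whose Ethernet headers were already stripped by the
 * firmware; CQ_DESC_TYPE_RQ_ENET completions carry raw Ethernet frames
 * (FCoE or FIP) that still need fnic_import_rq_eth_pkt(). Good FC frames
 * are queued on fnic->frame_queue for fnic_handle_frame(); frames with
 * errors are dropped.
 */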
748 static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
749 *cq_desc, struct vnic_rq_buf *buf,
750 int skipped __attribute__((unused)),
751 void *opaque)
752 {
753 struct fnic *fnic = vnic_dev_priv(rq->vdev);
754 struct sk_buff *skb;
755 struct fc_frame *fp;
756 unsigned int eth_hdrs_stripped;
757 u8 type, color, eop, sop, ingress_port, vlan_stripped;
758 u8 fcoe = 0, fcoe_sof, fcoe_eof;
759 u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
760 u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
761 u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
762 u8 fcs_ok = 1, packet_error = 0;
763 u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
764 u32 rss_hash;
765 u16 exchange_id, tmpl;
766 u8 sof = 0;
767 u8 eof = 0;
768 u32 fcp_bytes_written = 0;
769 unsigned long flags;
770
771 pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
772 PCI_DMA_FROMDEVICE);
773 skb = buf->os_buf;
774 fp = (struct fc_frame *)skb;
775 buf->os_buf = NULL;
776
777 cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
778 if (type == CQ_DESC_TYPE_RQ_FCP) {
779 cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
780 &type, &color, &q_number, &completed_index,
781 &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
782 &tmpl, &fcp_bytes_written, &sof, &eof,
783 &ingress_port, &packet_error,
784 &fcoe_enc_error, &fcs_ok, &vlan_stripped,
785 &vlan);
786 eth_hdrs_stripped = 1;
787 skb_trim(skb, fcp_bytes_written);
788 fr_sof(fp) = sof;
789 fr_eof(fp) = eof;
790
791 } else if (type == CQ_DESC_TYPE_RQ_ENET) {
792 cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
793 &type, &color, &q_number, &completed_index,
794 &ingress_port, &fcoe, &eop, &sop,
795 &rss_type, &csum_not_calc, &rss_hash,
796 &bytes_written, &packet_error,
797 &vlan_stripped, &vlan, &checksum,
798 &fcoe_sof, &fcoe_fc_crc_ok,
799 &fcoe_enc_error, &fcoe_eof,
800 &tcp_udp_csum_ok, &udp, &tcp,
801 &ipv4_csum_ok, &ipv6, &ipv4,
802 &ipv4_fragment, &fcs_ok);
803 eth_hdrs_stripped = 0;
804 skb_trim(skb, bytes_written);
805 if (!fcs_ok) {
806 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
807 "fcs error. dropping packet.\n");
808 goto drop;
809 }
810 if (fnic_import_rq_eth_pkt(fnic, skb))
811 return;
812
813 } else {
814 /* wrong CQ type*/
815 shost_printk(KERN_ERR, fnic->lport->host,
816 "fnic rq_cmpl wrong cq type x%x\n", type);
817 goto drop;
818 }
819
820 if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
821 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
822 "fnic rq_cmpl fcoe x%x fcsok x%x"
823 " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
824 " x%x\n",
825 fcoe, fcs_ok, packet_error,
826 fcoe_fc_crc_ok, fcoe_enc_error);
827 goto drop;
828 }
829
830 spin_lock_irqsave(&fnic->fnic_lock, flags);
831 if (fnic->stop_rx_link_events) {
832 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
833 goto drop;
834 }
835 fr_dev(fp) = fnic->lport;
836 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
837
838 skb_queue_tail(&fnic->frame_queue, skb);
839 queue_work(fnic_event_queue, &fnic->frame_work);
840
841 return;
842 drop:
843 dev_kfree_skb_irq(skb);
844 }
845
846 static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
847 struct cq_desc *cq_desc, u8 type,
848 u16 q_number, u16 completed_index,
849 void *opaque)
850 {
851 struct fnic *fnic = vnic_dev_priv(vdev);
852
853 vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
854 VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
855 NULL);
856 return 0;
857 }
858
859 int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
860 {
861 unsigned int tot_rq_work_done = 0, cur_work_done;
862 unsigned int i;
863 int err;
864
865 for (i = 0; i < fnic->rq_count; i++) {
866 cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
867 fnic_rq_cmpl_handler_cont,
868 NULL);
869 if (cur_work_done) {
870 err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
871 if (err)
872 shost_printk(KERN_ERR, fnic->lport->host,
873 "fnic_alloc_rq_frame can't alloc"
874 " frame\n");
875 }
876 tot_rq_work_done += cur_work_done;
877 }
878
879 return tot_rq_work_done;
880 }
881
882 /*
883 * This function is called once at init time to allocate and fill RQ
884 * buffers. Subsequently, it is called in the interrupt context after RQ
885 * buffer processing to replenish the buffers in the RQ
886 */
887 int fnic_alloc_rq_frame(struct vnic_rq *rq)
888 {
889 struct fnic *fnic = vnic_dev_priv(rq->vdev);
890 struct sk_buff *skb;
891 u16 len;
892 dma_addr_t pa;
893
894 len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
895 skb = dev_alloc_skb(len);
896 if (!skb) {
897 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
898 "Unable to allocate RQ sk_buff\n");
899 return -ENOMEM;
900 }
901 skb_reset_mac_header(skb);
902 skb_reset_transport_header(skb);
903 skb_reset_network_header(skb);
904 skb_put(skb, len);
905 pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
906 fnic_queue_rq_desc(rq, skb, pa, len);
907 return 0;
908 }
909
910 void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
911 {
912 struct fc_frame *fp = buf->os_buf;
913 struct fnic *fnic = vnic_dev_priv(rq->vdev);
914
915 pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
916 PCI_DMA_FROMDEVICE);
917
918 dev_kfree_skb(fp_skb(fp));
919 buf->os_buf = NULL;
920 }
921
922 /**
923 * fnic_eth_send() - Send Ethernet frame.
924 * @fip: fcoe_ctlr instance.
925 * @skb: Ethernet Frame, FIP, without VLAN encapsulation.
926 */
927 void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
928 {
929 struct fnic *fnic = fnic_from_ctlr(fip);
930 struct vnic_wq *wq = &fnic->wq[0];
931 dma_addr_t pa;
932 struct ethhdr *eth_hdr;
933 struct vlan_ethhdr *vlan_hdr;
934 unsigned long flags;
935
936 if (!fnic->vlan_hw_insert) {
937 eth_hdr = (struct ethhdr *)skb_mac_header(skb);
938 vlan_hdr = (struct vlan_ethhdr *)skb_push(skb,
939 sizeof(*vlan_hdr) - sizeof(*eth_hdr));
940 memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
941 vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
942 vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
943 vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
944 }
945
946 pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
947
948 spin_lock_irqsave(&fnic->wq_lock[0], flags);
949 if (!vnic_wq_desc_avail(wq)) {
950 pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE);
951 spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
952 kfree_skb(skb);
953 return;
954 }
955
956 fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
957 0 /* hw inserts cos value */,
958 fnic->vlan_id, 1);
959 spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
960 }
961
962 /*
963 * Send FC frame.
964 */
965 static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
966 {
967 struct vnic_wq *wq = &fnic->wq[0];
968 struct sk_buff *skb;
969 dma_addr_t pa;
970 struct ethhdr *eth_hdr;
971 struct vlan_ethhdr *vlan_hdr;
972 struct fcoe_hdr *fcoe_hdr;
973 struct fc_frame_header *fh;
974 u32 tot_len, eth_hdr_len;
975 int ret = 0;
976 unsigned long flags;
977
978 fh = fc_frame_header_get(fp);
979 skb = fp_skb(fp);
980
981 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
982 fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
983 return 0;
984
985 if (!fnic->vlan_hw_insert) {
986 eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
987 vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len);
988 eth_hdr = (struct ethhdr *)vlan_hdr;
989 vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
990 vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
991 vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
992 fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
993 } else {
994 eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
995 eth_hdr = (struct ethhdr *)skb_push(skb, eth_hdr_len);
996 eth_hdr->h_proto = htons(ETH_P_FCOE);
997 fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
998 }
999
1000 if (fnic->ctlr.map_dest)
1001 fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
1002 else
1003 memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
1004 memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);
1005
1006 tot_len = skb->len;
1007 BUG_ON(tot_len % 4);
1008
1009 memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
1010 fcoe_hdr->fcoe_sof = fr_sof(fp);
1011 if (FC_FCOE_VER)
1012 FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);
1013
1014 pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);
1015
1016 spin_lock_irqsave(&fnic->wq_lock[0], flags);
1017
1018 if (!vnic_wq_desc_avail(wq)) {
1019 pci_unmap_single(fnic->pdev, pa,
1020 tot_len, PCI_DMA_TODEVICE);
1021 ret = -1;
1022 goto fnic_send_frame_end;
1023 }
1024
1025 fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
1026 0 /* hw inserts cos value */,
1027 fnic->vlan_id, 1, 1, 1);
1028 fnic_send_frame_end:
1029 spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
1030
1031 if (ret)
1032 dev_kfree_skb_any(fp_skb(fp));
1033
1034 return ret;
1035 }
1036
1037 /*
1038 * fnic_send
1039 * Routine to send a raw frame
1040 */
1041 int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
1042 {
1043 struct fnic *fnic = lport_priv(lp);
1044 unsigned long flags;
1045
1046 if (fnic->in_remove) {
1047 dev_kfree_skb(fp_skb(fp));
1048 return -1;
1049 }
1050
1051 /*
1052 * Queue frame if in a transitional state.
1053 * This occurs while registering the Port_ID / MAC address after FLOGI.
1054 */
1055 spin_lock_irqsave(&fnic->fnic_lock, flags);
1056 if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
1057 skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
1058 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1059 return 0;
1060 }
1061 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1062
1063 return fnic_send_frame(fnic, fp);
1064 }
1065
1066 /**
1067 * fnic_flush_tx() - send queued frames.
1068 * @fnic: fnic device
1069 *
1070 * Send frames that were waiting to go out in FC or Ethernet mode.
1071 * Whenever changing modes we purge queued frames, so these frames should
1072 * be queued for the stable mode that we're in, either FC or Ethernet.
1073 *
1074 * Called without fnic_lock held.
1075 */
1076 void fnic_flush_tx(struct fnic *fnic)
1077 {
1078 struct sk_buff *skb;
1079 struct fc_frame *fp;
1080
1081 while ((skb = skb_dequeue(&fnic->tx_queue))) {
1082 fp = (struct fc_frame *)skb;
1083 fnic_send_frame(fnic, fp);
1084 }
1085 }
1086
1087 /**
1088 * fnic_set_eth_mode() - put fnic into ethernet mode.
1089 * @fnic: fnic device
1090 *
1091 * Called without fnic lock held.
1092 */
1093 static void fnic_set_eth_mode(struct fnic *fnic)
1094 {
1095 unsigned long flags;
1096 enum fnic_state old_state;
1097 int ret;
1098
1099 spin_lock_irqsave(&fnic->fnic_lock, flags);
1100 again:
1101 old_state = fnic->state;
1102 switch (old_state) {
1103 case FNIC_IN_FC_MODE:
1104 case FNIC_IN_ETH_TRANS_FC_MODE:
1105 default:
1106 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
1107 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1108
1109 ret = fnic_fw_reset_handler(fnic);
1110
1111 spin_lock_irqsave(&fnic->fnic_lock, flags);
1112 if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
1113 goto again;
1114 if (ret)
1115 fnic->state = old_state;
1116 break;
1117
1118 case FNIC_IN_FC_TRANS_ETH_MODE:
1119 case FNIC_IN_ETH_MODE:
1120 break;
1121 }
1122 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1123 }
1124
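/*
 * Per-buffer completion callback for the raw send queue: unmap the DMA
 * buffer and free the transmitted frame.
 */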
1125 static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
1126 struct cq_desc *cq_desc,
1127 struct vnic_wq_buf *buf, void *opaque)
1128 {
1129 struct sk_buff *skb = buf->os_buf;
1130 struct fc_frame *fp = (struct fc_frame *)skb;
1131 struct fnic *fnic = vnic_dev_priv(wq->vdev);
1132
1133 pci_unmap_single(fnic->pdev, buf->dma_addr,
1134 buf->len, PCI_DMA_TODEVICE);
1135 dev_kfree_skb_irq(fp_skb(fp));
1136 buf->os_buf = NULL;
1137 }
1138
1139 static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
1140 struct cq_desc *cq_desc, u8 type,
1141 u16 q_number, u16 completed_index,
1142 void *opaque)
1143 {
1144 struct fnic *fnic = vnic_dev_priv(vdev);
1145 unsigned long flags;
1146
1147 spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
1148 vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
1149 fnic_wq_complete_frame_send, NULL);
1150 spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);
1151
1152 return 0;
1153 }
1154
1155 int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
1156 {
1157 unsigned int wq_work_done = 0;
1158 unsigned int i;
1159
1160 for (i = 0; i < fnic->raw_wq_count; i++) {
1161 wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
1162 work_to_do,
1163 fnic_wq_cmpl_handler_cont,
1164 NULL);
1165 }
1166
1167 return wq_work_done;
1168 }
1169
1170
1171 void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
1172 {
1173 struct fc_frame *fp = buf->os_buf;
1174 struct fnic *fnic = vnic_dev_priv(wq->vdev);
1175
1176 pci_unmap_single(fnic->pdev, buf->dma_addr,
1177 buf->len, PCI_DMA_TODEVICE);
1178
1179 dev_kfree_skb(fp_skb(fp));
1180 buf->os_buf = NULL;
1181 }
1182
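/*
 * Free all VLANs discovered so far so that VLAN discovery can start over.
 */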
1183 void fnic_fcoe_reset_vlans(struct fnic *fnic)
1184 {
1185 unsigned long flags;
1186 struct fcoe_vlan *vlan;
1187 struct fcoe_vlan *next;
1188
1189 /*
1190 * Indicate a link down to fcoe so that all FCFs are freed.
1191 * This might not be required since we did it before sending the
1192 * VLAN discovery request.
1193 */
1194 spin_lock_irqsave(&fnic->vlans_lock, flags);
1195 if (!list_empty(&fnic->vlans)) {
1196 list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
1197 list_del(&vlan->list);
1198 kfree(vlan);
1199 }
1200 }
1201 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
1202 }
1203
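/**
 * fnic_handle_fip_timer() - FIP VLAN discovery timer handler.
 * @fnic: fnic instance.
 *
 * Re-solicits on the current VLAN, moves on to the next discovered VLAN
 * once the solicitation limit is reached, and restarts VLAN discovery when
 * every VLAN has been exhausted or marked failed.
 */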
1204 void fnic_handle_fip_timer(struct fnic *fnic)
1205 {
1206 unsigned long flags;
1207 struct fcoe_vlan *vlan;
1208 u64 sol_time;
1209
1210 spin_lock_irqsave(&fnic->fnic_lock, flags);
1211 if (fnic->stop_rx_link_events) {
1212 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1213 return;
1214 }
1215 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1216
1217 if (fnic->ctlr.mode == FIP_ST_NON_FIP)
1218 return;
1219
1220 spin_lock_irqsave(&fnic->vlans_lock, flags);
1221 if (list_empty(&fnic->vlans)) {
1222 /* no vlans available, try again */
1223 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
1224 "Start VLAN Discovery\n");
1225 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
1226 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
1227 return;
1228 }
1229
1230 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
1231 shost_printk(KERN_DEBUG, fnic->lport->host,
1232 "fip_timer: vlan %d state %d sol_count %d\n",
1233 vlan->vid, vlan->state, vlan->sol_count);
1234 switch (vlan->state) {
1235 case FIP_VLAN_USED:
1236 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
1237 "FIP VLAN is selected for FC transaction\n");
1238 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
1239 break;
1240 case FIP_VLAN_FAILED:
1241 /* if all vlans are in failed state, restart vlan disc */
1242 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
1243 "Start VLAN Discovery\n");
1244 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
1245 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
1246 break;
1247 case FIP_VLAN_SENT:
1248 if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
1249 /*
1250 * No response on this VLAN; remove it from the list
1251 * and try the next VLAN.
1252 */
1253 shost_printk(KERN_INFO, fnic->lport->host,
1254 "Dequeue this VLAN ID %d from list\n",
1255 vlan->vid);
1256 list_del(&vlan->list);
1257 kfree(vlan);
1258 vlan = NULL;
1259 if (list_empty(&fnic->vlans)) {
1260 /* we exhausted all vlans, restart vlan disc */
1261 spin_unlock_irqrestore(&fnic->vlans_lock,
1262 flags);
1263 shost_printk(KERN_INFO, fnic->lport->host,
1264 "fip_timer: vlan list empty, "
1265 "trigger vlan disc\n");
1266 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
1267 return;
1268 }
1269 /* check the next vlan */
1270 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
1271 list);
1272 fnic->set_vlan(fnic, vlan->vid);
1273 vlan->state = FIP_VLAN_SENT; /* sent now */
1274 }
1275 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
1276 vlan->sol_count++;
1277 sol_time = jiffies + msecs_to_jiffies
1278 (FCOE_CTLR_START_DELAY);
1279 mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
1280 break;
1281 }
1282 }
1283