Lines Matching refs:fnic

30 static void fnic_set_eth_mode(struct fnic *);
31 static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
32 static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
33 static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
34 static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
35 static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);
39 struct fnic *fnic = container_of(work, struct fnic, link_work); in fnic_handle_link() local
45 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_handle_link()
47 fnic->link_events = 1; /* less work to just set everytime*/ in fnic_handle_link()
49 if (fnic->stop_rx_link_events) { in fnic_handle_link()
50 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_link()
54 old_link_down_cnt = fnic->link_down_cnt; in fnic_handle_link()
55 old_link_status = fnic->link_status; in fnic_handle_link()
57 &fnic->fnic_stats.misc_stats.current_port_speed); in fnic_handle_link()
59 fnic->link_status = vnic_dev_link_status(fnic->vdev); in fnic_handle_link()
60 fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev); in fnic_handle_link()
62 new_port_speed = vnic_dev_port_speed(fnic->vdev); in fnic_handle_link()
63 atomic64_set(&fnic->fnic_stats.misc_stats.current_port_speed, in fnic_handle_link()
66 FNIC_MAIN_DBG(KERN_INFO, fnic->lport->host, in fnic_handle_link()
70 switch (vnic_dev_port_speed(fnic->vdev)) { in fnic_handle_link()
72 fc_host_speed(fnic->lport->host) = FC_PORTSPEED_10GBIT; in fnic_handle_link()
73 fnic->lport->link_supported_speeds = FC_PORTSPEED_10GBIT; in fnic_handle_link()
76 fc_host_speed(fnic->lport->host) = FC_PORTSPEED_20GBIT; in fnic_handle_link()
77 fnic->lport->link_supported_speeds = FC_PORTSPEED_20GBIT; in fnic_handle_link()
80 fc_host_speed(fnic->lport->host) = FC_PORTSPEED_25GBIT; in fnic_handle_link()
81 fnic->lport->link_supported_speeds = FC_PORTSPEED_25GBIT; in fnic_handle_link()
85 fc_host_speed(fnic->lport->host) = FC_PORTSPEED_40GBIT; in fnic_handle_link()
86 fnic->lport->link_supported_speeds = FC_PORTSPEED_40GBIT; in fnic_handle_link()
89 fc_host_speed(fnic->lport->host) = FC_PORTSPEED_100GBIT; in fnic_handle_link()
90 fnic->lport->link_supported_speeds = FC_PORTSPEED_100GBIT; in fnic_handle_link()
93 fc_host_speed(fnic->lport->host) = FC_PORTSPEED_UNKNOWN; in fnic_handle_link()
94 fnic->lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN; in fnic_handle_link()
98 if (old_link_status == fnic->link_status) { in fnic_handle_link()
99 if (!fnic->link_status) { in fnic_handle_link()
101 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_link()
102 fnic_fc_trace_set_data(fnic->lport->host->host_no, in fnic_handle_link()
106 if (old_link_down_cnt != fnic->link_down_cnt) { in fnic_handle_link()
108 fnic->lport->host_stats.link_failure_count++; in fnic_handle_link()
109 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_link()
111 fnic->lport->host->host_no, in fnic_handle_link()
116 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_handle_link()
118 fcoe_ctlr_link_down(&fnic->ctlr); in fnic_handle_link()
119 if (fnic->config.flags & VFCF_FIP_CAPABLE) { in fnic_handle_link()
122 fnic->lport->host->host_no, in fnic_handle_link()
128 fnic_fcoe_send_vlan_req(fnic); in fnic_handle_link()
131 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_handle_link()
133 fcoe_ctlr_link_up(&fnic->ctlr); in fnic_handle_link()
136 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_link()
138 fnic->lport->host->host_no, FNIC_FC_LE, in fnic_handle_link()
143 } else if (fnic->link_status) { in fnic_handle_link()
145 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_link()
146 if (fnic->config.flags & VFCF_FIP_CAPABLE) { in fnic_handle_link()
149 fnic->lport->host->host_no, in fnic_handle_link()
152 fnic_fcoe_send_vlan_req(fnic); in fnic_handle_link()
155 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); in fnic_handle_link()
156 fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE, in fnic_handle_link()
158 fcoe_ctlr_link_up(&fnic->ctlr); in fnic_handle_link()
161 fnic->lport->host_stats.link_failure_count++; in fnic_handle_link()
162 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_link()
163 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n"); in fnic_handle_link()
165 fnic->lport->host->host_no, FNIC_FC_LE, in fnic_handle_link()
168 if (fnic->config.flags & VFCF_FIP_CAPABLE) { in fnic_handle_link()
169 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_handle_link()
171 del_timer_sync(&fnic->fip_timer); in fnic_handle_link()
173 fcoe_ctlr_link_down(&fnic->ctlr); in fnic_handle_link()
183 struct fnic *fnic = container_of(work, struct fnic, frame_work); in fnic_handle_frame() local
184 struct fc_lport *lp = fnic->lport; in fnic_handle_frame()
189 while ((skb = skb_dequeue(&fnic->frame_queue))) { in fnic_handle_frame()
191 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_handle_frame()
192 if (fnic->stop_rx_link_events) { in fnic_handle_frame()
193 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_frame()
203 if (fnic->state != FNIC_IN_FC_MODE && in fnic_handle_frame()
204 fnic->state != FNIC_IN_ETH_MODE) { in fnic_handle_frame()
205 skb_queue_head(&fnic->frame_queue, skb); in fnic_handle_frame()
206 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_frame()
209 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_frame()
215 void fnic_fcoe_evlist_free(struct fnic *fnic) in fnic_fcoe_evlist_free() argument
221 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_fcoe_evlist_free()
222 if (list_empty(&fnic->evlist)) { in fnic_fcoe_evlist_free()
223 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_fcoe_evlist_free()
227 list_for_each_entry_safe(fevt, next, &fnic->evlist, list) { in fnic_fcoe_evlist_free()
231 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_fcoe_evlist_free()
236 struct fnic *fnic = container_of(work, struct fnic, event_work); in fnic_handle_event() local
241 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_handle_event()
242 if (list_empty(&fnic->evlist)) { in fnic_handle_event()
243 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_event()
247 list_for_each_entry_safe(fevt, next, &fnic->evlist, list) { in fnic_handle_event()
248 if (fnic->stop_rx_link_events) { in fnic_handle_event()
251 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_event()
258 if (fnic->state != FNIC_IN_FC_MODE && in fnic_handle_event()
259 fnic->state != FNIC_IN_ETH_MODE) { in fnic_handle_event()
260 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_event()
267 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_event()
268 fnic_fcoe_send_vlan_req(fnic); in fnic_handle_event()
269 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_handle_event()
272 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_handle_event()
274 fnic_fcoe_start_fcf_disc(fnic); in fnic_handle_event()
277 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_handle_event()
283 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_event()
359 static void fnic_fcoe_send_vlan_req(struct fnic *fnic) in fnic_fcoe_send_vlan_req() argument
361 struct fcoe_ctlr *fip = &fnic->ctlr; in fnic_fcoe_send_vlan_req()
362 struct fnic_stats *fnic_stats = &fnic->fnic_stats; in fnic_fcoe_send_vlan_req()
368 fnic_fcoe_reset_vlans(fnic); in fnic_fcoe_send_vlan_req()
369 fnic->set_vlan(fnic, 0); in fnic_fcoe_send_vlan_req()
372 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, in fnic_fcoe_send_vlan_req()
409 mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov)); in fnic_fcoe_send_vlan_req()
412 static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb) in fnic_fcoe_process_vlan_resp() argument
414 struct fcoe_ctlr *fip = &fnic->ctlr; in fnic_fcoe_process_vlan_resp()
417 struct fnic_stats *fnic_stats = &fnic->fnic_stats; in fnic_fcoe_process_vlan_resp()
425 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, in fnic_fcoe_process_vlan_resp()
430 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, in fnic_fcoe_process_vlan_resp()
435 fnic_fcoe_reset_vlans(fnic); in fnic_fcoe_process_vlan_resp()
436 spin_lock_irqsave(&fnic->vlans_lock, flags); in fnic_fcoe_process_vlan_resp()
443 shost_printk(KERN_INFO, fnic->lport->host, in fnic_fcoe_process_vlan_resp()
448 spin_unlock_irqrestore(&fnic->vlans_lock, in fnic_fcoe_process_vlan_resp()
454 list_add_tail(&vlan->list, &fnic->vlans); in fnic_fcoe_process_vlan_resp()
462 if (list_empty(&fnic->vlans)) { in fnic_fcoe_process_vlan_resp()
465 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, in fnic_fcoe_process_vlan_resp()
467 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_fcoe_process_vlan_resp()
471 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); in fnic_fcoe_process_vlan_resp()
472 fnic->set_vlan(fnic, vlan->vid); in fnic_fcoe_process_vlan_resp()
475 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_fcoe_process_vlan_resp()
481 mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); in fnic_fcoe_process_vlan_resp()
486 static void fnic_fcoe_start_fcf_disc(struct fnic *fnic) in fnic_fcoe_start_fcf_disc() argument
492 spin_lock_irqsave(&fnic->vlans_lock, flags); in fnic_fcoe_start_fcf_disc()
493 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); in fnic_fcoe_start_fcf_disc()
494 fnic->set_vlan(fnic, vlan->vid); in fnic_fcoe_start_fcf_disc()
497 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_fcoe_start_fcf_disc()
500 fcoe_ctlr_link_up(&fnic->ctlr); in fnic_fcoe_start_fcf_disc()
503 mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); in fnic_fcoe_start_fcf_disc()
506 static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag) in fnic_fcoe_vlan_check() argument
511 spin_lock_irqsave(&fnic->vlans_lock, flags); in fnic_fcoe_vlan_check()
512 if (list_empty(&fnic->vlans)) { in fnic_fcoe_vlan_check()
513 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_fcoe_vlan_check()
517 fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); in fnic_fcoe_vlan_check()
519 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_fcoe_vlan_check()
525 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_fcoe_vlan_check()
528 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_fcoe_vlan_check()
532 static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev) in fnic_event_enq() argument
541 fevt->fnic = fnic; in fnic_event_enq()
544 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_event_enq()
545 list_add_tail(&fevt->list, &fnic->evlist); in fnic_event_enq()
546 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_event_enq()
548 schedule_work(&fnic->event_work); in fnic_event_enq()
551 static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb) in fnic_fcoe_handle_fip_frame() argument
575 if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags))) in fnic_fcoe_handle_fip_frame()
581 fnic_fcoe_process_vlan_resp(fnic, skb); in fnic_fcoe_handle_fip_frame()
585 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); in fnic_fcoe_handle_fip_frame()
595 struct fnic *fnic = container_of(work, struct fnic, fip_frame_work); in fnic_handle_fip_frame() local
596 struct fnic_stats *fnic_stats = &fnic->fnic_stats; in fnic_handle_fip_frame()
601 while ((skb = skb_dequeue(&fnic->fip_frame_queue))) { in fnic_handle_fip_frame()
602 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_handle_fip_frame()
603 if (fnic->stop_rx_link_events) { in fnic_handle_fip_frame()
604 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_fip_frame()
612 if (fnic->state != FNIC_IN_FC_MODE && in fnic_handle_fip_frame()
613 fnic->state != FNIC_IN_ETH_MODE) { in fnic_handle_fip_frame()
614 skb_queue_head(&fnic->fip_frame_queue, skb); in fnic_handle_fip_frame()
615 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_fip_frame()
618 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_fip_frame()
622 if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) { in fnic_handle_fip_frame()
630 if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) { in fnic_handle_fip_frame()
633 shost_printk(KERN_INFO, fnic->lport->host, in fnic_handle_fip_frame()
635 fcoe_ctlr_link_down(&fnic->ctlr); in fnic_handle_fip_frame()
637 fnic_fcoe_send_vlan_req(fnic); in fnic_handle_fip_frame()
641 fcoe_ctlr_recv(&fnic->ctlr, skb); in fnic_handle_fip_frame()
652 static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb) in fnic_import_rq_eth_pkt() argument
669 if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) { in fnic_import_rq_eth_pkt()
675 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, in fnic_import_rq_eth_pkt()
679 skb_queue_tail(&fnic->fip_frame_queue, skb); in fnic_import_rq_eth_pkt()
680 queue_work(fnic_fip_queue, &fnic->fip_frame_work); in fnic_import_rq_eth_pkt()
714 void fnic_update_mac_locked(struct fnic *fnic, u8 *new) in fnic_update_mac_locked() argument
716 u8 *ctl = fnic->ctlr.ctl_src_addr; in fnic_update_mac_locked()
717 u8 *data = fnic->data_src_addr; in fnic_update_mac_locked()
723 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new); in fnic_update_mac_locked()
725 vnic_dev_del_addr(fnic->vdev, data); in fnic_update_mac_locked()
728 vnic_dev_add_addr(fnic->vdev, new); in fnic_update_mac_locked()
738 struct fnic *fnic = lport_priv(lport); in fnic_update_mac() local
740 spin_lock_irq(&fnic->fnic_lock); in fnic_update_mac()
741 fnic_update_mac_locked(fnic, new); in fnic_update_mac()
742 spin_unlock_irq(&fnic->fnic_lock); in fnic_update_mac()
761 struct fnic *fnic = lport_priv(lport); in fnic_set_port_id() local
773 fnic_update_mac(lport, fnic->ctlr.ctl_src_addr); in fnic_set_port_id()
774 fnic_set_eth_mode(fnic); in fnic_set_port_id()
782 fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp); in fnic_set_port_id()
788 spin_lock_irq(&fnic->fnic_lock); in fnic_set_port_id()
789 if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE) in fnic_set_port_id()
790 fnic->state = FNIC_IN_ETH_TRANS_FC_MODE; in fnic_set_port_id()
792 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_set_port_id()
795 fnic_state_to_str(fnic->state)); in fnic_set_port_id()
796 spin_unlock_irq(&fnic->fnic_lock); in fnic_set_port_id()
799 spin_unlock_irq(&fnic->fnic_lock); in fnic_set_port_id()
805 ret = fnic_flogi_reg_handler(fnic, port_id); in fnic_set_port_id()
808 spin_lock_irq(&fnic->fnic_lock); in fnic_set_port_id()
809 if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) in fnic_set_port_id()
810 fnic->state = FNIC_IN_ETH_MODE; in fnic_set_port_id()
811 spin_unlock_irq(&fnic->fnic_lock); in fnic_set_port_id()
820 struct fnic *fnic = vnic_dev_priv(rq->vdev); in fnic_rq_cmpl_frame_recv() local
823 struct fnic_stats *fnic_stats = &fnic->fnic_stats; in fnic_rq_cmpl_frame_recv()
838 dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len, in fnic_rq_cmpl_frame_recv()
872 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_rq_cmpl_frame_recv()
876 if (fnic_import_rq_eth_pkt(fnic, skb)) in fnic_rq_cmpl_frame_recv()
881 shost_printk(KERN_ERR, fnic->lport->host, in fnic_rq_cmpl_frame_recv()
888 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_rq_cmpl_frame_recv()
897 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_rq_cmpl_frame_recv()
898 if (fnic->stop_rx_link_events) { in fnic_rq_cmpl_frame_recv()
899 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_rq_cmpl_frame_recv()
902 fr_dev(fp) = fnic->lport; in fnic_rq_cmpl_frame_recv()
903 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_rq_cmpl_frame_recv()
904 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV, in fnic_rq_cmpl_frame_recv()
909 skb_queue_tail(&fnic->frame_queue, skb); in fnic_rq_cmpl_frame_recv()
910 queue_work(fnic_event_queue, &fnic->frame_work); in fnic_rq_cmpl_frame_recv()
922 struct fnic *fnic = vnic_dev_priv(vdev); in fnic_rq_cmpl_handler_cont() local
924 vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index, in fnic_rq_cmpl_handler_cont()
930 int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do) in fnic_rq_cmpl_handler() argument
936 for (i = 0; i < fnic->rq_count; i++) { in fnic_rq_cmpl_handler()
937 cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do, in fnic_rq_cmpl_handler()
941 err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame); in fnic_rq_cmpl_handler()
943 shost_printk(KERN_ERR, fnic->lport->host, in fnic_rq_cmpl_handler()
960 struct fnic *fnic = vnic_dev_priv(rq->vdev); in fnic_alloc_rq_frame() local
969 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_alloc_rq_frame()
977 pa = dma_map_single(&fnic->pdev->dev, skb->data, len, DMA_FROM_DEVICE); in fnic_alloc_rq_frame()
978 if (dma_mapping_error(&fnic->pdev->dev, pa)) { in fnic_alloc_rq_frame()
995 struct fnic *fnic = vnic_dev_priv(rq->vdev); in fnic_free_rq_buf() local
997 dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len, in fnic_free_rq_buf()
1011 struct fnic *fnic = fnic_from_ctlr(fip); in fnic_eth_send() local
1012 struct vnic_wq *wq = &fnic->wq[0]; in fnic_eth_send()
1018 if (!fnic->vlan_hw_insert) { in fnic_eth_send()
1024 vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id); in fnic_eth_send()
1025 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, in fnic_eth_send()
1030 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, in fnic_eth_send()
1036 pa = dma_map_single(&fnic->pdev->dev, skb->data, skb->len, in fnic_eth_send()
1038 if (dma_mapping_error(&fnic->pdev->dev, pa)) { in fnic_eth_send()
1043 spin_lock_irqsave(&fnic->wq_lock[0], flags); in fnic_eth_send()
1049 fnic->vlan_id, 1); in fnic_eth_send()
1050 spin_unlock_irqrestore(&fnic->wq_lock[0], flags); in fnic_eth_send()
1054 spin_unlock_irqrestore(&fnic->wq_lock[0], flags); in fnic_eth_send()
1055 dma_unmap_single(&fnic->pdev->dev, pa, skb->len, DMA_TO_DEVICE); in fnic_eth_send()
1063 static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp) in fnic_send_frame() argument
1065 struct vnic_wq *wq = &fnic->wq[0]; in fnic_send_frame()
1080 fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb)) in fnic_send_frame()
1083 if (!fnic->vlan_hw_insert) { in fnic_send_frame()
1089 vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id); in fnic_send_frame()
1098 if (fnic->ctlr.map_dest) in fnic_send_frame()
1101 memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN); in fnic_send_frame()
1102 memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN); in fnic_send_frame()
1112 pa = dma_map_single(&fnic->pdev->dev, eth_hdr, tot_len, DMA_TO_DEVICE); in fnic_send_frame()
1113 if (dma_mapping_error(&fnic->pdev->dev, pa)) { in fnic_send_frame()
1119 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND, in fnic_send_frame()
1124 spin_lock_irqsave(&fnic->wq_lock[0], flags); in fnic_send_frame()
1127 dma_unmap_single(&fnic->pdev->dev, pa, tot_len, DMA_TO_DEVICE); in fnic_send_frame()
1134 fnic->vlan_id, 1, 1, 1); in fnic_send_frame()
1137 spin_unlock_irqrestore(&fnic->wq_lock[0], flags); in fnic_send_frame()
1152 struct fnic *fnic = lport_priv(lp); in fnic_send() local
1155 if (fnic->in_remove) { in fnic_send()
1164 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_send()
1165 if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) { in fnic_send()
1166 skb_queue_tail(&fnic->tx_queue, fp_skb(fp)); in fnic_send()
1167 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_send()
1170 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_send()
1172 return fnic_send_frame(fnic, fp); in fnic_send()
1185 void fnic_flush_tx(struct fnic *fnic) in fnic_flush_tx() argument
1190 while ((skb = skb_dequeue(&fnic->tx_queue))) { in fnic_flush_tx()
1192 fnic_send_frame(fnic, fp); in fnic_flush_tx()
1202 static void fnic_set_eth_mode(struct fnic *fnic) in fnic_set_eth_mode() argument
1208 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_set_eth_mode()
1210 old_state = fnic->state; in fnic_set_eth_mode()
1215 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; in fnic_set_eth_mode()
1216 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_set_eth_mode()
1218 ret = fnic_fw_reset_handler(fnic); in fnic_set_eth_mode()
1220 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_set_eth_mode()
1221 if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE) in fnic_set_eth_mode()
1224 fnic->state = old_state; in fnic_set_eth_mode()
1231 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_set_eth_mode()
1240 struct fnic *fnic = vnic_dev_priv(wq->vdev); in fnic_wq_complete_frame_send() local
1242 dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len, in fnic_wq_complete_frame_send()
1253 struct fnic *fnic = vnic_dev_priv(vdev); in fnic_wq_cmpl_handler_cont() local
1256 spin_lock_irqsave(&fnic->wq_lock[q_number], flags); in fnic_wq_cmpl_handler_cont()
1257 vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index, in fnic_wq_cmpl_handler_cont()
1259 spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags); in fnic_wq_cmpl_handler_cont()
1264 int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do) in fnic_wq_cmpl_handler() argument
1269 for (i = 0; i < fnic->raw_wq_count; i++) { in fnic_wq_cmpl_handler()
1270 wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i], in fnic_wq_cmpl_handler()
1283 struct fnic *fnic = vnic_dev_priv(wq->vdev); in fnic_free_wq_buf() local
1285 dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len, in fnic_free_wq_buf()
1292 void fnic_fcoe_reset_vlans(struct fnic *fnic) in fnic_fcoe_reset_vlans() argument
1303 spin_lock_irqsave(&fnic->vlans_lock, flags); in fnic_fcoe_reset_vlans()
1304 if (!list_empty(&fnic->vlans)) { in fnic_fcoe_reset_vlans()
1305 list_for_each_entry_safe(vlan, next, &fnic->vlans, list) { in fnic_fcoe_reset_vlans()
1310 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_fcoe_reset_vlans()
1313 void fnic_handle_fip_timer(struct fnic *fnic) in fnic_handle_fip_timer() argument
1317 struct fnic_stats *fnic_stats = &fnic->fnic_stats; in fnic_handle_fip_timer()
1320 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_handle_fip_timer()
1321 if (fnic->stop_rx_link_events) { in fnic_handle_fip_timer()
1322 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_fip_timer()
1325 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_fip_timer()
1327 if (fnic->ctlr.mode == FIP_MODE_NON_FIP) in fnic_handle_fip_timer()
1330 spin_lock_irqsave(&fnic->vlans_lock, flags); in fnic_handle_fip_timer()
1331 if (list_empty(&fnic->vlans)) { in fnic_handle_fip_timer()
1332 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_handle_fip_timer()
1336 shost_printk(KERN_DEBUG, fnic->lport->host, in fnic_handle_fip_timer()
1338 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); in fnic_handle_fip_timer()
1342 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); in fnic_handle_fip_timer()
1343 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_handle_fip_timer()
1348 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_handle_fip_timer()
1350 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_handle_fip_timer()
1353 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_handle_fip_timer()
1357 shost_printk(KERN_DEBUG, fnic->lport->host, in fnic_handle_fip_timer()
1359 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); in fnic_handle_fip_timer()
1367 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, in fnic_handle_fip_timer()
1373 if (list_empty(&fnic->vlans)) { in fnic_handle_fip_timer()
1375 spin_unlock_irqrestore(&fnic->vlans_lock, in fnic_handle_fip_timer()
1377 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, in fnic_handle_fip_timer()
1380 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); in fnic_handle_fip_timer()
1384 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, in fnic_handle_fip_timer()
1386 fnic->set_vlan(fnic, vlan->vid); in fnic_handle_fip_timer()
1389 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_handle_fip_timer()
1394 mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); in fnic_handle_fip_timer()
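
The matched lines above repeat one deferred-processing pattern: receive paths queue sk_buffs (skb_queue_tail) and schedule a work item, and the work handler drains the queue while re-checking stop_rx_link_events and the fnic state under fnic->fnic_lock (spin_lock_irqsave/spin_unlock_irqrestore), as in fnic_handle_frame() and fnic_handle_fip_frame(). The sketch below is not taken from the fnic driver; it is a minimal, hypothetical kernel-module illustration of that pattern, with invented names (demo_ctx, demo_handle_frames, demo_queue_frame).

/*
 * Hedged sketch of the queue-plus-work-item pattern seen in the listing.
 * All identifiers here are hypothetical; only the kernel APIs are real.
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/gfp.h>

struct demo_ctx {
	spinlock_t lock;                 /* plays the role of fnic->fnic_lock        */
	struct sk_buff_head frame_queue; /* plays the role of fnic->frame_queue      */
	struct work_struct frame_work;   /* plays the role of fnic->frame_work       */
	bool stop_rx_events;             /* plays the role of stop_rx_link_events    */
};

static struct demo_ctx demo;

/* Work handler: drain the queue, dropping frames once stop is requested. */
static void demo_handle_frames(struct work_struct *work)
{
	struct demo_ctx *ctx = container_of(work, struct demo_ctx, frame_work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&ctx->frame_queue))) {
		spin_lock_irqsave(&ctx->lock, flags);
		if (ctx->stop_rx_events) {
			spin_unlock_irqrestore(&ctx->lock, flags);
			dev_kfree_skb(skb);
			return;
		}
		spin_unlock_irqrestore(&ctx->lock, flags);

		/* Process the frame outside the lock, then release it. */
		dev_kfree_skb(skb);
	}
}

/* Receive side: queue the frame and defer all processing to the work item. */
static void demo_queue_frame(struct sk_buff *skb)
{
	skb_queue_tail(&demo.frame_queue, skb);
	schedule_work(&demo.frame_work);
}

static int __init demo_init(void)
{
	struct sk_buff *skb;

	spin_lock_init(&demo.lock);
	skb_queue_head_init(&demo.frame_queue);
	INIT_WORK(&demo.frame_work, demo_handle_frames);

	/* Queue one dummy frame so the work handler has something to drain. */
	skb = alloc_skb(64, GFP_KERNEL);
	if (skb)
		demo_queue_frame(skb);
	return 0;
}

static void __exit demo_exit(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo.lock, flags);
	demo.stop_rx_events = true;
	spin_unlock_irqrestore(&demo.lock, flags);

	cancel_work_sync(&demo.frame_work);
	skb_queue_purge(&demo.frame_queue);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Sketch of the deferred frame-handling pattern referenced above");

As in the listed fnic_handle_frame() lines, the lock is held only around the stop-flag and state checks; the actual frame processing happens after spin_unlock_irqrestore(), so the spinlock is never held across the longer-running FC stack calls.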