Lines matching refs: fnic

Each entry below is a cross-reference hit: the source line number, the matching source text, and the enclosing function; "local" marks a local-variable declaration of fnic and "argument" marks a function parameter.
42 static void fnic_set_eth_mode(struct fnic *);
43 static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
44 static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
45 static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
46 static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
47 static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);
51 struct fnic *fnic = container_of(work, struct fnic, link_work); in fnic_handle_link() local
57 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_handle_link()
59 if (fnic->stop_rx_link_events) { in fnic_handle_link()
60 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_link()
64 old_link_down_cnt = fnic->link_down_cnt; in fnic_handle_link()
65 old_link_status = fnic->link_status; in fnic_handle_link()
67 &fnic->fnic_stats.misc_stats.current_port_speed); in fnic_handle_link()
69 fnic->link_status = vnic_dev_link_status(fnic->vdev); in fnic_handle_link()
70 fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev); in fnic_handle_link()
72 new_port_speed = vnic_dev_port_speed(fnic->vdev); in fnic_handle_link()
73 atomic64_set(&fnic->fnic_stats.misc_stats.current_port_speed, in fnic_handle_link()
76 shost_printk(KERN_INFO, fnic->lport->host, in fnic_handle_link()
80 switch (vnic_dev_port_speed(fnic->vdev)) { in fnic_handle_link()
82 fc_host_speed(fnic->lport->host) = FC_PORTSPEED_10GBIT; in fnic_handle_link()
83 fnic->lport->link_supported_speeds = FC_PORTSPEED_10GBIT; in fnic_handle_link()
86 fc_host_speed(fnic->lport->host) = FC_PORTSPEED_20GBIT; in fnic_handle_link()
87 fnic->lport->link_supported_speeds = FC_PORTSPEED_20GBIT; in fnic_handle_link()
90 fc_host_speed(fnic->lport->host) = FC_PORTSPEED_25GBIT; in fnic_handle_link()
91 fnic->lport->link_supported_speeds = FC_PORTSPEED_25GBIT; in fnic_handle_link()
95 fc_host_speed(fnic->lport->host) = FC_PORTSPEED_40GBIT; in fnic_handle_link()
96 fnic->lport->link_supported_speeds = FC_PORTSPEED_40GBIT; in fnic_handle_link()
99 fc_host_speed(fnic->lport->host) = FC_PORTSPEED_100GBIT; in fnic_handle_link()
100 fnic->lport->link_supported_speeds = FC_PORTSPEED_100GBIT; in fnic_handle_link()
103 fc_host_speed(fnic->lport->host) = FC_PORTSPEED_UNKNOWN; in fnic_handle_link()
104 fnic->lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN; in fnic_handle_link()
108 if (old_link_status == fnic->link_status) { in fnic_handle_link()
109 if (!fnic->link_status) { in fnic_handle_link()
111 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_link()
112 fnic_fc_trace_set_data(fnic->lport->host->host_no, in fnic_handle_link()
116 if (old_link_down_cnt != fnic->link_down_cnt) { in fnic_handle_link()
118 fnic->lport->host_stats.link_failure_count++; in fnic_handle_link()
119 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_link()
121 fnic->lport->host->host_no, in fnic_handle_link()
126 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_handle_link()
128 fcoe_ctlr_link_down(&fnic->ctlr); in fnic_handle_link()
129 if (fnic->config.flags & VFCF_FIP_CAPABLE) { in fnic_handle_link()
132 fnic->lport->host->host_no, in fnic_handle_link()
138 fnic_fcoe_send_vlan_req(fnic); in fnic_handle_link()
141 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_handle_link()
143 fcoe_ctlr_link_up(&fnic->ctlr); in fnic_handle_link()
146 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_link()
148 fnic->lport->host->host_no, FNIC_FC_LE, in fnic_handle_link()
153 } else if (fnic->link_status) { in fnic_handle_link()
155 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_link()
156 if (fnic->config.flags & VFCF_FIP_CAPABLE) { in fnic_handle_link()
159 fnic->lport->host->host_no, in fnic_handle_link()
162 fnic_fcoe_send_vlan_req(fnic); in fnic_handle_link()
165 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); in fnic_handle_link()
166 fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE, in fnic_handle_link()
168 fcoe_ctlr_link_up(&fnic->ctlr); in fnic_handle_link()
171 fnic->lport->host_stats.link_failure_count++; in fnic_handle_link()
172 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_link()
173 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n"); in fnic_handle_link()
175 fnic->lport->host->host_no, FNIC_FC_LE, in fnic_handle_link()
178 if (fnic->config.flags & VFCF_FIP_CAPABLE) { in fnic_handle_link()
179 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_handle_link()
181 del_timer_sync(&fnic->fip_timer); in fnic_handle_link()
183 fcoe_ctlr_link_down(&fnic->ctlr); in fnic_handle_link()
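The fnic_handle_link() hits above follow the driver's usual deferred-work shape: recover the fnic from the work_struct with container_of(), sample link state under fnic_lock, bail out early if stop_rx_link_events is set, and only notify libfcoe (fcoe_ctlr_link_up/down) after the lock is dropped. A minimal sketch of that shape, using a trimmed-down struct that keeps only the fields visible in the matches; the real layout lives in the driver's fnic.h:

```c
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

/* Sketch only: not the driver's struct fnic, just the fields used here. */
struct fnic_sketch {
	spinlock_t		fnic_lock;
	struct work_struct	link_work;
	int			stop_rx_link_events;
	int			link_status;
	u32			link_down_cnt;
};

static void sketch_handle_link(struct work_struct *work)
{
	struct fnic_sketch *fnic =
		container_of(work, struct fnic_sketch, link_work);
	unsigned long flags;
	int old_link_status;
	u32 old_link_down_cnt;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {	/* adapter is being torn down */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}
	old_link_status = fnic->link_status;
	old_link_down_cnt = fnic->link_down_cnt;
	/* ... re-read link status / link-down count from the vNIC here ... */

	if (old_link_status == fnic->link_status &&
	    old_link_down_cnt == fnic->link_down_cnt) {
		/* nothing changed: drop the lock and return */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	/* libfcoe notifications happen only after the lock is released,
	 * matching the ordering shown in the hits above */
}
```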
193 struct fnic *fnic = container_of(work, struct fnic, frame_work); in fnic_handle_frame() local
194 struct fc_lport *lp = fnic->lport; in fnic_handle_frame()
199 while ((skb = skb_dequeue(&fnic->frame_queue))) { in fnic_handle_frame()
201 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_handle_frame()
202 if (fnic->stop_rx_link_events) { in fnic_handle_frame()
203 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_frame()
213 if (fnic->state != FNIC_IN_FC_MODE && in fnic_handle_frame()
214 fnic->state != FNIC_IN_ETH_MODE) { in fnic_handle_frame()
215 skb_queue_head(&fnic->frame_queue, skb); in fnic_handle_frame()
216 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_frame()
219 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_frame()
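fnic_handle_frame() drains the adapter's frame_queue in process context; if the adapter is not yet in FC or Ethernet mode the frame is pushed back and the loop stops. A hedged sketch of that drain loop, reusing the field and state names from the matches (struct fnic and the FNIC_IN_*_MODE states come from the driver's fnic.h and are not redefined here):

```c
#include <linux/skbuff.h>
#include <linux/spinlock.h>

static void sketch_handle_frame(struct fnic *fnic)
{
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&fnic->frame_queue))) {
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb(skb);	/* shutting down: drop it */
			return;
		}
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			/* not ready: requeue at the head and retry later */
			skb_queue_head(&fnic->frame_queue, skb);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		/* hand the frame up to libfc outside the lock */
	}
}
```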
225 void fnic_fcoe_evlist_free(struct fnic *fnic) in fnic_fcoe_evlist_free() argument
231 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_fcoe_evlist_free()
232 if (list_empty(&fnic->evlist)) { in fnic_fcoe_evlist_free()
233 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_fcoe_evlist_free()
237 list_for_each_entry_safe(fevt, next, &fnic->evlist, list) { in fnic_fcoe_evlist_free()
241 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_fcoe_evlist_free()
246 struct fnic *fnic = container_of(work, struct fnic, event_work); in fnic_handle_event() local
251 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_handle_event()
252 if (list_empty(&fnic->evlist)) { in fnic_handle_event()
253 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_event()
257 list_for_each_entry_safe(fevt, next, &fnic->evlist, list) { in fnic_handle_event()
258 if (fnic->stop_rx_link_events) { in fnic_handle_event()
261 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_event()
268 if (fnic->state != FNIC_IN_FC_MODE && in fnic_handle_event()
269 fnic->state != FNIC_IN_ETH_MODE) { in fnic_handle_event()
270 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_event()
277 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_event()
278 fnic_fcoe_send_vlan_req(fnic); in fnic_handle_event()
279 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_handle_event()
282 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_handle_event()
284 fnic_fcoe_start_fcf_disc(fnic); in fnic_handle_event()
287 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_handle_event()
293 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_event()
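fnic_handle_event() walks the pending-event list with list_for_each_entry_safe() under fnic_lock and, for a VLAN-discovery event, releases the lock around fnic_fcoe_send_vlan_req() before retaking it, the same unlock/send/relock sequence visible in the hits. A sketch with an assumed minimal event node; the driver's own event struct carries more, including a back-pointer to the fnic:

```c
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct sketch_event {		/* illustrative, not the driver's struct */
	struct list_head list;
	int event;
};

static void sketch_handle_event(struct fnic *fnic)
{
	struct sketch_event *fevt, *next;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
		list_del(&fevt->list);
		switch (fevt->event) {
		case FNIC_EVT_START_VLAN_DISC:
			/* the request path takes other locks, so release
			 * fnic_lock around it and retake it to keep walking */
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			fnic_fcoe_send_vlan_req(fnic);
			spin_lock_irqsave(&fnic->fnic_lock, flags);
			break;
		default:
			break;
		}
		kfree(fevt);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
```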
369 static void fnic_fcoe_send_vlan_req(struct fnic *fnic) in fnic_fcoe_send_vlan_req() argument
371 struct fcoe_ctlr *fip = &fnic->ctlr; in fnic_fcoe_send_vlan_req()
372 struct fnic_stats *fnic_stats = &fnic->fnic_stats; in fnic_fcoe_send_vlan_req()
378 fnic_fcoe_reset_vlans(fnic); in fnic_fcoe_send_vlan_req()
379 fnic->set_vlan(fnic, 0); in fnic_fcoe_send_vlan_req()
382 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, in fnic_fcoe_send_vlan_req()
419 mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov)); in fnic_fcoe_send_vlan_req()
422 static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb) in fnic_fcoe_process_vlan_resp() argument
424 struct fcoe_ctlr *fip = &fnic->ctlr; in fnic_fcoe_process_vlan_resp()
427 struct fnic_stats *fnic_stats = &fnic->fnic_stats; in fnic_fcoe_process_vlan_resp()
435 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, in fnic_fcoe_process_vlan_resp()
440 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, in fnic_fcoe_process_vlan_resp()
445 fnic_fcoe_reset_vlans(fnic); in fnic_fcoe_process_vlan_resp()
446 spin_lock_irqsave(&fnic->vlans_lock, flags); in fnic_fcoe_process_vlan_resp()
453 shost_printk(KERN_INFO, fnic->lport->host, in fnic_fcoe_process_vlan_resp()
458 spin_unlock_irqrestore(&fnic->vlans_lock, in fnic_fcoe_process_vlan_resp()
464 list_add_tail(&vlan->list, &fnic->vlans); in fnic_fcoe_process_vlan_resp()
472 if (list_empty(&fnic->vlans)) { in fnic_fcoe_process_vlan_resp()
475 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, in fnic_fcoe_process_vlan_resp()
477 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_fcoe_process_vlan_resp()
481 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); in fnic_fcoe_process_vlan_resp()
482 fnic->set_vlan(fnic, vlan->vid); in fnic_fcoe_process_vlan_resp()
485 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_fcoe_process_vlan_resp()
491 mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); in fnic_fcoe_process_vlan_resp()
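fnic_fcoe_process_vlan_resp() builds the candidate-VLAN list under vlans_lock, programs the first entry with fnic->set_vlan(), and arms fip_timer for the next step. A sketch of just the list-building helper, with the FIP descriptor parsing elided; the node type below is illustrative (the driver's struct fcoe_vlan also tracks discovery state), and GFP_ATOMIC is an assumption:

```c
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct sketch_vlan {		/* illustrative, not the driver's fcoe_vlan */
	struct list_head list;
	u16 vid;
};

/* Append one discovered VLAN id to fnic->vlans; the allocation is done
 * before taking vlans_lock. */
static int sketch_add_vlan(struct fnic *fnic, u16 vid)
{
	struct sketch_vlan *vlan;
	unsigned long flags;

	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
	if (!vlan)
		return -ENOMEM;

	vlan->vid = vid & 0x0fff;	/* VLAN id is the low 12 bits */
	spin_lock_irqsave(&fnic->vlans_lock, flags);
	list_add_tail(&vlan->list, &fnic->vlans);
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
	return 0;
}
```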
496 static void fnic_fcoe_start_fcf_disc(struct fnic *fnic) in fnic_fcoe_start_fcf_disc() argument
502 spin_lock_irqsave(&fnic->vlans_lock, flags); in fnic_fcoe_start_fcf_disc()
503 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); in fnic_fcoe_start_fcf_disc()
504 fnic->set_vlan(fnic, vlan->vid); in fnic_fcoe_start_fcf_disc()
507 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_fcoe_start_fcf_disc()
510 fcoe_ctlr_link_up(&fnic->ctlr); in fnic_fcoe_start_fcf_disc()
513 mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); in fnic_fcoe_start_fcf_disc()
516 static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag) in fnic_fcoe_vlan_check() argument
521 spin_lock_irqsave(&fnic->vlans_lock, flags); in fnic_fcoe_vlan_check()
522 if (list_empty(&fnic->vlans)) { in fnic_fcoe_vlan_check()
523 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_fcoe_vlan_check()
527 fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); in fnic_fcoe_vlan_check()
529 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_fcoe_vlan_check()
535 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_fcoe_vlan_check()
538 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_fcoe_vlan_check()
542 static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev) in fnic_event_enq() argument
551 fevt->fnic = fnic; in fnic_event_enq()
554 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_event_enq()
555 list_add_tail(&fevt->list, &fnic->evlist); in fnic_event_enq()
556 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_event_enq()
558 schedule_work(&fnic->event_work); in fnic_event_enq()
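fnic_event_enq() is the producer side of the same list: allocate a node, append it to fnic->evlist under fnic_lock, and kick event_work with schedule_work(). A sketch reusing the sketch_event node from above; GFP_ATOMIC is an assumption, chosen because the enqueue path may be called where sleeping is not allowed:

```c
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

static void sketch_event_enq(struct fnic *fnic, int ev)
{
	struct sketch_event *fevt;
	unsigned long flags;

	fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
	if (!fevt)
		return;		/* best effort: the event is simply lost */

	fevt->event = ev;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	list_add_tail(&fevt->list, &fnic->evlist);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	schedule_work(&fnic->event_work);
}
```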
561 static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb) in fnic_fcoe_handle_fip_frame() argument
585 if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags))) in fnic_fcoe_handle_fip_frame()
591 fnic_fcoe_process_vlan_resp(fnic, skb); in fnic_fcoe_handle_fip_frame()
595 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); in fnic_fcoe_handle_fip_frame()
605 struct fnic *fnic = container_of(work, struct fnic, fip_frame_work); in fnic_handle_fip_frame() local
606 struct fnic_stats *fnic_stats = &fnic->fnic_stats; in fnic_handle_fip_frame()
611 while ((skb = skb_dequeue(&fnic->fip_frame_queue))) { in fnic_handle_fip_frame()
612 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_handle_fip_frame()
613 if (fnic->stop_rx_link_events) { in fnic_handle_fip_frame()
614 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_fip_frame()
622 if (fnic->state != FNIC_IN_FC_MODE && in fnic_handle_fip_frame()
623 fnic->state != FNIC_IN_ETH_MODE) { in fnic_handle_fip_frame()
624 skb_queue_head(&fnic->fip_frame_queue, skb); in fnic_handle_fip_frame()
625 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_fip_frame()
628 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_fip_frame()
632 if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) { in fnic_handle_fip_frame()
640 if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) { in fnic_handle_fip_frame()
643 shost_printk(KERN_INFO, fnic->lport->host, in fnic_handle_fip_frame()
645 fcoe_ctlr_link_down(&fnic->ctlr); in fnic_handle_fip_frame()
647 fnic_fcoe_send_vlan_req(fnic); in fnic_handle_fip_frame()
651 fcoe_ctlr_recv(&fnic->ctlr, skb); in fnic_handle_fip_frame()
662 static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb) in fnic_import_rq_eth_pkt() argument
679 if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) { in fnic_import_rq_eth_pkt()
685 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, in fnic_import_rq_eth_pkt()
689 skb_queue_tail(&fnic->fip_frame_queue, skb); in fnic_import_rq_eth_pkt()
690 queue_work(fnic_fip_queue, &fnic->fip_frame_work); in fnic_import_rq_eth_pkt()
724 void fnic_update_mac_locked(struct fnic *fnic, u8 *new) in fnic_update_mac_locked() argument
726 u8 *ctl = fnic->ctlr.ctl_src_addr; in fnic_update_mac_locked()
727 u8 *data = fnic->data_src_addr; in fnic_update_mac_locked()
733 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new); in fnic_update_mac_locked()
735 vnic_dev_del_addr(fnic->vdev, data); in fnic_update_mac_locked()
738 vnic_dev_add_addr(fnic->vdev, new); in fnic_update_mac_locked()
748 struct fnic *fnic = lport_priv(lport); in fnic_update_mac() local
750 spin_lock_irq(&fnic->fnic_lock); in fnic_update_mac()
751 fnic_update_mac_locked(fnic, new); in fnic_update_mac()
752 spin_unlock_irq(&fnic->fnic_lock); in fnic_update_mac()
771 struct fnic *fnic = lport_priv(lport); in fnic_set_port_id() local
783 fnic_update_mac(lport, fnic->ctlr.ctl_src_addr); in fnic_set_port_id()
784 fnic_set_eth_mode(fnic); in fnic_set_port_id()
792 fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp); in fnic_set_port_id()
798 spin_lock_irq(&fnic->fnic_lock); in fnic_set_port_id()
799 if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE) in fnic_set_port_id()
800 fnic->state = FNIC_IN_ETH_TRANS_FC_MODE; in fnic_set_port_id()
802 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_set_port_id()
805 fnic_state_to_str(fnic->state)); in fnic_set_port_id()
806 spin_unlock_irq(&fnic->fnic_lock); in fnic_set_port_id()
809 spin_unlock_irq(&fnic->fnic_lock); in fnic_set_port_id()
815 ret = fnic_flogi_reg_handler(fnic, port_id); in fnic_set_port_id()
818 spin_lock_irq(&fnic->fnic_lock); in fnic_set_port_id()
819 if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) in fnic_set_port_id()
820 fnic->state = FNIC_IN_ETH_MODE; in fnic_set_port_id()
821 spin_unlock_irq(&fnic->fnic_lock); in fnic_set_port_id()
830 struct fnic *fnic = vnic_dev_priv(rq->vdev); in fnic_rq_cmpl_frame_recv() local
833 struct fnic_stats *fnic_stats = &fnic->fnic_stats; in fnic_rq_cmpl_frame_recv()
848 dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len, in fnic_rq_cmpl_frame_recv()
882 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_rq_cmpl_frame_recv()
886 if (fnic_import_rq_eth_pkt(fnic, skb)) in fnic_rq_cmpl_frame_recv()
891 shost_printk(KERN_ERR, fnic->lport->host, in fnic_rq_cmpl_frame_recv()
898 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_rq_cmpl_frame_recv()
907 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_rq_cmpl_frame_recv()
908 if (fnic->stop_rx_link_events) { in fnic_rq_cmpl_frame_recv()
909 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_rq_cmpl_frame_recv()
912 fr_dev(fp) = fnic->lport; in fnic_rq_cmpl_frame_recv()
913 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_rq_cmpl_frame_recv()
914 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV, in fnic_rq_cmpl_frame_recv()
919 skb_queue_tail(&fnic->frame_queue, skb); in fnic_rq_cmpl_frame_recv()
920 queue_work(fnic_event_queue, &fnic->frame_work); in fnic_rq_cmpl_frame_recv()
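The receive-completion hits show the usual split between interrupt and process context: unmap the DMA buffer, check stop_rx_link_events under fnic_lock, then queue the skb on frame_queue and schedule frame_work on fnic_event_queue. A hedged sketch of that hand-off, with buffer bookkeeping and the FIP/FC dispatch elided:

```c
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

/* assumed to be the driver's global receive workqueue, as in the hit above */
extern struct workqueue_struct *fnic_event_queue;

static void sketch_rq_frame_recv(struct fnic *fnic, struct sk_buff *skb,
				 dma_addr_t dma_addr, unsigned int len)
{
	unsigned long flags;

	/* the buffer was mapped for receive when it was posted */
	dma_unmap_single(&fnic->pdev->dev, dma_addr, len, DMA_FROM_DEVICE);

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		dev_kfree_skb_irq(skb);	/* completion runs in irq context */
		return;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	/* defer protocol processing to the worker */
	skb_queue_tail(&fnic->frame_queue, skb);
	queue_work(fnic_event_queue, &fnic->frame_work);
}
```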
932 struct fnic *fnic = vnic_dev_priv(vdev); in fnic_rq_cmpl_handler_cont() local
934 vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index, in fnic_rq_cmpl_handler_cont()
940 int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do) in fnic_rq_cmpl_handler() argument
946 for (i = 0; i < fnic->rq_count; i++) { in fnic_rq_cmpl_handler()
947 cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do, in fnic_rq_cmpl_handler()
951 err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame); in fnic_rq_cmpl_handler()
953 shost_printk(KERN_ERR, fnic->lport->host, in fnic_rq_cmpl_handler()
970 struct fnic *fnic = vnic_dev_priv(rq->vdev); in fnic_alloc_rq_frame() local
979 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_alloc_rq_frame()
987 pa = dma_map_single(&fnic->pdev->dev, skb->data, len, DMA_FROM_DEVICE); in fnic_alloc_rq_frame()
988 if (dma_mapping_error(&fnic->pdev->dev, pa)) { in fnic_alloc_rq_frame()
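fnic_alloc_rq_frame() allocates and DMA-maps a receive buffer and checks the mapping with dma_mapping_error() before posting it; DMA-API debugging complains if that check is skipped. A short allocate/map/check sketch, where RX_LEN is an illustrative size rather than the driver's actual frame sizing:

```c
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

#define RX_LEN 2048	/* illustrative only */

static int sketch_alloc_rq_frame(struct fnic *fnic, struct sk_buff **out,
				 dma_addr_t *pa)
{
	struct sk_buff *skb = dev_alloc_skb(RX_LEN);

	if (!skb)
		return -ENOMEM;

	skb_put(skb, RX_LEN);		/* expose the full buffer to the hw */
	*pa = dma_map_single(&fnic->pdev->dev, skb->data, RX_LEN,
			     DMA_FROM_DEVICE);
	if (dma_mapping_error(&fnic->pdev->dev, *pa)) {
		dev_kfree_skb(skb);	/* never post an unmapped buffer */
		return -ENOMEM;
	}

	*out = skb;
	return 0;
}
```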
1005 struct fnic *fnic = vnic_dev_priv(rq->vdev); in fnic_free_rq_buf() local
1007 dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len, in fnic_free_rq_buf()
1021 struct fnic *fnic = fnic_from_ctlr(fip); in fnic_eth_send() local
1022 struct vnic_wq *wq = &fnic->wq[0]; in fnic_eth_send()
1028 if (!fnic->vlan_hw_insert) { in fnic_eth_send()
1034 vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id); in fnic_eth_send()
1035 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, in fnic_eth_send()
1040 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, in fnic_eth_send()
1046 pa = dma_map_single(&fnic->pdev->dev, skb->data, skb->len, in fnic_eth_send()
1048 if (dma_mapping_error(&fnic->pdev->dev, pa)) { in fnic_eth_send()
1053 spin_lock_irqsave(&fnic->wq_lock[0], flags); in fnic_eth_send()
1059 fnic->vlan_id, 1); in fnic_eth_send()
1060 spin_unlock_irqrestore(&fnic->wq_lock[0], flags); in fnic_eth_send()
1064 spin_unlock_irqrestore(&fnic->wq_lock[0], flags); in fnic_eth_send()
1065 dma_unmap_single(&fnic->pdev->dev, pa, skb->len, DMA_TO_DEVICE); in fnic_eth_send()
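When vlan_hw_insert is clear, fnic_eth_send() inserts the 802.1Q tag in software by pushing a struct vlan_ethhdr in front of the existing Ethernet header before mapping the frame for transmit. A sketch of just that insertion step, assuming the skb already starts with a plain Ethernet header and skb_mac_header() is valid; memmove() is used here because the old and new header positions overlap by the 4-byte tag:

```c
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static void sketch_vlan_insert(struct sk_buff *skb, u16 vlan_id)
{
	struct ethhdr *eth_hdr = (struct ethhdr *)skb_mac_header(skb);
	struct vlan_ethhdr *vlan_hdr;

	/* grow the header by the 4-byte tag (vlan_ethhdr minus ethhdr) */
	vlan_hdr = (struct vlan_ethhdr *)
		skb_push(skb, sizeof(*vlan_hdr) - sizeof(*eth_hdr));
	memmove(vlan_hdr, eth_hdr, 2 * ETH_ALEN);	/* keep dst + src MACs */
	vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
	vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
	vlan_hdr->h_vlan_TCI = htons(vlan_id);
}
```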
1073 static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp) in fnic_send_frame() argument
1075 struct vnic_wq *wq = &fnic->wq[0]; in fnic_send_frame()
1090 fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb)) in fnic_send_frame()
1093 if (!fnic->vlan_hw_insert) { in fnic_send_frame()
1099 vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id); in fnic_send_frame()
1108 if (fnic->ctlr.map_dest) in fnic_send_frame()
1111 memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN); in fnic_send_frame()
1112 memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN); in fnic_send_frame()
1122 pa = dma_map_single(&fnic->pdev->dev, eth_hdr, tot_len, DMA_TO_DEVICE); in fnic_send_frame()
1123 if (dma_mapping_error(&fnic->pdev->dev, pa)) { in fnic_send_frame()
1129 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND, in fnic_send_frame()
1134 spin_lock_irqsave(&fnic->wq_lock[0], flags); in fnic_send_frame()
1137 dma_unmap_single(&fnic->pdev->dev, pa, tot_len, DMA_TO_DEVICE); in fnic_send_frame()
1144 fnic->vlan_id, 1, 1, 1); in fnic_send_frame()
1147 spin_unlock_irqrestore(&fnic->wq_lock[0], flags); in fnic_send_frame()
1162 struct fnic *fnic = lport_priv(lp); in fnic_send() local
1165 if (fnic->in_remove) { in fnic_send()
1174 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_send()
1175 if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) { in fnic_send()
1176 skb_queue_tail(&fnic->tx_queue, fp_skb(fp)); in fnic_send()
1177 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_send()
1180 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_send()
1182 return fnic_send_frame(fnic, fp); in fnic_send()
1195 void fnic_flush_tx(struct fnic *fnic) in fnic_flush_tx() argument
1200 while ((skb = skb_dequeue(&fnic->tx_queue))) { in fnic_flush_tx()
1202 fnic_send_frame(fnic, fp); in fnic_flush_tx()
1212 static void fnic_set_eth_mode(struct fnic *fnic) in fnic_set_eth_mode() argument
1218 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_set_eth_mode()
1220 old_state = fnic->state; in fnic_set_eth_mode()
1225 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; in fnic_set_eth_mode()
1226 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_set_eth_mode()
1228 ret = fnic_fw_reset_handler(fnic); in fnic_set_eth_mode()
1230 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_set_eth_mode()
1231 if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE) in fnic_set_eth_mode()
1234 fnic->state = old_state; in fnic_set_eth_mode()
1241 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_set_eth_mode()
1250 struct fnic *fnic = vnic_dev_priv(wq->vdev); in fnic_wq_complete_frame_send() local
1252 dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len, in fnic_wq_complete_frame_send()
1263 struct fnic *fnic = vnic_dev_priv(vdev); in fnic_wq_cmpl_handler_cont() local
1266 spin_lock_irqsave(&fnic->wq_lock[q_number], flags); in fnic_wq_cmpl_handler_cont()
1267 vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index, in fnic_wq_cmpl_handler_cont()
1269 spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags); in fnic_wq_cmpl_handler_cont()
1274 int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do) in fnic_wq_cmpl_handler() argument
1279 for (i = 0; i < fnic->raw_wq_count; i++) { in fnic_wq_cmpl_handler()
1280 wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i], in fnic_wq_cmpl_handler()
1293 struct fnic *fnic = vnic_dev_priv(wq->vdev); in fnic_free_wq_buf() local
1295 dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len, in fnic_free_wq_buf()
1302 void fnic_fcoe_reset_vlans(struct fnic *fnic) in fnic_fcoe_reset_vlans() argument
1313 spin_lock_irqsave(&fnic->vlans_lock, flags); in fnic_fcoe_reset_vlans()
1314 if (!list_empty(&fnic->vlans)) { in fnic_fcoe_reset_vlans()
1315 list_for_each_entry_safe(vlan, next, &fnic->vlans, list) { in fnic_fcoe_reset_vlans()
1320 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_fcoe_reset_vlans()
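fnic_fcoe_reset_vlans() empties the discovered-VLAN list with the standard list_for_each_entry_safe() + list_del() + kfree() idiom under vlans_lock. A sketch, reusing the sketch_vlan node from the earlier VLAN sketch:

```c
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static void sketch_reset_vlans(struct fnic *fnic)
{
	struct sketch_vlan *vlan, *next;
	unsigned long flags;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
		/* the _safe variant allows deleting the current entry */
		list_del(&vlan->list);
		kfree(vlan);
	}
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
}
```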
1323 void fnic_handle_fip_timer(struct fnic *fnic) in fnic_handle_fip_timer() argument
1327 struct fnic_stats *fnic_stats = &fnic->fnic_stats; in fnic_handle_fip_timer()
1330 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_handle_fip_timer()
1331 if (fnic->stop_rx_link_events) { in fnic_handle_fip_timer()
1332 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_fip_timer()
1335 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_fip_timer()
1337 if (fnic->ctlr.mode == FIP_MODE_NON_FIP) in fnic_handle_fip_timer()
1340 spin_lock_irqsave(&fnic->vlans_lock, flags); in fnic_handle_fip_timer()
1341 if (list_empty(&fnic->vlans)) { in fnic_handle_fip_timer()
1342 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_handle_fip_timer()
1345 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_handle_fip_timer()
1347 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); in fnic_handle_fip_timer()
1351 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); in fnic_handle_fip_timer()
1352 shost_printk(KERN_DEBUG, fnic->lport->host, in fnic_handle_fip_timer()
1357 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_handle_fip_timer()
1359 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_handle_fip_timer()
1362 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_handle_fip_timer()
1365 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_handle_fip_timer()
1367 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); in fnic_handle_fip_timer()
1375 shost_printk(KERN_INFO, fnic->lport->host, in fnic_handle_fip_timer()
1381 if (list_empty(&fnic->vlans)) { in fnic_handle_fip_timer()
1383 spin_unlock_irqrestore(&fnic->vlans_lock, in fnic_handle_fip_timer()
1385 shost_printk(KERN_INFO, fnic->lport->host, in fnic_handle_fip_timer()
1388 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); in fnic_handle_fip_timer()
1392 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, in fnic_handle_fip_timer()
1394 fnic->set_vlan(fnic, vlan->vid); in fnic_handle_fip_timer()
1397 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_handle_fip_timer()
1402 mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); in fnic_handle_fip_timer()
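fnic_handle_fip_timer() ends by re-arming fip_timer with mod_timer() on a round_jiffies() deadline, which aligns the expiry to a full second so the periodic FIP housekeeping batches with other timers. A minimal sketch of that rearm; PERIOD_MS is an illustrative value, not the driver's actual FIP timeout:

```c
#include <linux/jiffies.h>
#include <linux/timer.h>

#define PERIOD_MS 8000	/* illustrative only */

static void sketch_rearm_fip_timer(struct fnic *fnic)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(PERIOD_MS);

	/* round_jiffies() rounds the expiry to a whole-second boundary */
	mod_timer(&fnic->fip_timer, round_jiffies(deadline));
}
```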