Lines matching refs: fnic — cross-reference hits from the Linux fnic FCoE driver (fnic_fcs.c). Each entry shows the original source line number, the matching code, and the enclosing function.

42 static void fnic_set_eth_mode(struct fnic *);
43 static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
44 static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
45 static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
46 static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
47 static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);
51 struct fnic *fnic = container_of(work, struct fnic, link_work); in fnic_handle_link() local
56 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_handle_link()
58 if (fnic->stop_rx_link_events) { in fnic_handle_link()
59 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_link()
63 old_link_down_cnt = fnic->link_down_cnt; in fnic_handle_link()
64 old_link_status = fnic->link_status; in fnic_handle_link()
65 fnic->link_status = vnic_dev_link_status(fnic->vdev); in fnic_handle_link()
66 fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev); in fnic_handle_link()
68 if (old_link_status == fnic->link_status) { in fnic_handle_link()
69 if (!fnic->link_status) { in fnic_handle_link()
71 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_link()
72 fnic_fc_trace_set_data(fnic->lport->host->host_no, in fnic_handle_link()
76 if (old_link_down_cnt != fnic->link_down_cnt) { in fnic_handle_link()
78 fnic->lport->host_stats.link_failure_count++; in fnic_handle_link()
79 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_link()
81 fnic->lport->host->host_no, in fnic_handle_link()
86 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_handle_link()
88 fcoe_ctlr_link_down(&fnic->ctlr); in fnic_handle_link()
89 if (fnic->config.flags & VFCF_FIP_CAPABLE) { in fnic_handle_link()
92 fnic->lport->host->host_no, in fnic_handle_link()
98 fnic_fcoe_send_vlan_req(fnic); in fnic_handle_link()
101 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_handle_link()
103 fcoe_ctlr_link_up(&fnic->ctlr); in fnic_handle_link()
106 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_link()
108 fnic->lport->host->host_no, FNIC_FC_LE, in fnic_handle_link()
113 } else if (fnic->link_status) { in fnic_handle_link()
115 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_link()
116 if (fnic->config.flags & VFCF_FIP_CAPABLE) { in fnic_handle_link()
119 fnic->lport->host->host_no, in fnic_handle_link()
122 fnic_fcoe_send_vlan_req(fnic); in fnic_handle_link()
125 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); in fnic_handle_link()
126 fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE, in fnic_handle_link()
128 fcoe_ctlr_link_up(&fnic->ctlr); in fnic_handle_link()
131 fnic->lport->host_stats.link_failure_count++; in fnic_handle_link()
132 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_link()
133 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n"); in fnic_handle_link()
135 fnic->lport->host->host_no, FNIC_FC_LE, in fnic_handle_link()
138 if (fnic->config.flags & VFCF_FIP_CAPABLE) { in fnic_handle_link()
139 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_handle_link()
141 del_timer_sync(&fnic->fip_timer); in fnic_handle_link()
143 fcoe_ctlr_link_down(&fnic->ctlr); in fnic_handle_link()
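The fnic_handle_link() hits above all follow one pattern: take fnic_lock, bail out if stop_rx_link_events is set, compare the cached link state with the freshly read one, and call into libfcoe (fcoe_ctlr_link_up/down) only after the lock has been dropped. A minimal kernel-style sketch of that pattern follows; the struct layout, field names, and the hw_link_up field are illustrative stand-ins, not the driver's real struct fnic.

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <scsi/libfcoe.h>

struct my_fnic {
	spinlock_t lock;
	int stop_rx_link_events;
	int hw_link_up;		/* updated elsewhere (e.g. by the ISR); illustrative */
	int link_status;	/* last state seen by this worker */
	struct fcoe_ctlr ctlr;
	struct work_struct link_work;
};

static void my_handle_link(struct work_struct *work)
{
	struct my_fnic *f = container_of(work, struct my_fnic, link_work);
	unsigned long flags;
	int old_link, new_link;

	spin_lock_irqsave(&f->lock, flags);
	if (f->stop_rx_link_events) {	/* adapter is being torn down */
		spin_unlock_irqrestore(&f->lock, flags);
		return;
	}
	old_link = f->link_status;
	new_link = f->hw_link_up;
	f->link_status = new_link;
	spin_unlock_irqrestore(&f->lock, flags);

	/* libfcoe is only called after the adapter lock is dropped */
	if (new_link && !old_link)
		fcoe_ctlr_link_up(&f->ctlr);
	else if (!new_link && old_link)
		fcoe_ctlr_link_down(&f->ctlr);
}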
153 struct fnic *fnic = container_of(work, struct fnic, frame_work); in fnic_handle_frame() local
154 struct fc_lport *lp = fnic->lport; in fnic_handle_frame()
159 while ((skb = skb_dequeue(&fnic->frame_queue))) { in fnic_handle_frame()
161 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_handle_frame()
162 if (fnic->stop_rx_link_events) { in fnic_handle_frame()
163 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_frame()
173 if (fnic->state != FNIC_IN_FC_MODE && in fnic_handle_frame()
174 fnic->state != FNIC_IN_ETH_MODE) { in fnic_handle_frame()
175 skb_queue_head(&fnic->frame_queue, skb); in fnic_handle_frame()
176 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_frame()
179 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_frame()
185 void fnic_fcoe_evlist_free(struct fnic *fnic) in fnic_fcoe_evlist_free() argument
191 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_fcoe_evlist_free()
192 if (list_empty(&fnic->evlist)) { in fnic_fcoe_evlist_free()
193 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_fcoe_evlist_free()
197 list_for_each_entry_safe(fevt, next, &fnic->evlist, list) { in fnic_fcoe_evlist_free()
201 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_fcoe_evlist_free()
206 struct fnic *fnic = container_of(work, struct fnic, event_work); in fnic_handle_event() local
211 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_handle_event()
212 if (list_empty(&fnic->evlist)) { in fnic_handle_event()
213 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_event()
217 list_for_each_entry_safe(fevt, next, &fnic->evlist, list) { in fnic_handle_event()
218 if (fnic->stop_rx_link_events) { in fnic_handle_event()
221 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_event()
228 if (fnic->state != FNIC_IN_FC_MODE && in fnic_handle_event()
229 fnic->state != FNIC_IN_ETH_MODE) { in fnic_handle_event()
230 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_event()
237 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_event()
238 fnic_fcoe_send_vlan_req(fnic); in fnic_handle_event()
239 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_handle_event()
242 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_handle_event()
244 fnic_fcoe_start_fcf_disc(fnic); in fnic_handle_event()
247 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_handle_event()
253 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_event()
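The fnic_fcoe_evlist_free()/fnic_handle_event() hits show the event-list walk: entries are unlinked with list_for_each_entry_safe() under fnic_lock, and the lock is dropped around any handler that may block or re-take it. A simplified sketch, with stand-in types rather than the driver's real struct fnic_event:

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

enum my_evt { MY_EVT_START_VLAN_DISC, MY_EVT_START_FCF_DISC };

struct my_event {
	struct list_head list;
	enum my_evt event;
};

struct my_adapter {
	spinlock_t lock;
	int stop_rx_link_events;
	struct list_head evlist;
	struct work_struct event_work;
};

static void my_handle_events(struct my_adapter *a)
{
	struct my_event *ev, *next;
	unsigned long flags;

	spin_lock_irqsave(&a->lock, flags);
	list_for_each_entry_safe(ev, next, &a->evlist, list) {
		if (a->stop_rx_link_events)
			break;		/* teardown: stop processing */
		list_del(&ev->list);
		if (ev->event == MY_EVT_START_VLAN_DISC) {
			/* drop the lock around work that may block or
			 * re-acquire it, mirroring the driver's pattern */
			spin_unlock_irqrestore(&a->lock, flags);
			/* ... send the FIP VLAN request here ... */
			spin_lock_irqsave(&a->lock, flags);
		}
		kfree(ev);
	}
	spin_unlock_irqrestore(&a->lock, flags);
}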
333 static void fnic_fcoe_send_vlan_req(struct fnic *fnic) in fnic_fcoe_send_vlan_req() argument
335 struct fcoe_ctlr *fip = &fnic->ctlr; in fnic_fcoe_send_vlan_req()
336 struct fnic_stats *fnic_stats = &fnic->fnic_stats; in fnic_fcoe_send_vlan_req()
343 fnic_fcoe_reset_vlans(fnic); in fnic_fcoe_send_vlan_req()
344 fnic->set_vlan(fnic, 0); in fnic_fcoe_send_vlan_req()
345 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, in fnic_fcoe_send_vlan_req()
382 mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov)); in fnic_fcoe_send_vlan_req()
385 static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb) in fnic_fcoe_process_vlan_resp() argument
387 struct fcoe_ctlr *fip = &fnic->ctlr; in fnic_fcoe_process_vlan_resp()
390 struct fnic_stats *fnic_stats = &fnic->fnic_stats; in fnic_fcoe_process_vlan_resp()
398 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, in fnic_fcoe_process_vlan_resp()
403 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, in fnic_fcoe_process_vlan_resp()
408 fnic_fcoe_reset_vlans(fnic); in fnic_fcoe_process_vlan_resp()
409 spin_lock_irqsave(&fnic->vlans_lock, flags); in fnic_fcoe_process_vlan_resp()
416 shost_printk(KERN_INFO, fnic->lport->host, in fnic_fcoe_process_vlan_resp()
422 spin_unlock_irqrestore(&fnic->vlans_lock, in fnic_fcoe_process_vlan_resp()
429 list_add_tail(&vlan->list, &fnic->vlans); in fnic_fcoe_process_vlan_resp()
437 if (list_empty(&fnic->vlans)) { in fnic_fcoe_process_vlan_resp()
440 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, in fnic_fcoe_process_vlan_resp()
442 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_fcoe_process_vlan_resp()
446 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); in fnic_fcoe_process_vlan_resp()
447 fnic->set_vlan(fnic, vlan->vid); in fnic_fcoe_process_vlan_resp()
450 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_fcoe_process_vlan_resp()
456 mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); in fnic_fcoe_process_vlan_resp()
461 static void fnic_fcoe_start_fcf_disc(struct fnic *fnic) in fnic_fcoe_start_fcf_disc() argument
467 spin_lock_irqsave(&fnic->vlans_lock, flags); in fnic_fcoe_start_fcf_disc()
468 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); in fnic_fcoe_start_fcf_disc()
469 fnic->set_vlan(fnic, vlan->vid); in fnic_fcoe_start_fcf_disc()
472 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_fcoe_start_fcf_disc()
475 fcoe_ctlr_link_up(&fnic->ctlr); in fnic_fcoe_start_fcf_disc()
478 mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); in fnic_fcoe_start_fcf_disc()
481 static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag) in fnic_fcoe_vlan_check() argument
486 spin_lock_irqsave(&fnic->vlans_lock, flags); in fnic_fcoe_vlan_check()
487 if (list_empty(&fnic->vlans)) { in fnic_fcoe_vlan_check()
488 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_fcoe_vlan_check()
492 fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); in fnic_fcoe_vlan_check()
494 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_fcoe_vlan_check()
500 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_fcoe_vlan_check()
503 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_fcoe_vlan_check()
507 static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev) in fnic_event_enq() argument
516 fevt->fnic = fnic; in fnic_event_enq()
519 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_event_enq()
520 list_add_tail(&fevt->list, &fnic->evlist); in fnic_event_enq()
521 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_event_enq()
523 schedule_work(&fnic->event_work); in fnic_event_enq()
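fnic_event_enq() is the producer side of the same list: allocate the event with GFP_ATOMIC (callers may hold spinlocks or run in interrupt context), link it onto the list under the lock, then schedule the worker. A sketch, reusing the illustrative my_adapter/my_evt types from the sketch above:

#include <linux/slab.h>
#include <linux/workqueue.h>

static void my_event_enq(struct my_adapter *a, enum my_evt ev)
{
	struct my_event *fevt;
	unsigned long flags;

	/* GFP_ATOMIC: callers may hold spinlocks or run in softirq context */
	fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
	if (!fevt)
		return;

	fevt->event = ev;

	spin_lock_irqsave(&a->lock, flags);
	list_add_tail(&fevt->list, &a->evlist);
	spin_unlock_irqrestore(&a->lock, flags);

	schedule_work(&a->event_work);
}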
526 static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb) in fnic_fcoe_handle_fip_frame() argument
550 if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags))) in fnic_fcoe_handle_fip_frame()
556 fnic_fcoe_process_vlan_resp(fnic, skb); in fnic_fcoe_handle_fip_frame()
560 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); in fnic_fcoe_handle_fip_frame()
570 struct fnic *fnic = container_of(work, struct fnic, fip_frame_work); in fnic_handle_fip_frame() local
571 struct fnic_stats *fnic_stats = &fnic->fnic_stats; in fnic_handle_fip_frame()
576 while ((skb = skb_dequeue(&fnic->fip_frame_queue))) { in fnic_handle_fip_frame()
577 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_handle_fip_frame()
578 if (fnic->stop_rx_link_events) { in fnic_handle_fip_frame()
579 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_fip_frame()
587 if (fnic->state != FNIC_IN_FC_MODE && in fnic_handle_fip_frame()
588 fnic->state != FNIC_IN_ETH_MODE) { in fnic_handle_fip_frame()
589 skb_queue_head(&fnic->fip_frame_queue, skb); in fnic_handle_fip_frame()
590 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_fip_frame()
593 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_fip_frame()
597 if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) { in fnic_handle_fip_frame()
605 if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) { in fnic_handle_fip_frame()
608 shost_printk(KERN_INFO, fnic->lport->host, in fnic_handle_fip_frame()
610 fcoe_ctlr_link_down(&fnic->ctlr); in fnic_handle_fip_frame()
612 fnic_fcoe_send_vlan_req(fnic); in fnic_handle_fip_frame()
616 fcoe_ctlr_recv(&fnic->ctlr, skb); in fnic_handle_fip_frame()
627 static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb) in fnic_import_rq_eth_pkt() argument
644 if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) { in fnic_import_rq_eth_pkt()
650 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, in fnic_import_rq_eth_pkt()
654 skb_queue_tail(&fnic->fip_frame_queue, skb); in fnic_import_rq_eth_pkt()
655 queue_work(fnic_fip_queue, &fnic->fip_frame_work); in fnic_import_rq_eth_pkt()
689 void fnic_update_mac_locked(struct fnic *fnic, u8 *new) in fnic_update_mac_locked() argument
691 u8 *ctl = fnic->ctlr.ctl_src_addr; in fnic_update_mac_locked()
692 u8 *data = fnic->data_src_addr; in fnic_update_mac_locked()
698 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new); in fnic_update_mac_locked()
700 vnic_dev_del_addr(fnic->vdev, data); in fnic_update_mac_locked()
703 vnic_dev_add_addr(fnic->vdev, new); in fnic_update_mac_locked()
713 struct fnic *fnic = lport_priv(lport); in fnic_update_mac() local
715 spin_lock_irq(&fnic->fnic_lock); in fnic_update_mac()
716 fnic_update_mac_locked(fnic, new); in fnic_update_mac()
717 spin_unlock_irq(&fnic->fnic_lock); in fnic_update_mac()
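The fnic_update_mac()/fnic_update_mac_locked() hits illustrate the "_locked" helper idiom: the helper assumes the caller already holds the adapter lock, and the public wrapper just takes the lock around it. A reduced sketch (the real driver also reprograms the vNIC address filters, which is omitted here):

#include <linux/etherdevice.h>
#include <linux/spinlock.h>

struct my_port {
	spinlock_t lock;
	u8 data_src_addr[ETH_ALEN];
};

static void my_update_mac_locked(struct my_port *p, const u8 *new_mac)
{
	/* caller must hold p->lock */
	if (!is_zero_ether_addr(new_mac))
		ether_addr_copy(p->data_src_addr, new_mac);
}

static void my_update_mac(struct my_port *p, const u8 *new_mac)
{
	spin_lock_irq(&p->lock);
	my_update_mac_locked(p, new_mac);
	spin_unlock_irq(&p->lock);
}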
736 struct fnic *fnic = lport_priv(lport); in fnic_set_port_id() local
748 fnic_update_mac(lport, fnic->ctlr.ctl_src_addr); in fnic_set_port_id()
749 fnic_set_eth_mode(fnic); in fnic_set_port_id()
757 fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp); in fnic_set_port_id()
763 spin_lock_irq(&fnic->fnic_lock); in fnic_set_port_id()
764 if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE) in fnic_set_port_id()
765 fnic->state = FNIC_IN_ETH_TRANS_FC_MODE; in fnic_set_port_id()
767 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_set_port_id()
770 fnic_state_to_str(fnic->state)); in fnic_set_port_id()
771 spin_unlock_irq(&fnic->fnic_lock); in fnic_set_port_id()
774 spin_unlock_irq(&fnic->fnic_lock); in fnic_set_port_id()
780 ret = fnic_flogi_reg_handler(fnic, port_id); in fnic_set_port_id()
783 spin_lock_irq(&fnic->fnic_lock); in fnic_set_port_id()
784 if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) in fnic_set_port_id()
785 fnic->state = FNIC_IN_ETH_MODE; in fnic_set_port_id()
786 spin_unlock_irq(&fnic->fnic_lock); in fnic_set_port_id()
795 struct fnic *fnic = vnic_dev_priv(rq->vdev); in fnic_rq_cmpl_frame_recv() local
798 struct fnic_stats *fnic_stats = &fnic->fnic_stats; in fnic_rq_cmpl_frame_recv()
814 pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len, in fnic_rq_cmpl_frame_recv()
850 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_rq_cmpl_frame_recv()
854 if (fnic_import_rq_eth_pkt(fnic, skb)) in fnic_rq_cmpl_frame_recv()
859 shost_printk(KERN_ERR, fnic->lport->host, in fnic_rq_cmpl_frame_recv()
866 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_rq_cmpl_frame_recv()
875 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_rq_cmpl_frame_recv()
876 if (fnic->stop_rx_link_events) { in fnic_rq_cmpl_frame_recv()
877 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_rq_cmpl_frame_recv()
880 fr_dev(fp) = fnic->lport; in fnic_rq_cmpl_frame_recv()
881 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_rq_cmpl_frame_recv()
882 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV, in fnic_rq_cmpl_frame_recv()
887 skb_queue_tail(&fnic->frame_queue, skb); in fnic_rq_cmpl_frame_recv()
888 queue_work(fnic_event_queue, &fnic->frame_work); in fnic_rq_cmpl_frame_recv()
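The tail of fnic_rq_cmpl_frame_recv() shown above defers protocol work out of the completion path: after the buffer is unmapped and validated, the skb is either dropped (during teardown) or appended to the frame queue and the frame worker is kicked. A sketch of that hand-off; the context struct and workqueue names are illustrative:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct my_rx_ctx {
	spinlock_t lock;
	int stop_rx_link_events;
	struct sk_buff_head frame_queue;
	struct work_struct frame_work;
	struct workqueue_struct *event_wq;
};

static void my_rx_deliver(struct my_rx_ctx *rx, struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(&rx->lock, flags);
	if (rx->stop_rx_link_events) {
		spin_unlock_irqrestore(&rx->lock, flags);
		dev_kfree_skb_irq(skb);		/* drop frames during teardown */
		return;
	}
	spin_unlock_irqrestore(&rx->lock, flags);

	/* defer FC protocol processing to the frame worker */
	skb_queue_tail(&rx->frame_queue, skb);
	queue_work(rx->event_wq, &rx->frame_work);
}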
900 struct fnic *fnic = vnic_dev_priv(vdev); in fnic_rq_cmpl_handler_cont() local
902 vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index, in fnic_rq_cmpl_handler_cont()
908 int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do) in fnic_rq_cmpl_handler() argument
914 for (i = 0; i < fnic->rq_count; i++) { in fnic_rq_cmpl_handler()
915 cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do, in fnic_rq_cmpl_handler()
919 err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame); in fnic_rq_cmpl_handler()
921 shost_printk(KERN_ERR, fnic->lport->host, in fnic_rq_cmpl_handler()
938 struct fnic *fnic = vnic_dev_priv(rq->vdev); in fnic_alloc_rq_frame() local
946 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_alloc_rq_frame()
954 pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE); in fnic_alloc_rq_frame()
962 struct fnic *fnic = vnic_dev_priv(rq->vdev); in fnic_free_rq_buf() local
964 pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len, in fnic_free_rq_buf()
978 struct fnic *fnic = fnic_from_ctlr(fip); in fnic_eth_send() local
979 struct vnic_wq *wq = &fnic->wq[0]; in fnic_eth_send()
985 if (!fnic->vlan_hw_insert) { in fnic_eth_send()
992 vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id); in fnic_eth_send()
993 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, in fnic_eth_send()
998 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, in fnic_eth_send()
1004 pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); in fnic_eth_send()
1006 spin_lock_irqsave(&fnic->wq_lock[0], flags); in fnic_eth_send()
1008 pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE); in fnic_eth_send()
1009 spin_unlock_irqrestore(&fnic->wq_lock[0], flags); in fnic_eth_send()
1016 fnic->vlan_id, 1); in fnic_eth_send()
1017 spin_unlock_irqrestore(&fnic->wq_lock[0], flags); in fnic_eth_send()
1023 static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp) in fnic_send_frame() argument
1025 struct vnic_wq *wq = &fnic->wq[0]; in fnic_send_frame()
1040 fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb)) in fnic_send_frame()
1043 if (!fnic->vlan_hw_insert) { in fnic_send_frame()
1049 vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id); in fnic_send_frame()
1058 if (fnic->ctlr.map_dest) in fnic_send_frame()
1061 memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN); in fnic_send_frame()
1062 memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN); in fnic_send_frame()
1072 pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE); in fnic_send_frame()
1074 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND, in fnic_send_frame()
1079 spin_lock_irqsave(&fnic->wq_lock[0], flags); in fnic_send_frame()
1082 pci_unmap_single(fnic->pdev, pa, in fnic_send_frame()
1090 fnic->vlan_id, 1, 1, 1); in fnic_send_frame()
1092 spin_unlock_irqrestore(&fnic->wq_lock[0], flags); in fnic_send_frame()
1106 struct fnic *fnic = lport_priv(lp); in fnic_send() local
1109 if (fnic->in_remove) { in fnic_send()
1118 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_send()
1119 if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) { in fnic_send()
1120 skb_queue_tail(&fnic->tx_queue, fp_skb(fp)); in fnic_send()
1121 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_send()
1124 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_send()
1126 return fnic_send_frame(fnic, fp); in fnic_send()
1139 void fnic_flush_tx(struct fnic *fnic) in fnic_flush_tx() argument
1144 while ((skb = skb_dequeue(&fnic->tx_queue))) { in fnic_flush_tx()
1146 fnic_send_frame(fnic, fp); in fnic_flush_tx()
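The fnic_send()/fnic_flush_tx() hits show how transmit is gated on adapter state: if the adapter is mid-transition (neither ETH nor FC mode), outgoing frames are parked on tx_queue; once the state settles, fnic_flush_tx() drains the queue and pushes each frame to the hardware send path. A sketch with illustrative state values and a placeholder send routine:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

enum my_state { MY_IN_ETH_MODE, MY_IN_FC_MODE, MY_IN_TRANSITION };

struct my_tx_ctx {
	spinlock_t lock;
	enum my_state state;
	struct sk_buff_head tx_queue;
};

static void my_hw_send(struct my_tx_ctx *tx, struct sk_buff *skb)
{
	/* stand-in for fnic_send_frame(): map, build descriptor, post to WQ */
	dev_kfree_skb_any(skb);
}

static int my_send(struct my_tx_ctx *tx, struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(&tx->lock, flags);
	if (tx->state != MY_IN_FC_MODE && tx->state != MY_IN_ETH_MODE) {
		/* park the frame until the state transition completes */
		skb_queue_tail(&tx->tx_queue, skb);
		spin_unlock_irqrestore(&tx->lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&tx->lock, flags);

	my_hw_send(tx, skb);
	return 0;
}

static void my_flush_tx(struct my_tx_ctx *tx)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&tx->tx_queue)))
		my_hw_send(tx, skb);
}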
1156 static void fnic_set_eth_mode(struct fnic *fnic) in fnic_set_eth_mode() argument
1162 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_set_eth_mode()
1164 old_state = fnic->state; in fnic_set_eth_mode()
1169 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; in fnic_set_eth_mode()
1170 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_set_eth_mode()
1172 ret = fnic_fw_reset_handler(fnic); in fnic_set_eth_mode()
1174 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_set_eth_mode()
1175 if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE) in fnic_set_eth_mode()
1178 fnic->state = old_state; in fnic_set_eth_mode()
1185 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_set_eth_mode()
1194 struct fnic *fnic = vnic_dev_priv(wq->vdev); in fnic_wq_complete_frame_send() local
1196 pci_unmap_single(fnic->pdev, buf->dma_addr, in fnic_wq_complete_frame_send()
1207 struct fnic *fnic = vnic_dev_priv(vdev); in fnic_wq_cmpl_handler_cont() local
1210 spin_lock_irqsave(&fnic->wq_lock[q_number], flags); in fnic_wq_cmpl_handler_cont()
1211 vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index, in fnic_wq_cmpl_handler_cont()
1213 spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags); in fnic_wq_cmpl_handler_cont()
1218 int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do) in fnic_wq_cmpl_handler() argument
1223 for (i = 0; i < fnic->raw_wq_count; i++) { in fnic_wq_cmpl_handler()
1224 wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i], in fnic_wq_cmpl_handler()
1237 struct fnic *fnic = vnic_dev_priv(wq->vdev); in fnic_free_wq_buf() local
1239 pci_unmap_single(fnic->pdev, buf->dma_addr, in fnic_free_wq_buf()
1246 void fnic_fcoe_reset_vlans(struct fnic *fnic) in fnic_fcoe_reset_vlans() argument
1257 spin_lock_irqsave(&fnic->vlans_lock, flags); in fnic_fcoe_reset_vlans()
1258 if (!list_empty(&fnic->vlans)) { in fnic_fcoe_reset_vlans()
1259 list_for_each_entry_safe(vlan, next, &fnic->vlans, list) { in fnic_fcoe_reset_vlans()
1264 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_fcoe_reset_vlans()
1267 void fnic_handle_fip_timer(struct fnic *fnic) in fnic_handle_fip_timer() argument
1271 struct fnic_stats *fnic_stats = &fnic->fnic_stats; in fnic_handle_fip_timer()
1274 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_handle_fip_timer()
1275 if (fnic->stop_rx_link_events) { in fnic_handle_fip_timer()
1276 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_fip_timer()
1279 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_handle_fip_timer()
1281 if (fnic->ctlr.mode == FIP_ST_NON_FIP) in fnic_handle_fip_timer()
1284 spin_lock_irqsave(&fnic->vlans_lock, flags); in fnic_handle_fip_timer()
1285 if (list_empty(&fnic->vlans)) { in fnic_handle_fip_timer()
1287 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_handle_fip_timer()
1289 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_handle_fip_timer()
1290 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); in fnic_handle_fip_timer()
1294 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); in fnic_handle_fip_timer()
1295 shost_printk(KERN_DEBUG, fnic->lport->host, in fnic_handle_fip_timer()
1300 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_handle_fip_timer()
1302 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_handle_fip_timer()
1306 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, in fnic_handle_fip_timer()
1308 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_handle_fip_timer()
1309 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); in fnic_handle_fip_timer()
1317 shost_printk(KERN_INFO, fnic->lport->host, in fnic_handle_fip_timer()
1323 if (list_empty(&fnic->vlans)) { in fnic_handle_fip_timer()
1325 spin_unlock_irqrestore(&fnic->vlans_lock, in fnic_handle_fip_timer()
1327 shost_printk(KERN_INFO, fnic->lport->host, in fnic_handle_fip_timer()
1330 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); in fnic_handle_fip_timer()
1334 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, in fnic_handle_fip_timer()
1336 fnic->set_vlan(fnic, vlan->vid); in fnic_handle_fip_timer()
1339 spin_unlock_irqrestore(&fnic->vlans_lock, flags); in fnic_handle_fip_timer()
1344 mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); in fnic_handle_fip_timer()
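The fnic_handle_fip_timer() hits show the FIP VLAN retry loop: inspect the first discovered VLAN under vlans_lock, restart VLAN discovery if none exist or retries are exhausted, otherwise resend the solicitation and re-arm fip_timer. The sketch below simplifies that logic (constants, types, and the give-up behaviour are stand-ins, not the driver's actual retry policy):

#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/timer.h>

#define MY_MAX_VLAN_RETRIES	3	/* illustrative limit */
#define MY_SOL_INTERVAL_MS	1000	/* illustrative re-solicit interval */

struct my_vlan {
	struct list_head list;
	u16 vid;
	int sol_count;
};

struct my_fip_ctx {
	spinlock_t vlans_lock;
	struct list_head vlans;
	struct timer_list fip_timer;
};

static void my_fip_timer_expired(struct my_fip_ctx *c)
{
	struct my_vlan *vlan;
	unsigned long flags;

	spin_lock_irqsave(&c->vlans_lock, flags);
	if (list_empty(&c->vlans)) {
		spin_unlock_irqrestore(&c->vlans_lock, flags);
		/* no VLANs discovered yet: restart VLAN discovery here */
		return;
	}

	vlan = list_first_entry(&c->vlans, struct my_vlan, list);
	if (vlan->sol_count >= MY_MAX_VLAN_RETRIES) {
		/* give up on this VLAN; the real driver would move on to the
		 * next candidate or restart discovery at this point */
		list_del(&vlan->list);
		spin_unlock_irqrestore(&c->vlans_lock, flags);
		kfree(vlan);
		return;
	}

	vlan->sol_count++;
	spin_unlock_irqrestore(&c->vlans_lock, flags);

	/* ... resend the FIP solicitation for this VLAN here ... */
	mod_timer(&c->fip_timer,
		  round_jiffies(jiffies + msecs_to_jiffies(MY_SOL_INTERVAL_MS)));
}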