Lines matching refs:phba
81 struct lpfc_hba *phba; in lpfc_terminate_rport_io() local
94 phba = ndlp->phba; in lpfc_terminate_rport_io()
102 &phba->sli.sli3_ring[LPFC_FCP_RING], in lpfc_terminate_rport_io()
117 struct lpfc_hba *phba; in lpfc_dev_loss_tmo_callbk() local
129 phba = vport->phba; in lpfc_dev_loss_tmo_callbk()
183 spin_lock_irqsave(&phba->hbalock, iflags); in lpfc_dev_loss_tmo_callbk()
186 list_add_tail(&evtp->evt_listp, &phba->work_list); in lpfc_dev_loss_tmo_callbk()
187 lpfc_worker_wake_up(phba); in lpfc_dev_loss_tmo_callbk()
189 spin_unlock_irqrestore(&phba->hbalock, iflags); in lpfc_dev_loss_tmo_callbk()
210 struct lpfc_hba *phba; in lpfc_dev_loss_tmo_handler() local
230 phba = vport->phba; in lpfc_dev_loss_tmo_handler()
232 if (phba->sli_rev == LPFC_SLI_REV4) in lpfc_dev_loss_tmo_handler()
233 fcf_inuse = lpfc_fcf_inuse(phba); in lpfc_dev_loss_tmo_handler()
258 &phba->sli.sli3_ring[LPFC_FCP_RING], in lpfc_dev_loss_tmo_handler()
294 lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING], in lpfc_dev_loss_tmo_handler()
347 lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse, in lpfc_sli4_post_dev_loss_tmo_handler() argument
356 if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) { in lpfc_sli4_post_dev_loss_tmo_handler()
357 spin_lock_irq(&phba->hbalock); in lpfc_sli4_post_dev_loss_tmo_handler()
358 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { in lpfc_sli4_post_dev_loss_tmo_handler()
359 if (phba->hba_flag & HBA_DEVLOSS_TMO) { in lpfc_sli4_post_dev_loss_tmo_handler()
360 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_post_dev_loss_tmo_handler()
363 phba->hba_flag |= HBA_DEVLOSS_TMO; in lpfc_sli4_post_dev_loss_tmo_handler()
364 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, in lpfc_sli4_post_dev_loss_tmo_handler()
368 if (phba->fcf.fcf_flag & FCF_REDISC_PROG) { in lpfc_sli4_post_dev_loss_tmo_handler()
369 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_post_dev_loss_tmo_handler()
370 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, in lpfc_sli4_post_dev_loss_tmo_handler()
375 if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) { in lpfc_sli4_post_dev_loss_tmo_handler()
376 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_post_dev_loss_tmo_handler()
377 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, in lpfc_sli4_post_dev_loss_tmo_handler()
381 lpfc_unregister_fcf_rescan(phba); in lpfc_sli4_post_dev_loss_tmo_handler()
384 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_post_dev_loss_tmo_handler()
385 if (phba->hba_flag & FCF_TS_INPROG) in lpfc_sli4_post_dev_loss_tmo_handler()
386 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, in lpfc_sli4_post_dev_loss_tmo_handler()
388 if (phba->hba_flag & FCF_RR_INPROG) in lpfc_sli4_post_dev_loss_tmo_handler()
389 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, in lpfc_sli4_post_dev_loss_tmo_handler()
393 lpfc_unregister_unused_fcf(phba); in lpfc_sli4_post_dev_loss_tmo_handler()
407 lpfc_alloc_fast_evt(struct lpfc_hba *phba) { in lpfc_alloc_fast_evt() argument
411 if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT) in lpfc_alloc_fast_evt()
417 atomic_inc(&phba->fast_event_count); in lpfc_alloc_fast_evt()
433 lpfc_free_fast_evt(struct lpfc_hba *phba, in lpfc_free_fast_evt() argument
436 atomic_dec(&phba->fast_event_count); in lpfc_free_fast_evt()
450 lpfc_send_fastpath_evt(struct lpfc_hba *phba, in lpfc_send_fastpath_evt() argument
476 lpfc_free_fast_evt(phba, fast_evt_data); in lpfc_send_fastpath_evt()
497 lpfc_free_fast_evt(phba, fast_evt_data); in lpfc_send_fastpath_evt()
501 lpfc_free_fast_evt(phba, fast_evt_data); in lpfc_send_fastpath_evt()
505 if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME) in lpfc_send_fastpath_evt()
512 lpfc_free_fast_evt(phba, fast_evt_data); in lpfc_send_fastpath_evt()
517 lpfc_work_list_done(struct lpfc_hba *phba) in lpfc_work_list_done() argument
525 spin_lock_irq(&phba->hbalock); in lpfc_work_list_done()
526 while (!list_empty(&phba->work_list)) { in lpfc_work_list_done()
527 list_remove_head((&phba->work_list), evtp, typeof(*evtp), in lpfc_work_list_done()
529 spin_unlock_irq(&phba->hbalock); in lpfc_work_list_done()
550 if (phba->sli_rev == LPFC_SLI_REV4) in lpfc_work_list_done()
551 lpfc_sli4_post_dev_loss_tmo_handler(phba, in lpfc_work_list_done()
565 if (phba->link_state < LPFC_LINK_DOWN) in lpfc_work_list_done()
566 *(int *) (evtp->evt_arg1) = lpfc_online(phba); in lpfc_work_list_done()
572 if (phba->link_state >= LPFC_LINK_DOWN) in lpfc_work_list_done()
573 lpfc_offline_prep(phba, LPFC_MBX_WAIT); in lpfc_work_list_done()
578 lpfc_offline(phba); in lpfc_work_list_done()
579 lpfc_sli_brdrestart(phba); in lpfc_work_list_done()
581 lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY); in lpfc_work_list_done()
582 lpfc_unblock_mgmt_io(phba); in lpfc_work_list_done()
586 lpfc_offline(phba); in lpfc_work_list_done()
587 lpfc_reset_barrier(phba); in lpfc_work_list_done()
588 lpfc_sli_brdreset(phba); in lpfc_work_list_done()
589 lpfc_hba_down_post(phba); in lpfc_work_list_done()
591 lpfc_sli_brdready(phba, HS_MBRDY); in lpfc_work_list_done()
592 lpfc_unblock_mgmt_io(phba); in lpfc_work_list_done()
596 lpfc_offline(phba); in lpfc_work_list_done()
598 = (phba->pport->stopped) in lpfc_work_list_done()
599 ? 0 : lpfc_sli_brdkill(phba); in lpfc_work_list_done()
600 lpfc_unblock_mgmt_io(phba); in lpfc_work_list_done()
604 lpfc_send_fastpath_evt(phba, evtp); in lpfc_work_list_done()
608 if (!(phba->pport->load_flag & FC_UNLOADING)) in lpfc_work_list_done()
609 lpfc_reset_hba(phba); in lpfc_work_list_done()
614 spin_lock_irq(&phba->hbalock); in lpfc_work_list_done()
616 spin_unlock_irq(&phba->hbalock); in lpfc_work_list_done()
621 lpfc_work_done(struct lpfc_hba *phba) in lpfc_work_done() argument
629 spin_lock_irq(&phba->hbalock); in lpfc_work_done()
630 ha_copy = phba->work_ha; in lpfc_work_done()
631 phba->work_ha = 0; in lpfc_work_done()
632 spin_unlock_irq(&phba->hbalock); in lpfc_work_done()
635 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) in lpfc_work_done()
636 lpfc_sli4_post_async_mbox(phba); in lpfc_work_done()
640 lpfc_handle_eratt(phba); in lpfc_work_done()
642 if (phba->fw_dump_cmpl) { in lpfc_work_done()
643 complete(phba->fw_dump_cmpl); in lpfc_work_done()
644 phba->fw_dump_cmpl = NULL; in lpfc_work_done()
649 lpfc_sli_handle_mb_event(phba); in lpfc_work_done()
652 lpfc_handle_latt(phba); in lpfc_work_done()
655 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) { in lpfc_work_done()
656 if (phba->hba_flag & HBA_RRQ_ACTIVE) in lpfc_work_done()
657 lpfc_handle_rrq_active(phba); in lpfc_work_done()
658 if (phba->hba_flag & ELS_XRI_ABORT_EVENT) in lpfc_work_done()
659 lpfc_sli4_els_xri_abort_event_proc(phba); in lpfc_work_done()
660 if (phba->hba_flag & ASYNC_EVENT) in lpfc_work_done()
661 lpfc_sli4_async_event_proc(phba); in lpfc_work_done()
662 if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) { in lpfc_work_done()
663 spin_lock_irq(&phba->hbalock); in lpfc_work_done()
664 phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER; in lpfc_work_done()
665 spin_unlock_irq(&phba->hbalock); in lpfc_work_done()
666 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); in lpfc_work_done()
668 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) in lpfc_work_done()
669 lpfc_sli4_fcf_redisc_event_proc(phba); in lpfc_work_done()
672 vports = lpfc_create_vport_work_array(phba); in lpfc_work_done()
674 for (i = 0; i <= phba->max_vports; i++) { in lpfc_work_done()
680 vport = phba->pport; in lpfc_work_done()
694 lpfc_hb_timeout_handler(phba); in lpfc_work_done()
696 lpfc_mbox_timeout_handler(phba); in lpfc_work_done()
698 lpfc_unblock_fabric_iocbs(phba); in lpfc_work_done()
700 lpfc_ramp_down_queue_handler(phba); in lpfc_work_done()
704 lpfc_destroy_vport_work_array(phba, vports); in lpfc_work_done()
706 pring = lpfc_phba_elsring(phba); in lpfc_work_done()
711 phba->hba_flag & HBA_SP_QUEUE_EVT)) { in lpfc_work_done()
715 if (!(phba->hba_flag & HBA_SP_QUEUE_EVT)) in lpfc_work_done()
716 set_bit(LPFC_DATA_READY, &phba->data_flags); in lpfc_work_done()
721 if (phba->link_state >= LPFC_LINK_DOWN || in lpfc_work_done()
722 phba->link_flag & LS_MDS_LOOPBACK) { in lpfc_work_done()
724 lpfc_sli_handle_slow_ring_event(phba, pring, in lpfc_work_done()
729 if (phba->sli_rev == LPFC_SLI_REV4) in lpfc_work_done()
730 lpfc_drain_txq(phba); in lpfc_work_done()
734 if (phba->sli_rev <= LPFC_SLI_REV3) { in lpfc_work_done()
735 spin_lock_irq(&phba->hbalock); in lpfc_work_done()
736 control = readl(phba->HCregaddr); in lpfc_work_done()
738 lpfc_debugfs_slow_ring_trc(phba, in lpfc_work_done()
743 writel(control, phba->HCregaddr); in lpfc_work_done()
744 readl(phba->HCregaddr); /* flush */ in lpfc_work_done()
746 lpfc_debugfs_slow_ring_trc(phba, in lpfc_work_done()
750 spin_unlock_irq(&phba->hbalock); in lpfc_work_done()
753 lpfc_work_list_done(phba); in lpfc_work_done()
759 struct lpfc_hba *phba = p; in lpfc_do_work() local
764 phba->data_flags = 0; in lpfc_do_work()
768 rc = wait_event_interruptible(phba->work_waitq, in lpfc_do_work()
770 &phba->data_flags) in lpfc_do_work()
774 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_do_work()
780 lpfc_work_done(phba); in lpfc_do_work()
782 phba->worker_thread = NULL; in lpfc_do_work()
783 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, in lpfc_do_work()
794 lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2, in lpfc_workq_post_event() argument
812 spin_lock_irqsave(&phba->hbalock, flags); in lpfc_workq_post_event()
813 list_add_tail(&evtp->evt_listp, &phba->work_list); in lpfc_workq_post_event()
814 spin_unlock_irqrestore(&phba->hbalock, flags); in lpfc_workq_post_event()
816 lpfc_worker_wake_up(phba); in lpfc_workq_post_event()
825 struct lpfc_hba *phba = vport->phba; in lpfc_cleanup_rpis() local
833 if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) || in lpfc_cleanup_rpis()
839 if ((phba->sli_rev < LPFC_SLI_REV4) && in lpfc_cleanup_rpis()
844 if (phba->nvmet_support && in lpfc_cleanup_rpis()
846 lpfc_nvmet_invalidate_host(phba, ndlp); in lpfc_cleanup_rpis()
853 if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) { in lpfc_cleanup_rpis()
854 if (phba->sli_rev == LPFC_SLI_REV4) in lpfc_cleanup_rpis()
906 lpfc_linkdown(struct lpfc_hba *phba) in lpfc_linkdown() argument
908 struct lpfc_vport *vport = phba->pport; in lpfc_linkdown()
914 if (phba->link_state == LPFC_LINK_DOWN) in lpfc_linkdown()
918 lpfc_scsi_dev_block(phba); in lpfc_linkdown()
920 phba->defer_flogi_acc_flag = false; in lpfc_linkdown()
922 spin_lock_irq(&phba->hbalock); in lpfc_linkdown()
923 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); in lpfc_linkdown()
924 spin_unlock_irq(&phba->hbalock); in lpfc_linkdown()
925 if (phba->link_state > LPFC_LINK_DOWN) { in lpfc_linkdown()
926 phba->link_state = LPFC_LINK_DOWN; in lpfc_linkdown()
927 if (phba->sli4_hba.conf_trunk) { in lpfc_linkdown()
928 phba->trunk_link.link0.state = 0; in lpfc_linkdown()
929 phba->trunk_link.link1.state = 0; in lpfc_linkdown()
930 phba->trunk_link.link2.state = 0; in lpfc_linkdown()
931 phba->trunk_link.link3.state = 0; in lpfc_linkdown()
932 phba->sli4_hba.link_state.logical_speed = in lpfc_linkdown()
936 phba->pport->fc_flag &= ~FC_LBIT; in lpfc_linkdown()
939 vports = lpfc_create_vport_work_array(phba); in lpfc_linkdown()
941 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { in lpfc_linkdown()
949 if (phba->nvmet_support) in lpfc_linkdown()
950 lpfc_nvmet_update_targetport(phba); in lpfc_linkdown()
956 lpfc_destroy_vport_work_array(phba, vports); in lpfc_linkdown()
959 if (phba->sli_rev > LPFC_SLI_REV3) in lpfc_linkdown()
962 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); in lpfc_linkdown()
964 lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb); in lpfc_linkdown()
967 if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT) in lpfc_linkdown()
969 mempool_free(mb, phba->mbox_mem_pool); in lpfc_linkdown()
975 if (phba->pport->fc_flag & FC_PT2PT) { in lpfc_linkdown()
976 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); in lpfc_linkdown()
978 lpfc_config_link(phba, mb); in lpfc_linkdown()
981 if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT) in lpfc_linkdown()
983 mempool_free(mb, phba->mbox_mem_pool); in lpfc_linkdown()
987 phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI); in lpfc_linkdown()
988 phba->pport->rcv_flogi_cnt = 0; in lpfc_linkdown()
1025 struct lpfc_hba *phba = vport->phba; in lpfc_linkup_port() local
1032 phba->fc_topology, phba->fc_linkspeed, phba->link_flag); in lpfc_linkup_port()
1035 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && in lpfc_linkup_port()
1036 (vport != phba->pport)) in lpfc_linkup_port()
1056 lpfc_linkup(struct lpfc_hba *phba) in lpfc_linkup() argument
1060 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); in lpfc_linkup()
1062 phba->link_state = LPFC_LINK_UP; in lpfc_linkup()
1065 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); in lpfc_linkup()
1066 del_timer_sync(&phba->fabric_block_timer); in lpfc_linkup()
1068 vports = lpfc_create_vport_work_array(phba); in lpfc_linkup()
1070 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) in lpfc_linkup()
1072 lpfc_destroy_vport_work_array(phba, vports); in lpfc_linkup()
1079 phba->pport->rcv_flogi_cnt = 0; in lpfc_linkup()
1083 phba->hba_flag &= ~(HBA_FLOGI_ISSUED); in lpfc_linkup()
1084 phba->defer_flogi_acc_flag = false; in lpfc_linkup()
1096 lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) in lpfc_mbx_cmpl_clear_la() argument
1100 struct lpfc_sli *psli = &phba->sli; in lpfc_mbx_cmpl_clear_la()
1115 phba->link_state = LPFC_HBA_ERROR; in lpfc_mbx_cmpl_clear_la()
1120 phba->link_state = LPFC_HBA_READY; in lpfc_mbx_cmpl_clear_la()
1122 spin_lock_irq(&phba->hbalock); in lpfc_mbx_cmpl_clear_la()
1124 control = readl(phba->HCregaddr); in lpfc_mbx_cmpl_clear_la()
1126 writel(control, phba->HCregaddr); in lpfc_mbx_cmpl_clear_la()
1127 readl(phba->HCregaddr); /* flush */ in lpfc_mbx_cmpl_clear_la()
1128 spin_unlock_irq(&phba->hbalock); in lpfc_mbx_cmpl_clear_la()
1129 mempool_free(pmb, phba->mbox_mem_pool); in lpfc_mbx_cmpl_clear_la()
1136 mempool_free(pmb, phba->mbox_mem_pool); in lpfc_mbx_cmpl_clear_la()
1146 spin_lock_irq(&phba->hbalock); in lpfc_mbx_cmpl_clear_la()
1148 control = readl(phba->HCregaddr); in lpfc_mbx_cmpl_clear_la()
1150 writel(control, phba->HCregaddr); in lpfc_mbx_cmpl_clear_la()
1151 readl(phba->HCregaddr); /* flush */ in lpfc_mbx_cmpl_clear_la()
1152 spin_unlock_irq(&phba->hbalock); in lpfc_mbx_cmpl_clear_la()
1158 lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) in lpfc_mbx_cmpl_local_config_link() argument
1166 mempool_free(pmb, phba->mbox_mem_pool); in lpfc_mbx_cmpl_local_config_link()
1172 if ((phba->sli_rev == LPFC_SLI_REV4) && in lpfc_mbx_cmpl_local_config_link()
1173 !(phba->hba_flag & HBA_FCOE_MODE) && in lpfc_mbx_cmpl_local_config_link()
1174 (phba->link_flag & LS_LOOPBACK_MODE)) in lpfc_mbx_cmpl_local_config_link()
1177 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP && in lpfc_mbx_cmpl_local_config_link()
1195 if (phba->bbcredit_support && phba->cfg_enable_bbcr && in lpfc_mbx_cmpl_local_config_link()
1196 !(phba->link_flag & LS_LOOPBACK_MODE)) { in lpfc_mbx_cmpl_local_config_link()
1197 sparam_mb = mempool_alloc(phba->mbox_mem_pool, in lpfc_mbx_cmpl_local_config_link()
1202 rc = lpfc_read_sparam(phba, sparam_mb, 0); in lpfc_mbx_cmpl_local_config_link()
1204 mempool_free(sparam_mb, phba->mbox_mem_pool); in lpfc_mbx_cmpl_local_config_link()
1209 rc = lpfc_sli_issue_mbox(phba, sparam_mb, MBX_NOWAIT); in lpfc_mbx_cmpl_local_config_link()
1213 lpfc_mbuf_free(phba, sparam_mp->virt, in lpfc_mbx_cmpl_local_config_link()
1217 mempool_free(sparam_mb, phba->mbox_mem_pool); in lpfc_mbx_cmpl_local_config_link()
1221 phba->hba_flag |= HBA_DEFER_FLOGI; in lpfc_mbx_cmpl_local_config_link()
1237 lpfc_linkdown(phba); in lpfc_mbx_cmpl_local_config_link()
1243 lpfc_issue_clear_la(phba, vport); in lpfc_mbx_cmpl_local_config_link()
1256 lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba) in lpfc_sli4_clear_fcf_rr_bmask() argument
1260 memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask)); in lpfc_sli4_clear_fcf_rr_bmask()
1261 spin_lock_irq(&phba->hbalock); in lpfc_sli4_clear_fcf_rr_bmask()
1263 &phba->fcf.fcf_pri_list, list) { in lpfc_sli4_clear_fcf_rr_bmask()
1267 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_clear_fcf_rr_bmask()
1270 lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) in lpfc_mbx_cmpl_reg_fcfi() argument
1283 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi); in lpfc_mbx_cmpl_reg_fcfi()
1285 spin_lock_irq(&phba->hbalock); in lpfc_mbx_cmpl_reg_fcfi()
1286 phba->fcf.fcf_flag |= FCF_REGISTERED; in lpfc_mbx_cmpl_reg_fcfi()
1287 spin_unlock_irq(&phba->hbalock); in lpfc_mbx_cmpl_reg_fcfi()
1290 if ((!(phba->hba_flag & FCF_RR_INPROG)) && in lpfc_mbx_cmpl_reg_fcfi()
1291 lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF)) in lpfc_mbx_cmpl_reg_fcfi()
1295 spin_lock_irq(&phba->hbalock); in lpfc_mbx_cmpl_reg_fcfi()
1296 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); in lpfc_mbx_cmpl_reg_fcfi()
1297 phba->hba_flag &= ~FCF_TS_INPROG; in lpfc_mbx_cmpl_reg_fcfi()
1299 phba->hba_flag |= FCF_RR_INPROG; in lpfc_mbx_cmpl_reg_fcfi()
1300 spin_unlock_irq(&phba->hbalock); in lpfc_mbx_cmpl_reg_fcfi()
1304 spin_unlock_irq(&phba->hbalock); in lpfc_mbx_cmpl_reg_fcfi()
1308 spin_lock_irq(&phba->hbalock); in lpfc_mbx_cmpl_reg_fcfi()
1309 phba->hba_flag &= ~FCF_RR_INPROG; in lpfc_mbx_cmpl_reg_fcfi()
1310 spin_unlock_irq(&phba->hbalock); in lpfc_mbx_cmpl_reg_fcfi()
1312 mempool_free(mboxq, phba->mbox_mem_pool); in lpfc_mbx_cmpl_reg_fcfi()
1421 __lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index, in __lpfc_update_fcf_record_pri() argument
1427 fcf_pri = &phba->fcf.fcf_pri[fcf_index]; in __lpfc_update_fcf_record_pri()
1507 __lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec, in __lpfc_update_fcf_record() argument
1511 lockdep_assert_held(&phba->hbalock); in __lpfc_update_fcf_record()
1519 __lpfc_update_fcf_record_pri(phba, in __lpfc_update_fcf_record()
1532 lpfc_register_fcf(struct lpfc_hba *phba) in lpfc_register_fcf() argument
1537 spin_lock_irq(&phba->hbalock); in lpfc_register_fcf()
1539 if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) { in lpfc_register_fcf()
1540 phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); in lpfc_register_fcf()
1541 spin_unlock_irq(&phba->hbalock); in lpfc_register_fcf()
1546 if (phba->fcf.fcf_flag & FCF_REGISTERED) { in lpfc_register_fcf()
1547 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); in lpfc_register_fcf()
1548 phba->hba_flag &= ~FCF_TS_INPROG; in lpfc_register_fcf()
1549 if (phba->pport->port_state != LPFC_FLOGI && in lpfc_register_fcf()
1550 phba->pport->fc_flag & FC_FABRIC) { in lpfc_register_fcf()
1551 phba->hba_flag |= FCF_RR_INPROG; in lpfc_register_fcf()
1552 spin_unlock_irq(&phba->hbalock); in lpfc_register_fcf()
1553 lpfc_initial_flogi(phba->pport); in lpfc_register_fcf()
1556 spin_unlock_irq(&phba->hbalock); in lpfc_register_fcf()
1559 spin_unlock_irq(&phba->hbalock); in lpfc_register_fcf()
1561 fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); in lpfc_register_fcf()
1563 spin_lock_irq(&phba->hbalock); in lpfc_register_fcf()
1564 phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); in lpfc_register_fcf()
1565 spin_unlock_irq(&phba->hbalock); in lpfc_register_fcf()
1569 lpfc_reg_fcfi(phba, fcf_mbxq); in lpfc_register_fcf()
1570 fcf_mbxq->vport = phba->pport; in lpfc_register_fcf()
1572 rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT); in lpfc_register_fcf()
1574 spin_lock_irq(&phba->hbalock); in lpfc_register_fcf()
1575 phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); in lpfc_register_fcf()
1576 spin_unlock_irq(&phba->hbalock); in lpfc_register_fcf()
1577 mempool_free(fcf_mbxq, phba->mbox_mem_pool); in lpfc_register_fcf()
1602 lpfc_match_fcf_conn_list(struct lpfc_hba *phba, in lpfc_match_fcf_conn_list() argument
1629 if (!(phba->hba_flag & HBA_FIP_SUPPORT)) { in lpfc_match_fcf_conn_list()
1633 if (phba->valid_vlan) in lpfc_match_fcf_conn_list()
1634 *vlan_id = phba->vlan_id; in lpfc_match_fcf_conn_list()
1644 if (list_empty(&phba->fcf_conn_rec_list)) { in lpfc_match_fcf_conn_list()
1665 &phba->fcf_conn_rec_list, list) { in lpfc_match_fcf_conn_list()
1787 lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf) in lpfc_check_pending_fcoe_event() argument
1793 if ((phba->link_state >= LPFC_LINK_UP) && in lpfc_check_pending_fcoe_event()
1794 (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan)) in lpfc_check_pending_fcoe_event()
1797 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, in lpfc_check_pending_fcoe_event()
1801 phba->link_state, phba->fcoe_eventtag_at_fcf_scan, in lpfc_check_pending_fcoe_event()
1802 phba->fcoe_eventtag); in lpfc_check_pending_fcoe_event()
1804 spin_lock_irq(&phba->hbalock); in lpfc_check_pending_fcoe_event()
1805 phba->fcf.fcf_flag &= ~FCF_AVAILABLE; in lpfc_check_pending_fcoe_event()
1806 spin_unlock_irq(&phba->hbalock); in lpfc_check_pending_fcoe_event()
1808 if (phba->link_state >= LPFC_LINK_UP) { in lpfc_check_pending_fcoe_event()
1809 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, in lpfc_check_pending_fcoe_event()
1813 phba->fcoe_eventtag_at_fcf_scan, in lpfc_check_pending_fcoe_event()
1814 phba->fcoe_eventtag); in lpfc_check_pending_fcoe_event()
1815 lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); in lpfc_check_pending_fcoe_event()
1821 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, in lpfc_check_pending_fcoe_event()
1823 "state change (x%x)\n", phba->link_state); in lpfc_check_pending_fcoe_event()
1824 spin_lock_irq(&phba->hbalock); in lpfc_check_pending_fcoe_event()
1825 phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); in lpfc_check_pending_fcoe_event()
1826 phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY); in lpfc_check_pending_fcoe_event()
1827 spin_unlock_irq(&phba->hbalock); in lpfc_check_pending_fcoe_event()
1832 spin_lock_irq(&phba->hbalock); in lpfc_check_pending_fcoe_event()
1833 phba->fcf.fcf_flag &= ~FCF_REGISTERED; in lpfc_check_pending_fcoe_event()
1834 spin_unlock_irq(&phba->hbalock); in lpfc_check_pending_fcoe_event()
1835 lpfc_sli4_unregister_fcf(phba); in lpfc_check_pending_fcoe_event()
1856 lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt) in lpfc_sli4_new_fcf_random_select() argument
1884 lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, in lpfc_sli4_fcf_rec_mbox_parse() argument
1899 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_sli4_fcf_rec_mbox_parse()
1910 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); in lpfc_sli4_fcf_rec_mbox_parse()
1915 lpfc_printf_log(phba, KERN_ERR, in lpfc_sli4_fcf_rec_mbox_parse()
1920 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_sli4_fcf_rec_mbox_parse()
1953 lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba, in lpfc_sli4_log_fcf_record_info() argument
1958 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, in lpfc_sli4_log_fcf_record_info()
2017 lpfc_sli4_fcf_record_match(struct lpfc_hba *phba, in lpfc_sli4_fcf_record_match() argument
2049 struct lpfc_hba *phba = vport->phba; in lpfc_sli4_fcf_rr_next_proc() local
2053 spin_lock_irq(&phba->hbalock); in lpfc_sli4_fcf_rr_next_proc()
2054 if (phba->hba_flag & HBA_DEVLOSS_TMO) { in lpfc_sli4_fcf_rr_next_proc()
2055 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_fcf_rr_next_proc()
2056 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, in lpfc_sli4_fcf_rr_next_proc()
2060 phba->fcf.current_rec.fcf_indx); in lpfc_sli4_fcf_rr_next_proc()
2061 lpfc_unregister_fcf_rescan(phba); in lpfc_sli4_fcf_rr_next_proc()
2065 phba->hba_flag &= ~FCF_RR_INPROG; in lpfc_sli4_fcf_rr_next_proc()
2067 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); in lpfc_sli4_fcf_rr_next_proc()
2068 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_fcf_rr_next_proc()
2069 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, in lpfc_sli4_fcf_rr_next_proc()
2072 phba->pport->port_state, LPFC_VPORT_UNKNOWN); in lpfc_sli4_fcf_rr_next_proc()
2073 phba->pport->port_state = LPFC_VPORT_UNKNOWN; in lpfc_sli4_fcf_rr_next_proc()
2075 if (!phba->fcf.fcf_redisc_attempted) { in lpfc_sli4_fcf_rr_next_proc()
2076 lpfc_unregister_fcf(phba); in lpfc_sli4_fcf_rr_next_proc()
2078 rc = lpfc_sli4_redisc_fcf_table(phba); in lpfc_sli4_fcf_rr_next_proc()
2080 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, in lpfc_sli4_fcf_rr_next_proc()
2082 phba->fcf.fcf_redisc_attempted = 1; in lpfc_sli4_fcf_rr_next_proc()
2083 lpfc_sli4_clear_fcf_rr_bmask(phba); in lpfc_sli4_fcf_rr_next_proc()
2085 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, in lpfc_sli4_fcf_rr_next_proc()
2090 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, in lpfc_sli4_fcf_rr_next_proc()
2096 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS, in lpfc_sli4_fcf_rr_next_proc()
2099 rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index); in lpfc_sli4_fcf_rr_next_proc()
2101 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, in lpfc_sli4_fcf_rr_next_proc()
2104 rc, phba->fcf.current_rec.fcf_indx); in lpfc_sli4_fcf_rr_next_proc()
2124 static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba, in lpfc_sli4_fcf_pri_list_del() argument
2129 new_fcf_pri = &phba->fcf.fcf_pri[fcf_index]; in lpfc_sli4_fcf_pri_list_del()
2130 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, in lpfc_sli4_fcf_pri_list_del()
2134 spin_lock_irq(&phba->hbalock); in lpfc_sli4_fcf_pri_list_del()
2136 if (phba->fcf.current_rec.priority == in lpfc_sli4_fcf_pri_list_del()
2138 phba->fcf.eligible_fcf_cnt--; in lpfc_sli4_fcf_pri_list_del()
2142 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_fcf_pri_list_del()
2156 lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index) in lpfc_sli4_set_fcf_flogi_fail() argument
2159 new_fcf_pri = &phba->fcf.fcf_pri[fcf_index]; in lpfc_sli4_set_fcf_flogi_fail()
2160 spin_lock_irq(&phba->hbalock); in lpfc_sli4_set_fcf_flogi_fail()
2162 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_set_fcf_flogi_fail()
2181 static int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba, in lpfc_sli4_fcf_pri_list_add() argument
2192 new_fcf_pri = &phba->fcf.fcf_pri[fcf_index]; in lpfc_sli4_fcf_pri_list_add()
2193 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, in lpfc_sli4_fcf_pri_list_add()
2197 spin_lock_irq(&phba->hbalock); in lpfc_sli4_fcf_pri_list_add()
2202 if (list_empty(&phba->fcf.fcf_pri_list)) { in lpfc_sli4_fcf_pri_list_add()
2203 list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list); in lpfc_sli4_fcf_pri_list_add()
2204 ret = lpfc_sli4_fcf_rr_index_set(phba, in lpfc_sli4_fcf_pri_list_add()
2209 last_index = find_first_bit(phba->fcf.fcf_rr_bmask, in lpfc_sli4_fcf_pri_list_add()
2215 current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority; in lpfc_sli4_fcf_pri_list_add()
2217 list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list); in lpfc_sli4_fcf_pri_list_add()
2219 memset(phba->fcf.fcf_rr_bmask, 0, in lpfc_sli4_fcf_pri_list_add()
2220 sizeof(*phba->fcf.fcf_rr_bmask)); in lpfc_sli4_fcf_pri_list_add()
2222 phba->fcf.eligible_fcf_cnt = 1; in lpfc_sli4_fcf_pri_list_add()
2225 phba->fcf.eligible_fcf_cnt++; in lpfc_sli4_fcf_pri_list_add()
2226 ret = lpfc_sli4_fcf_rr_index_set(phba, in lpfc_sli4_fcf_pri_list_add()
2232 &phba->fcf.fcf_pri_list, list) { in lpfc_sli4_fcf_pri_list_add()
2235 if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list) in lpfc_sli4_fcf_pri_list_add()
2237 &phba->fcf.fcf_pri_list); in lpfc_sli4_fcf_pri_list_add()
2244 } else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list in lpfc_sli4_fcf_pri_list_add()
2259 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_fcf_pri_list_add()
2279 lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec() argument
2290 if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) { in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2291 lpfc_sli4_mbox_cmd_free(phba, mboxq); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2296 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2299 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2303 spin_lock_irq(&phba->hbalock); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2304 phba->hba_flag &= ~FCF_TS_INPROG; in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2305 spin_unlock_irq(&phba->hbalock); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2306 lpfc_sli4_mbox_cmd_free(phba, mboxq); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2311 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2315 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2324 lpfc_sli4_fcf_pri_list_del(phba, in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2327 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2338 if ((phba->fcf.fcf_flag & FCF_IN_USE) && in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2339 lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2342 phba->fcf.current_rec.fcf_indx) { in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2343 lpfc_printf_log(phba, KERN_ERR, in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2349 phba->fcf.current_rec.fcf_indx); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2358 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) && in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2359 !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2360 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2364 phba->fcf.current_rec.fcf_indx); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2365 spin_lock_irq(&phba->hbalock); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2366 phba->fcf.fcf_flag |= FCF_REDISC_FOV; in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2367 spin_unlock_irq(&phba->hbalock); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2368 lpfc_sli4_mbox_cmd_free(phba, mboxq); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2369 lpfc_sli4_fcf_scan_read_fcf_rec(phba, in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2377 rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2389 spin_lock_irq(&phba->hbalock); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2390 if (phba->fcf.fcf_flag & FCF_IN_USE) { in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2391 if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV && in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2392 lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2395 phba->fcf.current_rec.fcf_indx) { in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2396 phba->fcf.fcf_flag |= FCF_AVAILABLE; in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2397 if (phba->fcf.fcf_flag & FCF_REDISC_PEND) in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2400 phba); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2401 else if (phba->fcf.fcf_flag & FCF_REDISC_FOV) in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2403 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2404 spin_unlock_irq(&phba->hbalock); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2405 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2409 phba->fcf.current_rec.fcf_indx, in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2410 phba->pport->port_state, in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2411 phba->pport->fc_flag); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2414 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2419 phba->fcf.current_rec.fcf_indx); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2428 if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2429 spin_unlock_irq(&phba->hbalock); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2437 if (phba->fcf.fcf_flag & FCF_REDISC_FOV) in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2438 fcf_rec = &phba->fcf.failover_rec; in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2440 fcf_rec = &phba->fcf.current_rec; in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2442 if (phba->fcf.fcf_flag & FCF_AVAILABLE) { in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2450 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2456 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2458 spin_unlock_irq(&phba->hbalock); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2467 spin_unlock_irq(&phba->hbalock); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2476 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2482 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2485 phba->fcf.eligible_fcf_cnt = 1; in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2488 phba->fcf.eligible_fcf_cnt++; in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2489 select_new_fcf = lpfc_sli4_new_fcf_random_select(phba, in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2490 phba->fcf.eligible_fcf_cnt); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2492 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2499 __lpfc_update_fcf_record(phba, fcf_rec, in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2504 spin_unlock_irq(&phba->hbalock); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2512 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2517 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2520 phba->fcf.fcf_flag |= FCF_AVAILABLE; in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2522 phba->fcf.eligible_fcf_cnt = 1; in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2524 spin_unlock_irq(&phba->hbalock); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2528 lpfc_sli4_mbox_cmd_free(phba, mboxq); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2530 if (phba->fcf.fcf_flag & FCF_REDISC_FOV) { in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2539 if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) { in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2540 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2543 phba->fcoe_eventtag_at_fcf_scan, in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2546 spin_lock_irq(&phba->hbalock); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2547 if (phba->hba_flag & HBA_DEVLOSS_TMO) { in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2548 phba->hba_flag &= ~FCF_TS_INPROG; in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2549 spin_unlock_irq(&phba->hbalock); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2551 lpfc_printf_log(phba, KERN_INFO, in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2556 lpfc_unregister_fcf_rescan(phba); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2562 phba->hba_flag &= ~FCF_TS_INPROG; in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2563 spin_unlock_irq(&phba->hbalock); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2576 lpfc_unregister_fcf(phba); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2579 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2582 phba->fcf.current_rec.fcf_indx, in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2583 phba->fcf.failover_rec.fcf_indx); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2584 memcpy(&phba->fcf.current_rec, in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2585 &phba->fcf.failover_rec, in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2592 spin_lock_irq(&phba->hbalock); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2593 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2594 spin_unlock_irq(&phba->hbalock); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2596 lpfc_register_fcf(phba); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2602 if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) || in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2603 (phba->fcf.fcf_flag & FCF_REDISC_PEND)) in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2606 if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV && in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2607 phba->fcf.fcf_flag & FCF_IN_USE) { in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2614 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2618 phba->fcf.current_rec.fcf_indx); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2619 spin_lock_irq(&phba->hbalock); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2620 phba->fcf.fcf_flag |= FCF_REDISC_FOV; in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2621 spin_unlock_irq(&phba->hbalock); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2622 lpfc_sli4_fcf_scan_read_fcf_rec(phba, in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2627 lpfc_register_fcf(phba); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2630 lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2634 lpfc_sli4_mbox_cmd_free(phba, mboxq); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2635 lpfc_register_fcf(phba); in lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
2656 lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) in lpfc_mbx_cmpl_fcf_rr_read_fcf_rec() argument
2666 if (phba->link_state < LPFC_LINK_UP) { in lpfc_mbx_cmpl_fcf_rr_read_fcf_rec()
2667 spin_lock_irq(&phba->hbalock); in lpfc_mbx_cmpl_fcf_rr_read_fcf_rec()
2668 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; in lpfc_mbx_cmpl_fcf_rr_read_fcf_rec()
2669 phba->hba_flag &= ~FCF_RR_INPROG; in lpfc_mbx_cmpl_fcf_rr_read_fcf_rec()
2670 spin_unlock_irq(&phba->hbalock); in lpfc_mbx_cmpl_fcf_rr_read_fcf_rec()
2675 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, in lpfc_mbx_cmpl_fcf_rr_read_fcf_rec()
2678 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, in lpfc_mbx_cmpl_fcf_rr_read_fcf_rec()
2681 "hba_flg x%x fcf_flg x%x\n", phba->hba_flag, in lpfc_mbx_cmpl_fcf_rr_read_fcf_rec()
2682 phba->fcf.fcf_flag); in lpfc_mbx_cmpl_fcf_rr_read_fcf_rec()
2683 lpfc_unregister_fcf_rescan(phba); in lpfc_mbx_cmpl_fcf_rr_read_fcf_rec()
2688 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, in lpfc_mbx_cmpl_fcf_rr_read_fcf_rec()
2692 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, in lpfc_mbx_cmpl_fcf_rr_read_fcf_rec()
2697 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, in lpfc_mbx_cmpl_fcf_rr_read_fcf_rec()
2701 lpfc_sli4_fcf_rr_index_clear(phba, fcf_index); in lpfc_mbx_cmpl_fcf_rr_read_fcf_rec()
2703 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); in lpfc_mbx_cmpl_fcf_rr_read_fcf_rec()
2704 rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index); in lpfc_mbx_cmpl_fcf_rr_read_fcf_rec()
2710 if (fcf_index == phba->fcf.current_rec.fcf_indx) { in lpfc_mbx_cmpl_fcf_rr_read_fcf_rec()
2711 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, in lpfc_mbx_cmpl_fcf_rr_read_fcf_rec()
2714 phba->fcf.current_rec.fcf_indx, fcf_index); in lpfc_mbx_cmpl_fcf_rr_read_fcf_rec()
2717 lpfc_issue_init_vfi(phba->pport); in lpfc_mbx_cmpl_fcf_rr_read_fcf_rec()
2722 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, in lpfc_mbx_cmpl_fcf_rr_read_fcf_rec()
2724 phba->fcf.failover_rec.fcf_indx, fcf_index); in lpfc_mbx_cmpl_fcf_rr_read_fcf_rec()
2725 spin_lock_irq(&phba->hbalock); in lpfc_mbx_cmpl_fcf_rr_read_fcf_rec()
2726 __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec, in lpfc_mbx_cmpl_fcf_rr_read_fcf_rec()
2729 spin_unlock_irq(&phba->hbalock); in lpfc_mbx_cmpl_fcf_rr_read_fcf_rec()
2731 current_fcf_index = phba->fcf.current_rec.fcf_indx; in lpfc_mbx_cmpl_fcf_rr_read_fcf_rec()
2734 lpfc_unregister_fcf(phba); in lpfc_mbx_cmpl_fcf_rr_read_fcf_rec()
2737 memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec, in lpfc_mbx_cmpl_fcf_rr_read_fcf_rec()
2740 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, in lpfc_mbx_cmpl_fcf_rr_read_fcf_rec()
2745 lpfc_register_fcf(phba); in lpfc_mbx_cmpl_fcf_rr_read_fcf_rec()
2747 lpfc_sli4_mbox_cmd_free(phba, mboxq); in lpfc_mbx_cmpl_fcf_rr_read_fcf_rec()
2762 lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) in lpfc_mbx_cmpl_read_fcf_rec() argument
2771 if (phba->link_state < LPFC_LINK_UP) in lpfc_mbx_cmpl_read_fcf_rec()
2775 if (!(phba->fcf.fcf_flag & FCF_DISCOVERY)) in lpfc_mbx_cmpl_read_fcf_rec()
2779 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, in lpfc_mbx_cmpl_read_fcf_rec()
2782 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, in lpfc_mbx_cmpl_read_fcf_rec()
2789 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, in lpfc_mbx_cmpl_read_fcf_rec()
2793 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, in lpfc_mbx_cmpl_read_fcf_rec()
2802 rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record); in lpfc_mbx_cmpl_read_fcf_rec()
2805 lpfc_sli4_mbox_cmd_free(phba, mboxq); in lpfc_mbx_cmpl_read_fcf_rec()
2816 lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) in lpfc_init_vfi_cmpl() argument
2825 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != in lpfc_init_vfi_cmpl()
2831 mempool_free(mboxq, phba->mbox_mem_pool); in lpfc_init_vfi_cmpl()
2837 mempool_free(mboxq, phba->mbox_mem_pool); in lpfc_init_vfi_cmpl()
2853 struct lpfc_hba *phba = vport->phba; in lpfc_issue_init_vfi() local
2855 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); in lpfc_issue_init_vfi()
2864 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); in lpfc_issue_init_vfi()
2868 mempool_free(mboxq, vport->phba->mbox_mem_pool); in lpfc_issue_init_vfi()
2880 lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) in lpfc_init_vpi_cmpl() argument
2890 mempool_free(mboxq, phba->mbox_mem_pool); in lpfc_init_vpi_cmpl()
2899 if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) { in lpfc_init_vpi_cmpl()
2907 lpfc_register_new_vport(phba, vport, ndlp); in lpfc_init_vpi_cmpl()
2908 mempool_free(mboxq, phba->mbox_mem_pool); in lpfc_init_vpi_cmpl()
2912 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) in lpfc_init_vpi_cmpl()
2919 mempool_free(mboxq, phba->mbox_mem_pool); in lpfc_init_vpi_cmpl()
2937 vpi = lpfc_alloc_vpi(vport->phba); in lpfc_issue_init_vpi()
2947 mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL); in lpfc_issue_init_vpi()
2954 lpfc_init_vpi(vport->phba, mboxq, vport->vpi); in lpfc_issue_init_vpi()
2957 rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT); in lpfc_issue_init_vpi()
2961 mempool_free(mboxq, vport->phba->mbox_mem_pool); in lpfc_issue_init_vpi()
2973 lpfc_start_fdiscs(struct lpfc_hba *phba) in lpfc_start_fdiscs() argument
2978 vports = lpfc_create_vport_work_array(phba); in lpfc_start_fdiscs()
2980 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { in lpfc_start_fdiscs()
2984 if (vports[i]->vpi > phba->max_vpi) { in lpfc_start_fdiscs()
2989 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { in lpfc_start_fdiscs()
2998 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) in lpfc_start_fdiscs()
3010 lpfc_destroy_vport_work_array(phba, vports); in lpfc_start_fdiscs()
3014 lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) in lpfc_mbx_cmpl_reg_vfi() argument
3025 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != in lpfc_mbx_cmpl_reg_vfi()
3032 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { in lpfc_mbx_cmpl_reg_vfi()
3048 if (!(phba->sli_rev == LPFC_SLI_REV4 && in lpfc_mbx_cmpl_reg_vfi()
3061 if ((phba->sli_rev == LPFC_SLI_REV4) && in lpfc_mbx_cmpl_reg_vfi()
3062 (phba->link_flag & LS_LOOPBACK_MODE)) { in lpfc_mbx_cmpl_reg_vfi()
3063 phba->link_state = LPFC_HBA_READY; in lpfc_mbx_cmpl_reg_vfi()
3071 vport->phba->alpa_map[0], in lpfc_mbx_cmpl_reg_vfi()
3072 phba->link_state, phba->fc_topology); in lpfc_mbx_cmpl_reg_vfi()
3080 ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) && in lpfc_mbx_cmpl_reg_vfi()
3091 lpfc_start_fdiscs(phba); in lpfc_mbx_cmpl_reg_vfi()
3092 lpfc_do_scr_ns_plogi(phba, vport); in lpfc_mbx_cmpl_reg_vfi()
3097 mempool_free(mboxq, phba->mbox_mem_pool); in lpfc_mbx_cmpl_reg_vfi()
3099 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys); in lpfc_mbx_cmpl_reg_vfi()
3106 lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) in lpfc_mbx_cmpl_read_sparam() argument
3122 lpfc_linkdown(phba); in lpfc_mbx_cmpl_read_sparam()
3133 phba->fc_edtov = ed_tov; in lpfc_mbx_cmpl_read_sparam()
3134 phba->fc_ratov = (2 * ed_tov) / 1000; in lpfc_mbx_cmpl_read_sparam()
3135 if (phba->fc_ratov < FF_DEF_RATOV) { in lpfc_mbx_cmpl_read_sparam()
3137 phba->fc_ratov = FF_DEF_RATOV; in lpfc_mbx_cmpl_read_sparam()
3143 memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn)); in lpfc_mbx_cmpl_read_sparam()
3144 memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn)); in lpfc_mbx_cmpl_read_sparam()
3147 lpfc_mbuf_free(phba, mp->virt, mp->phys); in lpfc_mbx_cmpl_read_sparam()
3149 mempool_free(pmb, phba->mbox_mem_pool); in lpfc_mbx_cmpl_read_sparam()
3154 if (phba->hba_flag & HBA_DEFER_FLOGI) { in lpfc_mbx_cmpl_read_sparam()
3156 phba->hba_flag &= ~HBA_DEFER_FLOGI; in lpfc_mbx_cmpl_read_sparam()
3162 lpfc_mbuf_free(phba, mp->virt, mp->phys); in lpfc_mbx_cmpl_read_sparam()
3164 lpfc_issue_clear_la(phba, vport); in lpfc_mbx_cmpl_read_sparam()
3165 mempool_free(pmb, phba->mbox_mem_pool); in lpfc_mbx_cmpl_read_sparam()
3170 lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) in lpfc_mbx_process_link_up() argument
3172 struct lpfc_vport *vport = phba->pport; in lpfc_mbx_process_link_up()
3182 spin_lock_irqsave(&phba->hbalock, iflags); in lpfc_mbx_process_link_up()
3183 phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la); in lpfc_mbx_process_link_up()
3185 if (!(phba->hba_flag & HBA_FCOE_MODE)) { in lpfc_mbx_process_link_up()
3198 phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN; in lpfc_mbx_process_link_up()
3203 if (phba->fc_topology && in lpfc_mbx_process_link_up()
3204 phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) { in lpfc_mbx_process_link_up()
3205 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, in lpfc_mbx_process_link_up()
3207 phba->fc_topology, in lpfc_mbx_process_link_up()
3209 phba->fc_topology_changed = 1; in lpfc_mbx_process_link_up()
3212 phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la); in lpfc_mbx_process_link_up()
3213 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED; in lpfc_mbx_process_link_up()
3216 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { in lpfc_mbx_process_link_up()
3217 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; in lpfc_mbx_process_link_up()
3222 if (phba->cfg_enable_npiv && phba->max_vpi) in lpfc_mbx_process_link_up()
3223 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, in lpfc_mbx_process_link_up()
3234 phba->alpa_map[0] = 0; in lpfc_mbx_process_link_up()
3247 numalpa = phba->alpa_map[0]; in lpfc_mbx_process_link_up()
3253 phba->alpa_map[j + 1]; in lpfc_mbx_process_link_up()
3259 lpfc_printf_log(phba, in lpfc_mbx_process_link_up()
3271 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) { in lpfc_mbx_process_link_up()
3272 if (phba->max_vpi && phba->cfg_enable_npiv && in lpfc_mbx_process_link_up()
3273 (phba->sli_rev >= LPFC_SLI_REV3)) in lpfc_mbx_process_link_up()
3274 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; in lpfc_mbx_process_link_up()
3276 vport->fc_myDID = phba->fc_pref_DID; in lpfc_mbx_process_link_up()
3279 spin_unlock_irqrestore(&phba->hbalock, iflags); in lpfc_mbx_process_link_up()
3287 lpfc_linkup(phba); in lpfc_mbx_process_link_up()
3290 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); in lpfc_mbx_process_link_up()
3294 rc = lpfc_read_sparam(phba, sparam_mbox, 0); in lpfc_mbx_process_link_up()
3296 mempool_free(sparam_mbox, phba->mbox_mem_pool); in lpfc_mbx_process_link_up()
3301 rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT); in lpfc_mbx_process_link_up()
3304 lpfc_mbuf_free(phba, mp->virt, mp->phys); in lpfc_mbx_process_link_up()
3306 mempool_free(sparam_mbox, phba->mbox_mem_pool); in lpfc_mbx_process_link_up()
3310 if (!(phba->hba_flag & HBA_FCOE_MODE)) { in lpfc_mbx_process_link_up()
3311 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); in lpfc_mbx_process_link_up()
3315 lpfc_config_link(phba, cfglink_mbox); in lpfc_mbx_process_link_up()
3318 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT); in lpfc_mbx_process_link_up()
3320 mempool_free(cfglink_mbox, phba->mbox_mem_pool); in lpfc_mbx_process_link_up()
3330 if (!(phba->hba_flag & HBA_FIP_SUPPORT)) { in lpfc_mbx_process_link_up()
3334 lpfc_printf_log(phba, KERN_ERR, in lpfc_mbx_process_link_up()
3342 lpfc_sli4_build_dflt_fcf_record(phba, fcf_record, in lpfc_mbx_process_link_up()
3344 rc = lpfc_sli4_add_fcf_record(phba, fcf_record); in lpfc_mbx_process_link_up()
3346 lpfc_printf_log(phba, KERN_ERR, in lpfc_mbx_process_link_up()
3360 spin_lock_irqsave(&phba->hbalock, iflags); in lpfc_mbx_process_link_up()
3361 if (phba->hba_flag & FCF_TS_INPROG) { in lpfc_mbx_process_link_up()
3362 spin_unlock_irqrestore(&phba->hbalock, iflags); in lpfc_mbx_process_link_up()
3366 phba->fcf.fcf_flag |= FCF_INIT_DISC; in lpfc_mbx_process_link_up()
3367 spin_unlock_irqrestore(&phba->hbalock, iflags); in lpfc_mbx_process_link_up()
3368 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, in lpfc_mbx_process_link_up()
3370 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, in lpfc_mbx_process_link_up()
3373 spin_lock_irqsave(&phba->hbalock, iflags); in lpfc_mbx_process_link_up()
3374 phba->fcf.fcf_flag &= ~FCF_INIT_DISC; in lpfc_mbx_process_link_up()
3375 spin_unlock_irqrestore(&phba->hbalock, iflags); in lpfc_mbx_process_link_up()
3379 lpfc_sli4_clear_fcf_rr_bmask(phba); in lpfc_mbx_process_link_up()
3383 memset(phba->os_host_name, 0, sizeof(phba->os_host_name)); in lpfc_mbx_process_link_up()
3384 scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s", in lpfc_mbx_process_link_up()
3392 lpfc_issue_clear_la(phba, vport); in lpfc_mbx_process_link_up()
3397 lpfc_enable_la(struct lpfc_hba *phba) in lpfc_enable_la() argument
3400 struct lpfc_sli *psli = &phba->sli; in lpfc_enable_la()
3401 spin_lock_irq(&phba->hbalock); in lpfc_enable_la()
3403 if (phba->sli_rev <= LPFC_SLI_REV3) { in lpfc_enable_la()
3404 control = readl(phba->HCregaddr); in lpfc_enable_la()
3406 writel(control, phba->HCregaddr); in lpfc_enable_la()
3407 readl(phba->HCregaddr); /* flush */ in lpfc_enable_la()
3409 spin_unlock_irq(&phba->hbalock); in lpfc_enable_la()
3413 lpfc_mbx_issue_link_down(struct lpfc_hba *phba) in lpfc_mbx_issue_link_down() argument
3415 lpfc_linkdown(phba); in lpfc_mbx_issue_link_down()
3416 lpfc_enable_la(phba); in lpfc_mbx_issue_link_down()
3417 lpfc_unregister_unused_fcf(phba); in lpfc_mbx_issue_link_down()
3429 lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) in lpfc_mbx_cmpl_read_topology() argument
3441 pring = lpfc_phba_elsring(phba); in lpfc_mbx_cmpl_read_topology()
3447 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, in lpfc_mbx_cmpl_read_topology()
3450 lpfc_mbx_issue_link_down(phba); in lpfc_mbx_cmpl_read_topology()
3451 phba->link_state = LPFC_HBA_ERROR; in lpfc_mbx_cmpl_read_topology()
3458 memcpy(&phba->alpa_map[0], mp->virt, 128); in lpfc_mbx_cmpl_read_topology()
3467 if (phba->fc_eventTag <= la->eventTag) { in lpfc_mbx_cmpl_read_topology()
3468 phba->fc_stat.LinkMultiEvent++; in lpfc_mbx_cmpl_read_topology()
3470 if (phba->fc_eventTag != 0) in lpfc_mbx_cmpl_read_topology()
3471 lpfc_linkdown(phba); in lpfc_mbx_cmpl_read_topology()
3474 phba->fc_eventTag = la->eventTag; in lpfc_mbx_cmpl_read_topology()
3475 if (phba->sli_rev < LPFC_SLI_REV4) { in lpfc_mbx_cmpl_read_topology()
3476 spin_lock_irqsave(&phba->hbalock, iflags); in lpfc_mbx_cmpl_read_topology()
3478 phba->sli.sli_flag |= LPFC_MENLO_MAINT; in lpfc_mbx_cmpl_read_topology()
3480 phba->sli.sli_flag &= ~LPFC_MENLO_MAINT; in lpfc_mbx_cmpl_read_topology()
3481 spin_unlock_irqrestore(&phba->hbalock, iflags); in lpfc_mbx_cmpl_read_topology()
3484 phba->link_events++; in lpfc_mbx_cmpl_read_topology()
3486 !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) { in lpfc_mbx_cmpl_read_topology()
3487 phba->fc_stat.LinkUp++; in lpfc_mbx_cmpl_read_topology()
3488 if (phba->link_flag & LS_LOOPBACK_MODE) { in lpfc_mbx_cmpl_read_topology()
3489 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, in lpfc_mbx_cmpl_read_topology()
3492 la->eventTag, phba->fc_eventTag, in lpfc_mbx_cmpl_read_topology()
3496 phba->alpa_map[0]); in lpfc_mbx_cmpl_read_topology()
3498 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, in lpfc_mbx_cmpl_read_topology()
3501 la->eventTag, phba->fc_eventTag, in lpfc_mbx_cmpl_read_topology()
3505 phba->alpa_map[0], in lpfc_mbx_cmpl_read_topology()
3508 phba->wait_4_mlo_maint_flg); in lpfc_mbx_cmpl_read_topology()
3510 lpfc_mbx_process_link_up(phba, la); in lpfc_mbx_cmpl_read_topology()
3513 phba->fc_stat.LinkDown++; in lpfc_mbx_cmpl_read_topology()
3514 if (phba->link_flag & LS_LOOPBACK_MODE) in lpfc_mbx_cmpl_read_topology()
3515 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, in lpfc_mbx_cmpl_read_topology()
3519 la->eventTag, phba->fc_eventTag, in lpfc_mbx_cmpl_read_topology()
3520 phba->pport->port_state, vport->fc_flag); in lpfc_mbx_cmpl_read_topology()
3522 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, in lpfc_mbx_cmpl_read_topology()
3525 la->eventTag, phba->fc_eventTag, in lpfc_mbx_cmpl_read_topology()
3526 phba->pport->port_state, vport->fc_flag, in lpfc_mbx_cmpl_read_topology()
3530 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, in lpfc_mbx_cmpl_read_topology()
3533 la->eventTag, phba->fc_eventTag, in lpfc_mbx_cmpl_read_topology()
3534 phba->pport->port_state, vport->fc_flag, in lpfc_mbx_cmpl_read_topology()
3537 lpfc_mbx_issue_link_down(phba); in lpfc_mbx_cmpl_read_topology()
3539 if (phba->sli.sli_flag & LPFC_MENLO_MAINT && in lpfc_mbx_cmpl_read_topology()
3541 if (phba->link_state != LPFC_LINK_DOWN) { in lpfc_mbx_cmpl_read_topology()
3542 phba->fc_stat.LinkDown++; in lpfc_mbx_cmpl_read_topology()
3543 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, in lpfc_mbx_cmpl_read_topology()
3546 la->eventTag, phba->fc_eventTag, in lpfc_mbx_cmpl_read_topology()
3547 phba->pport->port_state, vport->fc_flag); in lpfc_mbx_cmpl_read_topology()
3548 lpfc_mbx_issue_link_down(phba); in lpfc_mbx_cmpl_read_topology()
3550 lpfc_enable_la(phba); in lpfc_mbx_cmpl_read_topology()
3552 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, in lpfc_mbx_cmpl_read_topology()
3555 la->eventTag, phba->fc_eventTag, in lpfc_mbx_cmpl_read_topology()
3556 phba->pport->port_state, vport->fc_flag); in lpfc_mbx_cmpl_read_topology()
3562 if (phba->wait_4_mlo_maint_flg) { in lpfc_mbx_cmpl_read_topology()
3563 phba->wait_4_mlo_maint_flg = 0; in lpfc_mbx_cmpl_read_topology()
3564 wake_up_interruptible(&phba->wait_4_mlo_m_q); in lpfc_mbx_cmpl_read_topology()
3568 if ((phba->sli_rev < LPFC_SLI_REV4) && in lpfc_mbx_cmpl_read_topology()
3570 if (phba->sli.sli_flag & LPFC_MENLO_MAINT) in lpfc_mbx_cmpl_read_topology()
3571 lpfc_issue_clear_la(phba, vport); in lpfc_mbx_cmpl_read_topology()
3572 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, in lpfc_mbx_cmpl_read_topology()
3578 lpfc_mbuf_free(phba, mp->virt, mp->phys); in lpfc_mbx_cmpl_read_topology()
3580 mempool_free(pmb, phba->mbox_mem_pool); in lpfc_mbx_cmpl_read_topology()
3591 lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) in lpfc_mbx_cmpl_reg_login() argument
3635 lpfc_mbuf_free(phba, mp->virt, mp->phys); in lpfc_mbx_cmpl_reg_login()
3637 mempool_free(pmb, phba->mbox_mem_pool); in lpfc_mbx_cmpl_reg_login()
3647 lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) in lpfc_mbx_cmpl_unreg_vpi() argument
3665 if (!(phba->pport->load_flag & FC_UNLOADING)) in lpfc_mbx_cmpl_unreg_vpi()
3666 lpfc_workq_post_event(phba, NULL, NULL, in lpfc_mbx_cmpl_unreg_vpi()
3674 mempool_free(pmb, phba->mbox_mem_pool); in lpfc_mbx_cmpl_unreg_vpi()
3680 if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport)) in lpfc_mbx_cmpl_unreg_vpi()
3687 struct lpfc_hba *phba = vport->phba; in lpfc_mbx_unreg_vpi() local
3691 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); in lpfc_mbx_unreg_vpi()
3695 lpfc_unreg_vpi(phba, vport->vpi, mbox); in lpfc_mbx_unreg_vpi()
3698 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); in lpfc_mbx_unreg_vpi()
3702 mempool_free(mbox, phba->mbox_mem_pool); in lpfc_mbx_unreg_vpi()
3710 lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) in lpfc_mbx_cmpl_reg_vpi() argument
3731 if (phba->nvmet_support) in lpfc_mbx_cmpl_reg_vpi()
3732 lpfc_nvmet_update_targetport(phba); in lpfc_mbx_cmpl_reg_vpi()
3757 mempool_free(pmb, phba->mbox_mem_pool); in lpfc_mbx_cmpl_reg_vpi()
3770 lpfc_create_static_vport(struct lpfc_hba *phba) in lpfc_create_static_vport() argument
3785 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); in lpfc_create_static_vport()
3787 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_create_static_vport()
3797 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_create_static_vport()
3800 mempool_free(pmb, phba->mbox_mem_pool); in lpfc_create_static_vport()
3809 lpfc_mbuf_free(phba, mp->virt, mp->phys); in lpfc_create_static_vport()
3812 if (lpfc_dump_static_vport(phba, pmb, offset)) in lpfc_create_static_vport()
3815 pmb->vport = phba->pport; in lpfc_create_static_vport()
3816 mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb, in lpfc_create_static_vport()
3820 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, in lpfc_create_static_vport()
3828 if (phba->sli_rev == LPFC_SLI_REV4) { in lpfc_create_static_vport()
3858 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_create_static_vport()
3868 shost = lpfc_shost_from_vport(phba->pport); in lpfc_create_static_vport()
3883 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, in lpfc_create_static_vport()
3898 lpfc_mbuf_free(phba, mp->virt, mp->phys); in lpfc_create_static_vport()
3901 mempool_free(pmb, phba->mbox_mem_pool); in lpfc_create_static_vport()
3914 lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) in lpfc_mbx_cmpl_fabric_reg_login() argument
3930 lpfc_mbuf_free(phba, mp->virt, mp->phys); in lpfc_mbx_cmpl_fabric_reg_login()
3932 mempool_free(pmb, phba->mbox_mem_pool); in lpfc_mbx_cmpl_fabric_reg_login()
3934 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { in lpfc_mbx_cmpl_fabric_reg_login()
3955 if (phba->sli_rev < LPFC_SLI_REV4) in lpfc_mbx_cmpl_fabric_reg_login()
3965 lpfc_start_fdiscs(phba); in lpfc_mbx_cmpl_fabric_reg_login()
3972 lpfc_do_scr_ns_plogi(phba, vport); in lpfc_mbx_cmpl_fabric_reg_login()
3975 lpfc_mbuf_free(phba, mp->virt, mp->phys); in lpfc_mbx_cmpl_fabric_reg_login()
3977 mempool_free(pmb, phba->mbox_mem_pool); in lpfc_mbx_cmpl_fabric_reg_login()
4068 lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) in lpfc_mbx_cmpl_ns_reg_login() argument
4089 lpfc_mbuf_free(phba, mp->virt, mp->phys); in lpfc_mbx_cmpl_ns_reg_login()
4091 mempool_free(pmb, phba->mbox_mem_pool); in lpfc_mbx_cmpl_ns_reg_login()
4096 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { in lpfc_mbx_cmpl_ns_reg_login()
4111 if (phba->sli_rev < LPFC_SLI_REV4) in lpfc_mbx_cmpl_ns_reg_login()
4156 lpfc_mbuf_free(phba, mp->virt, mp->phys); in lpfc_mbx_cmpl_ns_reg_login()
4158 mempool_free(pmb, phba->mbox_mem_pool); in lpfc_mbx_cmpl_ns_reg_login()
4170 struct lpfc_hba *phba = vport->phba; in lpfc_register_remote_port() local
4212 dev_printk(KERN_WARNING, &phba->pcidev->dev, in lpfc_register_remote_port()
4328 vport->phba->nport_event_cnt++; in lpfc_nlp_state_cleanup()
4333 vport->phba->nport_event_cnt++; in lpfc_nlp_state_cleanup()
4334 if (vport->phba->nvmet_support == 0) { in lpfc_nlp_state_cleanup()
4353 vport->phba->nport_event_cnt++; in lpfc_nlp_state_cleanup()
4361 if (vport->phba->sli_rev >= LPFC_SLI_REV4 && in lpfc_nlp_state_cleanup()
4363 if (vport->phba->nvmet_support == 0) { in lpfc_nlp_state_cleanup()
4369 vport->phba->nport_event_cnt++; in lpfc_nlp_state_cleanup()
4536 ndlp->phba = vport->phba; in lpfc_initialize_node()
4550 struct lpfc_hba *phba = vport->phba; in lpfc_enable_node() local
4560 if (phba->sli_rev == LPFC_SLI_REV4) { in lpfc_enable_node()
4562 rpi = lpfc_sli4_alloc_rpi(vport->phba); in lpfc_enable_node()
4578 spin_lock_irqsave(&phba->ndlp_lock, flags); in lpfc_enable_node()
4581 spin_unlock_irqrestore(&phba->ndlp_lock, flags); in lpfc_enable_node()
4591 spin_unlock_irqrestore(&phba->ndlp_lock, flags); in lpfc_enable_node()
4605 if (phba->sli_rev == LPFC_SLI_REV4) in lpfc_enable_node()
4617 if (phba->sli_rev == LPFC_SLI_REV4) in lpfc_enable_node()
4620 spin_unlock_irqrestore(&phba->ndlp_lock, flags); in lpfc_enable_node()
4621 if (vport->phba->sli_rev == LPFC_SLI_REV4) { in lpfc_enable_node()
4649 if (phba->sli_rev == LPFC_SLI_REV4) { in lpfc_enable_node()
4650 lpfc_sli4_free_rpi(vport->phba, rpi); in lpfc_enable_node()
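lpfc_enable_node() above (and lpfc_nlp_init() further down) reserves an RPI up front on SLI-4 and hands it back with lpfc_sli4_free_rpi() if node setup fails afterwards. A simplified sketch of that allocate-then-roll-back ordering, assuming the LPFC_RPI_ALLOC_ERROR sentinel and with demo_setup_node() as a hypothetical stand-in for the real initialisation steps:

/* Assumes the lpfc definitions from lpfc.h/lpfc_sli4.h. */
static struct lpfc_nodelist *demo_get_node(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp;
	int rpi = LPFC_RPI_ALLOC_ERROR;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		rpi = lpfc_sli4_alloc_rpi(phba);
		if (rpi == LPFC_RPI_ALLOC_ERROR)
			return NULL;
	}

	ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
	if (!ndlp || demo_setup_node(vport, ndlp, rpi)) {	/* hypothetical */
		if (ndlp)
			mempool_free(ndlp, phba->nlp_mem_pool);
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_free_rpi(phba, rpi);	/* roll the RPI back */
		return NULL;
	}
	return ndlp;
}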
4669 if (vport->phba->sli_rev == LPFC_SLI_REV4) { in lpfc_drop_node()
4685 struct lpfc_hba *phba = vport->phba; in lpfc_set_disctmo() local
4690 tmo = (((phba->fc_edtov + 999) / 1000) + 1); in lpfc_set_disctmo()
4695 tmo = ((phba->fc_ratov * 3) + 3); in lpfc_set_disctmo()
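lpfc_set_disctmo() derives the discovery timeout from the fabric timers: fc_edtov is kept in milliseconds and rounded up to whole seconds plus one, while the RA_TOV-based value is already in seconds. A small self-contained sketch of that arithmetic, with the branch condition reduced to a public_loop flag and the timer armed via mod_timer() (the driver's own selection logic and timer field are not shown in the listing):

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/types.h>

static void demo_set_disctmo(struct timer_list *disctmo,
			     unsigned int fc_edtov_ms,
			     unsigned int fc_ratov_s, bool public_loop)
{
	unsigned int tmo;

	if (public_loop)
		tmo = ((fc_edtov_ms + 999) / 1000) + 1;	/* ms -> s, round up, +1 */
	else
		tmo = (fc_ratov_s * 3) + 3;		/* 3 x RA_TOV + 3 s */

	mod_timer(disctmo, jiffies + msecs_to_jiffies(tmo * 1000));
}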
4759 lpfc_check_sli_ndlp(struct lpfc_hba *phba, in lpfc_check_sli_ndlp() argument
4798 __lpfc_dequeue_nport_iocbs(struct lpfc_hba *phba, in __lpfc_dequeue_nport_iocbs() argument
4806 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) in __lpfc_dequeue_nport_iocbs()
4813 lpfc_sli3_dequeue_nport_iocbs(struct lpfc_hba *phba, in lpfc_sli3_dequeue_nport_iocbs() argument
4816 struct lpfc_sli *psli = &phba->sli; in lpfc_sli3_dequeue_nport_iocbs()
4819 spin_lock_irq(&phba->hbalock); in lpfc_sli3_dequeue_nport_iocbs()
4821 __lpfc_dequeue_nport_iocbs(phba, ndlp, &psli->sli3_ring[i], in lpfc_sli3_dequeue_nport_iocbs()
4823 spin_unlock_irq(&phba->hbalock); in lpfc_sli3_dequeue_nport_iocbs()
4827 lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba, in lpfc_sli4_dequeue_nport_iocbs() argument
4833 spin_lock_irq(&phba->hbalock); in lpfc_sli4_dequeue_nport_iocbs()
4834 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { in lpfc_sli4_dequeue_nport_iocbs()
4839 __lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list); in lpfc_sli4_dequeue_nport_iocbs()
4842 spin_unlock_irq(&phba->hbalock); in lpfc_sli4_dequeue_nport_iocbs()
4850 lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) in lpfc_no_rpi() argument
4861 if (phba->sli_rev != LPFC_SLI_REV4) in lpfc_no_rpi()
4862 lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions); in lpfc_no_rpi()
4864 lpfc_sli4_dequeue_nport_iocbs(phba, ndlp, &completions); in lpfc_no_rpi()
4868 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, in lpfc_no_rpi()
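__lpfc_dequeue_nport_iocbs() and lpfc_no_rpi() above move the matching I/Os onto a private completions list while the ring lock is held and only cancel them (IOSTAT_LOCAL_REJECT) after the lock is dropped. The generic shape of that collect-then-complete pattern, with demo_req and demo_fail_req() as illustrative stand-ins:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_req {
	struct list_head list;
	int target_id;
};

/* Stand-in for the real completion path (IOSTAT_LOCAL_REJECT in lpfc). */
static void demo_fail_req(struct demo_req *req)
{
	/* call the request's completion handler with an error status */
}

static void demo_flush_target(spinlock_t *ring_lock, struct list_head *txq,
			      int target_id)
{
	struct demo_req *req, *next;
	LIST_HEAD(completions);

	spin_lock_irq(ring_lock);
	list_for_each_entry_safe(req, next, txq, list) {
		if (req->target_id == target_id)
			list_move_tail(&req->list, &completions);
	}
	spin_unlock_irq(ring_lock);

	/* Fail the collected requests without holding the ring lock. */
	list_for_each_entry_safe(req, next, &completions, list) {
		list_del(&req->list);
		demo_fail_req(req);
	}
}

Completing the requests outside the lock avoids re-entering the ring lock from the completion handlers.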
4883 lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) in lpfc_nlp_logo_unreg() argument
4892 mempool_free(pmb, phba->mbox_mem_pool); in lpfc_nlp_logo_unreg()
4908 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); in lpfc_nlp_logo_unreg()
4922 lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport, in lpfc_set_unreg_login_mbx_cmpl() argument
4931 } else if (phba->sli_rev == LPFC_SLI_REV4 && in lpfc_set_unreg_login_mbx_cmpl()
4933 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= in lpfc_set_unreg_login_mbx_cmpl()
4940 if (phba->sli_rev == LPFC_SLI_REV4) { in lpfc_set_unreg_login_mbx_cmpl()
4941 spin_lock_irqsave(&vport->phba->ndlp_lock, in lpfc_set_unreg_login_mbx_cmpl()
4944 spin_unlock_irqrestore(&vport->phba->ndlp_lock, in lpfc_set_unreg_login_mbx_cmpl()
4966 struct lpfc_hba *phba = vport->phba; in lpfc_unreg_rpi() local
4997 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); in lpfc_unreg_rpi()
5001 if (phba->sli_rev == LPFC_SLI_REV4) in lpfc_unreg_rpi()
5002 rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; in lpfc_unreg_rpi()
5004 lpfc_unreg_login(phba, vport->vpi, rpi, mbox); in lpfc_unreg_rpi()
5006 lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox); in lpfc_unreg_rpi()
5025 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); in lpfc_unreg_rpi()
5027 mempool_free(mbox, phba->mbox_mem_pool); in lpfc_unreg_rpi()
5054 lpfc_no_rpi(phba, ndlp); in lpfc_unreg_rpi()
5056 if (phba->sli_rev != LPFC_SLI_REV4) in lpfc_unreg_rpi()
5076 lpfc_unreg_hba_rpis(struct lpfc_hba *phba) in lpfc_unreg_hba_rpis() argument
5083 vports = lpfc_create_vport_work_array(phba); in lpfc_unreg_hba_rpis()
5085 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_unreg_hba_rpis()
5089 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { in lpfc_unreg_hba_rpis()
5102 lpfc_destroy_vport_work_array(phba, vports); in lpfc_unreg_hba_rpis()
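lpfc_unreg_hba_rpis() above (and lpfc_fcf_inuse() later in the listing) iterate the active vports through lpfc_create_vport_work_array(), a reference-held, NULL-terminated snapshot bounded by max_vports, and release it with lpfc_destroy_vport_work_array(). A sketch of that walk; demo_per_vport() is a hypothetical per-port action:

/* Assumes the lpfc definitions from lpfc.h. */
static void demo_per_vport(struct lpfc_vport *vport)
{
	/* hypothetical per-port action, e.g. unregister its RPIs */
}

static void demo_for_each_vport(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (!vports)
		return;

	/* The snapshot is NULL-terminated and bounded by max_vports. */
	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
		demo_per_vport(vports[i]);

	lpfc_destroy_vport_work_array(phba, vports);
}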
5108 struct lpfc_hba *phba = vport->phba; in lpfc_unreg_all_rpis() local
5112 if (phba->sli_rev == LPFC_SLI_REV4) { in lpfc_unreg_all_rpis()
5117 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); in lpfc_unreg_all_rpis()
5119 lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT, in lpfc_unreg_all_rpis()
5124 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); in lpfc_unreg_all_rpis()
5126 mempool_free(mbox, phba->mbox_mem_pool); in lpfc_unreg_all_rpis()
5139 struct lpfc_hba *phba = vport->phba; in lpfc_unreg_default_rpis() local
5144 if (phba->sli_rev > LPFC_SLI_REV3) in lpfc_unreg_default_rpis()
5147 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); in lpfc_unreg_default_rpis()
5149 lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS, in lpfc_unreg_default_rpis()
5154 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); in lpfc_unreg_default_rpis()
5156 mempool_free(mbox, phba->mbox_mem_pool); in lpfc_unreg_default_rpis()
5174 struct lpfc_hba *phba = vport->phba; in lpfc_cleanup_node() local
5205 if ((mb = phba->sli.mbox_active)) { in lpfc_cleanup_node()
5214 spin_lock_irq(&phba->hbalock); in lpfc_cleanup_node()
5216 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) { in lpfc_cleanup_node()
5226 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { in lpfc_cleanup_node()
5232 __lpfc_mbuf_free(phba, mp->virt, mp->phys); in lpfc_cleanup_node()
5236 mempool_free(mb, phba->mbox_mem_pool); in lpfc_cleanup_node()
5243 spin_unlock_irq(&phba->hbalock); in lpfc_cleanup_node()
5245 lpfc_els_abort(phba, ndlp); in lpfc_cleanup_node()
5258 if (phba->sli_rev == LPFC_SLI_REV4) in lpfc_cleanup_node()
5264 lpfc_sli4_free_rpi(vport->phba, in lpfc_cleanup_node()
5266 spin_lock_irqsave(&vport->phba->ndlp_lock, in lpfc_cleanup_node()
5270 spin_unlock_irqrestore(&vport->phba->ndlp_lock, in lpfc_cleanup_node()
5285 struct lpfc_hba *phba = vport->phba; in lpfc_nlp_remove() local
5295 phba->sli_rev != LPFC_SLI_REV4) { in lpfc_nlp_remove()
5306 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) in lpfc_nlp_remove()
5308 rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID, in lpfc_nlp_remove()
5311 mempool_free(mbox, phba->mbox_mem_pool); in lpfc_nlp_remove()
5318 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); in lpfc_nlp_remove()
5320 mempool_free(mbox, phba->mbox_mem_pool); in lpfc_nlp_remove()
5385 vport->phba->fc_topology == in lpfc_matchdid()
5490 if (vport->phba->nvmet_support) in lpfc_setup_disc_node()
5511 if (vport->phba->nvmet_support) in lpfc_setup_disc_node()
5555 if (vport->phba->nvmet_support) in lpfc_setup_disc_node()
5593 (!vport->phba->nvmet_support && in lpfc_setup_disc_node()
5597 if (vport->phba->nvmet_support) in lpfc_setup_disc_node()
5616 struct lpfc_hba *phba = vport->phba; in lpfc_disc_list_loopmap() local
5620 if (!lpfc_is_link_up(phba)) in lpfc_disc_list_loopmap()
5623 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) in lpfc_disc_list_loopmap()
5627 if (phba->alpa_map[0]) { in lpfc_disc_list_loopmap()
5628 for (j = 1; j <= phba->alpa_map[0]; j++) { in lpfc_disc_list_loopmap()
5629 alpa = phba->alpa_map[j]; in lpfc_disc_list_loopmap()
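lpfc_disc_list_loopmap() above walks the loop map with alpa_map[0] as the entry count and alpa_map[1..N] as the discovered AL_PAs, skipping the port's own address. A self-contained sketch of that walk, with the per-entry action supplied as a callback:

static void demo_walk_alpa_map(const unsigned char *alpa_map,
			       unsigned char own_alpa,
			       void (*handle)(unsigned char alpa))
{
	int j;

	/*
	 * alpa_map[0] holds the entry count; a zero count means no map was
	 * returned and the caller must probe every possible AL_PA instead.
	 */
	for (j = 1; j <= alpa_map[0]; j++) {
		unsigned char alpa = alpa_map[j];

		if (alpa == own_alpa)
			continue;	/* skip our own position on the loop */
		handle(alpa);		/* e.g. set up a discovery node */
	}
}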
5655 lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport) in lpfc_issue_clear_la() argument
5658 struct lpfc_sli *psli = &phba->sli; in lpfc_issue_clear_la()
5667 if ((phba->link_state >= LPFC_CLEAR_LA) || in lpfc_issue_clear_la()
5669 (phba->sli_rev == LPFC_SLI_REV4)) in lpfc_issue_clear_la()
5673 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) { in lpfc_issue_clear_la()
5674 phba->link_state = LPFC_CLEAR_LA; in lpfc_issue_clear_la()
5675 lpfc_clear_la(phba, mbox); in lpfc_issue_clear_la()
5678 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); in lpfc_issue_clear_la()
5680 mempool_free(mbox, phba->mbox_mem_pool); in lpfc_issue_clear_la()
5684 phba->link_state = LPFC_HBA_ERROR; in lpfc_issue_clear_la()
5691 lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport) in lpfc_issue_reg_vpi() argument
5695 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); in lpfc_issue_reg_vpi()
5700 if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT) in lpfc_issue_reg_vpi()
5702 mempool_free(regvpimbox, phba->mbox_mem_pool); in lpfc_issue_reg_vpi()
5712 struct lpfc_hba *phba = vport->phba; in lpfc_disc_start() local
5716 if (!lpfc_is_link_up(phba)) { in lpfc_disc_start()
5719 phba->link_state); in lpfc_disc_start()
5723 if (phba->link_state == LPFC_CLEAR_LA) in lpfc_disc_start()
5750 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && in lpfc_disc_start()
5753 (phba->sli_rev < LPFC_SLI_REV4)) { in lpfc_disc_start()
5754 lpfc_issue_clear_la(phba, vport); in lpfc_disc_start()
5755 lpfc_issue_reg_vpi(phba, vport); in lpfc_disc_start()
5765 lpfc_issue_clear_la(phba, vport); in lpfc_disc_start()
5810 lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) in lpfc_free_tx() argument
5817 pring = lpfc_phba_elsring(phba); in lpfc_free_tx()
5824 spin_lock_irq(&phba->hbalock); in lpfc_free_tx()
5845 lpfc_sli_issue_abort_iotag(phba, pring, iocb); in lpfc_free_tx()
5848 spin_unlock_irq(&phba->hbalock); in lpfc_free_tx()
5851 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, in lpfc_free_tx()
5859 struct lpfc_hba *phba = vport->phba; in lpfc_disc_flush_list() local
5868 lpfc_free_tx(phba, ndlp); in lpfc_disc_flush_list()
5901 struct lpfc_hba *phba = vport->phba; in lpfc_disc_timeout() local
5905 if (unlikely(!phba)) in lpfc_disc_timeout()
5915 lpfc_worker_wake_up(phba); in lpfc_disc_timeout()
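lpfc_disc_timeout() does almost nothing in timer context: it records the event and wakes the worker with lpfc_worker_wake_up(), leaving lpfc_disc_timeout_handler() to run in process context. The sketch below shows the same defer-from-timer shape using a generic work item instead of the driver's dedicated worker thread; names other than timer_setup()/from_timer()/schedule_work() are illustrative.

#include <linux/timer.h>
#include <linux/workqueue.h>

struct demo_port {
	struct timer_list disc_tmo;
	struct work_struct disc_work;
	unsigned long tmo_pending;
};

/* Timer (softirq) context: just record the event and defer. */
static void demo_disc_timeout(struct timer_list *t)
{
	struct demo_port *port = from_timer(port, t, disc_tmo);

	port->tmo_pending = 1;
	schedule_work(&port->disc_work);
}

/* Process context: the actual timeout handling runs here. */
static void demo_disc_work(struct work_struct *work)
{
	struct demo_port *port = container_of(work, struct demo_port,
					      disc_work);

	if (!port->tmo_pending)
		return;
	port->tmo_pending = 0;
	/* ... discovery-timeout handling ... */
}

static void demo_port_init(struct demo_port *port)
{
	timer_setup(&port->disc_tmo, demo_disc_timeout, 0);
	INIT_WORK(&port->disc_work, demo_disc_work);
}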
5923 struct lpfc_hba *phba = vport->phba; in lpfc_disc_timeout_handler() local
5924 struct lpfc_sli *psli = &phba->sli; in lpfc_disc_timeout_handler()
5969 if (phba->sli_rev <= LPFC_SLI_REV3) in lpfc_disc_timeout_handler()
6007 lpfc_els_abort(phba, ndlp); in lpfc_disc_timeout_handler()
6036 if (phba->sli_rev < LPFC_SLI_REV4) { in lpfc_disc_timeout_handler()
6037 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) in lpfc_disc_timeout_handler()
6038 lpfc_issue_reg_vpi(phba, vport); in lpfc_disc_timeout_handler()
6040 lpfc_issue_clear_la(phba, vport); in lpfc_disc_timeout_handler()
6046 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); in lpfc_disc_timeout_handler()
6052 phba->link_state = LPFC_HBA_ERROR; in lpfc_disc_timeout_handler()
6056 lpfc_linkdown(phba); in lpfc_disc_timeout_handler()
6057 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology, in lpfc_disc_timeout_handler()
6058 phba->cfg_link_speed); in lpfc_disc_timeout_handler()
6062 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT); in lpfc_disc_timeout_handler()
6063 lpfc_set_loopback_flag(phba); in lpfc_disc_timeout_handler()
6065 mempool_free(initlinkmbox, phba->mbox_mem_pool); in lpfc_disc_timeout_handler()
6080 if (phba->sli_rev < LPFC_SLI_REV4) { in lpfc_disc_timeout_handler()
6081 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) in lpfc_disc_timeout_handler()
6082 lpfc_issue_reg_vpi(phba, vport); in lpfc_disc_timeout_handler()
6084 lpfc_issue_clear_la(phba, vport); in lpfc_disc_timeout_handler()
6114 switch (phba->link_state) { in lpfc_disc_timeout_handler()
6124 lpfc_issue_clear_la(phba, vport); in lpfc_disc_timeout_handler()
6135 "state x%x\n", phba->link_state); in lpfc_disc_timeout_handler()
6145 if (phba->sli_rev != LPFC_SLI_REV4) { in lpfc_disc_timeout_handler()
6163 lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) in lpfc_mbx_cmpl_fdmi_reg_login() argument
6173 if (phba->sli_rev < LPFC_SLI_REV4) in lpfc_mbx_cmpl_fdmi_reg_login()
6199 lpfc_mbuf_free(phba, mp->virt, mp->phys); in lpfc_mbx_cmpl_fdmi_reg_login()
6201 mempool_free(pmb, phba->mbox_mem_pool); in lpfc_mbx_cmpl_fdmi_reg_login()
6306 lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi) in lpfc_find_vport_by_vpid() argument
6318 for (i = 0; i < phba->max_vpi; i++) { in lpfc_find_vport_by_vpid()
6319 if (vpi == phba->vpi_ids[i]) in lpfc_find_vport_by_vpid()
6323 if (i >= phba->max_vpi) { in lpfc_find_vport_by_vpid()
6324 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_find_vport_by_vpid()
6331 spin_lock_irqsave(&phba->port_list_lock, flags); in lpfc_find_vport_by_vpid()
6332 list_for_each_entry(vport, &phba->port_list, listentry) { in lpfc_find_vport_by_vpid()
6334 spin_unlock_irqrestore(&phba->port_list_lock, flags); in lpfc_find_vport_by_vpid()
6338 spin_unlock_irqrestore(&phba->port_list_lock, flags); in lpfc_find_vport_by_vpid()
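lpfc_find_vport_by_vpid() above translates the physical VPI through vpi_ids[] and then searches port_list under port_list_lock, dropping the lock as soon as a match is found. A simplified sketch of the locked lookup (the vpi_ids[] translation and the error logging are omitted):

/* Assumes the lpfc definitions from lpfc.h. */
static struct lpfc_vport *demo_find_vport(struct lpfc_hba *phba, uint16_t vpi)
{
	struct lpfc_vport *vport;
	unsigned long flags;

	spin_lock_irqsave(&phba->port_list_lock, flags);
	list_for_each_entry(vport, &phba->port_list, listentry) {
		if (vport->vpi == vpi) {
			spin_unlock_irqrestore(&phba->port_list_lock, flags);
			return vport;
		}
	}
	spin_unlock_irqrestore(&phba->port_list_lock, flags);
	return NULL;
}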
6348 if (vport->phba->sli_rev == LPFC_SLI_REV4) { in lpfc_nlp_init()
6349 rpi = lpfc_sli4_alloc_rpi(vport->phba); in lpfc_nlp_init()
6354 ndlp = mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL); in lpfc_nlp_init()
6356 if (vport->phba->sli_rev == LPFC_SLI_REV4) in lpfc_nlp_init()
6357 lpfc_sli4_free_rpi(vport->phba, rpi); in lpfc_nlp_init()
6365 if (vport->phba->sli_rev == LPFC_SLI_REV4) { in lpfc_nlp_init()
6375 mempool_alloc(vport->phba->active_rrq_pool, in lpfc_nlp_init()
6379 ndlp->phba->cfg_rrq_xri_bitmap_sz); in lpfc_nlp_init()
6397 struct lpfc_hba *phba; in lpfc_nlp_release() local
6417 phba = ndlp->phba; in lpfc_nlp_release()
6418 spin_lock_irqsave(&phba->ndlp_lock, flags); in lpfc_nlp_release()
6420 spin_unlock_irqrestore(&phba->ndlp_lock, flags); in lpfc_nlp_release()
6425 if (phba->sli_rev == LPFC_SLI_REV4) in lpfc_nlp_release()
6427 ndlp->phba->active_rrq_pool); in lpfc_nlp_release()
6428 mempool_free(ndlp, ndlp->phba->nlp_mem_pool); in lpfc_nlp_release()
6439 struct lpfc_hba *phba; in lpfc_nlp_get() local
6451 phba = ndlp->phba; in lpfc_nlp_get()
6452 spin_lock_irqsave(&phba->ndlp_lock, flags); in lpfc_nlp_get()
6454 spin_unlock_irqrestore(&phba->ndlp_lock, flags); in lpfc_nlp_get()
6463 spin_unlock_irqrestore(&phba->ndlp_lock, flags); in lpfc_nlp_get()
6477 struct lpfc_hba *phba; in lpfc_nlp_put() local
6487 phba = ndlp->phba; in lpfc_nlp_put()
6488 spin_lock_irqsave(&phba->ndlp_lock, flags); in lpfc_nlp_put()
6494 spin_unlock_irqrestore(&phba->ndlp_lock, flags); in lpfc_nlp_put()
6507 spin_unlock_irqrestore(&phba->ndlp_lock, flags); in lpfc_nlp_put()
6527 spin_unlock_irqrestore(&phba->ndlp_lock, flags); in lpfc_nlp_put()
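lpfc_nlp_get()/lpfc_nlp_put()/lpfc_nlp_release() above implement the node life cycle: callers take and drop references under ndlp_lock, and the final put releases the RPI and returns the node to nlp_mem_pool. A kref-based sketch of the same get/put/release shape (the driver version shown here guards its own counter with a spinlock; the structure and names below are illustrative):

#include <linux/kref.h>
#include <linux/slab.h>

struct demo_node {
	struct kref kref;
	/* ... per-node state ... */
};

/* Last reference dropped: free backing resources (RPI + mempool in lpfc). */
static void demo_node_release(struct kref *kref)
{
	struct demo_node *node = container_of(kref, struct demo_node, kref);

	kfree(node);
}

static struct demo_node *demo_node_get(struct demo_node *node)
{
	if (node)
		kref_get(&node->kref);
	return node;
}

static void demo_node_put(struct demo_node *node)
{
	if (node)
		kref_put(&node->kref, demo_node_release);
}

static struct demo_node *demo_node_alloc(void)
{
	struct demo_node *node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (node)
		kref_init(&node->kref);	/* starts with one reference */
	return node;
}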
6566 lpfc_fcf_inuse(struct lpfc_hba *phba) in lpfc_fcf_inuse() argument
6573 vports = lpfc_create_vport_work_array(phba); in lpfc_fcf_inuse()
6579 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { in lpfc_fcf_inuse()
6601 lpfc_printf_log(phba, KERN_INFO, in lpfc_fcf_inuse()
6612 lpfc_destroy_vport_work_array(phba, vports); in lpfc_fcf_inuse()
6624 lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) in lpfc_unregister_vfi_cmpl() argument
6630 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_unregister_vfi_cmpl()
6636 phba->pport->fc_flag &= ~FC_VFI_REGISTERED; in lpfc_unregister_vfi_cmpl()
6638 mempool_free(mboxq, phba->mbox_mem_pool); in lpfc_unregister_vfi_cmpl()
6650 lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) in lpfc_unregister_fcfi_cmpl() argument
6655 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_unregister_fcfi_cmpl()
6660 mempool_free(mboxq, phba->mbox_mem_pool); in lpfc_unregister_fcfi_cmpl()
6673 lpfc_unregister_fcf_prep(struct lpfc_hba *phba) in lpfc_unregister_fcf_prep() argument
6681 if (lpfc_fcf_inuse(phba)) in lpfc_unregister_fcf_prep()
6682 lpfc_unreg_hba_rpis(phba); in lpfc_unregister_fcf_prep()
6685 phba->pport->port_state = LPFC_VPORT_UNKNOWN; in lpfc_unregister_fcf_prep()
6688 vports = lpfc_create_vport_work_array(phba); in lpfc_unregister_fcf_prep()
6689 if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) in lpfc_unregister_fcf_prep()
6690 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { in lpfc_unregister_fcf_prep()
6696 if (phba->sli_rev == LPFC_SLI_REV4) in lpfc_unregister_fcf_prep()
6705 lpfc_destroy_vport_work_array(phba, vports); in lpfc_unregister_fcf_prep()
6706 if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) { in lpfc_unregister_fcf_prep()
6707 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); in lpfc_unregister_fcf_prep()
6709 lpfc_cancel_retry_delay_tmo(phba->pport, ndlp); in lpfc_unregister_fcf_prep()
6710 lpfc_cleanup_pending_mbox(phba->pport); in lpfc_unregister_fcf_prep()
6711 if (phba->sli_rev == LPFC_SLI_REV4) in lpfc_unregister_fcf_prep()
6712 lpfc_sli4_unreg_all_rpis(phba->pport); in lpfc_unregister_fcf_prep()
6713 lpfc_mbx_unreg_vpi(phba->pport); in lpfc_unregister_fcf_prep()
6714 shost = lpfc_shost_from_vport(phba->pport); in lpfc_unregister_fcf_prep()
6716 phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; in lpfc_unregister_fcf_prep()
6717 phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED; in lpfc_unregister_fcf_prep()
6722 lpfc_els_flush_all_cmd(phba); in lpfc_unregister_fcf_prep()
6725 rc = lpfc_issue_unreg_vfi(phba->pport); in lpfc_unregister_fcf_prep()
6740 lpfc_sli4_unregister_fcf(struct lpfc_hba *phba) in lpfc_sli4_unregister_fcf() argument
6745 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); in lpfc_sli4_unregister_fcf()
6747 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_sli4_unregister_fcf()
6749 "HBA state x%x\n", phba->pport->port_state); in lpfc_sli4_unregister_fcf()
6752 lpfc_unreg_fcfi(mbox, phba->fcf.fcfi); in lpfc_sli4_unregister_fcf()
6753 mbox->vport = phba->pport; in lpfc_sli4_unregister_fcf()
6755 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); in lpfc_sli4_unregister_fcf()
6758 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_sli4_unregister_fcf()
6761 rc, phba->pport->port_state); in lpfc_sli4_unregister_fcf()
6775 lpfc_unregister_fcf_rescan(struct lpfc_hba *phba) in lpfc_unregister_fcf_rescan() argument
6780 rc = lpfc_unregister_fcf_prep(phba); in lpfc_unregister_fcf_rescan()
6782 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_unregister_fcf_rescan()
6789 rc = lpfc_sli4_unregister_fcf(phba); in lpfc_unregister_fcf_rescan()
6793 phba->fcf.fcf_flag = 0; in lpfc_unregister_fcf_rescan()
6794 phba->fcf.current_rec.flag = 0; in lpfc_unregister_fcf_rescan()
6800 if ((phba->pport->load_flag & FC_UNLOADING) || in lpfc_unregister_fcf_rescan()
6801 (phba->link_state < LPFC_LINK_UP)) in lpfc_unregister_fcf_rescan()
6805 spin_lock_irq(&phba->hbalock); in lpfc_unregister_fcf_rescan()
6806 phba->fcf.fcf_flag |= FCF_INIT_DISC; in lpfc_unregister_fcf_rescan()
6807 spin_unlock_irq(&phba->hbalock); in lpfc_unregister_fcf_rescan()
6810 lpfc_sli4_clear_fcf_rr_bmask(phba); in lpfc_unregister_fcf_rescan()
6812 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); in lpfc_unregister_fcf_rescan()
6815 spin_lock_irq(&phba->hbalock); in lpfc_unregister_fcf_rescan()
6816 phba->fcf.fcf_flag &= ~FCF_INIT_DISC; in lpfc_unregister_fcf_rescan()
6817 spin_unlock_irq(&phba->hbalock); in lpfc_unregister_fcf_rescan()
6818 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_unregister_fcf_rescan()
6821 phba->pport->port_state); in lpfc_unregister_fcf_rescan()
6833 lpfc_unregister_fcf(struct lpfc_hba *phba) in lpfc_unregister_fcf() argument
6838 rc = lpfc_unregister_fcf_prep(phba); in lpfc_unregister_fcf()
6840 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_unregister_fcf()
6847 rc = lpfc_sli4_unregister_fcf(phba); in lpfc_unregister_fcf()
6851 spin_lock_irq(&phba->hbalock); in lpfc_unregister_fcf()
6852 phba->fcf.fcf_flag &= ~FCF_REGISTERED; in lpfc_unregister_fcf()
6853 spin_unlock_irq(&phba->hbalock); in lpfc_unregister_fcf()
6865 lpfc_unregister_unused_fcf(struct lpfc_hba *phba) in lpfc_unregister_unused_fcf() argument
6872 spin_lock_irq(&phba->hbalock); in lpfc_unregister_unused_fcf()
6873 if (!(phba->hba_flag & HBA_FCOE_MODE) || in lpfc_unregister_unused_fcf()
6874 !(phba->fcf.fcf_flag & FCF_REGISTERED) || in lpfc_unregister_unused_fcf()
6875 !(phba->hba_flag & HBA_FIP_SUPPORT) || in lpfc_unregister_unused_fcf()
6876 (phba->fcf.fcf_flag & FCF_DISCOVERY) || in lpfc_unregister_unused_fcf()
6877 (phba->pport->port_state == LPFC_FLOGI)) { in lpfc_unregister_unused_fcf()
6878 spin_unlock_irq(&phba->hbalock); in lpfc_unregister_unused_fcf()
6881 spin_unlock_irq(&phba->hbalock); in lpfc_unregister_unused_fcf()
6883 if (lpfc_fcf_inuse(phba)) in lpfc_unregister_unused_fcf()
6886 lpfc_unregister_fcf_rescan(phba); in lpfc_unregister_unused_fcf()
6898 lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba, in lpfc_read_fcf_conn_tbl() argument
6909 &phba->fcf_conn_rec_list, list) { in lpfc_read_fcf_conn_tbl()
6927 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_read_fcf_conn_tbl()
6936 &phba->fcf_conn_rec_list); in lpfc_read_fcf_conn_tbl()
6939 if (!list_empty(&phba->fcf_conn_rec_list)) { in lpfc_read_fcf_conn_tbl()
6941 list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list, in lpfc_read_fcf_conn_tbl()
6944 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, in lpfc_read_fcf_conn_tbl()
6981 lpfc_read_fcoe_param(struct lpfc_hba *phba, in lpfc_read_fcoe_param() argument
6997 phba->valid_vlan = 1; in lpfc_read_fcoe_param()
6998 phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) & in lpfc_read_fcoe_param()
7002 phba->fc_map[0] = fcoe_param->fc_map[0]; in lpfc_read_fcoe_param()
7003 phba->fc_map[1] = fcoe_param->fc_map[1]; in lpfc_read_fcoe_param()
7004 phba->fc_map[2] = fcoe_param->fc_map[2]; in lpfc_read_fcoe_param()
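lpfc_read_fcoe_param() above pulls the VLAN tag out of the record with le16_to_cpu() and copies the three FC map bytes into the HBA. A sketch of that parsing, assuming the usual 12-bit VLAN ID mask and a hypothetical demo_fcoe_param layout (the driver's own record structure differs):

#include <linux/types.h>
#include <asm/byteorder.h>

#define DEMO_VLAN_ID_MASK 0xfff	/* VLAN ID is the low 12 bits of the tag */

struct demo_fcoe_param {		/* illustrative layout, not the driver's */
	__le16 vlan_tag;
	u8 fc_map[3];
};

/* Assumes the lpfc_hba fields visible above (valid_vlan, vlan_id, fc_map). */
static void demo_read_fcoe_param(struct lpfc_hba *phba,
				 const struct demo_fcoe_param *p)
{
	phba->valid_vlan = 1;
	phba->vlan_id = le16_to_cpu(p->vlan_tag) & DEMO_VLAN_ID_MASK;

	phba->fc_map[0] = p->fc_map[0];
	phba->fc_map[1] = p->fc_map[1];
	phba->fc_map[2] = p->fc_map[2];
}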
7057 lpfc_parse_fcoe_conf(struct lpfc_hba *phba, in lpfc_parse_fcoe_conf() argument
7073 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_parse_fcoe_conf()
7082 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_parse_fcoe_conf()
7092 lpfc_read_fcoe_param(phba, rec_ptr); in lpfc_parse_fcoe_conf()
7098 lpfc_read_fcf_conn_tbl(phba, rec_ptr); in lpfc_parse_fcoe_conf()