Lines matching refs: phba (the struct lpfc_hba * adapter handle) in the lpfc vport management code (lpfc_vport.c)
88 lpfc_alloc_vpi(struct lpfc_hba *phba) in lpfc_alloc_vpi() argument
92 spin_lock_irq(&phba->hbalock); in lpfc_alloc_vpi()
94 vpi = find_next_zero_bit(phba->vpi_bmask, (phba->max_vpi + 1), 1); in lpfc_alloc_vpi()
95 if (vpi > phba->max_vpi) in lpfc_alloc_vpi()
98 set_bit(vpi, phba->vpi_bmask); in lpfc_alloc_vpi()
99 if (phba->sli_rev == LPFC_SLI_REV4) in lpfc_alloc_vpi()
100 phba->sli4_hba.max_cfg_param.vpi_used++; in lpfc_alloc_vpi()
101 spin_unlock_irq(&phba->hbalock); in lpfc_alloc_vpi()
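Taken together, these fragments cover nearly the whole allocator: under hbalock, scan the VPI bitmask for a free bit (starting at 1, since VPI 0 is reserved for the physical port), claim it, and on SLI-4 bump the firmware resource accounting. A sketch of how the listed lines likely fit together; the vpi = 0 failure fallback and the return are assumptions, since those lines do not reference phba and so are absent from this listing:

static int
lpfc_alloc_vpi(struct lpfc_hba *phba)
{
        unsigned long vpi;

        spin_lock_irq(&phba->hbalock);
        /* Start at bit 1: VPI 0 is reserved for the physical port */
        vpi = find_next_zero_bit(phba->vpi_bmask, (phba->max_vpi + 1), 1);
        if (vpi > phba->max_vpi)
                vpi = 0;                /* assumed: no free VPI, 0 signals failure */
        else
                set_bit(vpi, phba->vpi_bmask);
        if (phba->sli_rev == LPFC_SLI_REV4)
                phba->sli4_hba.max_cfg_param.vpi_used++;
        spin_unlock_irq(&phba->hbalock);
        return vpi;
}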
106 lpfc_free_vpi(struct lpfc_hba *phba, int vpi) in lpfc_free_vpi() argument
110 spin_lock_irq(&phba->hbalock); in lpfc_free_vpi()
111 clear_bit(vpi, phba->vpi_bmask); in lpfc_free_vpi()
112 if (phba->sli_rev == LPFC_SLI_REV4) in lpfc_free_vpi()
113 phba->sli4_hba.max_cfg_param.vpi_used--; in lpfc_free_vpi()
114 spin_unlock_irq(&phba->hbalock); in lpfc_free_vpi()
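The matching release path simply clears the bit and, on SLI-4, decrements the usage counter. A minimal sketch, with the vpi == 0 guard assumed (VPI 0 is never handed out by the allocator above):

static void
lpfc_free_vpi(struct lpfc_hba *phba, int vpi)
{
        if (vpi == 0)
                return;        /* assumed guard: VPI 0 belongs to the physical port */
        spin_lock_irq(&phba->hbalock);
        clear_bit(vpi, phba->vpi_bmask);
        if (phba->sli_rev == LPFC_SLI_REV4)
                phba->sli4_hba.max_cfg_param.vpi_used--;
        spin_unlock_irq(&phba->hbalock);
}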
118 lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport) in lpfc_vport_sparm() argument
125 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); in lpfc_vport_sparm()
131 rc = lpfc_read_sparam(phba, pmb, vport->vpi); in lpfc_vport_sparm()
133 mempool_free(pmb, phba->mbox_mem_pool); in lpfc_vport_sparm()
145 rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2); in lpfc_vport_sparm()
151 lpfc_mbuf_free(phba, mp->virt, mp->phys); in lpfc_vport_sparm()
154 mempool_free(pmb, phba->mbox_mem_pool); in lpfc_vport_sparm()
161 lpfc_mbuf_free(phba, mp->virt, mp->phys); in lpfc_vport_sparm()
164 mempool_free(pmb, phba->mbox_mem_pool); in lpfc_vport_sparm()
175 lpfc_mbuf_free(phba, mp->virt, mp->phys); in lpfc_vport_sparm()
177 mempool_free(pmb, phba->mbox_mem_pool); in lpfc_vport_sparm()
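These fragments trace a READ_SPARAM mailbox round-trip: allocate a mailbox from the HBA's mempool, build the command for this vport's VPI, issue it synchronously with a 2 * R_A_TOV timeout, and release the DMA buffer and mailbox on every exit path. A simplified sketch of that flow; the field holding the mailbox DMA buffer (ctx_buf here, context1 in older kernels), the MBX_TIMEOUT ownership rule, the return codes, and the final copy into vport->fc_sparam are assumptions, since none of those lines reference phba:

static int
lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
        struct lpfc_dmabuf *mp;
        LPFC_MBOXQ_t *pmb;
        int rc;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb)
                return -ENOMEM;

        rc = lpfc_read_sparam(phba, pmb, vport->vpi);
        if (rc) {
                mempool_free(pmb, phba->mbox_mem_pool);
                return -ENOMEM;
        }

        pmb->vport = vport;
        rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2);
        if (rc != MBX_SUCCESS) {
                mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
                if (rc != MBX_TIMEOUT) {
                        /* assumed: on a timeout the completion handler still
                         * owns pmb/mp, so they are only freed on the other
                         * failure paths */
                        lpfc_mbuf_free(phba, mp->virt, mp->phys);
                        kfree(mp);
                        mempool_free(pmb, phba->mbox_mem_pool);
                }
                return -EIO;
        }

        mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
        memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        mempool_free(pmb, phba->mbox_mem_pool);
        return 0;
}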
183 lpfc_valid_wwn_format(struct lpfc_hba *phba, struct lpfc_name *wwn, in lpfc_valid_wwn_format() argument
193 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_valid_wwn_format()
205 lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport) in lpfc_unique_wwpn() argument
210 spin_lock_irqsave(&phba->port_list_lock, flags); in lpfc_unique_wwpn()
211 list_for_each_entry(vport, &phba->port_list, listentry) { in lpfc_unique_wwpn()
218 spin_unlock_irqrestore(&phba->port_list_lock, flags); in lpfc_unique_wwpn()
222 spin_unlock_irqrestore(&phba->port_list_lock, flags); in lpfc_unique_wwpn()
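The uniqueness check is a straightforward walk of the adapter's port list under port_list_lock, comparing the candidate WWPN against every existing vport. A sketch; the memcmp against fc_sparam.portName is assumed, since that line does not reference phba:

static int
lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
{
        struct lpfc_vport *vport;
        unsigned long flags;

        spin_lock_irqsave(&phba->port_list_lock, flags);
        list_for_each_entry(vport, &phba->port_list, listentry) {
                if (vport == new_vport)
                        continue;
                /* a match means the WWPN is already in use: not unique */
                if (memcmp(&vport->fc_sparam.portName,
                           &new_vport->fc_sparam.portName,
                           sizeof(struct lpfc_name)) == 0) {
                        spin_unlock_irqrestore(&phba->port_list_lock, flags);
                        return 0;
                }
        }
        spin_unlock_irqrestore(&phba->port_list_lock, flags);
        return 1;
}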
243 struct lpfc_hba *phba = vport->phba; in lpfc_discovery_wait() local
256 wait_time_max = msecs_to_jiffies(((phba->fc_ratov * 3) + 3) * 1000); in lpfc_discovery_wait()
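Only the adapter handle and the timeout arithmetic reference phba in lpfc_discovery_wait(): discovery is given 3 * R_A_TOV + 3 seconds before the caller gives up. A sketch of the bounded-wait shape this implies, with the completion test reduced to a hypothetical vport_discovery_done() helper:

        unsigned long wait_time_max;

        /* 3 * R_A_TOV + 3 seconds, converted to an absolute jiffies deadline */
        wait_time_max = msecs_to_jiffies(((phba->fc_ratov * 3) + 3) * 1000);
        wait_time_max += jiffies;
        while (time_before(jiffies, wait_time_max)) {
                if (vport_discovery_done(vport))        /* hypothetical helper */
                        break;
                msleep(100);
        }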
300 struct lpfc_hba *phba = pport->phba; in lpfc_vport_create() local
307 if ((phba->sli_rev < 3) || !(phba->cfg_enable_npiv)) { in lpfc_vport_create()
308 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_vport_create()
311 phba->sli_rev); in lpfc_vport_create()
317 if (phba->nvmet_support) { in lpfc_vport_create()
318 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_vport_create()
325 vpi = lpfc_alloc_vpi(phba); in lpfc_vport_create()
327 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_vport_create()
330 phba->max_vpi); in lpfc_vport_create()
337 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_vport_create()
340 lpfc_free_vpi(phba, vpi); in lpfc_vport_create()
345 vport = lpfc_create_port(phba, instance, &fc_vport->dev); in lpfc_vport_create()
347 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_vport_create()
349 lpfc_free_vpi(phba, vpi); in lpfc_vport_create()
357 if ((status = lpfc_vport_sparm(phba, vport))) { in lpfc_vport_create()
368 lpfc_free_vpi(phba, vpi); in lpfc_vport_create()
379 if (!lpfc_valid_wwn_format(phba, &vport->fc_sparam.nodeName, "WWNN") || in lpfc_vport_create()
380 !lpfc_valid_wwn_format(phba, &vport->fc_sparam.portName, "WWPN")) { in lpfc_vport_create()
384 lpfc_free_vpi(phba, vpi); in lpfc_vport_create()
390 if (!lpfc_unique_wwpn(phba, vport)) { in lpfc_vport_create()
394 lpfc_free_vpi(phba, vpi); in lpfc_vport_create()
404 vport->cfg_lun_queue_depth = phba->pport->cfg_lun_queue_depth; in lpfc_vport_create()
414 if (phba->cfg_enable_SmartSAN || in lpfc_vport_create()
415 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { in lpfc_vport_create()
417 vport->fdmi_hba_mask = phba->pport->fdmi_hba_mask; in lpfc_vport_create()
418 vport->fdmi_port_mask = phba->pport->fdmi_port_mask; in lpfc_vport_create()
425 if ((phba->sli_rev == LPFC_SLI_REV4) && in lpfc_vport_create()
429 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, in lpfc_vport_create()
433 lpfc_free_vpi(phba, vpi); in lpfc_vport_create()
436 } else if (phba->sli_rev == LPFC_SLI_REV4) { in lpfc_vport_create()
447 if ((phba->link_state < LPFC_LINK_UP) || in lpfc_vport_create()
449 (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) { in lpfc_vport_create()
464 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); in lpfc_vport_create()
467 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) { in lpfc_vport_create()
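The creation fragments read as a sequence of gates, with each failure path handing the reserved VPI back via lpfc_free_vpi(). A condensed outline of that flow; the return codes, the lpfc_get_instance() step, and the collapsed error unwinding are assumptions or simplifications, and the SLI-4 VPI registration plus FDMI/tunable inheritance are reduced to comments:

static int
lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
{
        struct lpfc_vport *pport = (struct lpfc_vport *)fc_vport->shost->hostdata;
        struct lpfc_hba *phba = pport->phba;
        struct lpfc_nodelist *ndlp;
        struct lpfc_vport *vport;
        int instance, vpi;

        /* Gate 1: NPIV needs SLI-3 or later, must be enabled, and is not
         * supported when the adapter runs as an NVMe target. */
        if (phba->sli_rev < 3 || !phba->cfg_enable_npiv || phba->nvmet_support)
                return VPORT_INVAL;

        /* Gate 2: reserve a VPI up front; every later failure returns it. */
        vpi = lpfc_alloc_vpi(phba);
        if (vpi == 0)
                return VPORT_NORESOURCES;

        instance = lpfc_get_instance();
        if (instance < 0)
                goto free_vpi;

        vport = lpfc_create_port(phba, instance, &fc_vport->dev);
        if (!vport)
                goto free_vpi;
        vport->vpi = vpi;

        /* Gate 3: fetch the vport's service parameters, then validate the
         * WWNN/WWPN format and WWPN uniqueness. */
        if (lpfc_vport_sparm(phba, vport) ||
            !lpfc_valid_wwn_format(phba, &vport->fc_sparam.nodeName, "WWNN") ||
            !lpfc_valid_wwn_format(phba, &vport->fc_sparam.portName, "WWPN") ||
            !lpfc_unique_wwpn(phba, vport))
                goto free_vpi;

        /* Inherit tunables (LUN queue depth, FDMI masks) from the physical
         * port; on SLI-4 the new VPI is also registered with the firmware. */

        /* Gate 4: only start fabric discovery if the link is up, the
         * topology is not arbitrated loop, and the fabric supports NPIV. */
        if (phba->link_state < LPFC_LINK_UP ||
            phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
                lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
                return VPORT_OK;
        }

        ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
        if (ndlp && (phba->link_flag & LS_NPIV_FAB_SUPPORTED))
                lpfc_initial_fdisc(vport);        /* FDISC logs the new WWPN in */
        else
                lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
        return VPORT_OK;

free_vpi:
        lpfc_free_vpi(phba, vpi);
        return VPORT_ERROR;
}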
492 struct lpfc_hba *phba = vport->phba; in disable_vport() local
499 && phba->link_state >= LPFC_LINK_UP) { in disable_vport()
501 timeout = msecs_to_jiffies(phba->fc_ratov * 2000); in disable_vport()
530 if (phba->sli_rev == LPFC_SLI_REV4) { in disable_vport()
546 struct lpfc_hba *phba = vport->phba; in enable_vport() local
550 if ((phba->link_state < LPFC_LINK_UP) || in enable_vport()
551 (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) { in enable_vport()
570 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); in enable_vport()
573 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) { in enable_vport()
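disable_vport() touches phba only for the link gate and the 2 * R_A_TOV wait: if the fabric node exists and the link is up, the vport is logged out of the fabric before teardown. A sketch of that LOGO-and-wait, assuming lpfc_issue_els_npiv_logo() and the unreg_vpi_cmpl flag behave as in the mainline driver (neither line references phba, so neither appears above); enable_vport() then re-runs essentially the same link-state/topology/NPIV-fabric gate already sketched for lpfc_vport_create():

        struct lpfc_nodelist *ndlp;
        long timeout;

        ndlp = lpfc_findnode_did(vport, Fabric_DID);
        if (ndlp && phba->link_state >= LPFC_LINK_UP) {
                vport->unreg_vpi_cmpl = VPORT_INVAL;
                timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
                /* Log the virtual port out of the fabric, then wait (bounded
                 * by roughly 2 * R_A_TOV) for the unregister-VPI completion */
                if (!lpfc_issue_els_npiv_logo(vport, ndlp))
                        while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
                                timeout = schedule_timeout(timeout);
        }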
607 struct lpfc_hba *phba = vport->phba; in lpfc_vport_delete() local
620 !(phba->pport->load_flag & FC_UNLOADING)) { in lpfc_vport_delete()
626 spin_lock_irq(&phba->hbalock); in lpfc_vport_delete()
628 spin_unlock_irq(&phba->hbalock); in lpfc_vport_delete()
633 if (!(phba->pport->load_flag & FC_UNLOADING)) { in lpfc_vport_delete()
635 while (check_count < ((phba->fc_ratov * 3) + 3) && in lpfc_vport_delete()
675 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); in lpfc_vport_delete()
681 if (phba->pport->load_flag & FC_UNLOADING) { in lpfc_vport_delete()
684 phba->link_state >= LPFC_LINK_UP) { in lpfc_vport_delete()
699 spin_lock_irq(&phba->ndlp_lock); in lpfc_vport_delete()
701 spin_unlock_irq(&phba->ndlp_lock); in lpfc_vport_delete()
711 phba->link_state >= LPFC_LINK_UP && in lpfc_vport_delete()
712 phba->fc_topology != LPFC_TOPOLOGY_LOOP) { in lpfc_vport_delete()
714 timeout = msecs_to_jiffies(phba->fc_ratov * 2000); in lpfc_vport_delete()
719 lpfc_printf_log(vport->phba, KERN_WARNING, in lpfc_vport_delete()
743 spin_lock_irq(&phba->ndlp_lock); in lpfc_vport_delete()
749 spin_unlock_irq(&phba->ndlp_lock); in lpfc_vport_delete()
752 spin_unlock_irq(&phba->ndlp_lock); in lpfc_vport_delete()
766 timeout = msecs_to_jiffies(phba->fc_ratov * 2000); in lpfc_vport_delete()
772 if (!(phba->pport->load_flag & FC_UNLOADING)) in lpfc_vport_delete()
791 if (!(phba->pport->load_flag & FC_UNLOADING)) { in lpfc_vport_delete()
805 lpfc_free_vpi(phba, vport->vpi); in lpfc_vport_delete()
807 spin_lock_irq(&phba->port_list_lock); in lpfc_vport_delete()
809 spin_unlock_irq(&phba->port_list_lock); in lpfc_vport_delete()
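The delete path ends by undoing creation in reverse: after the fabric LOGO and node cleanup seen in the fragments above, the VPI goes back to the bitmask and the vport is unlinked from the adapter's port list under port_list_lock. A sketch of that tail, with the work_port_events reset and the final host reference drop assumed:

        lpfc_free_vpi(phba, vport->vpi);
        vport->work_port_events = 0;        /* assumed: stop further worker events */
        spin_lock_irq(&phba->port_list_lock);
        list_del_init(&vport->listentry);
        spin_unlock_irq(&phba->port_list_lock);
        scsi_host_put(lpfc_shost_from_vport(vport));
        return VPORT_OK;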
817 lpfc_create_vport_work_array(struct lpfc_hba *phba) in lpfc_create_vport_work_array() argument
822 vports = kcalloc(phba->max_vports + 1, sizeof(struct lpfc_vport *), in lpfc_create_vport_work_array()
826 spin_lock_irq(&phba->port_list_lock); in lpfc_create_vport_work_array()
827 list_for_each_entry(port_iterator, &phba->port_list, listentry) { in lpfc_create_vport_work_array()
839 spin_unlock_irq(&phba->port_list_lock); in lpfc_create_vport_work_array()
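The work-array helper snapshots the adapter's vport list so callers can iterate it without holding port_list_lock; the array is allocated one slot larger than max_vports so it is always NULL-terminated. A sketch, with the FC_UNLOADING skip and the scsi_host_get() reference per entry assumed (those lines do not reference phba):

struct lpfc_vport **
lpfc_create_vport_work_array(struct lpfc_hba *phba)
{
        struct lpfc_vport *port_iterator;
        struct lpfc_vport **vports;
        int index = 0;

        vports = kcalloc(phba->max_vports + 1, sizeof(struct lpfc_vport *),
                         GFP_KERNEL);
        if (vports == NULL)
                return NULL;
        spin_lock_irq(&phba->port_list_lock);
        list_for_each_entry(port_iterator, &phba->port_list, listentry) {
                if (port_iterator->load_flag & FC_UNLOADING)
                        continue;        /* assumed: skip ports being torn down */
                if (!scsi_host_get(lpfc_shost_from_vport(port_iterator)))
                        continue;        /* assumed: hold a reference per entry */
                vports[index++] = port_iterator;
        }
        spin_unlock_irq(&phba->port_list_lock);
        return vports;
}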
844 lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports) in lpfc_destroy_vport_work_array() argument
849 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) in lpfc_destroy_vport_work_array()
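The destroy side mirrors it: walk the NULL-terminated array up to max_vports, drop the per-entry host reference, and free the array. The scsi_host_put() and kfree() are assumed, since only the loop bound references phba:

void
lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
{
        int i;

        if (vports == NULL)
                return;
        for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
                scsi_host_put(lpfc_shost_from_vport(vports[i]));
        kfree(vports);
}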