Lines matching refs:bfad
Each entry below reads: <line number in the source file> <matched source line> in <enclosing function>, with "local" or "argument" marking whether bfad is a local variable or a function argument at that point.

38 struct bfad_s *bfad = drv; in bfa_cb_ioim_done() local
45 bfa_trc(bfad, scsi_status); in bfa_cb_ioim_done()
49 bfa_trc(bfad, sns_len); in bfa_cb_ioim_done()
56 bfa_trc(bfad, residue); in bfa_cb_ioim_done()
61 bfa_trc(bfad, 0); in bfa_cb_ioim_done()
84 bfa_trc(bfad, cmnd->result); in bfa_cb_ioim_done()
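The completion-callback entries above show the done path tracing the SCSI status, sense length, residue and final cmnd->result against the owning bfad instance. A hedged sketch of that tracing pattern follows; the prototype and the mapping of firmware status into cmnd->result are assumptions, only the bfa_trc() calls are taken from the listing.

    /* Hedged sketch of the tracing in bfa_cb_ioim_done(). bfa_trc() and the
     * bfad/dio types come from the bfa driver headers; the prototype and the
     * status-to-result translation are assumed context, not verbatim code. */
    void
    bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
                     enum bfi_ioim_status io_status, u8 scsi_status,
                     int sns_len, u8 *sns_info, s32 residue)
    {
            struct scsi_cmnd *cmnd = (struct scsi_cmnd *) dio;
            struct bfad_s *bfad = drv;

            bfa_trc(bfad, scsi_status);   /* SCSI status returned by the target */
            bfa_trc(bfad, sns_len);       /* length of any autosense data       */
            bfa_trc(bfad, residue);       /* data under/overrun reported        */

            /* ... translate io_status/scsi_status into the midlayer result ... */

            bfa_trc(bfad, cmnd->result);  /* final result handed to the midlayer */
    }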
134 struct bfad_s *bfad = drv; in bfa_cb_ioim_abort() local
142 bfa_trc(bfad, cmnd->result); in bfa_cb_ioim_abort()
147 bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk, in bfa_cb_tskim_done() argument
174 struct bfad_s *bfad = im_port->bfad; in bfad_im_info() local
179 bfad->pci_name, BFAD_DRIVER_VERSION); in bfad_im_info()
195 struct bfad_s *bfad = im_port->bfad; in bfad_im_abort_handler() local
201 spin_lock_irqsave(&bfad->bfad_lock, flags); in bfad_im_abort_handler()
213 bfa_trc(bfad, hal_io->iotag); in bfad_im_abort_handler()
214 BFA_LOG(KERN_INFO, bfad, bfa_log_level, in bfad_im_abort_handler()
218 spin_unlock_irqrestore(&bfad->bfad_lock, flags); in bfad_im_abort_handler()
230 bfa_trc(bfad, hal_io->iotag); in bfad_im_abort_handler()
231 BFA_LOG(KERN_INFO, bfad, bfa_log_level, in bfad_im_abort_handler()
236 spin_unlock_irqrestore(&bfad->bfad_lock, flags); in bfad_im_abort_handler()
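The abort-handler entries show the driver-lock pattern around an abort: look up the pending IO under bfad_lock, trace its iotag, emit a BFA_LOG message, and drop the lock. A hedged sketch of that pattern follows; the hal_io lookup and the return values are assumptions, only the locking, bfa_trc() and BFA_LOG() calls come from the listing.

    /* Hedged sketch of the lock/trace/log pattern in bfad_im_abort_handler().
     * struct bfad_s, bfa_trc() and BFA_LOG() are driver-internal; the hal_io
     * lookup via host_scribble and the SUCCESS return are assumed context. */
    static int
    bfad_im_abort_handler(struct scsi_cmnd *cmnd)
    {
            struct bfad_im_port_s *im_port =
                    (struct bfad_im_port_s *) cmnd->device->host->hostdata[0];
            struct bfad_s *bfad = im_port->bfad;
            struct bfa_ioim_s *hal_io;
            unsigned long flags;

            spin_lock_irqsave(&bfad->bfad_lock, flags);
            hal_io = (struct bfa_ioim_s *) cmnd->host_scribble;
            if (!hal_io) {
                    /* IO already completed; nothing to abort. */
                    spin_unlock_irqrestore(&bfad->bfad_lock, flags);
                    return SUCCESS;
            }

            bfa_trc(bfad, hal_io->iotag);
            BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                    "scsi%d: abort cmnd %p iotag %x\n",
                    im_port->shost->host_no, cmnd, hal_io->iotag);

            /* ... request the abort and wait for its completion ... */
            spin_unlock_irqrestore(&bfad->bfad_lock, flags);
            return SUCCESS;
    }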
241 bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd, in bfad_im_target_reset_send() argument
249 tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd); in bfad_im_target_reset_send()
251 BFA_LOG(KERN_ERR, bfad, bfa_log_level, in bfad_im_target_reset_send()
271 BFA_LOG(KERN_ERR, bfad, bfa_log_level, in bfad_im_target_reset_send()
297 struct bfad_s *bfad = im_port->bfad; in bfad_im_reset_lun_handler() local
307 spin_lock_irqsave(&bfad->bfad_lock, flags); in bfad_im_reset_lun_handler()
310 spin_unlock_irqrestore(&bfad->bfad_lock, flags); in bfad_im_reset_lun_handler()
315 tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd); in bfad_im_reset_lun_handler()
317 BFA_LOG(KERN_ERR, bfad, bfa_log_level, in bfad_im_reset_lun_handler()
319 spin_unlock_irqrestore(&bfad->bfad_lock, flags); in bfad_im_reset_lun_handler()
339 BFA_LOG(KERN_ERR, bfad, bfa_log_level, in bfad_im_reset_lun_handler()
341 spin_unlock_irqrestore(&bfad->bfad_lock, flags); in bfad_im_reset_lun_handler()
348 spin_unlock_irqrestore(&bfad->bfad_lock, flags); in bfad_im_reset_lun_handler()
355 BFA_LOG(KERN_ERR, bfad, bfa_log_level, in bfad_im_reset_lun_handler()
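The LUN-reset entries show a task-management allocation made while bfad_lock is held, with an error log and an early unlock when bfa_tskim_alloc() returns NULL. A hedged sketch of that error path follows; the message text, return codes and the omitted task-management issue/wait steps are assumptions.

    /* Hedged sketch of the tskim allocation pattern in
     * bfad_im_reset_lun_handler(). bfa_tskim_alloc(), BFA_LOG() and bfad_lock
     * come from the bfa driver; the surrounding flow is assumed context. */
    static int
    bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
    {
            struct bfad_im_port_s *im_port =
                    (struct bfad_im_port_s *) cmnd->device->host->hostdata[0];
            struct bfad_s *bfad = im_port->bfad;
            struct bfa_tskim_s *tskim;
            unsigned long flags;

            spin_lock_irqsave(&bfad->bfad_lock, flags);
            tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
            if (!tskim) {
                    BFA_LOG(KERN_ERR, bfad, bfa_log_level,
                            "LUN reset, failed to allocate tskim\n");
                    spin_unlock_irqrestore(&bfad->bfad_lock, flags);
                    return FAILED;
            }

            /* ... start the LUN-reset TM request, unlock, wait for completion ... */
            spin_unlock_irqrestore(&bfad->bfad_lock, flags);
            return SUCCESS;
    }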
374 struct bfad_s *bfad = im_port->bfad; in bfad_im_reset_target_handler() local
381 spin_lock_irqsave(&bfad->bfad_lock, flags); in bfad_im_reset_target_handler()
385 rc = bfad_im_target_reset_send(bfad, cmnd, itnim); in bfad_im_reset_target_handler()
388 spin_unlock_irqrestore(&bfad->bfad_lock, flags); in bfad_im_reset_target_handler()
391 spin_lock_irqsave(&bfad->bfad_lock, flags); in bfad_im_reset_target_handler()
395 BFA_LOG(KERN_ERR, bfad, bfa_log_level, in bfad_im_reset_target_handler()
402 spin_unlock_irqrestore(&bfad->bfad_lock, flags); in bfad_im_reset_target_handler()
426 bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim, in bfa_fcb_itnim_alloc() argument
433 (*itnim_drv)->im = bfad->im; in bfa_fcb_itnim_alloc()
441 bfad->bfad_flags |= BFAD_RPORT_ONLINE; in bfa_fcb_itnim_alloc()
450 bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv) in bfa_fcb_itnim_free() argument
473 BFA_LOG(KERN_INFO, bfad, bfa_log_level, in bfa_fcb_itnim_free()
512 struct bfad_s *bfad; in bfa_fcb_itnim_offline() local
516 bfad = port->bfad; in bfa_fcb_itnim_offline()
517 if ((bfad->pport.flags & BFAD_PORT_DELETE) || in bfa_fcb_itnim_offline()
535 bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port, in bfad_im_scsi_host_alloc() argument
551 im_port->shost = bfad_scsi_host_alloc(im_port, bfad); in bfad_im_scsi_host_alloc()
564 im_port->shost->can_queue = bfad->cfg_data.ioc_queue_depth; in bfad_im_scsi_host_alloc()
571 error = scsi_add_host_with_dma(im_port->shost, dev, &bfad->pcidev->dev); in bfad_im_scsi_host_alloc()
591 bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port) in bfad_im_scsi_host_free() argument
593 bfa_trc(bfad, bfad->inst_no); in bfad_im_scsi_host_free()
594 BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Free scsi%d\n", in bfad_im_scsi_host_free()
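The host alloc/free entries show the Scsi_Host sized from the IOC queue depth and registered against the HBA's PCI device for DMA, while the free path traces the instance number and logs the teardown. A hedged sketch of both ends follows; the return convention and error handling are assumptions, the calls named in the listing are kept as shown.

    /* Hedged sketch of bfad_im_scsi_host_alloc()/_free() per the fragments
     * above. scsi_add_host_with_dma() and scsi_host_put() are standard SCSI
     * midlayer helpers; bfad/im_port fields are from the bfa driver headers. */
    int
    bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
                            struct device *dev)
    {
            int error;

            im_port->shost = bfad_scsi_host_alloc(im_port, bfad);
            if (!im_port->shost)
                    return -ENOMEM;

            /* Size the host queue from the IOC configuration. */
            im_port->shost->can_queue = bfad->cfg_data.ioc_queue_depth;

            /* Register with the midlayer, doing DMA against the HBA's PCI device. */
            error = scsi_add_host_with_dma(im_port->shost, dev,
                                           &bfad->pcidev->dev);
            if (error) {
                    scsi_host_put(im_port->shost);
                    im_port->shost = NULL;
                    return error;
            }
            return 0;
    }

    void
    bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
    {
            bfa_trc(bfad, bfad->inst_no);
            BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Free scsi%d\n",
                    im_port->shost->host_no);

            /* ... tear down ports/devices, then release the Scsi_Host ... */
    }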
620 bfad_im_port_new(struct bfad_s *bfad, struct bfad_port_s *port) in bfad_im_port_new() argument
632 im_port->bfad = bfad; in bfad_im_port_new()
643 bfad_im_port_delete(struct bfad_s *bfad, struct bfad_port_s *port) in bfad_im_port_delete() argument
647 queue_work(bfad->im->drv_workq, in bfad_im_port_delete()
656 struct bfad_s *bfad = im_port->bfad; in bfad_im_port_clean() local
658 spin_lock_irqsave(&bfad->bfad_lock, flags); in bfad_im_port_clean()
668 spin_unlock_irqrestore(&bfad->bfad_lock, flags); in bfad_im_port_clean()
676 struct bfad_s *bfad = im->bfad; in bfad_aen_im_notify_handler() local
677 struct Scsi_Host *shost = bfad->pport.im_port->shost; in bfad_aen_im_notify_handler()
681 while (!list_empty(&bfad->active_aen_q)) { in bfad_aen_im_notify_handler()
682 spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags); in bfad_aen_im_notify_handler()
683 bfa_q_deq(&bfad->active_aen_q, &aen_entry); in bfad_aen_im_notify_handler()
684 spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags); in bfad_aen_im_notify_handler()
690 spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags); in bfad_aen_im_notify_handler()
691 list_add_tail(&aen_entry->qe, &bfad->free_aen_q); in bfad_aen_im_notify_handler()
692 spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags); in bfad_aen_im_notify_handler()
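The AEN-handler entries show events dequeued from active_aen_q under bfad_aen_spinlock, delivered, and then returned to free_aen_q under the same lock. A hedged sketch of that drain loop follows; the work_struct field name and the event delivery (against bfad->pport.im_port->shost, per the listing) are assumed context.

    /* Hedged sketch of the AEN drain loop in bfad_aen_im_notify_handler().
     * bfa_q_deq() is the driver's dequeue macro; list_add_tail() and the
     * spinlock calls are standard kernel APIs. The container_of() field name
     * and the delivery step in the middle are assumptions. */
    void
    bfad_aen_im_notify_handler(struct work_struct *work)
    {
            struct bfad_im_s *im =
                    container_of(work, struct bfad_im_s, aen_im_notify_work);
            struct bfad_s *bfad = im->bfad;
            struct bfa_aen_entry_s *aen_entry;
            unsigned long flags;

            while (!list_empty(&bfad->active_aen_q)) {
                    /* Pop the oldest pending event under the AEN lock. */
                    spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
                    bfa_q_deq(&bfad->active_aen_q, &aen_entry);
                    spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);

                    /* ... post aen_entry against bfad->pport.im_port->shost
                     *     via the FC transport / netlink ... */

                    /* Recycle the entry onto the free list. */
                    spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
                    list_add_tail(&aen_entry->qe, &bfad->free_aen_q);
                    spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);
            }
    }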
697 bfad_im_probe(struct bfad_s *bfad) in bfad_im_probe() argument
705 bfad->im = im; in bfad_im_probe()
706 im->bfad = bfad; in bfad_im_probe()
708 if (bfad_thread_workq(bfad) != BFA_STATUS_OK) { in bfad_im_probe()
718 bfad_im_probe_undo(struct bfad_s *bfad) in bfad_im_probe_undo() argument
720 if (bfad->im) { in bfad_im_probe_undo()
721 bfad_destroy_workq(bfad->im); in bfad_im_probe_undo()
722 kfree(bfad->im); in bfad_im_probe_undo()
723 bfad->im = NULL; in bfad_im_probe_undo()
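The probe entries show a bfad_im_s being allocated, cross-linked with the bfad instance, and its worker thread started; the undo path destroys the workqueue and frees the structure. A hedged sketch follows; the allocation flags and error codes are assumptions, the flow matches the fragments.

    /* Hedged sketch of bfad_im_probe()/bfad_im_probe_undo() per the fragments
     * above. kzalloc/kfree are standard kernel allocators; bfad_thread_workq(),
     * bfad_destroy_workq() and the BFA_STATUS_* codes are from the bfa driver. */
    bfa_status_t
    bfad_im_probe(struct bfad_s *bfad)
    {
            struct bfad_im_s *im;

            im = kzalloc(sizeof(struct bfad_im_s), GFP_KERNEL);
            if (!im)
                    return BFA_STATUS_ENOMEM;

            /* Cross-link the IM module and the driver instance. */
            bfad->im = im;
            im->bfad = bfad;

            if (bfad_thread_workq(bfad) != BFA_STATUS_OK) {
                    kfree(im);
                    bfad->im = NULL;
                    return BFA_STATUS_FAILED;
            }
            return BFA_STATUS_OK;
    }

    void
    bfad_im_probe_undo(struct bfad_s *bfad)
    {
            if (bfad->im) {
                    bfad_destroy_workq(bfad->im);
                    kfree(bfad->im);
                    bfad->im = NULL;
            }
    }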
728 bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad) in bfad_scsi_host_alloc() argument
740 sht->sg_tablesize = bfad->cfg_data.io_max_sge; in bfad_scsi_host_alloc()
746 bfad_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port) in bfad_scsi_host_free() argument
749 flush_workqueue(bfad->im->drv_workq); in bfad_scsi_host_free()
750 bfad_im_scsi_host_free(im_port->bfad, im_port); in bfad_scsi_host_free()
766 bfad_thread_workq(struct bfad_s *bfad) in bfad_thread_workq() argument
768 struct bfad_im_s *im = bfad->im; in bfad_thread_workq()
770 bfa_trc(bfad, 0); in bfad_thread_workq()
772 bfad->inst_no); in bfad_thread_workq()
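The workqueue entries show a per-instance worker thread whose name carries bfad->inst_no. A hedged sketch follows; the name buffer, its format string and the failure return are assumptions, only drv_workq and the trace of inst_no come from the listing.

    /* Hedged sketch of bfad_thread_workq(): a single-threaded workqueue per
     * driver instance. create_singlethread_workqueue() is a standard kernel
     * API; the drv_workq_name field and naming scheme are assumed. */
    bfa_status_t
    bfad_thread_workq(struct bfad_s *bfad)
    {
            struct bfad_im_s *im = bfad->im;

            bfa_trc(bfad, 0);
            snprintf(im->drv_workq_name, sizeof(im->drv_workq_name), "bfad_wq_%d",
                     bfad->inst_no);
            im->drv_workq = create_singlethread_workqueue(im->drv_workq_name);
            if (!im->drv_workq)
                    return BFA_STATUS_FAILED;

            return BFA_STATUS_OK;
    }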
1028 struct bfad_s *bfad = im_port->bfad; in bfad_fc_host_init() local
1031 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); in bfad_fc_host_init()
1037 fc_host_max_npiv_vports(host) = bfa_lps_get_max_vport(&bfad->bfa); in bfad_fc_host_init()
1049 strlcpy(symname, bfad->bfa_fcs.fabric.bport.port_cfg.sym_name.symname, in bfad_fc_host_init()
1053 fc_host_supported_speeds(host) = bfad_im_supported_speeds(&bfad->bfa); in bfad_fc_host_init()
1109 struct bfad_s *bfad = im->bfad; in bfad_im_itnim_work_handler() local
1117 spin_lock_irqsave(&bfad->bfad_lock, flags); in bfad_im_itnim_work_handler()
1119 bfa_trc(bfad, itnim->state); in bfad_im_itnim_work_handler()
1123 spin_unlock_irqrestore(&bfad->bfad_lock, flags); in bfad_im_itnim_work_handler()
1125 spin_lock_irqsave(&bfad->bfad_lock, flags); in bfad_im_itnim_work_handler()
1132 BFA_LOG(KERN_INFO, bfad, bfa_log_level, in bfad_im_itnim_work_handler()
1154 spin_unlock_irqrestore(&bfad->bfad_lock, flags); in bfad_im_itnim_work_handler()
1156 bfa_fcpim_path_tov_get(&bfad->bfa) + 1; in bfad_im_itnim_work_handler()
1158 spin_lock_irqsave(&bfad->bfad_lock, flags); in bfad_im_itnim_work_handler()
1165 BFA_LOG(KERN_INFO, bfad, bfa_log_level, in bfad_im_itnim_work_handler()
1180 spin_unlock_irqrestore(&bfad->bfad_lock, flags); in bfad_im_itnim_work_handler()
1182 bfa_fcpim_path_tov_get(&bfad->bfa) + 1; in bfad_im_itnim_work_handler()
1184 spin_lock_irqsave(&bfad->bfad_lock, flags); in bfad_im_itnim_work_handler()
1196 spin_unlock_irqrestore(&bfad->bfad_lock, flags); in bfad_im_itnim_work_handler()
1207 struct bfad_s *bfad = im_port->bfad; in bfad_im_queuecommand_lck() local
1223 if (bfad->bfad_flags & BFAD_EEH_BUSY) { in bfad_im_queuecommand_lck()
1224 if (bfad->bfad_flags & BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE) in bfad_im_queuecommand_lck()
1238 spin_lock_irqsave(&bfad->bfad_lock, flags); in bfad_im_queuecommand_lck()
1239 if (!(bfad->bfad_flags & BFAD_HAL_START_DONE)) { in bfad_im_queuecommand_lck()
1242 bfad->inst_no, cmnd, cmnd->cmnd[0]); in bfad_im_queuecommand_lck()
1254 hal_io = bfa_ioim_alloc(&bfad->bfa, (struct bfad_ioim_s *) cmnd, in bfad_im_queuecommand_lck()
1258 spin_unlock_irqrestore(&bfad->bfad_lock, flags); in bfad_im_queuecommand_lck()
1265 spin_unlock_irqrestore(&bfad->bfad_lock, flags); in bfad_im_queuecommand_lck()
1270 spin_unlock_irqrestore(&bfad->bfad_lock, flags); in bfad_im_queuecommand_lck()
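The queuecommand entries show the fast-path gatekeeping: EEH and HAL-start flag checks, then bfa_ioim_alloc() under bfad_lock, with every exit releasing the lock. A hedged sketch of that flow follows; the classic (cmnd, done) prototype, the itnim lookup, the message text and the error dispositions are assumptions (newer kernels complete via scsi_done() instead).

    /* Hedged sketch of the bfad_im_queuecommand_lck() flow per the fragments
     * above. The BFAD_* flags, bfa_ioim_alloc()/bfa_ioim_start() and bfad_lock
     * are from the bfa driver; everything else is assumed context. */
    static int
    bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd,
                             void (*done)(struct scsi_cmnd *))
    {
            struct bfad_im_port_s *im_port =
                    (struct bfad_im_port_s *) cmnd->device->host->hostdata[0];
            struct bfad_s *bfad = im_port->bfad;
            struct bfad_itnim_data_s *itnim_data = cmnd->device->hostdata;
            struct bfa_ioim_s *hal_io;
            unsigned long flags;

            /* Fail or requeue immediately while EEH recovery is in progress. */
            if (bfad->bfad_flags & BFAD_EEH_BUSY) {
                    if (bfad->bfad_flags & BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE)
                            cmnd->result = DID_NO_CONNECT << 16;
                    else
                            cmnd->result = DID_REQUEUE << 16;
                    done(cmnd);
                    return 0;
            }

            spin_lock_irqsave(&bfad->bfad_lock, flags);
            if (!(bfad->bfad_flags & BFAD_HAL_START_DONE)) {
                    /* HAL not started yet; complete the command with an error. */
                    printk(KERN_WARNING
                           "bfad%d, queuecommand %p %x failed, BFA stopped\n",
                           bfad->inst_no, cmnd, cmnd->cmnd[0]);
                    spin_unlock_irqrestore(&bfad->bfad_lock, flags);
                    cmnd->result = DID_NO_CONNECT << 16;
                    done(cmnd);
                    return 0;
            }

            hal_io = bfa_ioim_alloc(&bfad->bfa, (struct bfad_ioim_s *) cmnd,
                                    itnim_data->itnim);
            if (!hal_io) {
                    /* Out of IO resources: ask the midlayer to retry later. */
                    spin_unlock_irqrestore(&bfad->bfad_lock, flags);
                    return SCSI_MLQUEUE_HOST_BUSY;
            }

            bfa_ioim_start(hal_io);
            spin_unlock_irqrestore(&bfad->bfad_lock, flags);
            return 0;
    }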
1281 bfad_rport_online_wait(struct bfad_s *bfad) in bfad_rport_online_wait() argument
1286 for (i = 0; !(bfad->bfad_flags & BFAD_PORT_ONLINE) in bfad_rport_online_wait()
1292 if (bfad->bfad_flags & BFAD_PORT_ONLINE) { in bfad_rport_online_wait()
1295 for (i = 0; !(bfad->bfad_flags & BFAD_RPORT_ONLINE) in bfad_rport_online_wait()
1301 if (rport_delay > 0 && (bfad->bfad_flags & BFAD_RPORT_ONLINE)) { in bfad_rport_online_wait()
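The wait-loop entries for bfad_rport_online_wait() show a bounded poll on bfad_flags: first for BFAD_PORT_ONLINE, then for BFAD_RPORT_ONLINE, followed by an optional settle delay. A hedged sketch of that loop follows; the retry bounds, the sleep granularity and the use of bfa_linkup_delay are assumptions.

    /* Hedged sketch of bfad_rport_online_wait() per the fragments above.
     * The BFAD_* flag names come from the listing; msleep() is a standard
     * kernel delay; the bounds and settle delay are assumed context. */
    void
    bfad_rport_online_wait(struct bfad_s *bfad)
    {
            int i;
            int rport_delay = 10;

            /* Wait (bounded) for the base port to come online. */
            for (i = 0; !(bfad->bfad_flags & BFAD_PORT_ONLINE)
                    && i < bfa_linkup_delay; i++)
                    msleep(1000);

            if (bfad->bfad_flags & BFAD_PORT_ONLINE) {
                    /* Port is up; now wait (bounded) for remote ports. */
                    rport_delay = rport_delay < bfa_linkup_delay ?
                            rport_delay : bfa_linkup_delay;
                    for (i = 0; !(bfad->bfad_flags & BFAD_RPORT_ONLINE)
                            && i < rport_delay; i++)
                            msleep(1000);

                    /* Give discovered rports a moment to settle before IO. */
                    if (rport_delay > 0 && (bfad->bfad_flags & BFAD_RPORT_ONLINE))
                            msleep(rport_delay * 1000);
            }
    }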
1309 bfad_get_linkup_delay(struct bfad_s *bfad) in bfad_get_linkup_delay() argument
1322 bfa_iocfc_get_bootwwns(&bfad->bfa, &nwwns, wwns); in bfad_get_linkup_delay()