Lines matching refs: fcport (all hits below are in drivers/scsi/qedf/qedf_io.c)

24 struct qedf_rport *fcport; in qedf_cmd_timeout() local
31 fcport = io_req->fcport; in qedf_cmd_timeout()
32 if (io_req->fcport == NULL) { in qedf_cmd_timeout()
37 qedf = fcport->qedf; in qedf_cmd_timeout()
68 qedf_restart_rport(fcport); in qedf_cmd_timeout()
303 struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type) in qedf_alloc_cmd() argument
305 struct qedf_ctx *qedf = fcport->qedf; in qedf_alloc_cmd()
314 free_sqes = atomic_read(&fcport->free_sqes); in qedf_alloc_cmd()
324 if ((atomic_read(&fcport->num_active_ios) >= in qedf_alloc_cmd()
328 atomic_read(&fcport->num_active_ios)); in qedf_alloc_cmd()
367 atomic_inc(&fcport->num_active_ios); in qedf_alloc_cmd()
368 atomic_dec(&fcport->free_sqes); in qedf_alloc_cmd()
373 io_req->fcport = fcport; in qedf_alloc_cmd()
411 struct qedf_ctx *qedf = io_req->fcport->qedf; in qedf_free_mp_resc()
442 struct qedf_rport *fcport = io_req->fcport; in qedf_release_cmd() local
446 QEDF_WARN(&fcport->qedf->dbg_ctx, in qedf_release_cmd()
457 atomic_dec(&fcport->num_active_ios); in qedf_release_cmd()
459 if (atomic_read(&fcport->num_active_ios) < 0) { in qedf_release_cmd()
460 QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n"); in qedf_release_cmd()
466 io_req->fcport = NULL; in qedf_release_cmd()
471 io_req->fcport = NULL; in qedf_release_cmd()
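A minimal userspace sketch (not driver code) of the per-rport accounting the qedf_alloc_cmd()/qedf_release_cmd() hits above point at: allocating a command consumes a free SQE and raises the active-I/O count, releasing it lowers the count and warns on underflow. Field names follow the listing; the cap and the error reporting are placeholder assumptions.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct rport_acct {
	atomic_int free_sqes;       /* SQ entries still available */
	atomic_int num_active_ios;  /* I/Os currently outstanding on this session */
	int max_active_ios;         /* placeholder cap, cf. the >= test at line 324 */
};

static bool acct_alloc(struct rport_acct *a)
{
	if (atomic_load(&a->free_sqes) <= 0)
		return false;                         /* no SQE slot to post into */
	if (atomic_load(&a->num_active_ios) >= a->max_active_ios)
		return false;                         /* session already saturated */
	atomic_fetch_add(&a->num_active_ios, 1);
	atomic_fetch_sub(&a->free_sqes, 1);
	return true;
}

static void acct_release(struct rport_acct *a)
{
	/* atomic_fetch_sub() returns the old value, so old - 1 is the new count */
	if (atomic_fetch_sub(&a->num_active_ios, 1) - 1 < 0)
		fprintf(stderr, "active_ios < 0\n");  /* mirrors the WARN at line 460 */
}

The qedf_queuecommand() hits further down also show free_sqes being re-incremented when a post fails (line 1046), which is what keeps the two counters consistent on error paths.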
591 static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport, in qedf_init_task() argument
601 struct qedf_ctx *qedf = fcport->qedf; in qedf_init_task()
633 io_req->task_params->conn_cid = fcport->fw_cid; in qedf_init_task()
636 io_req->task_params->is_tape_device = fcport->dev_type; in qedf_init_task()
685 struct qedf_rport *fcport = io_req->fcport; in qedf_init_mp_task() local
686 struct qedf_ctx *qedf = io_req->fcport->qedf; in qedf_init_mp_task()
713 io_req->task_params->conn_cid = fcport->fw_cid; in qedf_init_mp_task()
717 io_req->task_params->is_tape_device = fcport->dev_type; in qedf_init_mp_task()
763 u16 qedf_get_sqe_idx(struct qedf_rport *fcport) in qedf_get_sqe_idx() argument
765 uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe)); in qedf_get_sqe_idx()
768 rval = fcport->sq_prod_idx; in qedf_get_sqe_idx()
771 fcport->sq_prod_idx++; in qedf_get_sqe_idx()
772 fcport->fw_sq_prod_idx++; in qedf_get_sqe_idx()
773 if (fcport->sq_prod_idx == total_sqe) in qedf_get_sqe_idx()
774 fcport->sq_prod_idx = 0; in qedf_get_sqe_idx()
779 void qedf_ring_doorbell(struct qedf_rport *fcport) in qedf_ring_doorbell() argument
790 dbell.sq_prod = fcport->fw_sq_prod_idx; in qedf_ring_doorbell()
796 writel(*(u32 *)&dbell, fcport->p_doorbell); in qedf_ring_doorbell()
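The qedf_get_sqe_idx()/qedf_ring_doorbell() hits lend themselves to a small standalone model (an assumption-laden sketch, not the driver): the caller is handed the current slot, the software producer index wraps at the queue size, while the firmware-facing index is bumped in step but not wrapped here and is the value the doorbell write at line 796 carries. WQE_SIZE stands in for sizeof(struct fcoe_wqe).

#include <stdint.h>

#define WQE_SIZE 64u	/* stand-in for sizeof(struct fcoe_wqe) */

struct sq_model {
	uint32_t sq_mem_size;     /* bytes backing the send queue */
	uint16_t sq_prod_idx;     /* software slot index, wraps at total_sqe */
	uint16_t fw_sq_prod_idx;  /* value carried by the doorbell write */
};

static uint16_t sq_get_idx(struct sq_model *sq)
{
	uint16_t total_sqe = sq->sq_mem_size / WQE_SIZE;
	uint16_t rval = sq->sq_prod_idx;

	sq->sq_prod_idx++;
	sq->fw_sq_prod_idx++;
	if (sq->sq_prod_idx == total_sqe)
		sq->sq_prod_idx = 0;	/* wrap the slot index, not the doorbell value */
	return rval;
}

In the posting paths further down (abort, cleanup, TMF) this pair is always called with fcport->rport_lock held, which keeps slot reservation and the doorbell write from interleaving between submitters.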
805 static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req, in qedf_trace_io() argument
808 struct qedf_ctx *qedf = fcport->qedf; in qedf_trace_io()
819 io_log->port_id = fcport->rdata->ids.port_id; in qedf_trace_io()
852 int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req) in qedf_post_io_req() argument
893 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) || in qedf_post_io_req()
894 test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { in qedf_post_io_req()
906 sqe_idx = qedf_get_sqe_idx(fcport); in qedf_post_io_req()
907 sqe = &fcport->sq[sqe_idx]; in qedf_post_io_req()
921 qedf_init_task(fcport, lport, io_req, task_ctx, sqe); in qedf_post_io_req()
924 qedf_ring_doorbell(fcport); in qedf_post_io_req()
930 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ); in qedf_post_io_req()
942 struct qedf_rport *fcport; in qedf_queuecommand() local
1003 fcport = (struct qedf_rport *)&rp[1]; in qedf_queuecommand()
1005 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) || in qedf_queuecommand()
1006 test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { in qedf_queuecommand()
1015 atomic_inc(&fcport->ios_to_queue); in qedf_queuecommand()
1017 if (fcport->retry_delay_timestamp) { in qedf_queuecommand()
1019 spin_lock_irqsave(&fcport->rport_lock, flags); in qedf_queuecommand()
1020 if (time_after(jiffies, fcport->retry_delay_timestamp)) { in qedf_queuecommand()
1021 fcport->retry_delay_timestamp = 0; in qedf_queuecommand()
1023 spin_unlock_irqrestore(&fcport->rport_lock, flags); in qedf_queuecommand()
1026 atomic_dec(&fcport->ios_to_queue); in qedf_queuecommand()
1029 spin_unlock_irqrestore(&fcport->rport_lock, flags); in qedf_queuecommand()
1032 io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD); in qedf_queuecommand()
1035 atomic_dec(&fcport->ios_to_queue); in qedf_queuecommand()
1042 spin_lock_irqsave(&fcport->rport_lock, flags); in qedf_queuecommand()
1043 if (qedf_post_io_req(fcport, io_req)) { in qedf_queuecommand()
1046 atomic_inc(&fcport->free_sqes); in qedf_queuecommand()
1049 spin_unlock_irqrestore(&fcport->rport_lock, flags); in qedf_queuecommand()
1050 atomic_dec(&fcport->ios_to_queue); in qedf_queuecommand()
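The retry_delay_timestamp checks in the qedf_queuecommand() hits (lines 1017-1029) and the store in qedf_scsi_completion() (line 1297) outline a simple deferral gate: a completion records a future timestamp and new commands are bounced back until it passes. Below is a hedged userspace model; time_after() is the usual wrap-safe jiffies comparison, and "jiffies" here is only a stand-in counter.

#include <stdbool.h>

/* wrap-safe "a is strictly later than b" for a free-running counter */
#define time_after(a, b)  ((long)((b) - (a)) < 0)

static unsigned long jiffies;	/* stand-in for the kernel tick counter */

struct retry_gate {
	unsigned long retry_delay_timestamp;	/* 0 means no delay in force */
};

/* Returns true if a new command may be queued now; false means the caller
 * should push it back to the midlayer and retry later. */
static bool retry_gate_allows(struct retry_gate *g)
{
	if (!g->retry_delay_timestamp)
		return true;
	/* the driver holds fcport->rport_lock around this test (lines 1019-1029) */
	if (time_after(jiffies, g->retry_delay_timestamp)) {
		g->retry_delay_timestamp = 0;	/* delay has expired, clear the gate */
		return true;
	}
	return false;
}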
1060 struct qedf_ctx *qedf = io_req->fcport->qedf; in qedf_parse_fcp_rsp()
1129 struct qedf_rport *fcport; in qedf_scsi_completion() local
1182 fcport = io_req->fcport; in qedf_scsi_completion()
1188 if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) || in qedf_scsi_completion()
1189 (test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags) && in qedf_scsi_completion()
1190 sc_cmd->device->lun == (u64)fcport->lun_reset_lun)) { in qedf_scsi_completion()
1296 spin_lock_irqsave(&fcport->rport_lock, flags); in qedf_scsi_completion()
1297 fcport->retry_delay_timestamp = in qedf_scsi_completion()
1299 spin_unlock_irqrestore(&fcport->rport_lock, in qedf_scsi_completion()
1317 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP); in qedf_scsi_completion()
1427 qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP); in qedf_scsi_done()
1452 struct qedf_rport *fcport = io_req->fcport; in qedf_process_warning_compl() local
1463 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, " in qedf_process_warning_compl()
1465 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), in qedf_process_warning_compl()
1469 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, " in qedf_process_warning_compl()
1487 if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) { in qedf_process_warning_compl()
1528 if (io_req->fcport == NULL) { in qedf_process_error_detect()
1539 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, " in qedf_process_error_detect()
1541 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), in qedf_process_error_detect()
1545 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, " in qedf_process_error_detect()
1552 if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &io_req->fcport->flags) || in qedf_process_error_detect()
1553 (test_bit(QEDF_RPORT_IN_LUN_RESET, &io_req->fcport->flags) && in qedf_process_error_detect()
1554 io_req->sc_cmd->device->lun == (u64)io_req->fcport->lun_reset_lun)) { in qedf_process_error_detect()
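Both qedf_scsi_completion() (lines 1188-1190) and qedf_process_error_detect() (lines 1552-1554) apply the same test before acting on a CQE: drop it if the whole target is being reset, or if a LUN reset is in flight and the I/O belongs to that LUN. A helper capturing that predicate might look like the sketch below; the bit assignments and type names are placeholders, not the driver's definitions.

#include <stdbool.h>
#include <stdint.h>

#define RPORT_IN_TARGET_RESET  (1ul << 0)	/* placeholder bit assignments */
#define RPORT_IN_LUN_RESET     (1ul << 1)

struct fcport_model {
	unsigned long flags;
	uint64_t lun_reset_lun;	/* LUN named by an in-flight LUN reset */
};

static bool cqe_masked_by_reset(const struct fcport_model *fcp, uint64_t lun)
{
	if (fcp->flags & RPORT_IN_TARGET_RESET)
		return true;	/* everything on this session is being flushed */
	if ((fcp->flags & RPORT_IN_LUN_RESET) && lun == fcp->lun_reset_lun)
		return true;	/* only this LUN's I/O is being flushed */
	return false;
}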
1603 void qedf_flush_active_ios(struct qedf_rport *fcport, int lun) in qedf_flush_active_ios() argument
1614 if (!fcport) { in qedf_flush_active_ios()
1620 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { in qedf_flush_active_ios()
1625 qedf = fcport->qedf; in qedf_flush_active_ios()
1633 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) && in qedf_flush_active_ios()
1635 while (atomic_read(&fcport->ios_to_queue)) { in qedf_flush_active_ios()
1638 atomic_read(&fcport->ios_to_queue)); in qedf_flush_active_ios()
1642 atomic_read(&fcport->ios_to_queue)); in qedf_flush_active_ios()
1653 atomic_read(&fcport->num_active_ios), fcport, in qedf_flush_active_ios()
1654 fcport->rdata->ids.port_id, fcport->rport->scsi_target_id); in qedf_flush_active_ios()
1659 set_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags); in qedf_flush_active_ios()
1661 set_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags); in qedf_flush_active_ios()
1662 fcport->lun_reset_lun = lun; in qedf_flush_active_ios()
1670 if (!io_req->fcport) in qedf_flush_active_ios()
1688 if (io_req->fcport != fcport) in qedf_flush_active_ios()
1821 flush_cnt, atomic_read(&fcport->num_active_ios)); in qedf_flush_active_ios()
1823 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) && in qedf_flush_active_ios()
1825 while (atomic_read(&fcport->num_active_ios)) { in qedf_flush_active_ios()
1829 atomic_read(&fcport->num_active_ios), in qedf_flush_active_ios()
1835 atomic_read(&fcport->num_active_ios)); in qedf_flush_active_ios()
1838 if (io_req->fcport && in qedf_flush_active_ios()
1839 io_req->fcport == fcport) { in qedf_flush_active_ios()
1861 clear_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags); in qedf_flush_active_ios()
1862 clear_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags); in qedf_flush_active_ios()
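The qedf_flush_active_ios() hits (lines 1603-1862 above) bracket the flush with two bounded drain loops: wait for ios_to_queue to reach zero before flagging the reset, then for num_active_ios to reach zero before the reset flags are cleared. A generic form of that wait, with placeholder poll interval and timeout, could be:

#include <stdatomic.h>
#include <stdbool.h>
#include <unistd.h>

/* Poll *cnt until it drains to zero or max_polls attempts are used up.
 * Returns false on timeout so the caller can log the leftover count. */
static bool drain_counter(atomic_int *cnt, int max_polls)
{
	while (atomic_load(cnt) != 0) {
		if (max_polls-- <= 0)
			return false;
		usleep(20 * 1000);	/* placeholder 20 ms poll interval */
	}
	return true;
}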
1874 struct qedf_rport *fcport = io_req->fcport; in qedf_initiate_abts() local
1885 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { in qedf_initiate_abts()
1891 qedf = fcport->qedf; in qedf_initiate_abts()
1892 rdata = fcport->rdata; in qedf_initiate_abts()
1915 if (!atomic_read(&fcport->free_sqes)) { in qedf_initiate_abts()
1921 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { in qedf_initiate_abts()
1955 spin_lock_irqsave(&fcport->rport_lock, flags); in qedf_initiate_abts()
1957 sqe_idx = qedf_get_sqe_idx(fcport); in qedf_initiate_abts()
1958 sqe = &fcport->sq[sqe_idx]; in qedf_initiate_abts()
1963 qedf_ring_doorbell(fcport); in qedf_initiate_abts()
1965 spin_unlock_irqrestore(&fcport->rport_lock, flags); in qedf_initiate_abts()
1978 struct qedf_rport *fcport = io_req->fcport; in qedf_process_abts_compl() local
1989 if (!fcport) { in qedf_process_abts_compl()
2000 if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) || in qedf_process_abts_compl()
2001 test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) { in qedf_process_abts_compl()
2066 struct qedf_ctx *qedf = io_req->fcport->qedf; in qedf_init_mp_req()
2168 struct qedf_rport *fcport; in qedf_initiate_cleanup() local
2177 fcport = io_req->fcport; in qedf_initiate_cleanup()
2178 if (!fcport) { in qedf_initiate_cleanup()
2184 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { in qedf_initiate_cleanup()
2189 qedf = fcport->qedf; in qedf_initiate_cleanup()
2210 if (!atomic_read(&fcport->free_sqes)) { in qedf_initiate_cleanup()
2230 refcount, fcport, fcport->rdata->ids.port_id); in qedf_initiate_cleanup()
2238 spin_lock_irqsave(&fcport->rport_lock, flags); in qedf_initiate_cleanup()
2240 sqe_idx = qedf_get_sqe_idx(fcport); in qedf_initiate_cleanup()
2241 sqe = &fcport->sq[sqe_idx]; in qedf_initiate_cleanup()
2246 qedf_ring_doorbell(fcport); in qedf_initiate_cleanup()
2248 spin_unlock_irqrestore(&fcport->rport_lock, flags); in qedf_initiate_cleanup()
2304 static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd, in qedf_execute_tmf() argument
2309 struct qedf_ctx *qedf = fcport->qedf; in qedf_execute_tmf()
2325 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { in qedf_execute_tmf()
2331 io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD); in qedf_execute_tmf()
2345 io_req->fcport = fcport; in qedf_execute_tmf()
2370 spin_lock_irqsave(&fcport->rport_lock, flags); in qedf_execute_tmf()
2372 sqe_idx = qedf_get_sqe_idx(fcport); in qedf_execute_tmf()
2373 sqe = &fcport->sq[sqe_idx]; in qedf_execute_tmf()
2376 qedf_init_task(fcport, lport, io_req, task, sqe); in qedf_execute_tmf()
2377 qedf_ring_doorbell(fcport); in qedf_execute_tmf()
2379 spin_unlock_irqrestore(&fcport->rport_lock, flags); in qedf_execute_tmf()
2402 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { in qedf_execute_tmf()
2412 qedf_flush_active_ios(fcport, lun); in qedf_execute_tmf()
2414 qedf_flush_active_ios(fcport, -1); in qedf_execute_tmf()
2431 struct qedf_rport *fcport = (struct qedf_rport *)&rp[1]; in qedf_initiate_tmf() local
2438 struct fc_rport_priv *rdata = fcport->rdata; in qedf_initiate_tmf()
2473 if (!fcport) { in qedf_initiate_tmf()
2479 qedf = fcport->qedf; in qedf_initiate_tmf()
2487 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { in qedf_initiate_tmf()
2505 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { in qedf_initiate_tmf()
2506 if (!fcport->rdata) in qedf_initiate_tmf()
2508 fcport); in qedf_initiate_tmf()
2512 fcport, fcport->rdata->ids.port_id); in qedf_initiate_tmf()
2517 rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags); in qedf_initiate_tmf()