Lines matching refs:fcport
24 struct qedf_rport *fcport; in qedf_cmd_timeout() local
26 fcport = io_req->fcport; in qedf_cmd_timeout()
27 if (io_req->fcport == NULL) { in qedf_cmd_timeout()
32 qedf = fcport->qedf; in qedf_cmd_timeout()
63 qedf_restart_rport(fcport); in qedf_cmd_timeout()
298 struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type) in qedf_alloc_cmd() argument
300 struct qedf_ctx *qedf = fcport->qedf; in qedf_alloc_cmd()
309 free_sqes = atomic_read(&fcport->free_sqes); in qedf_alloc_cmd()
319 if ((atomic_read(&fcport->num_active_ios) >= in qedf_alloc_cmd()
323 atomic_read(&fcport->num_active_ios)); in qedf_alloc_cmd()
362 atomic_inc(&fcport->num_active_ios); in qedf_alloc_cmd()
363 atomic_dec(&fcport->free_sqes); in qedf_alloc_cmd()
368 io_req->fcport = fcport; in qedf_alloc_cmd()
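
The qedf_alloc_cmd() hits above (lines 298-368) show the per-rport accounting done while handing out a command: free_sqes is read, allocation is refused once num_active_ios crosses a threshold (line 319), and on success num_active_ios is incremented, free_sqes is decremented, and the owning fcport is recorded in the io_req; qedf_release_cmd() (lines 437-466) later drops num_active_ios and clears the back-pointer. A minimal user-space C sketch of that accounting follows; the cap value, the free_sqes gating, the error return, and the simplified types are assumptions, not the driver's definitions.

#include <stdio.h>
#include <stdatomic.h>

/* Simplified stand-ins for struct qedf_rport / struct qedf_ioreq. */
struct rport_model {
    atomic_int free_sqes;       /* SQ entries still available */
    atomic_int num_active_ios;  /* commands currently outstanding */
    int max_active_ios;         /* assumed per-rport cap */
};

struct ioreq_model {
    struct rport_model *fcport; /* back-pointer, as in io_req->fcport */
};

/* Assumed gating: refuse when no SQEs remain or the active-I/O cap is hit. */
static int alloc_cmd(struct rport_model *rp, struct ioreq_model *io)
{
    if (atomic_load(&rp->free_sqes) == 0)
        return -1;
    if (atomic_load(&rp->num_active_ios) >= rp->max_active_ios)
        return -1;

    atomic_fetch_add(&rp->num_active_ios, 1);
    atomic_fetch_sub(&rp->free_sqes, 1);
    io->fcport = rp;
    return 0;
}

/* Mirrors qedf_release_cmd(): drop the count and clear the back-pointer. */
static void release_cmd(struct ioreq_model *io)
{
    atomic_fetch_sub(&io->fcport->num_active_ios, 1);
    io->fcport = NULL;
}

int main(void)
{
    struct rport_model rp = { .free_sqes = 2, .max_active_ios = 2 };
    struct ioreq_model a, b, c;

    printf("a: %d\n", alloc_cmd(&rp, &a)); /* 0 */
    printf("b: %d\n", alloc_cmd(&rp, &b)); /* 0 */
    printf("c: %d\n", alloc_cmd(&rp, &c)); /* -1, limits reached */
    release_cmd(&a);
    return 0;
}
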
406 struct qedf_ctx *qedf = io_req->fcport->qedf; in qedf_free_mp_resc()
437 struct qedf_rport *fcport = io_req->fcport; in qedf_release_cmd() local
441 QEDF_WARN(&fcport->qedf->dbg_ctx, in qedf_release_cmd()
452 atomic_dec(&fcport->num_active_ios); in qedf_release_cmd()
454 if (atomic_read(&fcport->num_active_ios) < 0) { in qedf_release_cmd()
455 QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n"); in qedf_release_cmd()
461 io_req->fcport = NULL; in qedf_release_cmd()
466 io_req->fcport = NULL; in qedf_release_cmd()
586 static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport, in qedf_init_task() argument
596 struct qedf_ctx *qedf = fcport->qedf; in qedf_init_task()
628 io_req->task_params->conn_cid = fcport->fw_cid; in qedf_init_task()
631 io_req->task_params->is_tape_device = fcport->dev_type; in qedf_init_task()
680 struct qedf_rport *fcport = io_req->fcport; in qedf_init_mp_task() local
681 struct qedf_ctx *qedf = io_req->fcport->qedf; in qedf_init_mp_task()
708 io_req->task_params->conn_cid = fcport->fw_cid; in qedf_init_mp_task()
712 io_req->task_params->is_tape_device = fcport->dev_type; in qedf_init_mp_task()
758 u16 qedf_get_sqe_idx(struct qedf_rport *fcport) in qedf_get_sqe_idx() argument
760 uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe)); in qedf_get_sqe_idx()
763 rval = fcport->sq_prod_idx; in qedf_get_sqe_idx()
766 fcport->sq_prod_idx++; in qedf_get_sqe_idx()
767 fcport->fw_sq_prod_idx++; in qedf_get_sqe_idx()
768 if (fcport->sq_prod_idx == total_sqe) in qedf_get_sqe_idx()
769 fcport->sq_prod_idx = 0; in qedf_get_sqe_idx()
774 void qedf_ring_doorbell(struct qedf_rport *fcport) in qedf_ring_doorbell() argument
785 dbell.sq_prod = fcport->fw_sq_prod_idx; in qedf_ring_doorbell()
791 writel(*(u32 *)&dbell, fcport->p_doorbell); in qedf_ring_doorbell()
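
qedf_get_sqe_idx() (lines 758-769) hands out send-queue slots as a simple wrap-around producer index: total_sqe is derived from the queue memory size, the current sq_prod_idx is returned, both sq_prod_idx and fw_sq_prod_idx advance, and sq_prod_idx wraps to 0 at total_sqe; qedf_ring_doorbell() then publishes fw_sq_prod_idx to hardware (lines 785, 791). A small user-space model of just the index arithmetic, with the sizes and simplified types assumed:

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for the fcport's send-queue bookkeeping. */
struct sq_model {
    uint32_t sq_mem_size;    /* bytes backing the SQ ring */
    uint32_t wqe_size;       /* stand-in for sizeof(struct fcoe_wqe) */
    uint16_t sq_prod_idx;    /* driver-side producer index (wraps) */
    uint16_t fw_sq_prod_idx; /* value later written to the doorbell */
};

/* Mirrors qedf_get_sqe_idx(): return the current slot, then advance. */
static uint16_t get_sqe_idx(struct sq_model *sq)
{
    uint16_t total_sqe = sq->sq_mem_size / sq->wqe_size;
    uint16_t rval = sq->sq_prod_idx;

    sq->sq_prod_idx++;
    sq->fw_sq_prod_idx++;
    if (sq->sq_prod_idx == total_sqe)
        sq->sq_prod_idx = 0;

    return rval;
}

int main(void)
{
    /* Assumed sizes: a 4-entry ring of 64-byte WQEs. */
    struct sq_model sq = { .sq_mem_size = 256, .wqe_size = 64 };

    for (int i = 0; i < 6; i++) {
        unsigned int slot = get_sqe_idx(&sq);
        printf("slot=%u fw_prod=%u\n", slot, (unsigned int)sq.fw_sq_prod_idx);
    }
    /* slot wraps 0,1,2,3,0,1 while fw_prod keeps counting 1..6 */
    return 0;
}
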
800 static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req, in qedf_trace_io() argument
803 struct qedf_ctx *qedf = fcport->qedf; in qedf_trace_io()
813 io_log->port_id = fcport->rdata->ids.port_id; in qedf_trace_io()
846 int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req) in qedf_post_io_req() argument
887 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) || in qedf_post_io_req()
888 test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { in qedf_post_io_req()
900 sqe_idx = qedf_get_sqe_idx(fcport); in qedf_post_io_req()
901 sqe = &fcport->sq[sqe_idx]; in qedf_post_io_req()
915 qedf_init_task(fcport, lport, io_req, task_ctx, sqe); in qedf_post_io_req()
918 qedf_ring_doorbell(fcport); in qedf_post_io_req()
924 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ); in qedf_post_io_req()
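
The qedf_post_io_req() fragments (lines 846-924) show the submission sequence reused by the ABTS, cleanup, and TMF paths further down: confirm the session is ready and not uploading, reserve an SQE slot with qedf_get_sqe_idx(), initialize the task into that slot, and ring the doorbell, all under rport_lock (taken by the caller at line 1036 here, and inside the function in the ABTS/cleanup/TMF cases at lines 1935, 2218, and 2350). A compact user-space model of that ordering; the stub bodies, the pthread mutex, and the return values are assumptions standing in for the driver's spinlock and firmware structures.

#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>

/* User-space model of the ordering: check session state, then
 * lock -> reserve SQE -> init task -> ring doorbell -> unlock. */
struct port_model {
    bool session_ready;          /* QEDF_RPORT_SESSION_READY */
    bool uploading;              /* QEDF_RPORT_UPLOADING_CONNECTION */
    pthread_mutex_t rport_lock;
    unsigned int sq_prod_idx;
};

static unsigned int get_sqe_idx(struct port_model *p)
{
    return p->sq_prod_idx++;     /* wrap handling omitted in this model */
}

static void init_task(unsigned int sqe) { printf("fill SQE %u\n", sqe); }
static void ring_doorbell(void)         { printf("ring doorbell\n"); }

static int post_io_req(struct port_model *p)
{
    /* Refuse submission unless the session is ready and not uploading. */
    if (!p->session_ready || p->uploading)
        return -1;

    pthread_mutex_lock(&p->rport_lock);
    init_task(get_sqe_idx(p));
    ring_doorbell();
    pthread_mutex_unlock(&p->rport_lock);
    return 0;
}

int main(void)
{
    struct port_model p = { .session_ready = true,
                            .rport_lock = PTHREAD_MUTEX_INITIALIZER };

    printf("post: %d\n", post_io_req(&p));
    p.uploading = true;
    printf("post while uploading: %d\n", post_io_req(&p));
    return 0;
}
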
936 struct qedf_rport *fcport; in qedf_queuecommand() local
997 fcport = (struct qedf_rport *)&rp[1]; in qedf_queuecommand()
999 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) || in qedf_queuecommand()
1000 test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { in qedf_queuecommand()
1009 atomic_inc(&fcport->ios_to_queue); in qedf_queuecommand()
1011 if (fcport->retry_delay_timestamp) { in qedf_queuecommand()
1013 spin_lock_irqsave(&fcport->rport_lock, flags); in qedf_queuecommand()
1014 if (time_after(jiffies, fcport->retry_delay_timestamp)) { in qedf_queuecommand()
1015 fcport->retry_delay_timestamp = 0; in qedf_queuecommand()
1017 spin_unlock_irqrestore(&fcport->rport_lock, flags); in qedf_queuecommand()
1020 atomic_dec(&fcport->ios_to_queue); in qedf_queuecommand()
1023 spin_unlock_irqrestore(&fcport->rport_lock, flags); in qedf_queuecommand()
1026 io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD); in qedf_queuecommand()
1029 atomic_dec(&fcport->ios_to_queue); in qedf_queuecommand()
1036 spin_lock_irqsave(&fcport->rport_lock, flags); in qedf_queuecommand()
1037 if (qedf_post_io_req(fcport, io_req)) { in qedf_queuecommand()
1040 atomic_inc(&fcport->free_sqes); in qedf_queuecommand()
1043 spin_unlock_irqrestore(&fcport->rport_lock, flags); in qedf_queuecommand()
1044 atomic_dec(&fcport->ios_to_queue); in qedf_queuecommand()
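
In qedf_queuecommand() (lines 1011-1023), a non-zero retry_delay_timestamp acts as a throttle: under rport_lock, the command proceeds only once jiffies has moved past the stored timestamp, at which point the timestamp is cleared; qedf_scsi_completion() assigns it at line 1284 (the value assigned is not visible in this listing). A user-space model of that gate, with a plain counter standing in for jiffies and the busy return value assumed:

#include <stdio.h>
#include <stdbool.h>

/* Stand-ins for the kernel's jiffies counter and time_after(). */
static unsigned long fake_jiffies;
static bool time_after(unsigned long a, unsigned long b)
{
    return (long)(a - b) > 0;
}

struct port_model {
    unsigned long retry_delay_timestamp; /* 0 means no delay in effect */
};

/* Mirrors the gate: true if the command may be issued now,
 * false if it should be reported busy and retried later. */
static bool may_issue(struct port_model *p)
{
    if (p->retry_delay_timestamp) {
        if (time_after(fake_jiffies, p->retry_delay_timestamp))
            p->retry_delay_timestamp = 0;   /* delay expired */
        else
            return false;                   /* still delayed */
    }
    return true;
}

int main(void)
{
    struct port_model p = { .retry_delay_timestamp = 5 };

    fake_jiffies = 3;
    printf("t=3: %s\n", may_issue(&p) ? "issue" : "busy");
    fake_jiffies = 6;
    printf("t=6: %s\n", may_issue(&p) ? "issue" : "busy");
    printf("t=6 again: %s\n", may_issue(&p) ? "issue" : "busy");
    return 0;
}
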
1054 struct qedf_ctx *qedf = io_req->fcport->qedf; in qedf_parse_fcp_rsp()
1122 struct qedf_rport *fcport; in qedf_scsi_completion() local
1169 fcport = io_req->fcport; in qedf_scsi_completion()
1175 if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) || in qedf_scsi_completion()
1176 (test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags) && in qedf_scsi_completion()
1177 sc_cmd->device->lun == (u64)fcport->lun_reset_lun)) { in qedf_scsi_completion()
1283 spin_lock_irqsave(&fcport->rport_lock, flags); in qedf_scsi_completion()
1284 fcport->retry_delay_timestamp = in qedf_scsi_completion()
1286 spin_unlock_irqrestore(&fcport->rport_lock, in qedf_scsi_completion()
1304 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP); in qedf_scsi_completion()
1407 qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP); in qedf_scsi_done()
1432 struct qedf_rport *fcport = io_req->fcport; in qedf_process_warning_compl() local
1443 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, " in qedf_process_warning_compl()
1445 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), in qedf_process_warning_compl()
1449 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, " in qedf_process_warning_compl()
1467 if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) { in qedf_process_warning_compl()
1508 if (io_req->fcport == NULL) { in qedf_process_error_detect()
1519 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, " in qedf_process_error_detect()
1521 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), in qedf_process_error_detect()
1525 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, " in qedf_process_error_detect()
1532 if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &io_req->fcport->flags) || in qedf_process_error_detect()
1533 (test_bit(QEDF_RPORT_IN_LUN_RESET, &io_req->fcport->flags) && in qedf_process_error_detect()
1534 io_req->sc_cmd->device->lun == (u64)io_req->fcport->lun_reset_lun)) { in qedf_process_error_detect()
1583 void qedf_flush_active_ios(struct qedf_rport *fcport, int lun) in qedf_flush_active_ios() argument
1594 if (!fcport) { in qedf_flush_active_ios()
1600 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { in qedf_flush_active_ios()
1605 qedf = fcport->qedf; in qedf_flush_active_ios()
1613 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) && in qedf_flush_active_ios()
1615 while (atomic_read(&fcport->ios_to_queue)) { in qedf_flush_active_ios()
1618 atomic_read(&fcport->ios_to_queue)); in qedf_flush_active_ios()
1622 atomic_read(&fcport->ios_to_queue)); in qedf_flush_active_ios()
1633 atomic_read(&fcport->num_active_ios), fcport, in qedf_flush_active_ios()
1634 fcport->rdata->ids.port_id, fcport->rport->scsi_target_id); in qedf_flush_active_ios()
1639 set_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags); in qedf_flush_active_ios()
1641 set_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags); in qedf_flush_active_ios()
1642 fcport->lun_reset_lun = lun; in qedf_flush_active_ios()
1650 if (!io_req->fcport) in qedf_flush_active_ios()
1668 if (io_req->fcport != fcport) in qedf_flush_active_ios()
1801 flush_cnt, atomic_read(&fcport->num_active_ios)); in qedf_flush_active_ios()
1803 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) && in qedf_flush_active_ios()
1805 while (atomic_read(&fcport->num_active_ios)) { in qedf_flush_active_ios()
1809 atomic_read(&fcport->num_active_ios), in qedf_flush_active_ios()
1815 atomic_read(&fcport->num_active_ios)); in qedf_flush_active_ios()
1818 if (io_req->fcport && in qedf_flush_active_ios()
1819 io_req->fcport == fcport) { in qedf_flush_active_ios()
1841 clear_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags); in qedf_flush_active_ios()
1842 clear_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags); in qedf_flush_active_ios()
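
qedf_flush_active_ios() brackets its work with per-rport state bits: it waits out ios_to_queue if the connection is uploading (lines 1613-1622), sets QEDF_RPORT_IN_TARGET_RESET for a full flush or QEDF_RPORT_IN_LUN_RESET plus lun_reset_lun for a single LUN (lines 1639-1642), walks outstanding requests while skipping those owned by other fcports (lines 1650, 1668), and clears both bits at the end (lines 1841-1842); the completion paths at lines 1175-1177 and 1980-1981 test the same bits. A small model of that flag discipline using plain C bit operations in place of set_bit()/test_bit()/clear_bit(); the bit values and the elided flush body are assumptions.

#include <stdio.h>
#include <stdbool.h>

/* Assumed bit positions; the driver defines its own flag constants. */
#define IN_TARGET_RESET (1u << 0)
#define IN_LUN_RESET    (1u << 1)

struct port_model {
    unsigned int flags;
    long long lun_reset_lun;    /* valid only while IN_LUN_RESET is set */
};

/* Mirrors the bracketing: set the right bit, flush, clear both bits. */
static void flush_active_ios(struct port_model *p, long long lun)
{
    if (lun == -1) {
        p->flags |= IN_TARGET_RESET;
    } else {
        p->flags |= IN_LUN_RESET;
        p->lun_reset_lun = lun;
    }

    /* ... walk and flush outstanding requests for this fcport ... */

    p->flags &= ~IN_LUN_RESET;
    p->flags &= ~IN_TARGET_RESET;
}

/* Mirrors the completion-path check: a command completing during a reset
 * of its target, or of its specific LUN, is handled specially. */
static bool completing_during_reset(const struct port_model *p, long long lun)
{
    return (p->flags & IN_TARGET_RESET) ||
           ((p->flags & IN_LUN_RESET) && lun == p->lun_reset_lun);
}

int main(void)
{
    struct port_model p = { 0 };

    flush_active_ios(&p, 3);
    printf("after flush: %s\n",
           completing_during_reset(&p, 3) ? "in reset" : "normal");
    return 0;
}
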
1854 struct qedf_rport *fcport = io_req->fcport; in qedf_initiate_abts() local
1865 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { in qedf_initiate_abts()
1871 qedf = fcport->qedf; in qedf_initiate_abts()
1872 rdata = fcport->rdata; in qedf_initiate_abts()
1895 if (!atomic_read(&fcport->free_sqes)) { in qedf_initiate_abts()
1901 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { in qedf_initiate_abts()
1935 spin_lock_irqsave(&fcport->rport_lock, flags); in qedf_initiate_abts()
1937 sqe_idx = qedf_get_sqe_idx(fcport); in qedf_initiate_abts()
1938 sqe = &fcport->sq[sqe_idx]; in qedf_initiate_abts()
1943 qedf_ring_doorbell(fcport); in qedf_initiate_abts()
1945 spin_unlock_irqrestore(&fcport->rport_lock, flags); in qedf_initiate_abts()
1958 struct qedf_rport *fcport = io_req->fcport; in qedf_process_abts_compl() local
1969 if (!fcport) { in qedf_process_abts_compl()
1980 if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) || in qedf_process_abts_compl()
1981 test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) { in qedf_process_abts_compl()
2046 struct qedf_ctx *qedf = io_req->fcport->qedf; in qedf_init_mp_req()
2148 struct qedf_rport *fcport; in qedf_initiate_cleanup() local
2157 fcport = io_req->fcport; in qedf_initiate_cleanup()
2158 if (!fcport) { in qedf_initiate_cleanup()
2164 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { in qedf_initiate_cleanup()
2169 qedf = fcport->qedf; in qedf_initiate_cleanup()
2190 if (!atomic_read(&fcport->free_sqes)) { in qedf_initiate_cleanup()
2210 refcount, fcport, fcport->rdata->ids.port_id); in qedf_initiate_cleanup()
2218 spin_lock_irqsave(&fcport->rport_lock, flags); in qedf_initiate_cleanup()
2220 sqe_idx = qedf_get_sqe_idx(fcport); in qedf_initiate_cleanup()
2221 sqe = &fcport->sq[sqe_idx]; in qedf_initiate_cleanup()
2226 qedf_ring_doorbell(fcport); in qedf_initiate_cleanup()
2228 spin_unlock_irqrestore(&fcport->rport_lock, flags); in qedf_initiate_cleanup()
2284 static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd, in qedf_execute_tmf() argument
2289 struct qedf_ctx *qedf = fcport->qedf; in qedf_execute_tmf()
2305 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { in qedf_execute_tmf()
2311 io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD); in qedf_execute_tmf()
2325 io_req->fcport = fcport; in qedf_execute_tmf()
2350 spin_lock_irqsave(&fcport->rport_lock, flags); in qedf_execute_tmf()
2352 sqe_idx = qedf_get_sqe_idx(fcport); in qedf_execute_tmf()
2353 sqe = &fcport->sq[sqe_idx]; in qedf_execute_tmf()
2356 qedf_init_task(fcport, lport, io_req, task, sqe); in qedf_execute_tmf()
2357 qedf_ring_doorbell(fcport); in qedf_execute_tmf()
2359 spin_unlock_irqrestore(&fcport->rport_lock, flags); in qedf_execute_tmf()
2382 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { in qedf_execute_tmf()
2392 qedf_flush_active_ios(fcport, lun); in qedf_execute_tmf()
2394 qedf_flush_active_ios(fcport, -1); in qedf_execute_tmf()
2411 struct qedf_rport *fcport = (struct qedf_rport *)&rp[1]; in qedf_initiate_tmf() local
2418 struct fc_rport_priv *rdata = fcport->rdata; in qedf_initiate_tmf()
2453 if (!fcport) { in qedf_initiate_tmf()
2459 qedf = fcport->qedf; in qedf_initiate_tmf()
2467 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { in qedf_initiate_tmf()
2485 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { in qedf_initiate_tmf()
2486 if (!fcport->rdata) in qedf_initiate_tmf()
2488 fcport); in qedf_initiate_tmf()
2492 fcport, fcport->rdata->ids.port_id); in qedf_initiate_tmf()
2497 rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags); in qedf_initiate_tmf()
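
Both qedf_queuecommand() (line 997) and qedf_initiate_tmf() (line 2411) recover the driver's qedf_rport with the expression (struct qedf_rport *)&rp[1], implying the qedf-private structure is laid out directly after whatever rp points to, presumably the libfc per-rport private data. A user-space illustration of that layout trick; the structure contents and the way rp is obtained are assumptions, only the &rp[1] arithmetic comes from the listing.

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the libfc per-rport private data and struct qedf_rport. */
struct libfc_priv_model {
    int libfc_state;
};

struct qedf_rport_model {
    unsigned int port_id;
};

int main(void)
{
    /* One allocation holding the libfc private part followed by the
     * driver private part, as the &rp[1] arithmetic implies. */
    void *blob = calloc(1, sizeof(struct libfc_priv_model) +
                           sizeof(struct qedf_rport_model));
    struct libfc_priv_model *rp = blob;

    /* Same expression as in the listing: the driver data starts right
     * after one libfc_priv-sized object. */
    struct qedf_rport_model *fcport = (struct qedf_rport_model *)&rp[1];
    fcport->port_id = 0xDEAD01;

    printf("fcport sits %zu bytes into the blob, port_id=%06x\n",
           (size_t)((char *)fcport - (char *)blob), fcport->port_id);

    free(blob);
    return 0;
}
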