nvme: remove redundant transport_qp_is_failed checks
The qpair flag transport_qp_is_failed is effectively equivalent to checking for NVME_QPAIR_CONNECTED in the qpair state machine. There are a couple of places where we check transport_qp_is_failed and then immediately check whether we are in the connected state. Whether we are failed, or simply not in the connected state, we return the same value to the calling function. Since the checks for transport_qp_is_failed are not necessary, they can be removed. As a result, there is no need to keep track of it and it can be removed from the qpair structure. Change-Id: I4aef5d20eb267bfd6118e5d1d088df05574d9ffd Signed-off-by: Seth Howell <seth.howell@intel.com> Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/475802 Tested-by: SPDK CI Jenkins <sys_sgci@intel.com> Reviewed-by: Jim Harris <james.r.harris@intel.com> Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
This commit is contained in:
parent
3369105f31
commit
3911922005
@ -408,7 +408,7 @@ spdk_nvme_ctrlr_reconnect_io_qpair(struct spdk_nvme_qpair *qpair)
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!qpair->transport_qp_is_failed) {
|
||||
if (!nvme_qpair_state_equals(qpair, NVME_QPAIR_DISABLED)) {
|
||||
rc = 0;
|
||||
goto out;
|
||||
}
|
||||
@ -419,12 +419,10 @@ spdk_nvme_ctrlr_reconnect_io_qpair(struct spdk_nvme_qpair *qpair)
|
||||
rc = nvme_transport_ctrlr_connect_qpair(ctrlr, qpair);
|
||||
if (rc) {
|
||||
nvme_qpair_set_state(qpair, NVME_QPAIR_DISABLED);
|
||||
qpair->transport_qp_is_failed = true;
|
||||
rc = -EAGAIN;
|
||||
goto out;
|
||||
}
|
||||
nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTED);
|
||||
qpair->transport_qp_is_failed = false;
|
||||
|
||||
out:
|
||||
nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
|
||||
@ -1079,7 +1077,6 @@ spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
|
||||
/* Disable all queues before disabling the controller hardware. */
|
||||
TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
|
||||
nvme_qpair_set_state(qpair, NVME_QPAIR_DISABLED);
|
||||
qpair->transport_qp_is_failed = true;
|
||||
}
|
||||
nvme_qpair_set_state(ctrlr->adminq, NVME_QPAIR_DISABLED);
|
||||
nvme_qpair_complete_error_reqs(ctrlr->adminq);
|
||||
@ -1124,7 +1121,6 @@ spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
|
||||
continue;
|
||||
}
|
||||
nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTED);
|
||||
qpair->transport_qp_is_failed = false;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -356,8 +356,6 @@ struct spdk_nvme_qpair {
|
||||
uint8_t in_completion_context : 1;
|
||||
uint8_t delete_after_completion_context: 1;
|
||||
|
||||
uint8_t transport_qp_is_failed: 1;
|
||||
|
||||
/*
|
||||
* Set when no deletion notification is needed. For example, the process
|
||||
* which allocated this qpair exited unexpectedly.
|
||||
|
@ -448,10 +448,6 @@ spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
if (spdk_unlikely(qpair->transport_qp_is_failed == true)) {
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
if (spdk_unlikely(!nvme_qpair_check_enabled(qpair) &&
|
||||
!nvme_qpair_state_equals(qpair, NVME_QPAIR_CONNECTING))) {
|
||||
/*
|
||||
@ -708,10 +704,6 @@ nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *re
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (spdk_unlikely(qpair->transport_qp_is_failed == true)) {
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
rc = _nvme_qpair_submit_request(qpair, req);
|
||||
if (rc == -EAGAIN) {
|
||||
STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq);
|
||||
|
@ -288,13 +288,13 @@ nvme_rdma_qpair_process_cm_event(struct nvme_rdma_qpair *rqpair)
|
||||
break;
|
||||
case RDMA_CM_EVENT_DISCONNECTED:
|
||||
case RDMA_CM_EVENT_DEVICE_REMOVAL:
|
||||
rqpair->qpair.transport_qp_is_failed = true;
|
||||
nvme_qpair_set_state(&rqpair->qpair, NVME_QPAIR_DISABLED);
|
||||
break;
|
||||
case RDMA_CM_EVENT_MULTICAST_JOIN:
|
||||
case RDMA_CM_EVENT_MULTICAST_ERROR:
|
||||
break;
|
||||
case RDMA_CM_EVENT_ADDR_CHANGE:
|
||||
rqpair->qpair.transport_qp_is_failed = true;
|
||||
nvme_qpair_set_state(&rqpair->qpair, NVME_QPAIR_DISABLED);
|
||||
break;
|
||||
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
|
||||
break;
|
||||
@ -1058,10 +1058,9 @@ nvme_rdma_qpair_connect(struct nvme_rdma_qpair *rqpair)
|
||||
return -1;
|
||||
}
|
||||
|
||||
rqpair->qpair.transport_qp_is_failed = false;
|
||||
rc = nvme_fabric_qpair_connect(&rqpair->qpair, rqpair->num_entries);
|
||||
if (rc < 0) {
|
||||
rqpair->qpair.transport_qp_is_failed = true;
|
||||
nvme_qpair_set_state(&rqpair->qpair, NVME_QPAIR_DISABLED);
|
||||
SPDK_ERRLOG("Failed to send an NVMe-oF Fabric CONNECT command\n");
|
||||
return -1;
|
||||
}
|
||||
@ -1514,7 +1513,7 @@ nvme_rdma_qpair_disconnect(struct spdk_nvme_qpair *qpair)
|
||||
{
|
||||
struct nvme_rdma_qpair *rqpair = nvme_rdma_qpair(qpair);
|
||||
|
||||
qpair->transport_qp_is_failed = true;
|
||||
nvme_qpair_set_state(qpair, NVME_QPAIR_DISABLED);
|
||||
nvme_rdma_unregister_mem(rqpair);
|
||||
nvme_rdma_unregister_reqs(rqpair);
|
||||
nvme_rdma_unregister_rsps(rqpair);
|
||||
@ -1895,7 +1894,7 @@ nvme_rdma_qpair_process_completions(struct spdk_nvme_qpair *qpair,
|
||||
}
|
||||
nvme_rdma_qpair_process_cm_event(rqpair);
|
||||
|
||||
if (spdk_unlikely(qpair->transport_qp_is_failed)) {
|
||||
if (spdk_unlikely(nvme_qpair_state_equals(qpair, NVME_QPAIR_DISABLED))) {
|
||||
goto fail;
|
||||
}
|
||||
|
||||
|
@ -235,7 +235,7 @@ nvme_tcp_qpair_disconnect(struct spdk_nvme_qpair *qpair)
|
||||
struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
|
||||
struct nvme_tcp_pdu *pdu;
|
||||
|
||||
qpair->transport_qp_is_failed = true;
|
||||
nvme_qpair_set_state(qpair, NVME_QPAIR_DISABLED);
|
||||
spdk_sock_close(&tqpair->sock);
|
||||
|
||||
/* clear the send_queue */
|
||||
@ -1624,10 +1624,9 @@ nvme_tcp_qpair_connect(struct nvme_tcp_qpair *tqpair)
|
||||
return -1;
|
||||
}
|
||||
|
||||
tqpair->qpair.transport_qp_is_failed = false;
|
||||
rc = nvme_fabric_qpair_connect(&tqpair->qpair, tqpair->num_entries);
|
||||
if (rc < 0) {
|
||||
tqpair->qpair.transport_qp_is_failed = true;
|
||||
nvme_qpair_set_state(&tqpair->qpair, NVME_QPAIR_DISABLED);
|
||||
SPDK_ERRLOG("Failed to send an NVMe-oF Fabric CONNECT command\n");
|
||||
return -1;
|
||||
}
|
||||
|
@ -1468,14 +1468,14 @@ test_spdk_nvme_ctrlr_reconnect_io_qpair(void)
|
||||
|
||||
/* qpair not failed. Make sure we don't call down to the transport */
|
||||
ctrlr.is_failed = 0;
|
||||
qpair.transport_qp_is_failed = false;
|
||||
qpair.state = NVME_QPAIR_CONNECTED;
|
||||
g_connect_qpair_called = false;
|
||||
rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
|
||||
CU_ASSERT(g_connect_qpair_called == false);
|
||||
CU_ASSERT(rc == 0)
|
||||
|
||||
/* transport qpair is failed. make sure we call down to the transport */
|
||||
qpair.transport_qp_is_failed = true;
|
||||
qpair.state = NVME_QPAIR_DISABLED;
|
||||
rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
|
||||
CU_ASSERT(g_connect_qpair_called == true);
|
||||
CU_ASSERT(rc == 0)
|
||||
|
@ -218,7 +218,7 @@ static void test_nvme_qpair_process_completions(void)
|
||||
/* Same if the qpair is failed at the transport layer. */
|
||||
ctrlr.is_failed = false;
|
||||
ctrlr.is_removed = false;
|
||||
qpair.transport_qp_is_failed = true;
|
||||
qpair.state = NVME_QPAIR_DISABLED;
|
||||
rc = spdk_nvme_qpair_process_completions(&qpair, 0);
|
||||
CU_ASSERT(rc == -ENXIO);
|
||||
CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
|
||||
@ -228,7 +228,7 @@ static void test_nvme_qpair_process_completions(void)
|
||||
/* If the controller is removed, make sure we abort the requests. */
|
||||
ctrlr.is_failed = true;
|
||||
ctrlr.is_removed = true;
|
||||
qpair.transport_qp_is_failed = false;
|
||||
qpair.state = NVME_QPAIR_CONNECTED;
|
||||
rc = spdk_nvme_qpair_process_completions(&qpair, 0);
|
||||
CU_ASSERT(rc == -ENXIO);
|
||||
CU_ASSERT(STAILQ_EMPTY(&qpair.queued_req));
|
||||
|
Loading…
x
Reference in New Issue
Block a user