rdma: use LAST_WQE_REACHED event in the SRQ path

This event is generated by NICs utilizing the SRQ feature when the last
RECV work request for a given qpair has been processed, so it can be used
to tell when an SRQ-backed qpair has been fully drained. I have confirmed
this behavior in testing.

Change-Id: Ib6d6b6d02987f789b4d5dd3daf734e3351ee1974
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/448063
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Seth Howell 2019-03-14 10:02:45 -07:00 committed by Ben Walker
parent add76a3515
commit a9fc7e1db8
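
For background on why per-qpair RECV accounting breaks down here: with the
SRQ feature, every qpair's receive side is backed by one shared queue. A
minimal sketch in plain libibverbs (SPDK's real setup goes through its
rdma_cm wrappers; the helper names and depths below are illustrative):

#include <infiniband/verbs.h>
#include <stdint.h>

/* Illustrative: one SRQ whose RECV work requests are shared by all qpairs. */
static struct ibv_srq *
create_shared_recv_queue(struct ibv_pd *pd, uint32_t depth)
{
	struct ibv_srq_init_attr srq_attr = {};

	srq_attr.attr.max_wr = depth;	/* pooled RECV WRs */
	srq_attr.attr.max_sge = 1;

	return ibv_create_srq(pd, &srq_attr);
}

/* Illustrative: a qpair attached to that SRQ instead of owning its RECVs. */
static struct ibv_qp *
create_qpair_on_srq(struct ibv_pd *pd, struct ibv_cq *cq, struct ibv_srq *srq)
{
	struct ibv_qp_init_attr qp_attr = {};

	qp_attr.qp_type = IBV_QPT_RC;
	qp_attr.send_cq = cq;
	qp_attr.recv_cq = cq;
	qp_attr.srq = srq;	/* cap.max_recv_wr is ignored when this is set */
	qp_attr.cap.max_send_wr = 128;
	qp_attr.cap.max_send_sge = 1;

	return ibv_create_qp(pd, &qp_attr);
}

Because RECVs complete out of the shared pool, a draining qpair's
current_recv_depth never has to climb back to max_queue_depth;
IBV_EVENT_QP_LAST_WQE_REACHED is the NIC's signal that the last RECV for
that qpair has been consumed.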

@@ -368,6 +368,8 @@ struct spdk_nvmf_rdma_qpair {
 	 * that we only initialize one of these paths.
 	 */
 	bool disconnect_started;
+	/* Lets us know that we have received the last_wqe event. */
+	bool last_wqe_reached;
 };
 
 struct spdk_nvmf_rdma_poller {
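
A note on the new flag: it can stay a plain bool because, as the later
hunks show, the event thread sets it immediately before handing the qpair
to its poll group thread with spdk_thread_send_msg(), and it is only read
from that thread. A sketch of that pattern (struct my_qpair and the helpers
are hypothetical stand-ins, assuming SPDK's spdk_thread_send_msg()):

#include "spdk/thread.h"
#include <stdbool.h>

struct my_qpair {
	struct spdk_thread *poll_thread;
	bool last_wqe_reached;
};

/* Runs on q->poll_thread. The write below happened before the message
 * was enqueued, so it is visible here without atomics. */
static void
destroy_if_drained(void *ctx)
{
	struct my_qpair *q = ctx;

	if (q->last_wqe_reached) {
		/* ... tear down the qpair ... */
	}
}

/* Runs on the IB async event thread. */
static void
on_last_wqe_event(struct my_qpair *q)
{
	q->last_wqe_reached = true;
	spdk_thread_send_msg(q->poll_thread, destroy_if_drained, q);
}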
@@ -2332,11 +2334,21 @@ static void nvmf_rdma_destroy_drained_qpair(void *ctx)
 	struct spdk_nvmf_rdma_transport *rtransport = SPDK_CONTAINEROF(rqpair->qpair.transport,
 			struct spdk_nvmf_rdma_transport, transport);
 
-	if (rqpair->current_send_depth == 0 && rqpair->current_recv_depth == rqpair->max_queue_depth) {
-		/* The qpair has been drained. Free the resources. */
-		spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair, true);
-		spdk_nvmf_rdma_qpair_destroy(rqpair);
-	}
+	/* In non SRQ path, we will reach rqpair->max_queue_depth. In SRQ path, we will get the last_wqe event. */
+	if (rqpair->current_send_depth != 0) {
+		return;
+	}
+
+	if (rqpair->srq == NULL && rqpair->current_recv_depth != rqpair->max_queue_depth) {
+		return;
+	}
+
+	if (rqpair->srq != NULL && rqpair->last_wqe_reached == false) {
+		return;
+	}
+
+	spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair, true);
+	spdk_nvmf_rdma_qpair_destroy(rqpair);
 }
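
Read together, the three early returns form a drain predicate: sends must
be finished on either path, and the receive side is proven empty by
recv-depth accounting without an SRQ or by the last_wqe event with one. A
hypothetical distillation (struct drain_state stands in for the relevant
fields of struct spdk_nvmf_rdma_qpair; qpair_is_drained is not a real SPDK
function):

#include <stdbool.h>
#include <stdint.h>

/* Stand-in for the fields of struct spdk_nvmf_rdma_qpair used above. */
struct drain_state {
	void *srq;			/* non-NULL when the SRQ path is used */
	uint32_t current_send_depth;
	uint32_t current_recv_depth;
	uint32_t max_queue_depth;
	bool last_wqe_reached;
};

static bool
qpair_is_drained(const struct drain_state *q)
{
	if (q->current_send_depth != 0) {
		return false;	/* sends still in flight */
	}
	if (q->srq == NULL) {
		/* Non-SRQ path: this qpair owns all of its RECVs, so the
		 * recv depth climbs back to max_queue_depth as they flush. */
		return q->current_recv_depth == q->max_queue_depth;
	}
	/* SRQ path: RECVs are pooled across qpairs, so per-qpair recv
	 * accounting cannot prove drain; trust LAST_WQE_REACHED instead. */
	return q->last_wqe_reached;
}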
@@ -2473,7 +2485,7 @@ static void
 spdk_nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
 {
 	int rc;
-	struct spdk_nvmf_rdma_qpair *rqpair;
+	struct spdk_nvmf_rdma_qpair *rqpair = NULL;
 	struct ibv_async_event event;
 	enum ibv_qp_state state;
 
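spdk_nvmf_process_ib_event() is driven by the libibverbs async event
queue. A minimal standalone sketch of that pattern (on_last_wqe_reached is
a hypothetical stand-in for the spdk_thread_send_msg() hand-off added in
the next hunk):

#include <infiniband/verbs.h>
#include <stdio.h>

/* Hypothetical handler; the hunk below forwards to the qpair's poll
 * group thread instead. */
static void
on_last_wqe_reached(void *qp_context)
{
	printf("qpair %p drained on the SRQ path\n", qp_context);
}

static void
process_one_ib_event(struct ibv_context *verbs)
{
	struct ibv_async_event event;

	/* Blocks by default; SPDK puts the async fd in non-blocking mode. */
	if (ibv_get_async_event(verbs, &event)) {
		return;
	}

	if (event.event_type == IBV_EVENT_QP_LAST_WQE_REACHED) {
		/* qp_context was stored at QP creation and maps the event
		 * back to the owning qpair object. */
		on_last_wqe_reached(event.element.qp->qp_context);
	}

	/* Every async event must be acknowledged, or destroying the QP
	 * can block waiting for it. */
	ibv_ack_async_event(&event);
}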
@@ -2497,7 +2509,17 @@ spdk_nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
 		spdk_nvmf_rdma_start_disconnect(rqpair);
 		break;
 	case IBV_EVENT_QP_LAST_WQE_REACHED:
-		/* This event only occurs for shared receive queues, which are not currently supported. */
+		/* This event only occurs for shared receive queues. */
+		rqpair = event.element.qp->qp_context;
+		rqpair->last_wqe_reached = true;
+
+		/* This must be handled on the polling thread if it exists. Otherwise the timeout will catch it. */
+		if (rqpair->qpair.group) {
+			spdk_thread_send_msg(rqpair->qpair.group->thread, nvmf_rdma_destroy_drained_qpair, rqpair);
+		} else {
+			SPDK_ERRLOG("Unable to destroy the qpair %p since it does not have a poll group.\n", rqpair);
+		}
+
 		break;
 	case IBV_EVENT_SQ_DRAINED:
 		/* This event occurs frequently in both error and non-error states.