rdma: fix rw_depth to read_depth
rw_depth was a misinterpretation of the spec. It is based on the value of max_qp_rd_atom, which only governs the number of outstanding RDMA READ and atomic operations. However, we were using rw_depth to block both READ and WRITE operations, which is an unnecessary restriction. WRITE operations should only be governed by the number of Work Requests posted to the send queue. We already guarantee that we never overshoot the queue depth for Work Requests, since they are embedded in the requests and limited to a size of max_queue_depth.

Change-Id: Ib945ade4ef9a63420afce5af7e4852932345a460
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.gerrithub.io/c/441165
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
parent 5301be93cd
commit 7289d370f7
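The negotiation described in the commit message can be summarized with the short standalone sketch below. It only restates the min() chain visible in the nvmf_rdma_connect hunks that follow; the names negotiate_depths and depth_limits are illustrative and are not part of SPDK.

#include <stdint.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

struct depth_limits {
	uint16_t max_queue_depth; /* caps all outstanding Work Requests (reads and writes) */
	uint16_t max_read_depth;  /* caps only outstanding RDMA READ/atomic operations */
};

/* Hypothetical helper: combine the target's configured limit, the local device
 * attributes (max_qp_wr, max_qp_rd_atom) and the host's advertised initiator_depth. */
static struct depth_limits
negotiate_depths(uint16_t target_max_queue_depth, uint32_t dev_max_qp_wr,
		 uint32_t dev_max_qp_rd_atom, uint8_t host_initiator_depth)
{
	struct depth_limits d;

	d.max_queue_depth = MIN(target_max_queue_depth, dev_max_qp_wr);
	d.max_read_depth = MIN(target_max_queue_depth, dev_max_qp_rd_atom);
	if (host_initiator_depth > 0) {
		d.max_read_depth = MIN(d.max_read_depth, host_initiator_depth);
	}
	return d;
}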
@@ -261,8 +261,8 @@ struct spdk_nvmf_rdma_qpair {
 	/* The maximum number of I/O outstanding on this connection at one time */
 	uint16_t			max_queue_depth;
 
-	/* The maximum number of active RDMA READ and WRITE operations at one time */
-	uint16_t			max_rw_depth;
+	/* The maximum number of active RDMA READ and ATOMIC operations at one time */
+	uint16_t			max_read_depth;
 
 	/* The maximum number of SGEs per WR on the send queue */
 	uint32_t			max_send_sge;
@@ -593,10 +593,9 @@ spdk_nvmf_rdma_mgmt_channel_destroy(void *io_device, void *ctx_buf)
 }
 
 static int
-spdk_nvmf_rdma_cur_rw_depth(struct spdk_nvmf_rdma_qpair *rqpair)
+spdk_nvmf_rdma_cur_read_depth(struct spdk_nvmf_rdma_qpair *rqpair)
 {
-	return rqpair->state_cntr[RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER] +
-	       rqpair->state_cntr[RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST];
+	return rqpair->state_cntr[RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER];
 }
 
 static int
@@ -992,7 +991,7 @@ spdk_nvmf_rdma_event_accept(struct rdma_cm_id *id, struct spdk_nvmf_rdma_qpair *
 	ctrlr_event_data.private_data_len = sizeof(accept_data);
 	if (id->ps == RDMA_PS_TCP) {
 		ctrlr_event_data.responder_resources = 0; /* We accept 0 reads from the host */
-		ctrlr_event_data.initiator_depth = rqpair->max_rw_depth;
+		ctrlr_event_data.initiator_depth = rqpair->max_read_depth;
 	}
 
 	rc = rdma_accept(id, &ctrlr_event_data);
@@ -1026,7 +1025,7 @@ nvmf_rdma_connect(struct spdk_nvmf_transport *transport, struct rdma_cm_event *e
 	struct rdma_conn_param *rdma_param = NULL;
 	const struct spdk_nvmf_rdma_request_private_data *private_data = NULL;
 	uint16_t max_queue_depth;
-	uint16_t max_rw_depth;
+	uint16_t max_read_depth;
 
 	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
 
@@ -1063,7 +1062,7 @@ nvmf_rdma_connect(struct spdk_nvmf_transport *transport, struct rdma_cm_event *e
 
 	/* Start with the maximum queue depth allowed by the target */
 	max_queue_depth = rtransport->transport.opts.max_queue_depth;
-	max_rw_depth = rtransport->transport.opts.max_queue_depth;
+	max_read_depth = rtransport->transport.opts.max_queue_depth;
 	SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Target Max Queue Depth: %d\n",
 		      rtransport->transport.opts.max_queue_depth);
 
@@ -1072,14 +1071,14 @@ nvmf_rdma_connect(struct spdk_nvmf_transport *transport, struct rdma_cm_event *e
 		      "Local NIC Max Send/Recv Queue Depth: %d Max Read/Write Queue Depth: %d\n",
 		      port->device->attr.max_qp_wr, port->device->attr.max_qp_rd_atom);
 	max_queue_depth = spdk_min(max_queue_depth, port->device->attr.max_qp_wr);
-	max_rw_depth = spdk_min(max_rw_depth, port->device->attr.max_qp_rd_atom);
+	max_read_depth = spdk_min(max_read_depth, port->device->attr.max_qp_rd_atom);
 
 	/* Next check the remote NIC's hardware limitations */
 	SPDK_DEBUGLOG(SPDK_LOG_RDMA,
 		      "Host (Initiator) NIC Max Incoming RDMA R/W operations: %d Max Outgoing RDMA R/W operations: %d\n",
 		      rdma_param->initiator_depth, rdma_param->responder_resources);
 	if (rdma_param->initiator_depth > 0) {
-		max_rw_depth = spdk_min(max_rw_depth, rdma_param->initiator_depth);
+		max_read_depth = spdk_min(max_read_depth, rdma_param->initiator_depth);
 	}
 
 	/* Finally check for the host software requested values, which are
@@ -1093,7 +1092,7 @@ nvmf_rdma_connect(struct spdk_nvmf_transport *transport, struct rdma_cm_event *e
 	}
 
 	SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Final Negotiated Queue Depth: %d R/W Depth: %d\n",
-		      max_queue_depth, max_rw_depth);
+		      max_queue_depth, max_read_depth);
 
 	rqpair = calloc(1, sizeof(struct spdk_nvmf_rdma_qpair));
 	if (rqpair == NULL) {
@@ -1104,7 +1103,7 @@ nvmf_rdma_connect(struct spdk_nvmf_transport *transport, struct rdma_cm_event *e
 
 	rqpair->port = port;
 	rqpair->max_queue_depth = max_queue_depth;
-	rqpair->max_rw_depth = max_rw_depth;
+	rqpair->max_read_depth = max_read_depth;
 	rqpair->cm_id = event->id;
 	rqpair->listen_id = event->listen_id;
 	rqpair->qpair.transport = transport;
@@ -1439,7 +1438,6 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 	enum spdk_nvmf_rdma_request_state prev_state;
 	bool progress = false;
 	int data_posted;
-	int cur_rdma_rw_depth;
 
 	rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
 	device = rqpair->port->device;
@@ -1539,14 +1537,13 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 				/* This request needs to wait in line to perform RDMA */
 				break;
 			}
-			cur_rdma_rw_depth = spdk_nvmf_rdma_cur_rw_depth(rqpair);
-
-			if (cur_rdma_rw_depth >= rqpair->max_rw_depth) {
-				/* R/W queue is full, need to wait */
-				break;
-			}
 
 			if (rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
+
+				if (spdk_nvmf_rdma_cur_read_depth(rqpair) >= rqpair->max_read_depth) {
+					/* Read operation queue is full, need to wait */
+					break;
+				}
 				rc = request_transfer_in(&rdma_req->req);
 				if (!rc) {
 					spdk_nvmf_rdma_request_set_state(rdma_req,
@@ -2099,14 +2096,11 @@ spdk_nvmf_rdma_stop_listen(struct spdk_nvmf_transport *transport,
 static bool
 spdk_nvmf_rdma_qpair_is_idle(struct spdk_nvmf_qpair *qpair)
 {
-	int cur_queue_depth, cur_rdma_rw_depth;
 	struct spdk_nvmf_rdma_qpair *rqpair;
 
 	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
-	cur_queue_depth = spdk_nvmf_rdma_cur_queue_depth(rqpair);
-	cur_rdma_rw_depth = spdk_nvmf_rdma_cur_rw_depth(rqpair);
 
-	if (cur_queue_depth == 0 && cur_rdma_rw_depth == 0) {
+	if (spdk_nvmf_rdma_cur_queue_depth(rqpair) == 0) {
 		return true;
 	}
 	return false;
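For quick reference, the new gating rule introduced by the request-processing hunk reduces to the check below. This is a simplified restatement, not SPDK source: only RDMA READ transfers (host-to-controller data) are throttled by max_read_depth, while WRITE transfers are bounded solely by the send-queue Work Requests that max_queue_depth already caps.

#include <stdbool.h>

/* Simplified sketch of the post-change policy: block a pending data transfer
 * only when it is an RDMA READ (host-to-controller) and the read queue is full. */
static bool
can_start_transfer(bool host_to_controller, int cur_read_depth, int max_read_depth)
{
	if (host_to_controller && cur_read_depth >= max_read_depth) {
		/* Read operation queue is full, need to wait */
		return false;
	}
	return true;
}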