nvmf/fc: Move pending_data_buf_queue from fc_conn to fc_poll_group

The I/O buffer cache is now per transport_poll_group. Hence it is
reasonable to move pending_data_buf_queue from struct spdk_nvmf_fc_conn
to struct spdk_nvmf_fc_poll_group, and this patch does so.

This change is based on RDMA and TCP transport.

Further unification among transports will be done in subsequent
patches.

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: Ic857046be8da238cb3ff9e89b83cdac5f6349bcf
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/466844
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Paul Luse <paul.e.luse@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Broadcom SPDK FC-NVMe CI <spdk-ci.pdl@broadcom.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
This commit is contained in:
Shuhei Matsumoto 2019-09-02 14:57:10 +09:00 committed by Jim Harris
parent 2ed1b6c253
commit cb5c661274
3 changed files with 15 additions and 19 deletions

View File

@ -1137,7 +1137,7 @@ nvmf_fc_req_in_pending(struct spdk_nvmf_fc_request *fc_req)
{
struct spdk_nvmf_fc_request *tmp = NULL;
STAILQ_FOREACH(tmp, &fc_req->fc_conn->pending_data_buf_queue, pending_link) {
STAILQ_FOREACH(tmp, &fc_req->hwqp->fgroup->pending_data_buf_queue, pending_link) {
if (tmp == fc_req) {
return true;
}
@ -1258,7 +1258,7 @@ spdk_nvmf_fc_request_abort(struct spdk_nvmf_fc_request *fc_req, bool send_abts,
SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "Abort req when getting buffers.\n");
} else if (nvmf_fc_req_in_pending(fc_req)) {
/* Remove from pending */
STAILQ_REMOVE(&fc_req->fc_conn->pending_data_buf_queue, fc_req,
STAILQ_REMOVE(&fc_req->hwqp->fgroup->pending_data_buf_queue, fc_req,
spdk_nvmf_fc_request, pending_link);
goto complete;
} else {
@ -1454,7 +1454,7 @@ nvmf_fc_hwqp_handle_request(struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_
nvmf_fc_record_req_trace_point(fc_req, SPDK_NVMF_FC_REQ_INIT);
if (nvmf_fc_request_execute(fc_req)) {
STAILQ_INSERT_TAIL(&fc_conn->pending_data_buf_queue, fc_req, pending_link);
STAILQ_INSERT_TAIL(&hwqp->fgroup->pending_data_buf_queue, fc_req, pending_link);
}
return 0;
@ -1626,22 +1626,19 @@ spdk_nvmf_fc_hwqp_process_frame(struct spdk_nvmf_fc_hwqp *hwqp,
void
spdk_nvmf_fc_hwqp_process_pending_reqs(struct spdk_nvmf_fc_hwqp *hwqp)
{
struct spdk_nvmf_fc_conn *fc_conn = NULL;
struct spdk_nvmf_fc_request *fc_req = NULL, *tmp;
int budget = 64;
TAILQ_FOREACH(fc_conn, &hwqp->connection_list, link) {
STAILQ_FOREACH_SAFE(fc_req, &fc_conn->pending_data_buf_queue, pending_link, tmp) {
if (!nvmf_fc_request_execute(fc_req)) {
/* Succesfuly posted, Delete from pending. */
STAILQ_REMOVE_HEAD(&fc_conn->pending_data_buf_queue, pending_link);
}
STAILQ_FOREACH_SAFE(fc_req, &hwqp->fgroup->pending_data_buf_queue, pending_link, tmp) {
if (!nvmf_fc_request_execute(fc_req)) {
/* Succesfuly posted, Delete from pending. */
STAILQ_REMOVE_HEAD(&hwqp->fgroup->pending_data_buf_queue, pending_link);
}
if (budget) {
budget--;
} else {
return;
}
if (budget) {
budget--;
} else {
return;
}
}
}
@ -2002,6 +1999,7 @@ nvmf_fc_poll_group_create(struct spdk_nvmf_transport *transport)
}
TAILQ_INIT(&fgroup->hwqp_list);
STAILQ_INIT(&fgroup->pending_data_buf_queue);
pthread_mutex_lock(&ftransport->lock);
TAILQ_INSERT_TAIL(&g_nvmf_fgroups, fgroup, link);

View File

@ -357,7 +357,6 @@ nvmf_fc_ls_new_connection(struct spdk_nvmf_fc_association *assoc, uint16_t qid,
*/
spdk_nvmf_fc_create_trid(&fc_conn->trid, tgtport->fc_nodename.u.wwn,
tgtport->fc_portname.u.wwn);
STAILQ_INIT(&fc_conn->pending_data_buf_queue);
return fc_conn;
}

View File

@ -226,9 +226,6 @@ struct spdk_nvmf_fc_conn {
/* number of read/write requests that are outstanding */
uint16_t cur_fc_rw_depth;
/* requests that are waiting to obtain xchg/buffer */
STAILQ_HEAD(, spdk_nvmf_fc_request) pending_data_buf_queue;
struct spdk_nvmf_fc_association *fc_assoc;
uint16_t rpi;
@ -267,6 +264,8 @@ struct spdk_nvmf_fc_poll_group {
struct spdk_nvmf_tgt *nvmf_tgt;
uint32_t hwqp_count; /* number of hwqp's assigned to this pg */
TAILQ_HEAD(, spdk_nvmf_fc_hwqp) hwqp_list;
/* requests that are waiting to obtain xchg/buffer */
STAILQ_HEAD(, spdk_nvmf_fc_request) pending_data_buf_queue;
TAILQ_ENTRY(spdk_nvmf_fc_poll_group) link;
};