nvmf/fc: Rename pending_queue to pending_data_buf_queue

This is an effort to further unify I/O buffer management among
transports. The RDMA and TCP transports have named their pending
queue pending_data_buf_queue, so the FC transport now follows suit.

The next patch will change pending_data_buf_queue to use STAILQ
instead of TAILQ.

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I57c3c678a1e92ec262eb8940418529a62b6768c3
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/466675
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Broadcom SPDK FC-NVMe CI <spdk-ci.pdl@broadcom.com>
This commit is contained in:
Shuhei Matsumoto 2019-08-29 15:42:26 +09:00 committed by Jim Harris
parent 2bc819dd52
commit 6c8b297262
3 changed files with 7 additions and 7 deletions

View File

@ -1137,7 +1137,7 @@ nvmf_fc_req_in_pending(struct spdk_nvmf_fc_request *fc_req)
{
struct spdk_nvmf_fc_request *tmp = NULL;
TAILQ_FOREACH(tmp, &fc_req->fc_conn->pending_queue, pending_link) {
TAILQ_FOREACH(tmp, &fc_req->fc_conn->pending_data_buf_queue, pending_link) {
if (tmp == fc_req) {
return true;
}
@ -1258,7 +1258,7 @@ spdk_nvmf_fc_request_abort(struct spdk_nvmf_fc_request *fc_req, bool send_abts,
SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "Abort req when getting buffers.\n");
} else if (nvmf_fc_req_in_pending(fc_req)) {
/* Remove from pending */
TAILQ_REMOVE(&fc_req->fc_conn->pending_queue, fc_req, pending_link);
TAILQ_REMOVE(&fc_req->fc_conn->pending_data_buf_queue, fc_req, pending_link);
goto complete;
} else {
/* Should never happen */
@ -1453,7 +1453,7 @@ nvmf_fc_hwqp_handle_request(struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_
nvmf_fc_record_req_trace_point(fc_req, SPDK_NVMF_FC_REQ_INIT);
if (nvmf_fc_request_execute(fc_req)) {
TAILQ_INSERT_TAIL(&fc_conn->pending_queue, fc_req, pending_link);
TAILQ_INSERT_TAIL(&fc_conn->pending_data_buf_queue, fc_req, pending_link);
}
return 0;
@ -1630,10 +1630,10 @@ spdk_nvmf_fc_hwqp_process_pending_reqs(struct spdk_nvmf_fc_hwqp *hwqp)
int budget = 64;
TAILQ_FOREACH(fc_conn, &hwqp->connection_list, link) {
TAILQ_FOREACH_SAFE(fc_req, &fc_conn->pending_queue, pending_link, tmp) {
TAILQ_FOREACH_SAFE(fc_req, &fc_conn->pending_data_buf_queue, pending_link, tmp) {
if (!nvmf_fc_request_execute(fc_req)) {
/* Successfully posted, Delete from pending. */
TAILQ_REMOVE(&fc_conn->pending_queue, fc_req, pending_link);
TAILQ_REMOVE(&fc_conn->pending_data_buf_queue, fc_req, pending_link);
}
if (budget) {

View File

@ -357,7 +357,7 @@ nvmf_fc_ls_new_connection(struct spdk_nvmf_fc_association *assoc, uint16_t qid,
*/
spdk_nvmf_fc_create_trid(&fc_conn->trid, tgtport->fc_nodename.u.wwn,
tgtport->fc_portname.u.wwn);
TAILQ_INIT(&fc_conn->pending_queue);
TAILQ_INIT(&fc_conn->pending_data_buf_queue);
return fc_conn;
}

View File

@ -227,7 +227,7 @@ struct spdk_nvmf_fc_conn {
uint16_t cur_fc_rw_depth;
/* requests that are waiting to obtain xchg/buffer */
TAILQ_HEAD(, spdk_nvmf_fc_request) pending_queue;
TAILQ_HEAD(, spdk_nvmf_fc_request) pending_data_buf_queue;
struct spdk_nvmf_fc_association *fc_assoc;