nvmf/fc: Use common buffer pool for FC transport

The NVMe-oF FC transport has used its own buffer pool and has not
used the common buffer pool yet.

There appears to be no particular reason to prevent the FC transport
from using the common buffer pool.

This patch removes the FC transport specific buffer pool and changes
the FC transport to use the common buffer pool instead. The transport
is also added to nvmf_fc_request_free_buffers(), because the similar
APIs of the RDMA and TCP transports do that.
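
In practice the change boils down to which mempool the request data
buffers come from. A minimal sketch of the pattern after this patch,
assuming only the public spdk_mempool API from spdk/env.h (the helper
names are illustrative, not part of the patch):

#include "spdk/env.h"

/* Illustrative helpers: after this patch, both sides of the buffer
 * life cycle use the common pool (transport->data_buf_pool) instead of
 * the FC-private fc_transport->data_buff_pool. */
static void *
example_get_buf(struct spdk_mempool *common_pool)
{
	return spdk_mempool_get(common_pool);	/* NULL when the pool is empty */
}

static void
example_put_buf(struct spdk_mempool *common_pool, void *buf)
{
	spdk_mempool_put(common_pool, buf);
}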

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: Iae3a117466c21eaddbe78a8e8023d80ef37bb3e9
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/465865
Reviewed-by: Broadcom SPDK FC-NVMe CI <spdk-ci.pdl@broadcom.com>
Reviewed-by: Anil Veerabhadrappa <anil.veerabhadrappa@broadcom.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>

@@ -238,7 +238,6 @@ struct spdk_nvmf_fc_adm_port_link_break_cb_data {
 struct spdk_nvmf_fc_transport {
 	struct spdk_nvmf_transport transport;
-	struct spdk_mempool *data_buff_pool;
 	pthread_mutex_t lock;
 };
@@ -419,11 +418,12 @@ nvmf_fc_req_in_get_buff(struct spdk_nvmf_fc_request *fc_req)
 static void
 nvmf_fc_request_free_buffers(struct spdk_nvmf_fc_request *fc_req)
 {
+	struct spdk_nvmf_fc_transport *fc_transport = fc_req->hwqp->fc_poll_group->fc_transport;
+	struct spdk_nvmf_transport *transport = &fc_transport->transport;
 	uint32_t i;
 
 	for (i = 0; i < fc_req->req.iovcnt; i++) {
-		spdk_mempool_put(fc_req->hwqp->fc_poll_group->fc_transport->data_buff_pool,
-				 fc_req->buffers[i]);
+		spdk_mempool_put(transport->data_buf_pool, fc_req->buffers[i]);
 		fc_req->req.iov[i].iov_base = NULL;
 		fc_req->buffers[i] = NULL;
 	}
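
The release loop above pairs one spdk_mempool_put() with every buffer
taken at allocation time and clears the stale pointers afterwards. A
standalone sketch of the same teardown shape, using simplified stand-in
types rather than the SPDK request structs:

#include "spdk/env.h"

#define EXAMPLE_MAX_BUFFERS 8	/* stand-in for the transport's iov limit */

struct example_request {
	void *buffers[EXAMPLE_MAX_BUFFERS];
	int buf_count;
};

/* Return every buffer to the pool it was taken from and drop the
 * references, mirroring nvmf_fc_request_free_buffers() above. */
static void
example_free_buffers(struct example_request *req, struct spdk_mempool *pool)
{
	int i;

	for (i = 0; i < req->buf_count; i++) {
		spdk_mempool_put(pool, req->buffers[i]);
		req->buffers[i] = NULL;
	}
	req->buf_count = 0;
}
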
@@ -1295,11 +1295,12 @@ nvmf_fc_request_alloc_buffers(struct spdk_nvmf_fc_request *fc_req)
 	uint32_t length = fc_req->req.length;
 	uint32_t i = 0;
 	struct spdk_nvmf_fc_transport *fc_transport = fc_req->hwqp->fc_poll_group->fc_transport;
+	struct spdk_nvmf_transport *transport = &fc_transport->transport;
 
 	fc_req->req.iovcnt = 0;
 	fc_req->data_from_pool = true;
 	while (length) {
-		buf = spdk_mempool_get(fc_transport->data_buff_pool);
+		buf = spdk_mempool_get(transport->data_buf_pool);
 		if (!buf) {
 			goto nomem;
 		}
@@ -1307,8 +1308,7 @@ nvmf_fc_request_alloc_buffers(struct spdk_nvmf_fc_request *fc_req)
 		fc_req->req.iov[i].iov_base = (void *)((uintptr_t)((char *)buf +
 						       NVMF_DATA_BUFFER_MASK) &
 						      ~NVMF_DATA_BUFFER_MASK);
-		fc_req->req.iov[i].iov_len = spdk_min(length,
-						      fc_transport->transport.opts.io_unit_size);
+		fc_req->req.iov[i].iov_len = spdk_min(length, transport->opts.io_unit_size);
 		fc_req->req.iovcnt++;
 		fc_req->buffers[i] = buf;
 		length -= fc_req->req.iov[i++].iov_len;
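
The iov_base expression above is the standard align-up idiom: add the
mask (alignment minus one), then clear the low bits, which rounds the
raw pool pointer up to the next aligned boundary. That is also why each
pool element is created io_unit_size + NVMF_DATA_BUFFER_ALIGNMENT bytes
large. A self-contained illustration (the 4096-byte alignment here is
an assumed value for the example, not necessarily SPDK's):

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_ALIGNMENT 4096ULL		/* assumed for illustration */
#define EXAMPLE_MASK (EXAMPLE_ALIGNMENT - 1)

int main(void)
{
	uintptr_t raw = 0x1234567;	/* arbitrary unaligned address */
	uintptr_t aligned = (raw + EXAMPLE_MASK) & ~EXAMPLE_MASK;

	/* Prints: 0x1234567 -> 0x1235000 */
	printf("0x%jx -> 0x%jx\n", (uintmax_t)raw, (uintmax_t)aligned);
	return 0;
}
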
@@ -1911,19 +1911,6 @@ nvmf_fc_create(struct spdk_nvmf_transport_opts *opts)
 		return NULL;
 	}
 
-	/* Create a databuff pool */
-	g_nvmf_fc_transport->data_buff_pool = spdk_mempool_create("spdk_nvmf_fc_data_buff",
-			opts->num_shared_buffers,
-			opts->io_unit_size + NVMF_DATA_BUFFER_ALIGNMENT,
-			SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
-			SPDK_ENV_SOCKET_ID_ANY);
-	if (!g_nvmf_fc_transport->data_buff_pool) {
-		free(g_nvmf_fc_transport);
-		g_nvmf_fc_transport = NULL;
-		return NULL;
-	}
-
 	/* initialize the low level FC driver */
 	nvmf_fc_lld_init();
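
The deleted block is the FC-private counterpart of the pool that the
generic transport layer already creates from the same opts, so removing
it drops a duplicate rather than a capability. A sketch of an
equivalent creation call, mirroring the removed parameters (the pool
name and helper are illustrative; NVMF_DATA_BUFFER_ALIGNMENT is the
internal constant the removed code used, passed in here as a plain
argument):

#include "spdk/env.h"

/* Illustrative: one element per shared buffer, each large enough to
 * still hold an I/O unit after being aligned up to the buffer
 * alignment boundary. */
static struct spdk_mempool *
example_create_data_buf_pool(size_t num_shared_buffers, size_t io_unit_size,
			     size_t buffer_alignment)
{
	return spdk_mempool_create("example_data_buf",	/* hypothetical name */
				   num_shared_buffers,
				   io_unit_size + buffer_alignment,
				   SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
				   SPDK_ENV_SOCKET_ID_ANY);
}
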
@@ -1938,7 +1925,6 @@ nvmf_fc_destroy(struct spdk_nvmf_transport *transport)
 	struct spdk_nvmf_fc_poll_group *fc_poll_group, *pg_tmp;
 
 	fc_transport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_fc_transport, transport);
 
-	spdk_mempool_free(fc_transport->data_buff_pool);
 	free(fc_transport);
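
nvmf_fc_destroy() receives the generic struct spdk_nvmf_transport
pointer and recovers the enclosing FC transport with SPDK_CONTAINEROF
before freeing it; with the pool now owned by the generic layer, only
the FC-private state is left to tear down. A minimal sketch of the
container-of idiom with stand-in types (SPDK_CONTAINEROF comes from
spdk/util.h):

#include "spdk/util.h"

struct example_base {
	int id;
};

struct example_wrapper {
	struct example_base base;	/* embedded, like transport above */
	int extra;
};

/* Recover the enclosing struct from a pointer to its embedded member. */
static struct example_wrapper *
example_from_base(struct example_base *b)
{
	return SPDK_CONTAINEROF(b, struct example_wrapper, base);
}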