2016-06-06 14:44:30 -07:00
|
|
|
/*-
|
|
|
|
* BSD LICENSE
|
|
|
|
*
|
2018-12-11 19:57:21 +00:00
|
|
|
* Copyright (c) Intel Corporation. All rights reserved.
|
2019-05-23 12:04:49 +03:00
|
|
|
* Copyright (c) 2018-2019 Mellanox Technologies LTD. All rights reserved.
|
2016-06-06 14:44:30 -07:00
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
*
|
|
|
|
* * Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* * Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in
|
|
|
|
* the documentation and/or other materials provided with the
|
|
|
|
* distribution.
|
|
|
|
* * Neither the name of Intel Corporation nor the names of its
|
|
|
|
* contributors may be used to endorse or promote products derived
|
|
|
|
* from this software without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
|
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
|
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
|
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
|
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
|
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
|
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
|
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
|
2017-05-02 11:18:25 -07:00
|
|
|
#include "spdk/stdinc.h"
|
2016-06-06 14:44:30 -07:00
|
|
|
|
2017-08-29 13:22:37 -07:00
|
|
|
#include "nvmf_internal.h"
|
2017-05-02 11:18:25 -07:00
|
|
|
#include "transport.h"
|
2016-06-06 14:44:30 -07:00
|
|
|
|
2018-09-27 21:38:15 +02:00
|
|
|
#include "spdk/config.h"
|
2016-07-14 15:25:23 -07:00
|
|
|
#include "spdk/log.h"
|
2016-09-19 10:01:52 -07:00
|
|
|
#include "spdk/nvmf.h"
|
2016-07-14 15:25:23 -07:00
|
|
|
#include "spdk/queue.h"
|
2017-03-03 13:44:04 -07:00
|
|
|
#include "spdk/util.h"
|
2016-07-01 13:18:24 -07:00
|
|
|
|
2017-07-24 16:30:07 -07:00
|
|
|
/* Table of every transport implementation compiled into this build.
 * spdk_nvmf_get_transport_ops() selects an entry at runtime by matching
 * ops->type against the requested spdk_nvme_transport_type. */
static const struct spdk_nvmf_transport_ops *const g_transport_ops[] = {
#ifdef SPDK_CONFIG_RDMA
	&spdk_nvmf_transport_rdma,
#endif
	&spdk_nvmf_transport_tcp,
#ifdef SPDK_CONFIG_FC
	&spdk_nvmf_transport_fc,
#endif
};

/* Number of entries in g_transport_ops. */
#define NUM_TRANSPORTS (SPDK_COUNTOF(g_transport_ops))
/* Size (including NUL) of the buffer used to build data buffer pool names. */
#define MAX_MEMPOOL_NAME_LENGTH 40
|
2016-06-06 14:44:30 -07:00
|
|
|
|
2018-08-22 16:04:16 -07:00
|
|
|
static inline const struct spdk_nvmf_transport_ops *
|
|
|
|
spdk_nvmf_get_transport_ops(enum spdk_nvme_transport_type type)
|
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
for (i = 0; i != NUM_TRANSPORTS; i++) {
|
|
|
|
if (g_transport_ops[i]->type == type) {
|
|
|
|
return g_transport_ops[i];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2018-10-23 14:37:22 -07:00
|
|
|
const struct spdk_nvmf_transport_opts *
|
|
|
|
spdk_nvmf_get_transport_opts(struct spdk_nvmf_transport *transport)
|
|
|
|
{
|
|
|
|
return &transport->opts;
|
|
|
|
}
|
|
|
|
|
|
|
|
spdk_nvme_transport_type_t
|
|
|
|
spdk_nvmf_get_transport_type(struct spdk_nvmf_transport *transport)
|
|
|
|
{
|
|
|
|
return transport->ops->type;
|
|
|
|
}
|
|
|
|
|
2017-07-24 16:30:07 -07:00
|
|
|
struct spdk_nvmf_transport *
|
2018-08-27 15:27:47 -07:00
|
|
|
spdk_nvmf_transport_create(enum spdk_nvme_transport_type type,
|
2018-08-22 16:04:16 -07:00
|
|
|
struct spdk_nvmf_transport_opts *opts)
|
2016-07-14 15:25:23 -07:00
|
|
|
{
|
2017-07-24 16:30:07 -07:00
|
|
|
const struct spdk_nvmf_transport_ops *ops = NULL;
|
|
|
|
struct spdk_nvmf_transport *transport;
|
2019-01-14 12:55:06 -07:00
|
|
|
char spdk_mempool_name[MAX_MEMPOOL_NAME_LENGTH];
|
|
|
|
int chars_written;
|
2016-06-27 10:14:41 -07:00
|
|
|
|
2018-08-22 16:04:16 -07:00
|
|
|
ops = spdk_nvmf_get_transport_ops(type);
|
2017-07-24 16:30:07 -07:00
|
|
|
if (!ops) {
|
2019-04-22 18:28:02 +09:00
|
|
|
SPDK_ERRLOG("Transport type '%s' unavailable.\n",
|
2017-07-24 16:30:07 -07:00
|
|
|
spdk_nvme_transport_id_trtype_str(type));
|
|
|
|
return NULL;
|
|
|
|
}
|
2016-07-14 15:25:23 -07:00
|
|
|
|
2018-08-22 16:04:16 -07:00
|
|
|
transport = ops->create(opts);
|
2017-07-24 16:30:07 -07:00
|
|
|
if (!transport) {
|
|
|
|
SPDK_ERRLOG("Unable to create new transport of type %s\n",
|
|
|
|
spdk_nvme_transport_id_trtype_str(type));
|
|
|
|
return NULL;
|
2016-07-14 15:25:23 -07:00
|
|
|
}
|
|
|
|
|
2018-08-22 16:04:16 -07:00
|
|
|
transport->ops = ops;
|
|
|
|
transport->opts = *opts;
|
2019-01-14 12:55:06 -07:00
|
|
|
chars_written = snprintf(spdk_mempool_name, MAX_MEMPOOL_NAME_LENGTH, "%s_%s_%s", "spdk_nvmf",
|
|
|
|
spdk_nvme_transport_id_trtype_str(type), "data");
|
|
|
|
if (chars_written < 0) {
|
|
|
|
SPDK_ERRLOG("Unable to generate transport data buffer pool name.\n");
|
|
|
|
ops->destroy(transport);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
transport->data_buf_pool = spdk_mempool_create(spdk_mempool_name,
|
|
|
|
opts->num_shared_buffers,
|
2019-01-30 13:40:29 -07:00
|
|
|
opts->io_unit_size + NVMF_DATA_BUFFER_ALIGNMENT,
|
2019-01-14 12:55:06 -07:00
|
|
|
SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
|
|
|
|
SPDK_ENV_SOCKET_ID_ANY);
|
|
|
|
|
|
|
|
if (!transport->data_buf_pool) {
|
|
|
|
SPDK_ERRLOG("Unable to allocate buffer pool for poll group\n");
|
|
|
|
ops->destroy(transport);
|
|
|
|
return NULL;
|
|
|
|
}
|
2018-08-22 16:04:16 -07:00
|
|
|
|
2017-07-24 16:30:07 -07:00
|
|
|
return transport;
|
2016-07-14 15:25:23 -07:00
|
|
|
}
|
|
|
|
|
2018-10-23 14:37:22 -07:00
|
|
|
struct spdk_nvmf_transport *
|
|
|
|
spdk_nvmf_transport_get_first(struct spdk_nvmf_tgt *tgt)
|
|
|
|
{
|
|
|
|
return TAILQ_FIRST(&tgt->transports);
|
|
|
|
}
|
|
|
|
|
|
|
|
struct spdk_nvmf_transport *
|
|
|
|
spdk_nvmf_transport_get_next(struct spdk_nvmf_transport *transport)
|
|
|
|
{
|
|
|
|
return TAILQ_NEXT(transport, link);
|
|
|
|
}
|
|
|
|
|
2017-07-24 16:30:07 -07:00
|
|
|
int
|
|
|
|
spdk_nvmf_transport_destroy(struct spdk_nvmf_transport *transport)
|
2016-07-14 15:25:23 -07:00
|
|
|
{
|
2019-01-14 12:55:06 -07:00
|
|
|
if (transport->data_buf_pool != NULL) {
|
|
|
|
if (spdk_mempool_count(transport->data_buf_pool) !=
|
|
|
|
transport->opts.num_shared_buffers) {
|
|
|
|
SPDK_ERRLOG("transport buffer pool count is %zu but should be %u\n",
|
|
|
|
spdk_mempool_count(transport->data_buf_pool),
|
|
|
|
transport->opts.num_shared_buffers);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
spdk_mempool_free(transport->data_buf_pool);
|
|
|
|
|
2017-07-24 16:30:07 -07:00
|
|
|
return transport->ops->destroy(transport);
|
2016-08-16 09:35:59 -07:00
|
|
|
}
|
|
|
|
|
2017-07-28 10:40:40 -07:00
|
|
|
int
|
|
|
|
spdk_nvmf_transport_listen(struct spdk_nvmf_transport *transport,
|
|
|
|
const struct spdk_nvme_transport_id *trid)
|
2016-07-14 15:25:23 -07:00
|
|
|
{
|
2017-07-28 10:40:40 -07:00
|
|
|
return transport->ops->listen(transport, trid);
|
2016-07-14 15:25:23 -07:00
|
|
|
}
|
2017-07-25 13:47:41 -07:00
|
|
|
|
|
|
|
int
|
2017-07-28 10:40:40 -07:00
|
|
|
spdk_nvmf_transport_stop_listen(struct spdk_nvmf_transport *transport,
|
|
|
|
const struct spdk_nvme_transport_id *trid)
|
2017-07-25 13:47:41 -07:00
|
|
|
{
|
2017-07-28 10:40:40 -07:00
|
|
|
return transport->ops->stop_listen(transport, trid);
|
2017-07-25 13:47:41 -07:00
|
|
|
}
|
|
|
|
|
2017-07-28 10:40:40 -07:00
|
|
|
void
|
2017-08-30 09:36:33 -07:00
|
|
|
spdk_nvmf_transport_accept(struct spdk_nvmf_transport *transport, new_qpair_fn cb_fn)
|
2017-07-25 13:47:41 -07:00
|
|
|
{
|
2017-08-30 09:36:33 -07:00
|
|
|
transport->ops->accept(transport, cb_fn);
|
2017-07-25 13:47:41 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2017-08-23 10:23:44 -07:00
|
|
|
spdk_nvmf_transport_listener_discover(struct spdk_nvmf_transport *transport,
|
|
|
|
struct spdk_nvme_transport_id *trid,
|
|
|
|
struct spdk_nvmf_discovery_log_page_entry *entry)
|
2017-07-25 13:47:41 -07:00
|
|
|
{
|
2017-08-23 10:23:44 -07:00
|
|
|
transport->ops->listener_discover(transport, trid, entry);
|
2017-07-25 13:47:41 -07:00
|
|
|
}
|
|
|
|
|
2017-08-28 16:24:33 -07:00
|
|
|
struct spdk_nvmf_transport_poll_group *
|
2017-07-28 11:21:45 -07:00
|
|
|
spdk_nvmf_transport_poll_group_create(struct spdk_nvmf_transport *transport)
|
2017-07-25 13:47:41 -07:00
|
|
|
{
|
2017-08-28 16:24:33 -07:00
|
|
|
struct spdk_nvmf_transport_poll_group *group;
|
2019-01-14 13:24:35 -07:00
|
|
|
struct spdk_nvmf_transport_pg_cache_buf *buf;
|
2017-07-25 13:47:41 -07:00
|
|
|
|
2017-07-28 11:21:45 -07:00
|
|
|
group = transport->ops->poll_group_create(transport);
|
2018-12-11 19:57:21 +00:00
|
|
|
if (!group) {
|
|
|
|
return NULL;
|
|
|
|
}
|
2017-07-28 11:21:45 -07:00
|
|
|
group->transport = transport;
|
2017-07-25 13:47:41 -07:00
|
|
|
|
2019-09-02 15:00:58 +09:00
|
|
|
STAILQ_INIT(&group->pending_buf_queue);
|
2019-01-14 13:24:35 -07:00
|
|
|
STAILQ_INIT(&group->buf_cache);
|
|
|
|
|
|
|
|
if (transport->opts.buf_cache_size) {
|
|
|
|
group->buf_cache_count = 0;
|
|
|
|
group->buf_cache_size = transport->opts.buf_cache_size;
|
|
|
|
while (group->buf_cache_count < group->buf_cache_size) {
|
|
|
|
buf = (struct spdk_nvmf_transport_pg_cache_buf *)spdk_mempool_get(transport->data_buf_pool);
|
|
|
|
if (!buf) {
|
|
|
|
SPDK_NOTICELOG("Unable to reserve the full number of buffers for the pg buffer cache.\n");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
STAILQ_INSERT_HEAD(&group->buf_cache, buf, link);
|
|
|
|
group->buf_cache_count++;
|
|
|
|
}
|
|
|
|
}
|
2017-07-28 11:21:45 -07:00
|
|
|
return group;
|
2017-07-25 13:47:41 -07:00
|
|
|
}
|
|
|
|
|
2019-05-15 21:53:39 +08:00
|
|
|
struct spdk_nvmf_transport_poll_group *
|
|
|
|
spdk_nvmf_transport_get_optimal_poll_group(struct spdk_nvmf_transport *transport,
|
|
|
|
struct spdk_nvmf_qpair *qpair)
|
|
|
|
{
|
|
|
|
if (transport->ops->get_optimal_poll_group) {
|
|
|
|
return transport->ops->get_optimal_poll_group(qpair);
|
|
|
|
} else {
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-07-25 13:47:41 -07:00
|
|
|
void
|
2017-08-28 16:24:33 -07:00
|
|
|
spdk_nvmf_transport_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
|
2017-07-25 13:47:41 -07:00
|
|
|
{
|
2019-01-14 13:24:35 -07:00
|
|
|
struct spdk_nvmf_transport_pg_cache_buf *buf, *tmp;
|
|
|
|
|
2019-09-02 15:00:58 +09:00
|
|
|
if (!STAILQ_EMPTY(&group->pending_buf_queue)) {
|
|
|
|
SPDK_ERRLOG("Pending I/O list wasn't empty on poll group destruction\n");
|
|
|
|
}
|
|
|
|
|
2019-01-14 13:24:35 -07:00
|
|
|
STAILQ_FOREACH_SAFE(buf, &group->buf_cache, link, tmp) {
|
|
|
|
STAILQ_REMOVE(&group->buf_cache, buf, spdk_nvmf_transport_pg_cache_buf, link);
|
|
|
|
spdk_mempool_put(group->transport->data_buf_pool, buf);
|
|
|
|
}
|
2017-07-28 11:21:45 -07:00
|
|
|
group->transport->ops->poll_group_destroy(group);
|
2017-07-25 13:47:41 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2017-08-28 16:24:33 -07:00
|
|
|
spdk_nvmf_transport_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
|
2017-07-28 11:21:45 -07:00
|
|
|
struct spdk_nvmf_qpair *qpair)
|
2017-07-25 13:47:41 -07:00
|
|
|
{
|
|
|
|
if (qpair->transport) {
|
2017-07-28 11:21:45 -07:00
|
|
|
assert(qpair->transport == group->transport);
|
|
|
|
if (qpair->transport != group->transport) {
|
2017-07-25 13:47:41 -07:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
} else {
|
2017-07-28 11:21:45 -07:00
|
|
|
qpair->transport = group->transport;
|
2017-07-25 13:47:41 -07:00
|
|
|
}
|
|
|
|
|
2017-07-28 11:21:45 -07:00
|
|
|
return group->transport->ops->poll_group_add(group, qpair);
|
2017-07-25 13:47:41 -07:00
|
|
|
}
|
|
|
|
|
2018-11-21 10:11:25 +08:00
|
|
|
int
|
|
|
|
spdk_nvmf_transport_poll_group_remove(struct spdk_nvmf_transport_poll_group *group,
|
|
|
|
struct spdk_nvmf_qpair *qpair)
|
|
|
|
{
|
|
|
|
int rc = ENOTSUP;
|
|
|
|
|
|
|
|
assert(qpair->transport == group->transport);
|
|
|
|
if (group->transport->ops->poll_group_remove) {
|
|
|
|
rc = group->transport->ops->poll_group_remove(group, qpair);
|
|
|
|
}
|
|
|
|
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2017-08-28 13:48:39 -07:00
|
|
|
int
|
2017-08-28 16:24:33 -07:00
|
|
|
spdk_nvmf_transport_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
|
2017-08-28 13:48:39 -07:00
|
|
|
{
|
|
|
|
return group->transport->ops->poll_group_poll(group);
|
|
|
|
}
|
|
|
|
|
2018-07-18 08:47:16 -07:00
|
|
|
int
|
|
|
|
spdk_nvmf_transport_req_free(struct spdk_nvmf_request *req)
|
|
|
|
{
|
|
|
|
return req->qpair->transport->ops->req_free(req);
|
|
|
|
}
|
|
|
|
|
2017-07-25 13:47:41 -07:00
|
|
|
int
|
|
|
|
spdk_nvmf_transport_req_complete(struct spdk_nvmf_request *req)
|
|
|
|
{
|
|
|
|
return req->qpair->transport->ops->req_complete(req);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
spdk_nvmf_transport_qpair_fini(struct spdk_nvmf_qpair *qpair)
|
|
|
|
{
|
|
|
|
qpair->transport->ops->qpair_fini(qpair);
|
|
|
|
}
|
|
|
|
|
2018-08-02 15:08:12 -07:00
|
|
|
int
|
|
|
|
spdk_nvmf_transport_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair,
|
|
|
|
struct spdk_nvme_transport_id *trid)
|
|
|
|
{
|
|
|
|
return qpair->transport->ops->qpair_get_peer_trid(qpair, trid);
|
|
|
|
}
|
2018-09-07 13:41:41 -07:00
|
|
|
|
2018-09-10 14:28:04 -07:00
|
|
|
int
|
|
|
|
spdk_nvmf_transport_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair,
|
|
|
|
struct spdk_nvme_transport_id *trid)
|
|
|
|
{
|
|
|
|
return qpair->transport->ops->qpair_get_local_trid(qpair, trid);
|
|
|
|
}
|
|
|
|
|
2018-09-07 13:41:41 -07:00
|
|
|
int
|
|
|
|
spdk_nvmf_transport_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair,
|
|
|
|
struct spdk_nvme_transport_id *trid)
|
|
|
|
{
|
|
|
|
return qpair->transport->ops->qpair_get_listen_trid(qpair, trid);
|
|
|
|
}
|
2018-08-27 15:27:47 -07:00
|
|
|
|
|
|
|
bool
|
|
|
|
spdk_nvmf_transport_opts_init(enum spdk_nvme_transport_type type,
|
|
|
|
struct spdk_nvmf_transport_opts *opts)
|
|
|
|
{
|
|
|
|
const struct spdk_nvmf_transport_ops *ops;
|
|
|
|
|
|
|
|
ops = spdk_nvmf_get_transport_ops(type);
|
|
|
|
if (!ops) {
|
|
|
|
SPDK_ERRLOG("Transport type %s unavailable.\n",
|
|
|
|
spdk_nvme_transport_id_trtype_str(type));
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
ops->opts_init(opts);
|
|
|
|
return true;
|
|
|
|
}
|
2018-08-02 10:21:45 +08:00
|
|
|
|
|
|
|
int
|
|
|
|
spdk_nvmf_transport_qpair_set_sqsize(struct spdk_nvmf_qpair *qpair)
|
|
|
|
{
|
|
|
|
if (qpair->transport->ops->qpair_set_sqsize) {
|
|
|
|
return qpair->transport->ops->qpair_set_sqsize(qpair);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2019-05-23 12:04:49 +03:00
|
|
|
|
|
|
|
int
|
|
|
|
spdk_nvmf_transport_poll_group_get_stat(struct spdk_nvmf_tgt *tgt,
|
|
|
|
struct spdk_nvmf_transport *transport,
|
|
|
|
struct spdk_nvmf_transport_poll_group_stat **stat)
|
|
|
|
{
|
|
|
|
if (transport->ops->poll_group_get_stat) {
|
|
|
|
return transport->ops->poll_group_get_stat(tgt, stat);
|
|
|
|
} else {
|
|
|
|
return -ENOTSUP;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
spdk_nvmf_transport_poll_group_free_stat(struct spdk_nvmf_transport *transport,
|
|
|
|
struct spdk_nvmf_transport_poll_group_stat *stat)
|
|
|
|
{
|
|
|
|
if (transport->ops->poll_group_free_stat) {
|
|
|
|
transport->ops->poll_group_free_stat(stat);
|
|
|
|
}
|
|
|
|
}
|
2019-08-21 14:36:13 +09:00
|
|
|
|
|
|
|
void
|
|
|
|
spdk_nvmf_request_free_buffers(struct spdk_nvmf_request *req,
|
|
|
|
struct spdk_nvmf_transport_poll_group *group,
|
2019-09-24 10:01:53 +09:00
|
|
|
struct spdk_nvmf_transport *transport)
|
2019-08-21 14:36:13 +09:00
|
|
|
{
|
|
|
|
uint32_t i;
|
|
|
|
|
2019-09-26 08:43:31 +09:00
|
|
|
for (i = 0; i < req->iovcnt; i++) {
|
2019-08-21 14:36:13 +09:00
|
|
|
if (group->buf_cache_count < group->buf_cache_size) {
|
|
|
|
STAILQ_INSERT_HEAD(&group->buf_cache,
|
|
|
|
(struct spdk_nvmf_transport_pg_cache_buf *)req->buffers[i],
|
|
|
|
link);
|
|
|
|
group->buf_cache_count++;
|
|
|
|
} else {
|
|
|
|
spdk_mempool_put(transport->data_buf_pool, req->buffers[i]);
|
|
|
|
}
|
|
|
|
req->iov[i].iov_base = NULL;
|
|
|
|
req->buffers[i] = NULL;
|
|
|
|
req->iov[i].iov_len = 0;
|
|
|
|
}
|
|
|
|
req->data_from_pool = false;
|
|
|
|
}
|
|
|
|
|
2019-09-26 08:43:31 +09:00
|
|
|
/*
 * Append one data buffer to the request's iovec.
 *
 * The raw buffer pointer is recorded in req->buffers[], while the iov
 * base is rounded up to NVMF_DATA_BUFFER_ALIGNMENT (pool elements carry
 * that much slack — see spdk_nvmf_transport_create()). The iov length
 * is capped at io_unit_size.
 *
 * Returns the number of bytes of "length" still unsatisfied after this
 * buffer is consumed.
 */
static inline int
nvmf_request_set_buffer(struct spdk_nvmf_request *req, void *buf, uint32_t length,
			uint32_t io_unit_size)
{
	req->buffers[req->iovcnt] = buf;
	/* Align up: add the mask then clear the low bits. Note: arithmetic on
	 * a void * (GCC/Clang extension) before the uintptr_t cast. */
	req->iov[req->iovcnt].iov_base = (void *)((uintptr_t)(buf + NVMF_DATA_BUFFER_MASK) &
					~NVMF_DATA_BUFFER_MASK);
	req->iov[req->iovcnt].iov_len = spdk_min(length, io_unit_size);
	length -= req->iov[req->iovcnt].iov_len;
	req->iovcnt++;

	return length;
}
|
|
|
|
|
2019-09-24 13:21:13 +09:00
|
|
|
/*
 * Reserve enough io_unit_size buffers to cover "length" bytes and append
 * them to req's iovec, taking from the poll group's per-group cache
 * first and falling back to a bulk grab from the transport-wide mempool
 * once the cache runs dry.
 *
 * Returns 0 on success, -EINVAL if the request would exceed
 * NVMF_REQ_MAX_BUFFERS, or -ENOMEM if the mempool cannot supply the
 * remainder. On -ENOMEM the iovec may hold a partial allocation; the
 * caller is expected to release it (see spdk_nvmf_request_get_buffers()).
 */
static int
nvmf_request_get_buffers(struct spdk_nvmf_request *req,
			 struct spdk_nvmf_transport_poll_group *group,
			 struct spdk_nvmf_transport *transport,
			 uint32_t length)
{
	uint32_t io_unit_size = transport->opts.io_unit_size;
	uint32_t num_buffers;
	uint32_t i = 0, j;
	void *buffer, *buffers[NVMF_REQ_MAX_BUFFERS];

	/* If the number of buffers is too large, then we know the I/O is larger than allowed.
	 * Fail it.
	 */
	num_buffers = SPDK_CEIL_DIV(length, io_unit_size);
	if (num_buffers + req->iovcnt > NVMF_REQ_MAX_BUFFERS) {
		return -EINVAL;
	}

	while (i < num_buffers) {
		if (!(STAILQ_EMPTY(&group->buf_cache))) {
			/* Fast path: pop one buffer from the poll group cache. */
			group->buf_cache_count--;
			buffer = STAILQ_FIRST(&group->buf_cache);
			STAILQ_REMOVE_HEAD(&group->buf_cache, link);
			assert(buffer != NULL);

			length = nvmf_request_set_buffer(req, buffer, length, io_unit_size);
			i++;
		} else {
			/* Cache empty: grab all remaining buffers from the mempool in
			 * one bulk call (all-or-nothing). */
			if (spdk_mempool_get_bulk(transport->data_buf_pool, buffers,
						  num_buffers - i)) {
				return -ENOMEM;
			}
			for (j = 0; j < num_buffers - i; j++) {
				length = nvmf_request_set_buffer(req, buffers[j], length, io_unit_size);
			}
			i += num_buffers - i;
		}
	}

	/* Every byte of the original length must now be covered by an iov. */
	assert(length == 0);

	req->data_from_pool = true;
	return 0;
}
|
|
|
|
|
|
|
|
int
|
|
|
|
spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
|
|
|
|
struct spdk_nvmf_transport_poll_group *group,
|
|
|
|
struct spdk_nvmf_transport *transport,
|
|
|
|
uint32_t length)
|
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
req->iovcnt = 0;
|
|
|
|
|
|
|
|
rc = nvmf_request_get_buffers(req, group, transport, length);
|
|
|
|
if (rc == -ENOMEM) {
|
|
|
|
spdk_nvmf_request_free_buffers(req, group, transport);
|
|
|
|
}
|
|
|
|
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
spdk_nvmf_request_get_buffers_multi(struct spdk_nvmf_request *req,
|
|
|
|
struct spdk_nvmf_transport_poll_group *group,
|
|
|
|
struct spdk_nvmf_transport *transport,
|
|
|
|
uint32_t *lengths, uint32_t num_lengths)
|
|
|
|
{
|
|
|
|
int rc = 0;
|
|
|
|
uint32_t i;
|
|
|
|
|
|
|
|
req->iovcnt = 0;
|
|
|
|
|
|
|
|
for (i = 0; i < num_lengths; i++) {
|
|
|
|
rc = nvmf_request_get_buffers(req, group, transport, lengths[i]);
|
|
|
|
if (rc != 0) {
|
|
|
|
goto err_exit;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
2019-08-21 14:36:13 +09:00
|
|
|
|
|
|
|
err_exit:
|
2019-09-24 10:01:53 +09:00
|
|
|
spdk_nvmf_request_free_buffers(req, group, transport);
|
2019-09-24 13:21:13 +09:00
|
|
|
return rc;
|
2019-08-21 14:36:13 +09:00
|
|
|
}
|