nvmf/tcp: Eliminate management channel in favor of poll group
The management channel was used in the RDMA transport prior to the
introduction of poll groups, and it made its way over to the TCP
transport when that was written. Eliminate it in favor of just using
the poll group.

Change-Id: Icde631dd97a6a29190c4a4a6a10a0cb7c4f07a0e
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/c/442432
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Seth Howell <seth.howell5141@gmail.com>
parent 1de99be6cf
commit 63de221bf6
lib/nvmf/tcp.c | 107
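
At its core the change moves ownership of the buffer-wait queue. Before, every qpair acquired a per-thread management channel whose context (struct spdk_nvmf_tcp_mgmt_channel) held pending_data_buf_queue plus a back-pointer to the poll group; after, the poll group owns the queue and the qpair points at its group directly. A condensed before/after of the structs, abbreviated from the diff below:

	/* Before: each qpair held an io_channel whose context owned the queue. */
	struct spdk_nvmf_tcp_mgmt_channel {
		TAILQ_HEAD(, spdk_nvmf_tcp_req)		pending_data_buf_queue;
		struct spdk_nvmf_tcp_poll_group		*tgroup;
	};

	/* After: the poll group owns the queue directly... */
	struct spdk_nvmf_tcp_poll_group {
		struct spdk_nvmf_transport_poll_group	group;
		struct spdk_sock_group			*sock_group;
		TAILQ_HEAD(, spdk_nvmf_tcp_req)		pending_data_buf_queue;
		TAILQ_HEAD(, spdk_nvmf_tcp_qpair)	qpairs;
	};

	/* ...and each qpair keeps a back-pointer to the group it belongs to,
	 * replacing the mgmt_channel/ch pair. */
	struct spdk_nvmf_tcp_qpair {
		struct spdk_nvmf_qpair			qpair;
		struct spdk_nvmf_tcp_poll_group		*group;
		/* ... */
	};

This also collapses the double indirection tqpair->ch->tgroup into a single tqpair->group hop on every buffer-queue access.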
--- a/lib/nvmf/tcp.c
+++ b/lib/nvmf/tcp.c
@@ -203,6 +203,7 @@ struct spdk_nvmf_tcp_req {
 
 struct spdk_nvmf_tcp_qpair {
 	struct spdk_nvmf_qpair			qpair;
+	struct spdk_nvmf_tcp_poll_group		*group;
 	struct spdk_nvmf_tcp_port		*port;
 	struct spdk_sock			*sock;
 	struct spdk_poller			*flush_poller;
@@ -249,10 +250,6 @@ struct spdk_nvmf_tcp_qpair {
 	/** Specifies the maximum number of PDU-Data bytes per H2C Data Transfer PDU */
 	uint32_t				maxh2cdata;
 
-	/* Mgmt channel */
-	struct spdk_io_channel			*mgmt_channel;
-	struct spdk_nvmf_tcp_mgmt_channel	*ch;
-
 	uint32_t				c2h_data_pdu_cnt;
 
 	/* IP address */
@@ -269,7 +266,11 @@ struct spdk_nvmf_tcp_qpair {
 struct spdk_nvmf_tcp_poll_group {
 	struct spdk_nvmf_transport_poll_group	group;
 	struct spdk_sock_group			*sock_group;
-	TAILQ_HEAD(, spdk_nvmf_tcp_qpair)	qpairs;
+
+	/* Requests that are waiting to obtain a data buffer */
+	TAILQ_HEAD(, spdk_nvmf_tcp_req)		pending_data_buf_queue;
+
+	TAILQ_HEAD(, spdk_nvmf_tcp_qpair)	qpairs;
 };
 
 struct spdk_nvmf_tcp_port {
@@ -287,15 +288,6 @@ struct spdk_nvmf_tcp_transport {
 	TAILQ_HEAD(, spdk_nvmf_tcp_port)	ports;
 };
 
-struct spdk_nvmf_tcp_mgmt_channel {
-	/* Requests that are waiting to obtain a data buffer */
-	TAILQ_HEAD(, spdk_nvmf_tcp_req)		pending_data_buf_queue;
-
-	/* Point to the transport polling group */
-	struct spdk_nvmf_tcp_poll_group		*tgroup;
-
-};
-
 static void spdk_nvmf_tcp_qpair_process_pending(struct spdk_nvmf_tcp_transport *ttransport,
 		struct spdk_nvmf_tcp_qpair *tqpair);
 static bool spdk_nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
@@ -406,25 +398,6 @@ spdk_nvmf_tcp_req_free(struct spdk_nvmf_request *req)
 	return 0;
 }
 
-static int
-spdk_nvmf_tcp_mgmt_channel_create(void *io_device, void *ctx_buf)
-{
-	struct spdk_nvmf_tcp_mgmt_channel *ch = ctx_buf;
-
-	TAILQ_INIT(&ch->pending_data_buf_queue);
-	return 0;
-}
-
-static void
-spdk_nvmf_tcp_mgmt_channel_destroy(void *io_device, void *ctx_buf)
-{
-	struct spdk_nvmf_tcp_mgmt_channel *ch = ctx_buf;
-
-	if (!TAILQ_EMPTY(&ch->pending_data_buf_queue)) {
-		SPDK_ERRLOG("Pending I/O list wasn't empty on channel destruction\n");
-	}
-}
-
 static void
 spdk_nvmf_tcp_drain_state_queue(struct spdk_nvmf_tcp_qpair *tqpair,
 				enum spdk_nvmf_tcp_req_state state)
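
The two functions deleted above were the create/destroy callbacks for SPDK's io_device mechanism. For readers unfamiliar with that pattern, this is roughly how the old code wired it up, reassembled from the removed lines (registration in spdk_nvmf_tcp_create, lookup in spdk_nvmf_tcp_qpair_init, release in spdk_nvmf_tcp_qpair_destroy); a sketch of the removed indirection, not new API:

	/* Register the transport as an io_device: SPDK allocates a per-thread
	 * context of the given size and runs the create/destroy callbacks as
	 * threads acquire and release channels. */
	spdk_io_device_register(ttransport, spdk_nvmf_tcp_mgmt_channel_create,
				spdk_nvmf_tcp_mgmt_channel_destroy,
				sizeof(struct spdk_nvmf_tcp_mgmt_channel), "tcp_transport");

	/* Per qpair: look up the calling thread's channel and its context. */
	struct spdk_io_channel *ch = spdk_get_io_channel(ttransport);
	struct spdk_nvmf_tcp_mgmt_channel *mgmt = spdk_io_channel_get_ctx(ch);

	/* At qpair destruction: drop the channel reference. */
	spdk_put_io_channel(ch);

Since a qpair is always served by exactly one poll group on one thread, the per-thread channel added nothing that the poll group did not already provide, which is what lets the rest of the diff delete it.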
@@ -465,7 +438,7 @@ spdk_nvmf_tcp_cleanup_all_states(struct spdk_nvmf_tcp_qpair *tqpair)
 	/* Wipe the requests waiting for buffer from the global list */
 	TAILQ_FOREACH_SAFE(tcp_req, &tqpair->state_queue[TCP_REQUEST_STATE_NEED_BUFFER], state_link,
 			   req_tmp) {
-		TAILQ_REMOVE(&tqpair->ch->pending_data_buf_queue, tcp_req, link);
+		TAILQ_REMOVE(&tqpair->group->pending_data_buf_queue, tcp_req, link);
 	}
 
 	spdk_nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_NEED_BUFFER);
@@ -499,9 +472,6 @@ spdk_nvmf_tcp_qpair_destroy(struct spdk_nvmf_tcp_qpair *tqpair)
 	spdk_poller_unregister(&tqpair->flush_poller);
 	spdk_sock_close(&tqpair->sock);
 	spdk_nvmf_tcp_cleanup_all_states(tqpair);
-	if (tqpair->mgmt_channel) {
-		spdk_put_io_channel(tqpair->mgmt_channel);
-	}
 
 	if (tqpair->free_pdu_num != (tqpair->max_queue_depth + NVMF_TCP_QPAIR_MAX_C2H_PDU_NUM)) {
 		SPDK_ERRLOG("tqpair(%p) free pdu pool num is %u but should be %u\n", tqpair,
@@ -544,7 +514,6 @@ spdk_nvmf_tcp_destroy(struct spdk_nvmf_transport *transport)
 	assert(transport != NULL);
 	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
 
-	spdk_io_device_unregister(ttransport, NULL);
 	pthread_mutex_destroy(&ttransport->lock);
 	free(ttransport);
 	return 0;
@@ -605,10 +574,6 @@ spdk_nvmf_tcp_create(struct spdk_nvmf_transport_opts *opts)
 
 	pthread_mutex_init(&ttransport->lock, NULL);
 
-	spdk_io_device_register(ttransport, spdk_nvmf_tcp_mgmt_channel_create,
-				spdk_nvmf_tcp_mgmt_channel_destroy,
-				sizeof(struct spdk_nvmf_tcp_mgmt_channel), "tcp_transport");
-
 	return &ttransport->transport;
 }
 
@@ -1076,11 +1041,9 @@ spdk_nvmf_tcp_qpair_init_mem_resource(struct spdk_nvmf_tcp_qpair *tqpair, uint16
 static int
 spdk_nvmf_tcp_qpair_init(struct spdk_nvmf_qpair *qpair)
 {
-	struct spdk_nvmf_tcp_transport *ttransport;
 	struct spdk_nvmf_tcp_qpair *tqpair;
 	int i;
 
-	ttransport = SPDK_CONTAINEROF(qpair->transport, struct spdk_nvmf_tcp_transport, transport);
 	tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair);
 
 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "New TCP Connection: %p\n", qpair);
@@ -1097,13 +1060,6 @@ spdk_nvmf_tcp_qpair_init(struct spdk_nvmf_qpair *qpair)
 	tqpair->host_hdgst_enable = true;
 	tqpair->host_ddgst_enable = true;
 
-	tqpair->mgmt_channel = spdk_get_io_channel(ttransport);
-	if (!tqpair->mgmt_channel) {
-		return -1;
-	}
-	tqpair->ch = spdk_io_channel_get_ctx(tqpair->mgmt_channel);
-	assert(tqpair->ch != NULL);
-
 	return 0;
 }
 
@@ -1236,6 +1192,7 @@ spdk_nvmf_tcp_poll_group_create(struct spdk_nvmf_transport *transport)
 	}
 
 	TAILQ_INIT(&tgroup->qpairs);
+	TAILQ_INIT(&tgroup->pending_data_buf_queue);
 
 	return &tgroup->group;
 
@@ -1252,6 +1209,10 @@ spdk_nvmf_tcp_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
 	tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
 	spdk_sock_group_close(&tgroup->sock_group);
 
+	if (!TAILQ_EMPTY(&tgroup->pending_data_buf_queue)) {
+		SPDK_ERRLOG("Pending I/O list wasn't empty on poll group destruction\n");
+	}
+
 	free(tgroup);
 }
 
@@ -2125,12 +2086,14 @@ static int
 spdk_nvmf_tcp_req_fill_iovs(struct spdk_nvmf_tcp_transport *ttransport,
 			    struct spdk_nvmf_tcp_req *tcp_req)
 {
-	void *buf = NULL;
-	uint32_t length = tcp_req->req.length;
-	uint32_t i = 0;
-	struct spdk_nvmf_tcp_qpair *tqpair = SPDK_CONTAINEROF(tcp_req->req.qpair,
-					     struct spdk_nvmf_tcp_qpair, qpair);
-	struct spdk_nvmf_transport_poll_group *group = &tqpair->ch->tgroup->group;
+	void					*buf = NULL;
+	uint32_t				length = tcp_req->req.length;
+	uint32_t				i = 0;
+	struct spdk_nvmf_tcp_qpair		*tqpair;
+	struct spdk_nvmf_transport_poll_group	*group;
+
+	tqpair = SPDK_CONTAINEROF(tcp_req->req.qpair, struct spdk_nvmf_tcp_qpair, qpair);
+	group = &tqpair->group->group;
 
 	tcp_req->req.iovcnt = 0;
 	while (length) {
@@ -2433,15 +2396,15 @@ static bool
 spdk_nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
 			  struct spdk_nvmf_tcp_req *tcp_req)
 {
-	struct spdk_nvmf_tcp_qpair *tqpair;
-	struct spdk_nvme_cpl *rsp = &tcp_req->req.rsp->nvme_cpl;
-	int rc;
-	enum spdk_nvmf_tcp_req_state prev_state;
-	bool progress = false;
-	struct spdk_nvmf_transport_poll_group *group;
+	struct spdk_nvmf_tcp_qpair		*tqpair;
+	struct spdk_nvme_cpl			*rsp = &tcp_req->req.rsp->nvme_cpl;
+	int					rc;
+	enum spdk_nvmf_tcp_req_state		prev_state;
+	bool					progress = false;
+	struct spdk_nvmf_transport_poll_group	*group;
 
 	tqpair = SPDK_CONTAINEROF(tcp_req->req.qpair, struct spdk_nvmf_tcp_qpair, qpair);
-	group = &tqpair->ch->tgroup->group;
+	group = &tqpair->group->group;
 	assert(tcp_req->state != TCP_REQUEST_STATE_FREE);
 
 	/* The loop here is to allow for several back-to-back state changes. */
@@ -2480,14 +2443,15 @@ spdk_nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
 		}
 
 		spdk_nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_NEED_BUFFER);
-		TAILQ_INSERT_TAIL(&tqpair->ch->pending_data_buf_queue, tcp_req, link);
+		TAILQ_INSERT_TAIL(&tqpair->group->pending_data_buf_queue, tcp_req, link);
 		break;
 	case TCP_REQUEST_STATE_NEED_BUFFER:
 		spdk_trace_record(TRACE_TCP_REQUEST_STATE_NEED_BUFFER, 0, 0, (uintptr_t)tcp_req, 0);
 
 		assert(tcp_req->req.xfer != SPDK_NVME_DATA_NONE);
 
-		if (!tcp_req->has_incapsule_data && (tcp_req != TAILQ_FIRST(&tqpair->ch->pending_data_buf_queue))) {
+		if (!tcp_req->has_incapsule_data &&
+		    (tcp_req != TAILQ_FIRST(&tqpair->group->pending_data_buf_queue))) {
 			SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP,
 				      "Not the first element to wait for the buf for tcp_req(%p) on tqpair=%p\n",
 				      tcp_req, tqpair);
@@ -2498,7 +2462,7 @@ spdk_nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
 			/* Try to get a data buffer */
 			rc = spdk_nvmf_tcp_req_parse_sgl(ttransport, tcp_req);
 			if (rc < 0) {
-				TAILQ_REMOVE(&tqpair->ch->pending_data_buf_queue, tcp_req, link);
+				TAILQ_REMOVE(&tqpair->group->pending_data_buf_queue, tcp_req, link);
 				rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
 				/* Reset the tqpair receving pdu state */
 				spdk_nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
@@ -2513,7 +2477,7 @@ spdk_nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
 			break;
 		}
 
-		TAILQ_REMOVE(&tqpair->ch->pending_data_buf_queue, tcp_req, link);
+		TAILQ_REMOVE(&tqpair->group->pending_data_buf_queue, tcp_req, link);
 
 		/* If data is transferring from host to controller, we need to do a transfer from the host. */
 		if (tcp_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
@@ -2599,7 +2563,7 @@ spdk_nvmf_tcp_qpair_process_pending(struct spdk_nvmf_tcp_transport *ttransport,
 
 	spdk_nvmf_tcp_handle_queued_r2t_req(tqpair);
 
-	TAILQ_FOREACH_SAFE(tcp_req, &tqpair->ch->pending_data_buf_queue, link, req_tmp) {
+	TAILQ_FOREACH_SAFE(tcp_req, &tqpair->group->pending_data_buf_queue, link, req_tmp) {
 		if (spdk_nvmf_tcp_req_process(ttransport, tcp_req) == false) {
 			break;
 		}
@@ -2672,7 +2636,7 @@ spdk_nvmf_tcp_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
 		return -1;
 	}
 
-	tqpair->ch->tgroup = tgroup;
+	tqpair->group = tgroup;
 	tqpair->state = NVME_TCP_QPAIR_STATE_INVALID;
 	TAILQ_INSERT_TAIL(&tgroup->qpairs, tqpair, link);
 
@@ -2690,6 +2654,9 @@ spdk_nvmf_tcp_poll_group_remove(struct spdk_nvmf_transport_poll_group *group,
 	tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
 	tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair);
 
+	assert(tqpair->group == tgroup);
+	tqpair->group = NULL;
+
 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "remove tqpair=%p from the tgroup=%p\n", tqpair, tgroup);
 	TAILQ_REMOVE(&tgroup->qpairs, tqpair, link);
 	rc = spdk_sock_group_remove_sock(tgroup->sock_group, tqpair->sock);
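
With the lib/nvmf/tcp.c changes in place, the buffer-wait path is entirely poll-group scoped: a new request joins tgroup->pending_data_buf_queue, waits its turn unless it carries in-capsule data, and leaves the queue once SGL parsing succeeds or fails. A condensed sketch of that lifecycle, pieced together from the hunks above with the enclosing state machine elided:

	/* NEW -> NEED_BUFFER: join the poll group's FIFO of waiters. */
	TAILQ_INSERT_TAIL(&tqpair->group->pending_data_buf_queue, tcp_req, link);

	/* NEED_BUFFER: without in-capsule data, strictly honor FIFO order. */
	if (!tcp_req->has_incapsule_data &&
	    tcp_req != TAILQ_FIRST(&tqpair->group->pending_data_buf_queue)) {
		break;	/* someone ahead of us is still waiting for a buffer */
	}

	/* Try to get a data buffer; leave the queue on success or fatal error. */
	rc = spdk_nvmf_tcp_req_parse_sgl(ttransport, tcp_req);
	TAILQ_REMOVE(&tqpair->group->pending_data_buf_queue, tcp_req, link);

Because poll_group_remove now clears tqpair->group, any stale access after removal trips the assert rather than dereferencing a dangling channel pointer. The unit test diff below adjusts for the transport no longer registering itself as an io_device.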
--- a/test/unit/lib/nvmf/tcp.c/tcp_ut.c
+++ b/test/unit/lib/nvmf/tcp.c/tcp_ut.c
@@ -256,7 +256,6 @@ test_nvmf_tcp_create(void)
 	CU_ASSERT(transport->opts.io_unit_size == UT_IO_UNIT_SIZE);
 	/* destroy transport */
 	spdk_mempool_free(ttransport->transport.data_buf_pool);
-	spdk_io_device_unregister(ttransport, NULL);
 	free(ttransport);
 
 	/* case 2 */
@@ -280,7 +279,6 @@ test_nvmf_tcp_create(void)
 	CU_ASSERT(transport->opts.io_unit_size == UT_MAX_IO_SIZE);
 	/* destroy transport */
 	spdk_mempool_free(ttransport->transport.data_buf_pool);
-	spdk_io_device_unregister(ttransport, NULL);
 	free(ttransport);
 
 	/* case 3 */
@@ -354,6 +352,7 @@ test_nvmf_tcp_poll_group_create(void)
 	SPDK_CU_ASSERT_FATAL(group);
 	group->transport = transport;
 	spdk_nvmf_tcp_poll_group_destroy(group);
+	spdk_nvmf_tcp_destroy(transport);
 
 	spdk_thread_exit(thread);
 }