2016-08-02 22:35:29 +00:00
|
|
|
/*-
|
|
|
|
* BSD LICENSE
|
|
|
|
*
|
2020-01-28 11:54:47 +00:00
|
|
|
* Copyright (c) Intel Corporation. All rights reserved.
|
|
|
|
* Copyright (c) 2020 Mellanox Technologies LTD. All rights reserved.
|
2016-08-02 22:35:29 +00:00
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
*
|
|
|
|
* * Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* * Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in
|
|
|
|
* the documentation and/or other materials provided with the
|
|
|
|
* distribution.
|
|
|
|
* * Neither the name of Intel Corporation nor the names of its
|
|
|
|
* contributors may be used to endorse or promote products derived
|
|
|
|
* from this software without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
|
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
|
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
|
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
|
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
|
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
|
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
|
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
|
2017-05-02 18:18:25 +00:00
|
|
|
#include "spdk/stdinc.h"
|
2016-08-02 22:35:29 +00:00
|
|
|
|
2018-02-07 19:58:14 +00:00
|
|
|
#include "spdk/sock.h"
|
2018-02-16 18:14:32 +00:00
|
|
|
#include "spdk_internal/sock.h"
|
2020-10-06 16:16:26 +00:00
|
|
|
#include "spdk/log.h"
|
2016-08-02 22:35:29 +00:00
|
|
|
|
2020-02-19 11:18:51 +00:00
|
|
|
#define SPDK_SOCK_DEFAULT_PRIORITY 0
|
2020-09-01 09:06:03 +00:00
|
|
|
#define SPDK_SOCK_DEFAULT_ZCOPY true
|
2020-02-19 11:18:51 +00:00
|
|
|
#define SPDK_SOCK_OPTS_FIELD_OK(opts, field) (offsetof(struct spdk_sock_opts, field) + sizeof(opts->field) <= (opts->opts_size))
|
|
|
|
|
2018-02-21 17:42:27 +00:00
|
|
|
static STAILQ_HEAD(, spdk_net_impl) g_net_impls = STAILQ_HEAD_INITIALIZER(g_net_impls);
|
2020-09-21 11:07:52 +00:00
|
|
|
static struct spdk_net_impl *g_default_impl;
|
2018-02-16 18:14:32 +00:00
|
|
|
|
2019-05-14 12:50:15 +00:00
|
|
|
/* One entry of the placement_id -> sock group map. */
struct spdk_sock_placement_id_entry {
	int placement_id;		/* key: placement id reported by the sock impl */
	uint32_t ref;			/* number of sockets currently referencing this mapping */
	struct spdk_sock_group *group;	/* group associated with the placement id */
	STAILQ_ENTRY(spdk_sock_placement_id_entry) link;
};

/* Global placement_id -> group map; every access is serialized by
 * g_map_table_mutex. */
static STAILQ_HEAD(, spdk_sock_placement_id_entry) g_placement_id_map = STAILQ_HEAD_INITIALIZER(
			g_placement_id_map);
static pthread_mutex_t g_map_table_mutex = PTHREAD_MUTEX_INITIALIZER;
|
|
|
|
|
|
|
|
/* Insert a group into the placement map.
|
|
|
|
* If the group is already in the map, take a reference.
|
|
|
|
*/
|
|
|
|
static int
|
2020-05-09 23:22:08 +00:00
|
|
|
sock_map_insert(int placement_id, struct spdk_sock_group *group)
|
2019-05-14 12:50:15 +00:00
|
|
|
{
|
|
|
|
struct spdk_sock_placement_id_entry *entry;
|
|
|
|
|
|
|
|
pthread_mutex_lock(&g_map_table_mutex);
|
|
|
|
STAILQ_FOREACH(entry, &g_placement_id_map, link) {
|
|
|
|
if (placement_id == entry->placement_id) {
|
|
|
|
/* The mapping already exists, it means that different sockets have
|
|
|
|
* the same placement_ids.
|
|
|
|
*/
|
|
|
|
entry->ref++;
|
|
|
|
pthread_mutex_unlock(&g_map_table_mutex);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
entry = calloc(1, sizeof(*entry));
|
|
|
|
if (!entry) {
|
|
|
|
SPDK_ERRLOG("Cannot allocate an entry for placement_id=%u\n", placement_id);
|
|
|
|
pthread_mutex_unlock(&g_map_table_mutex);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
entry->placement_id = placement_id;
|
|
|
|
entry->group = group;
|
|
|
|
entry->ref++;
|
|
|
|
|
|
|
|
STAILQ_INSERT_TAIL(&g_placement_id_map, entry, link);
|
|
|
|
pthread_mutex_unlock(&g_map_table_mutex);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Release a reference to the group for a given placement_id.
|
2021-01-25 09:41:20 +00:00
|
|
|
* We use lazy free method. If a placement_id with a sock is associated with the group,
|
|
|
|
* it will possibly be associated again by another sock with the same placement_id. And
|
|
|
|
* there will no memory leak, because if a polling group is destroyed, the
|
|
|
|
* sock_remove_sock_group_from_map_table will be called to guarantee the mapping correctness.
|
2019-05-14 12:50:15 +00:00
|
|
|
*/
|
|
|
|
static void
|
2020-05-09 23:22:08 +00:00
|
|
|
sock_map_release(int placement_id)
|
2019-05-14 12:50:15 +00:00
|
|
|
{
|
|
|
|
struct spdk_sock_placement_id_entry *entry;
|
|
|
|
|
|
|
|
pthread_mutex_lock(&g_map_table_mutex);
|
|
|
|
STAILQ_FOREACH(entry, &g_placement_id_map, link) {
|
|
|
|
if (placement_id == entry->placement_id) {
|
|
|
|
assert(entry->ref > 0);
|
|
|
|
entry->ref--;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
pthread_mutex_unlock(&g_map_table_mutex);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Look up the group for a placement_id. */
|
|
|
|
static void
|
2020-05-09 23:22:08 +00:00
|
|
|
sock_map_lookup(int placement_id, struct spdk_sock_group **group)
|
2019-05-14 12:50:15 +00:00
|
|
|
{
|
|
|
|
struct spdk_sock_placement_id_entry *entry;
|
|
|
|
|
|
|
|
*group = NULL;
|
|
|
|
pthread_mutex_lock(&g_map_table_mutex);
|
|
|
|
STAILQ_FOREACH(entry, &g_placement_id_map, link) {
|
|
|
|
if (placement_id == entry->placement_id) {
|
|
|
|
assert(entry->group != NULL);
|
|
|
|
*group = entry->group;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
pthread_mutex_unlock(&g_map_table_mutex);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Remove the socket group from the map table.
 * Called when a polling group is destroyed; this is what guarantees the
 * lazily-kept entries from sock_map_release() never leak.
 */
static void
sock_remove_sock_group_from_map_table(struct spdk_sock_group *group)
{
	struct spdk_sock_placement_id_entry *entry, *tmp;

	pthread_mutex_lock(&g_map_table_mutex);
	/* _SAFE variant because entries are freed while iterating. */
	STAILQ_FOREACH_SAFE(entry, &g_placement_id_map, link, tmp) {
		if (entry->group == group) {
			STAILQ_REMOVE(&g_placement_id_map, entry, spdk_sock_placement_id_entry, link);
			free(entry);
		}
	}
	pthread_mutex_unlock(&g_map_table_mutex);

}
|
|
|
|
|
2020-08-31 16:47:15 +00:00
|
|
|
static int
|
|
|
|
sock_get_placement_id(struct spdk_sock *sock)
|
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
int placement_id;
|
|
|
|
|
|
|
|
if (!sock->placement_id) {
|
|
|
|
rc = sock->net_impl->get_placement_id(sock, &placement_id);
|
|
|
|
if (!rc && (placement_id != 0)) {
|
|
|
|
sock->placement_id = placement_id;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return sock->placement_id;
|
|
|
|
}
|
|
|
|
|
2019-05-14 12:50:15 +00:00
|
|
|
/* Find the preferred polling group for this socket via its placement id.
 * Returns 0 (with *group possibly NULL if no mapping exists) when a
 * placement id is available, -1 when the socket has none.
 */
int
spdk_sock_get_optimal_sock_group(struct spdk_sock *sock, struct spdk_sock_group **group)
{
	int id = sock_get_placement_id(sock);

	if (id == 0) {
		/* No placement information available for this socket. */
		return -1;
	}

	sock_map_lookup(id, group);
	return 0;
}
|
|
|
|
|
2018-02-16 19:04:00 +00:00
|
|
|
/* Retrieve the local (server) and peer (client) address/port of the socket.
 * Delegates directly to the socket implementation's getaddr callback.
 */
int
spdk_sock_getaddr(struct spdk_sock *sock, char *saddr, int slen, uint16_t *sport,
		  char *caddr, int clen, uint16_t *cport)
{
	return sock->net_impl->getaddr(sock, saddr, slen, sport, caddr, clen, cport);
}
|
|
|
|
|
2020-02-19 11:18:51 +00:00
|
|
|
/* Fill opts with library default values.
 * Only fields that fit within the caller-provided opts->opts_size are
 * written, so callers built against an older (smaller) struct stay safe.
 */
void
spdk_sock_get_default_opts(struct spdk_sock_opts *opts)
{
	assert(opts);

	if (SPDK_SOCK_OPTS_FIELD_OK(opts, priority)) {
		opts->priority = SPDK_SOCK_DEFAULT_PRIORITY;
	}

	if (SPDK_SOCK_OPTS_FIELD_OK(opts, zcopy)) {
		opts->zcopy = SPDK_SOCK_DEFAULT_ZCOPY;
	}
}
|
|
|
|
|
|
|
|
/*
 * Initialize the library-local opts from the caller-supplied opts.
 *
 * opts      The opts allocated in the current library.
 * opts_user The opts passed by the caller.
 *
 * Fields absent from the caller's (possibly older, smaller) struct keep
 * their library defaults; present fields are copied over.
 * */
static void
sock_init_opts(struct spdk_sock_opts *opts, struct spdk_sock_opts *opts_user)
{
	assert(opts);
	assert(opts_user);

	/* First populate everything with defaults at the full current size. */
	opts->opts_size = sizeof(*opts);
	spdk_sock_get_default_opts(opts);

	/* reset the size according to the user */
	opts->opts_size = opts_user->opts_size;
	if (SPDK_SOCK_OPTS_FIELD_OK(opts, priority)) {
		opts->priority = opts_user->priority;
	}

	if (SPDK_SOCK_OPTS_FIELD_OK(opts, zcopy)) {
		opts->zcopy = opts_user->zcopy;
	}
}
|
|
|
|
|
2018-02-16 19:04:00 +00:00
|
|
|
struct spdk_sock *
|
sock: Add impl_name parameter in spdk_sock_listen/connect.
Purpose: With this patch,
(1)We can support using different sock implementations in
one application together.
(2)For one IP address managed by kernel, we can use different method
to listen/connect, e.g., posix, or uring. With this patch, we can
designate the specified sock implementation if impl_name is not NULL
and valid. Otherwise, spdk_sock_listen/connect will try to use the sock
implementations in the list by order if impl_name is NULL.
Without this patch, the app will always use the same type of sock implementation
if the order is fixed. For example, if we have posix and uring together,
the first one will always be uring.
Signed-off-by: Ziye Yang <ziye.yang@intel.com>
Change-Id: Ic49563f5025085471d356798e522ff7ab748f586
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/478140
Community-CI: Broadcom SPDK FC-NVMe CI <spdk-ci.pdl@broadcom.com>
Community-CI: SPDK CI Jenkins <sys_sgci@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
2019-12-17 13:57:10 +00:00
|
|
|
spdk_sock_connect(const char *ip, int port, char *impl_name)
|
2020-02-19 11:18:51 +00:00
|
|
|
{
|
|
|
|
struct spdk_sock_opts opts;
|
|
|
|
|
|
|
|
opts.opts_size = sizeof(opts);
|
|
|
|
spdk_sock_get_default_opts(&opts);
|
|
|
|
return spdk_sock_connect_ext(ip, port, impl_name, &opts);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Connect to ip:port using the named socket implementation.
 * Implementation selection: an explicit _impl_name wins, otherwise the
 * configured default impl; if neither is set, every registered impl is
 * tried in list order until one succeeds. Returns NULL on failure.
 */
struct spdk_sock *
spdk_sock_connect_ext(const char *ip, int port, char *_impl_name, struct spdk_sock_opts *opts)
{
	struct spdk_net_impl *impl = NULL;
	struct spdk_sock *sock;
	struct spdk_sock_opts opts_local;
	const char *impl_name = NULL;

	if (opts == NULL) {
		SPDK_ERRLOG("the opts should not be NULL pointer\n");
		return NULL;
	}

	if (_impl_name) {
		impl_name = _impl_name;
	} else if (g_default_impl) {
		impl_name = g_default_impl->name;
	}

	STAILQ_FOREACH_FROM(impl, &g_net_impls, link) {
		/* strncmp over strlen(impl->name) + 1 compares the full
		 * NUL-terminated names, i.e. an exact match, not a prefix match. */
		if (impl_name && strncmp(impl_name, impl->name, strlen(impl->name) + 1)) {
			continue;
		}

		SPDK_DEBUGLOG(sock, "Creating a client socket using impl %s\n", impl->name);
		sock_init_opts(&opts_local, opts);
		sock = impl->connect(ip, port, &opts_local);
		if (sock != NULL) {
			/* Copy the contents, both the two structures are the same ABI version */
			memcpy(&sock->opts, &opts_local, sizeof(sock->opts));
			sock->net_impl = impl;
			TAILQ_INIT(&sock->queued_reqs);
			TAILQ_INIT(&sock->pending_reqs);
			return sock;
		}
	}

	return NULL;
}
|
|
|
|
|
|
|
|
struct spdk_sock *
|
sock: Add impl_name parameter in spdk_sock_listen/connect.
Purpose: With this patch,
(1)We can support using different sock implementations in
one application together.
(2)For one IP address managed by kernel, we can use different method
to listen/connect, e.g., posix, or uring. With this patch, we can
designate the specified sock implementation if impl_name is not NULL
and valid. Otherwise, spdk_sock_listen/connect will try to use the sock
implementations in the list by order if impl_name is NULL.
Without this patch, the app will always use the same type of sock implementation
if the order is fixed. For example, if we have posix and uring together,
the first one will always be uring.
Signed-off-by: Ziye Yang <ziye.yang@intel.com>
Change-Id: Ic49563f5025085471d356798e522ff7ab748f586
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/478140
Community-CI: Broadcom SPDK FC-NVMe CI <spdk-ci.pdl@broadcom.com>
Community-CI: SPDK CI Jenkins <sys_sgci@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
2019-12-17 13:57:10 +00:00
|
|
|
spdk_sock_listen(const char *ip, int port, char *impl_name)
|
2020-02-19 11:18:51 +00:00
|
|
|
{
|
|
|
|
struct spdk_sock_opts opts;
|
|
|
|
|
|
|
|
opts.opts_size = sizeof(opts);
|
|
|
|
spdk_sock_get_default_opts(&opts);
|
|
|
|
return spdk_sock_listen_ext(ip, port, impl_name, &opts);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Create a listening socket on ip:port using the named implementation.
 * Implementation selection mirrors spdk_sock_connect_ext(): explicit
 * _impl_name wins, then the configured default, then every registered
 * impl in list order until one succeeds. Returns NULL on failure.
 */
struct spdk_sock *
spdk_sock_listen_ext(const char *ip, int port, char *_impl_name, struct spdk_sock_opts *opts)
{
	struct spdk_net_impl *impl = NULL;
	struct spdk_sock *sock;
	struct spdk_sock_opts opts_local;
	const char *impl_name = NULL;

	if (opts == NULL) {
		SPDK_ERRLOG("the opts should not be NULL pointer\n");
		return NULL;
	}

	if (_impl_name) {
		impl_name = _impl_name;
	} else if (g_default_impl) {
		impl_name = g_default_impl->name;
	}

	STAILQ_FOREACH_FROM(impl, &g_net_impls, link) {
		/* Full-name (not prefix) comparison; see spdk_sock_connect_ext. */
		if (impl_name && strncmp(impl_name, impl->name, strlen(impl->name) + 1)) {
			continue;
		}

		SPDK_DEBUGLOG(sock, "Creating a listening socket using impl %s\n", impl->name);
		sock_init_opts(&opts_local, opts);
		sock = impl->listen(ip, port, &opts_local);
		if (sock != NULL) {
			/* Copy the contents, both the two structures are the same ABI version */
			memcpy(&sock->opts, &opts_local, sizeof(sock->opts));
			sock->net_impl = impl;
			/* Don't need to initialize the request queues for listen
			 * sockets. */
			return sock;
		}
	}

	return NULL;
}
|
|
|
|
|
|
|
|
struct spdk_sock *
|
|
|
|
spdk_sock_accept(struct spdk_sock *sock)
|
|
|
|
{
|
2018-02-16 21:39:03 +00:00
|
|
|
struct spdk_sock *new_sock;
|
|
|
|
|
|
|
|
new_sock = sock->net_impl->accept(sock);
|
|
|
|
if (new_sock != NULL) {
|
2020-02-19 11:18:51 +00:00
|
|
|
/* Inherit the opts from the "accept sock" */
|
|
|
|
new_sock->opts = sock->opts;
|
|
|
|
memcpy(&new_sock->opts, &sock->opts, sizeof(new_sock->opts));
|
2018-02-16 21:39:03 +00:00
|
|
|
new_sock->net_impl = sock->net_impl;
|
2019-08-26 22:03:07 +00:00
|
|
|
TAILQ_INIT(&new_sock->queued_reqs);
|
2019-10-18 17:50:27 +00:00
|
|
|
TAILQ_INIT(&new_sock->pending_reqs);
|
2018-02-16 21:39:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return new_sock;
|
2018-02-16 19:04:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Close the socket and NULL the caller's pointer on success.
 *
 * If callbacks are currently unwinding (cb_cnt > 0), the sock is only
 * flagged closed and the actual close is deferred until the callbacks
 * finish. Returns 0 on success (including the deferred case), or -1 with
 * errno set to EBADF (NULL sock) or EBUSY (still in a sock group).
 */
int
spdk_sock_close(struct spdk_sock **_sock)
{
	struct spdk_sock *sock = *_sock;
	int rc;

	if (sock == NULL) {
		errno = EBADF;
		return -1;
	}

	if (sock->cb_fn != NULL) {
		/* This sock is still part of a sock_group. */
		errno = EBUSY;
		return -1;
	}

	/* Mark closed first so subsequent recv/send calls are rejected. */
	sock->flags.closed = true;

	if (sock->cb_cnt > 0) {
		/* Let the callback unwind before destroying the socket */
		return 0;
	}

	/* Fail all still-queued/pending async requests before tearing down. */
	spdk_sock_abort_requests(sock);

	rc = sock->net_impl->close(sock);
	if (rc == 0) {
		*_sock = NULL;
	}

	return rc;
}
|
|
|
|
|
|
|
|
/* Receive up to len bytes into buf.
 * Returns the implementation's result, or -1 with errno = EBADF for a
 * NULL or already-closed sock.
 */
ssize_t
spdk_sock_recv(struct spdk_sock *sock, void *buf, size_t len)
{
	if (sock == NULL || sock->flags.closed) {
		errno = EBADF;
		return -1;
	}

	return sock->net_impl->recv(sock, buf, len);
}
|
|
|
|
|
2019-02-26 23:58:24 +00:00
|
|
|
/* Scatter-read into the iovec array.
 * Returns the implementation's result, or -1 with errno = EBADF for a
 * NULL or already-closed sock.
 */
ssize_t
spdk_sock_readv(struct spdk_sock *sock, struct iovec *iov, int iovcnt)
{
	if (sock == NULL || sock->flags.closed) {
		errno = EBADF;
		return -1;
	}

	return sock->net_impl->readv(sock, iov, iovcnt);
}
|
|
|
|
|
2018-02-16 19:04:00 +00:00
|
|
|
/* Gather-write from the iovec array.
 * Returns the implementation's result, or -1 with errno = EBADF for a
 * NULL or already-closed sock.
 */
ssize_t
spdk_sock_writev(struct spdk_sock *sock, struct iovec *iov, int iovcnt)
{
	if (sock == NULL || sock->flags.closed) {
		errno = EBADF;
		return -1;
	}

	return sock->net_impl->writev(sock, iov, iovcnt);
}
|
|
|
|
|
2019-08-26 22:03:07 +00:00
|
|
|
/* Queue an asynchronous writev request.
 * Completion is always reported through req->cb_fn; a NULL or closed sock
 * completes immediately with -EBADF.
 */
void
spdk_sock_writev_async(struct spdk_sock *sock, struct spdk_sock_request *req)
{
	assert(req->cb_fn != NULL);

	if (sock == NULL || sock->flags.closed) {
		req->cb_fn(req->cb_arg, -EBADF);
		return;
	}

	sock->net_impl->writev_async(sock, req);
}
|
|
|
|
|
2019-11-21 15:16:18 +00:00
|
|
|
/* Flush buffered data on the socket.
 * Returns 0 immediately when the sock belongs to a polling group, since
 * the group's polling mechanism performs the flushing; otherwise delegates
 * to the implementation. Returns -EBADF for a NULL or closed sock.
 */
int
spdk_sock_flush(struct spdk_sock *sock)
{
	if (sock == NULL || sock->flags.closed) {
		return -EBADF;
	}

	/* Sock is in a polling group, so group polling mechanism will work */
	if (sock->group_impl != NULL) {
		return 0;
	}

	return sock->net_impl->flush(sock);
}
|
|
|
|
|
2018-02-16 19:04:00 +00:00
|
|
|
/* Thin pass-through wrappers: each delegates directly to the socket
 * implementation's corresponding callback. */

/* Set the minimum byte count for receive operations (low watermark). */
int
spdk_sock_set_recvlowat(struct spdk_sock *sock, int nbytes)
{
	return sock->net_impl->set_recvlowat(sock, nbytes);
}

/* Set the receive buffer size, in bytes. */
int
spdk_sock_set_recvbuf(struct spdk_sock *sock, int sz)
{
	return sock->net_impl->set_recvbuf(sock, sz);
}

/* Set the send buffer size, in bytes. */
int
spdk_sock_set_sendbuf(struct spdk_sock *sock, int sz)
{
	return sock->net_impl->set_sendbuf(sock, sz);
}

/* True if the socket's address family is IPv6. */
bool
spdk_sock_is_ipv6(struct spdk_sock *sock)
{
	return sock->net_impl->is_ipv6(sock);
}

/* True if the socket's address family is IPv4. */
bool
spdk_sock_is_ipv4(struct spdk_sock *sock)
{
	return sock->net_impl->is_ipv4(sock);
}

/* True if the socket is currently connected. */
bool
spdk_sock_is_connected(struct spdk_sock *sock)
{
	return sock->net_impl->is_connected(sock);
}
|
|
|
|
|
2018-02-16 19:04:00 +00:00
|
|
|
/* Create a sock group containing one group_impl for each registered net
 * implementation that provides one. ctx is an opaque caller pointer
 * retrievable via spdk_sock_group_get_ctx(). Returns NULL on allocation
 * failure.
 */
struct spdk_sock_group *
spdk_sock_group_create(void *ctx)
{
	struct spdk_net_impl *impl = NULL;
	struct spdk_sock_group *group;
	struct spdk_sock_group_impl *group_impl;

	group = calloc(1, sizeof(*group));
	if (group == NULL) {
		return NULL;
	}

	STAILQ_INIT(&group->group_impls);

	/* impl == NULL makes STAILQ_FOREACH_FROM start at the list head. */
	STAILQ_FOREACH_FROM(impl, &g_net_impls, link) {
		group_impl = impl->group_impl_create();
		/* An impl may decline to create a group_impl; skip it silently. */
		if (group_impl != NULL) {
			STAILQ_INSERT_TAIL(&group->group_impls, group_impl, link);
			TAILQ_INIT(&group_impl->socks);
			group_impl->num_removed_socks = 0;
			group_impl->net_impl = impl;
		}
	}

	group->ctx = ctx;
	return group;
}
|
|
|
|
|
2019-05-14 18:40:20 +00:00
|
|
|
void *
|
|
|
|
spdk_sock_group_get_ctx(struct spdk_sock_group *group)
|
|
|
|
{
|
|
|
|
if (group == NULL) {
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return group->ctx;
|
|
|
|
}
|
|
|
|
|
2018-02-16 19:04:00 +00:00
|
|
|
/* Add a socket to the group and register its data-ready callback.
 * On success the sock is linked into the matching group_impl and, if the
 * sock has a placement id, the placement map gains a reference.
 * Returns 0 on success; -1 with errno = EINVAL (no cb_fn / no matching
 * impl) or EBUSY (already in a group); or the impl's add result.
 */
int
spdk_sock_group_add_sock(struct spdk_sock_group *group, struct spdk_sock *sock,
			 spdk_sock_cb cb_fn, void *cb_arg)
{
	struct spdk_sock_group_impl *group_impl = NULL;
	int rc, placement_id = 0;

	if (cb_fn == NULL) {
		errno = EINVAL;
		return -1;
	}

	if (sock->group_impl != NULL) {
		/*
		 * This sock is already part of a sock_group. Currently we don't
		 * support this.
		 */
		errno = EBUSY;
		return -1;
	}

	placement_id = sock_get_placement_id(sock);
	if (placement_id != 0) {
		rc = sock_map_insert(placement_id, group);
		if (rc < 0) {
			return -1;
		}
	}

	/* Find the group_impl belonging to this sock's net implementation. */
	STAILQ_FOREACH_FROM(group_impl, &group->group_impls, link) {
		if (sock->net_impl == group_impl->net_impl) {
			break;
		}
	}

	if (group_impl == NULL) {
		errno = EINVAL;
		return -1;
	}

	rc = group_impl->net_impl->group_impl_add_sock(group_impl, sock);
	if (rc == 0) {
		TAILQ_INSERT_TAIL(&group_impl->socks, sock, link);
		sock->group_impl = group_impl;
		sock->cb_fn = cb_fn;
		sock->cb_arg = cb_arg;
	}

	return rc;
}
|
|
|
|
|
|
|
|
/* Remove a socket from its group.
 * On success the sock is unlinked and also recorded in the group_impl's
 * removed_socks array, so the poller can ignore events already queued for
 * it (epoll event-cache hazard; see sock_group_impl_poll_count).
 * Returns 0 on success; -1 with errno = EINVAL when the sock's impl has
 * no group_impl in this group; or the impl's remove result.
 */
int
spdk_sock_group_remove_sock(struct spdk_sock_group *group, struct spdk_sock *sock)
{
	struct spdk_sock_group_impl *group_impl = NULL;
	int rc, placement_id = 0;

	STAILQ_FOREACH_FROM(group_impl, &group->group_impls, link) {
		if (sock->net_impl == group_impl->net_impl) {
			break;
		}
	}

	if (group_impl == NULL) {
		errno = EINVAL;
		return -1;
	}

	assert(group_impl == sock->group_impl);

	placement_id = sock_get_placement_id(sock);
	if (placement_id != 0) {
		sock_map_release(placement_id);
	}

	rc = group_impl->net_impl->group_impl_remove_sock(group_impl, sock);
	if (rc == 0) {
		TAILQ_REMOVE(&group_impl->socks, sock, link);
		/* Record the removal so in-flight poll events for this sock
		 * can be skipped by the poller. */
		assert(group_impl->num_removed_socks < MAX_EVENTS_PER_POLL);
		group_impl->removed_socks[group_impl->num_removed_socks] = (uintptr_t)sock;
		group_impl->num_removed_socks++;
		sock->group_impl = NULL;
		sock->cb_fn = NULL;
		sock->cb_arg = NULL;
	}

	return rc;
}
|
|
|
|
|
|
|
|
/* Poll a socket group with the default event cap (MAX_EVENTS_PER_POLL). */
int
spdk_sock_group_poll(struct spdk_sock_group *group)
{
	return spdk_sock_group_poll_count(group, MAX_EVENTS_PER_POLL);
}
|
|
|
|
|
2018-02-19 13:38:50 +00:00
|
|
|
/* Poll one group_impl for up to max_events events and invoke each ready
 * socket's callback, skipping sockets removed during this poll cycle.
 * Returns the number of events reported by the module, or -1 on error. */
static int
sock_group_impl_poll_count(struct spdk_sock_group_impl *group_impl,
			   struct spdk_sock_group *group,
			   int max_events)
{
	struct spdk_sock *socks[MAX_EVENTS_PER_POLL];
	int num_events, i;

	if (TAILQ_EMPTY(&group_impl->socks)) {
		return 0;
	}

	/* The number of removed sockets should be reset for each call to poll. */
	group_impl->num_removed_socks = 0;

	num_events = group_impl->net_impl->group_impl_poll(group_impl, max_events, socks);
	if (num_events == -1) {
		return -1;
	}

	for (i = 0; i < num_events; i++) {
		struct spdk_sock *sock = socks[i];
		int j;
		bool valid = true;
		/* A callback earlier in this batch may have removed this socket
		 * from the group (see epoll(7) event-cache caveat); if so, its
		 * cb_fn is NULL and it must not be dispatched. */
		for (j = 0; j < group_impl->num_removed_socks; j++) {
			if ((uintptr_t)sock == group_impl->removed_socks[j]) {
				valid = false;
				break;
			}
		}

		if (valid) {
			assert(sock->cb_fn != NULL);
			sock->cb_fn(sock->cb_arg, group, sock);
		}
	}

	return num_events;
}
|
|
|
|
|
|
|
|
int
|
|
|
|
spdk_sock_group_poll_count(struct spdk_sock_group *group, int max_events)
|
|
|
|
{
|
|
|
|
struct spdk_sock_group_impl *group_impl = NULL;
|
2019-07-31 02:08:30 +00:00
|
|
|
int rc, num_events = 0;
|
2018-02-19 13:38:50 +00:00
|
|
|
|
2018-02-16 21:18:23 +00:00
|
|
|
if (max_events < 1) {
|
|
|
|
errno = -EINVAL;
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Only poll for up to 32 events at a time - if more events are pending,
|
|
|
|
* the next call to this function will reap them.
|
|
|
|
*/
|
|
|
|
if (max_events > MAX_EVENTS_PER_POLL) {
|
|
|
|
max_events = MAX_EVENTS_PER_POLL;
|
|
|
|
}
|
|
|
|
|
2018-02-19 13:38:50 +00:00
|
|
|
STAILQ_FOREACH_FROM(group_impl, &group->group_impls, link) {
|
2020-05-09 23:22:08 +00:00
|
|
|
rc = sock_group_impl_poll_count(group_impl, group, max_events);
|
2019-07-31 02:08:30 +00:00
|
|
|
if (rc < 0) {
|
|
|
|
num_events = -1;
|
2018-02-19 13:38:50 +00:00
|
|
|
SPDK_ERRLOG("group_impl_poll_count for net(%s) failed\n",
|
|
|
|
group_impl->net_impl->name);
|
2019-07-31 02:08:30 +00:00
|
|
|
} else if (num_events >= 0) {
|
|
|
|
num_events += rc;
|
2018-02-19 13:38:50 +00:00
|
|
|
}
|
2018-02-16 21:18:23 +00:00
|
|
|
}
|
|
|
|
|
2019-07-31 02:08:30 +00:00
|
|
|
return num_events;
|
2018-02-16 19:04:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int
spdk_sock_group_close(struct spdk_sock_group **group)
{
	struct spdk_sock_group_impl *group_impl = NULL, *tmp;
	int rc;

	if (*group == NULL) {
		errno = EBADF;
		return -1;
	}

	/* First pass: refuse to close if any group_impl still owns sockets.
	 * This check runs before any impl is closed so the close is all-or-nothing. */
	STAILQ_FOREACH_SAFE(group_impl, &(*group)->group_impls, link, tmp) {
		if (!TAILQ_EMPTY(&group_impl->socks)) {
			errno = EBUSY;
			return -1;
		}
	}

	/* Second pass: close every module's group_impl. Failures are logged
	 * but do not abort - the group is torn down regardless. */
	STAILQ_FOREACH_SAFE(group_impl, &(*group)->group_impls, link, tmp) {
		rc = group_impl->net_impl->group_impl_close(group_impl);
		if (rc != 0) {
			SPDK_ERRLOG("group_impl_close for net(%s) failed\n",
				    group_impl->net_impl->name);
		}
	}

	/* Remove any placement-map entries that still reference this group. */
	sock_remove_sock_group_from_map_table(*group);
	free(*group);
	*group = NULL;

	return 0;
}
|
2018-02-16 18:14:32 +00:00
|
|
|
|
2020-01-28 11:54:47 +00:00
|
|
|
static inline struct spdk_net_impl *
|
|
|
|
sock_get_impl_by_name(const char *impl_name)
|
|
|
|
{
|
|
|
|
struct spdk_net_impl *impl;
|
|
|
|
|
|
|
|
assert(impl_name != NULL);
|
|
|
|
STAILQ_FOREACH(impl, &g_net_impls, link) {
|
|
|
|
if (0 == strcmp(impl_name, impl->name)) {
|
|
|
|
return impl;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
spdk_sock_impl_get_opts(const char *impl_name, struct spdk_sock_impl_opts *opts, size_t *len)
|
|
|
|
{
|
|
|
|
struct spdk_net_impl *impl;
|
|
|
|
|
|
|
|
if (!impl_name || !opts || !len) {
|
|
|
|
errno = EINVAL;
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
impl = sock_get_impl_by_name(impl_name);
|
|
|
|
if (!impl) {
|
|
|
|
errno = EINVAL;
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!impl->get_opts) {
|
|
|
|
errno = ENOTSUP;
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return impl->get_opts(opts, len);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
spdk_sock_impl_set_opts(const char *impl_name, const struct spdk_sock_impl_opts *opts, size_t len)
|
|
|
|
{
|
|
|
|
struct spdk_net_impl *impl;
|
|
|
|
|
|
|
|
if (!impl_name || !opts) {
|
|
|
|
errno = EINVAL;
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
impl = sock_get_impl_by_name(impl_name);
|
|
|
|
if (!impl) {
|
|
|
|
errno = EINVAL;
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!impl->set_opts) {
|
|
|
|
errno = ENOTSUP;
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return impl->set_opts(opts, len);
|
|
|
|
}
|
|
|
|
|
2020-01-28 22:18:16 +00:00
|
|
|
/* Emit the sock subsystem configuration as a JSON array of RPC method
 * invocations ("sock_set_default_impl" followed by one
 * "sock_impl_set_options" object per module that exposes get_opts). */
void
spdk_sock_write_config_json(struct spdk_json_write_ctx *w)
{
	struct spdk_net_impl *impl;
	struct spdk_sock_impl_opts opts;
	size_t len;

	assert(w != NULL);

	spdk_json_write_array_begin(w);

	/* Record the configured default implementation first, if any. */
	if (g_default_impl) {
		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "sock_set_default_impl");
		spdk_json_write_named_object_begin(w, "params");
		spdk_json_write_named_string(w, "impl_name", g_default_impl->name);
		spdk_json_write_object_end(w);
		spdk_json_write_object_end(w);
	}

	STAILQ_FOREACH(impl, &g_net_impls, link) {
		/* Modules without a get_opts hook have nothing to serialize. */
		if (!impl->get_opts) {
			continue;
		}

		len = sizeof(opts);
		if (impl->get_opts(&opts, &len) == 0) {
			spdk_json_write_object_begin(w);
			spdk_json_write_named_string(w, "method", "sock_impl_set_options");
			spdk_json_write_named_object_begin(w, "params");
			spdk_json_write_named_string(w, "impl_name", impl->name);
			spdk_json_write_named_uint32(w, "recv_buf_size", opts.recv_buf_size);
			spdk_json_write_named_uint32(w, "send_buf_size", opts.send_buf_size);
			spdk_json_write_named_bool(w, "enable_recv_pipe", opts.enable_recv_pipe);
			spdk_json_write_named_bool(w, "enable_zerocopy_send", opts.enable_zerocopy_send);
			spdk_json_write_named_bool(w, "enable_quickack", opts.enable_quickack);
			spdk_json_write_named_bool(w, "enable_placement_id", opts.enable_placement_id);
			spdk_json_write_object_end(w);
			spdk_json_write_object_end(w);
		} else {
			SPDK_ERRLOG("Failed to get socket options for socket implementation %s\n", impl->name);
		}
	}

	spdk_json_write_array_end(w);
}
|
|
|
|
|
2018-02-16 18:14:32 +00:00
|
|
|
void
|
2020-01-07 14:23:16 +00:00
|
|
|
spdk_net_impl_register(struct spdk_net_impl *impl, int priority)
|
2018-02-16 18:14:32 +00:00
|
|
|
{
|
2020-01-07 14:23:16 +00:00
|
|
|
struct spdk_net_impl *cur, *prev;
|
|
|
|
|
|
|
|
impl->priority = priority;
|
|
|
|
prev = NULL;
|
|
|
|
STAILQ_FOREACH(cur, &g_net_impls, link) {
|
|
|
|
if (impl->priority > cur->priority) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
prev = cur;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (prev) {
|
|
|
|
STAILQ_INSERT_AFTER(&g_net_impls, prev, impl, link);
|
2018-02-19 13:38:50 +00:00
|
|
|
} else {
|
|
|
|
STAILQ_INSERT_HEAD(&g_net_impls, impl, link);
|
|
|
|
}
|
2018-02-16 18:14:32 +00:00
|
|
|
}
|
2020-09-21 11:07:52 +00:00
|
|
|
|
|
|
|
int spdk_sock_set_default_impl(const char *impl_name)
|
|
|
|
{
|
|
|
|
struct spdk_net_impl *impl;
|
|
|
|
|
|
|
|
if (!impl_name) {
|
|
|
|
errno = EINVAL;
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
impl = sock_get_impl_by_name(impl_name);
|
|
|
|
if (!impl) {
|
|
|
|
errno = EINVAL;
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (impl == g_default_impl) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (g_default_impl) {
|
2020-09-04 11:27:29 +00:00
|
|
|
SPDK_DEBUGLOG(sock, "Change the default sock impl from %s to %s\n", g_default_impl->name,
|
2020-09-21 11:07:52 +00:00
|
|
|
impl->name);
|
|
|
|
} else {
|
2020-09-04 11:27:29 +00:00
|
|
|
SPDK_DEBUGLOG(sock, "Set default sock implementation to %s\n", impl_name);
|
2020-09-21 11:07:52 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
g_default_impl = impl;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-09-04 11:27:29 +00:00
|
|
|
/* Register the "sock" log component used by the SPDK_DEBUGLOG() calls above. */
SPDK_LOG_REGISTER_COMPONENT(sock)
|