net/mlx5: standardize on negative errno values
Set rte_errno systematically as well.

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
This commit is contained in:
parent 925061b58b
commit a6d83b6a92
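Before the diff, a minimal sketch of the convention this patch applies throughout the driver (illustration only, not part of the commit; mlx5_example_op() and EXAMPLE_LIMIT are hypothetical names): on failure, store a positive errno code in rte_errno, then return its negation, so callers can test for a negative result and still read rte_errno afterwards.

#include <errno.h>
#include <rte_errno.h>

#define EXAMPLE_LIMIT 16 /* Hypothetical bound, for illustration only. */

/* Hypothetical helper showing the error convention used by this patch. */
static int
mlx5_example_op(unsigned int n)
{
	if (n > EXAMPLE_LIMIT) {
		/* Record the positive errno code for the caller... */
		rte_errno = EINVAL;
		/* ...and return its negation. */
		return -rte_errno;
	}
	return 0; /* Success. */
}

A caller can then write ret = mlx5_example_op(n); and on ret < 0 consult rte_errno, without the old ambiguity between positive errno, negative errno and -1 return styles that the diff below removes.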
@@ -108,7 +108,7 @@ mlx5_getenv_int(const char *name)
* A pointer to the callback data.
*
* @return
* a pointer to the allocate space.
* Allocated buffer, NULL otherwise and rte_errno is set.
*/
static void *
mlx5_alloc_verbs_buf(size_t size, void *data)
@@ -130,6 +130,8 @@ mlx5_alloc_verbs_buf(size_t size, void *data)
}
assert(data != NULL);
ret = rte_malloc_socket(__func__, size, alignment, socket);
if (!ret && size)
rte_errno = ENOMEM;
DEBUG("Extern alloc size: %lu, align: %lu: %p", size, alignment, ret);
return ret;
}
@@ -365,7 +367,7 @@ mlx5_dev_idx(struct rte_pci_addr *pci_addr)
* User data.
*
* @return
* 0 on success, negative errno value on failure.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_args_check(const char *key, const char *val, void *opaque)
@@ -376,8 +378,9 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
errno = 0;
tmp = strtoul(val, NULL, 0);
if (errno) {
rte_errno = errno;
WARN("%s: \"%s\" is not a valid integer", key, val);
return errno;
return -rte_errno;
}
if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
config->cqe_comp = !!tmp;
@@ -397,7 +400,8 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
config->rx_vec_en = !!tmp;
} else {
WARN("%s: unknown parameter", key);
return -EINVAL;
rte_errno = EINVAL;
return -rte_errno;
}
return 0;
}
@@ -411,7 +415,7 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
* Device arguments structure.
*
* @return
* 0 on success, errno value on failure.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
@@ -442,9 +446,10 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
if (rte_kvargs_count(kvlist, params[i])) {
ret = rte_kvargs_process(kvlist, params[i],
mlx5_args_check, config);
if (ret != 0) {
if (ret) {
rte_errno = EINVAL;
rte_kvargs_free(kvlist);
return ret;
return -rte_errno;
}
}
}
@@ -470,7 +475,7 @@ static void *uar_base;
* Pointer to Ethernet device.
*
* @return
* 0 on success, errno value on failure.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_uar_init_primary(struct rte_eth_dev *dev)
@@ -479,7 +484,6 @@ mlx5_uar_init_primary(struct rte_eth_dev *dev)
void *addr = (void *)0;
int i;
const struct rte_mem_config *mcfg;
int ret;

if (uar_base) { /* UAR address space mapped. */
priv->uar_base = uar_base;
@@ -501,8 +505,8 @@ mlx5_uar_init_primary(struct rte_eth_dev *dev)
if (addr == MAP_FAILED) {
ERROR("Failed to reserve UAR address space, please adjust "
"MLX5_UAR_SIZE or try --base-virtaddr");
ret = ENOMEM;
return ret;
rte_errno = ENOMEM;
return -rte_errno;
}
/* Accept either same addr or a new addr returned from mmap if target
* range occupied.
@@ -521,14 +525,13 @@ mlx5_uar_init_primary(struct rte_eth_dev *dev)
* Pointer to Ethernet device.
*
* @return
* 0 on success, errno value on failure.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_uar_init_secondary(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
void *addr;
int ret;

assert(priv->uar_base);
if (uar_base) { /* already reserved. */
@@ -541,15 +544,15 @@ mlx5_uar_init_secondary(struct rte_eth_dev *dev)
if (addr == MAP_FAILED) {
ERROR("UAR mmap failed: %p size: %llu",
priv->uar_base, MLX5_UAR_SIZE);
ret = ENXIO;
return ret;
rte_errno = ENXIO;
return -rte_errno;
}
if (priv->uar_base != addr) {
ERROR("UAR address %p size %llu occupied, please adjust "
"MLX5_UAR_OFFSET or try EAL parameter --base-virtaddr",
priv->uar_base, MLX5_UAR_SIZE);
ret = ENXIO;
return ret;
rte_errno = ENXIO;
return -rte_errno;
}
uar_base = addr; /* process local, don't reserve again */
INFO("Reserved UAR address space: %p", addr);
@@ -568,13 +571,13 @@ mlx5_uar_init_secondary(struct rte_eth_dev *dev)
* PCI device information.
*
* @return
* 0 on success, negative errno value on failure.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
struct ibv_device **list;
struct ibv_device **list = NULL;
struct ibv_device *ibv_dev;
int err = 0;
struct ibv_context *attr_ctx = NULL;
@@ -594,7 +597,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
idx = mlx5_dev_idx(&pci_dev->addr);
if (idx == -1) {
ERROR("this driver cannot support any more adapters");
return -ENOMEM;
err = ENOMEM;
goto error;
}
DEBUG("using driver device index %d", idx);
/* Save PCI address. */
@@ -602,9 +606,10 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
list = mlx5_glue->get_device_list(&i);
if (list == NULL) {
assert(errno);
err = errno;
if (errno == ENOSYS)
ERROR("cannot list devices, is ib_uverbs loaded?");
return -errno;
goto error;
}
assert(i >= 0);
/*
@@ -626,7 +631,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
INFO("PCI information matches, using device \"%s\"",
list[i]->name);
attr_ctx = mlx5_glue->open_device(list[i]);
err = errno;
rte_errno = errno;
err = rte_errno;
break;
}
if (attr_ctx == NULL) {
@@ -634,13 +640,12 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
switch (err) {
case 0:
ERROR("cannot access device, is mlx5_ib loaded?");
return -ENODEV;
err = ENODEV;
goto error;
case EINVAL:
ERROR("cannot use device, are drivers up to date?");
return -EINVAL;
goto error;
}
assert(err > 0);
return -err;
}
ibv_dev = list[i];
DEBUG("device opened");
@@ -680,8 +685,10 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
#else
WARN("Tunnel offloading disabled due to old OFED/rdma-core version");
#endif
if (mlx5_glue->query_device_ex(attr_ctx, NULL, &device_attr))
if (mlx5_glue->query_device_ex(attr_ctx, NULL, &device_attr)) {
err = errno;
goto error;
}
INFO("%u port(s) detected", device_attr.orig_attr.phys_port_cnt);
for (i = 0; i < device_attr.orig_attr.phys_port_cnt; i++) {
char name[RTE_ETH_NAME_MAX_LEN];
@@ -718,22 +725,19 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
eth_dev = rte_eth_dev_attach_secondary(name);
if (eth_dev == NULL) {
ERROR("can not attach rte ethdev");
err = ENOMEM;
rte_errno = ENOMEM;
err = rte_errno;
goto error;
}
eth_dev->device = &pci_dev->device;
eth_dev->dev_ops = &mlx5_dev_sec_ops;
err = mlx5_uar_init_secondary(eth_dev);
if (err < 0) {
err = -err;
if (err)
goto error;
}
/* Receive command fd from primary process */
err = mlx5_socket_connect(eth_dev);
if (err < 0) {
err = -err;
if (err)
goto error;
}
/* Remap UAR for Tx queues. */
err = mlx5_tx_uar_remap(eth_dev, err);
if (err)
@@ -804,6 +808,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
}
if (mlx5_glue->query_device_ex(ctx, NULL, &device_attr_ex)) {
ERROR("ibv_query_device_ex() failed");
err = errno;
goto port_error;
}
config.hw_csum = !!(device_attr_ex.device_cap_flags_ex &
@@ -899,7 +904,9 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
}
#endif
/* Get actual MTU if possible. */
mlx5_get_mtu(eth_dev, &priv->mtu);
err = mlx5_get_mtu(eth_dev, &priv->mtu);
if (err)
goto port_error;
DEBUG("port %u MTU is %u", priv->port, priv->mtu);
/*
* Initialize burst functions to prevent crashes before link-up.
@@ -943,16 +950,19 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
*/
/* no port found, complain */
if (!mlx5_dev[idx].ports) {
err = ENODEV;
goto error;
rte_errno = ENODEV;
err = rte_errno;
}
error:
if (attr_ctx)
claim_zero(mlx5_glue->close_device(attr_ctx));
if (list)
mlx5_glue->free_device_list(list);
assert(err >= 0);
return -err;
if (err) {
rte_errno = err;
return -rte_errno;
}
return 0;
}

static const struct rte_pci_id mlx5_pci_id_map[] = {

@@ -101,7 +101,7 @@ struct ethtool_link_settings {
* Interface name output buffer.
*
* @return
* 0 on success, -1 on failure and errno is set.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE])
@@ -117,8 +117,10 @@ mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE])
MKSTR(path, "%s/device/net", priv->ibdev_path);

dir = opendir(path);
if (dir == NULL)
return -1;
if (dir == NULL) {
rte_errno = errno;
return -rte_errno;
}
}
while ((dent = readdir(dir)) != NULL) {
char *name = dent->d_name;
@@ -168,8 +170,10 @@ mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE])
snprintf(match, sizeof(match), "%s", name);
}
closedir(dir);
if (match[0] == '\0')
return -1;
if (match[0] == '\0') {
rte_errno = ENOENT;
return -rte_errno;
}
strncpy(*ifname, match, sizeof(*ifname));
return 0;
}
@@ -185,20 +189,31 @@ mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE])
* Interface request structure output buffer.
*
* @return
* 0 on success, -1 on failure and errno is set.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr)
{
int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
int ret = -1;
int ret = 0;

if (sock == -1)
return ret;
if (mlx5_get_ifname(dev, &ifr->ifr_name) == 0)
ret = ioctl(sock, req, ifr);
if (sock == -1) {
rte_errno = errno;
return -rte_errno;
}
ret = mlx5_get_ifname(dev, &ifr->ifr_name);
if (ret)
goto error;
ret = ioctl(sock, req, ifr);
if (ret == -1) {
rte_errno = errno;
goto error;
}
close(sock);
return ret;
return 0;
error:
close(sock);
return -rte_errno;
}

/**
@@ -210,7 +225,7 @@ mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr)
* MTU value output buffer.
*
* @return
* 0 on success, -1 on failure and errno is set.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu)
@@ -233,7 +248,7 @@ mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu)
* MTU value to set.
*
* @return
* 0 on success, -1 on failure and errno is set.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
@@ -254,7 +269,7 @@ mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
* Bitmask for flags to modify.
*
* @return
* 0 on success, -1 on failure and errno is set.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags)
@@ -276,7 +291,7 @@ mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags)
* Pointer to Ethernet device structure.
*
* @return
* 0 on success, negative errno value on failure.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_dev_configure(struct rte_eth_dev *dev)
@@ -295,31 +310,36 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
(mlx5_get_rx_port_offloads() |
mlx5_get_rx_queue_offloads(dev));
uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
int ret = 0;

if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
ERROR("Some Tx offloads are not supported "
"requested 0x%" PRIx64 " supported 0x%" PRIx64,
tx_offloads, supp_tx_offloads);
return ENOTSUP;
rte_errno = ENOTSUP;
return -rte_errno;
}
if ((rx_offloads & supp_rx_offloads) != rx_offloads) {
ERROR("Some Rx offloads are not supported "
"requested 0x%" PRIx64 " supported 0x%" PRIx64,
rx_offloads, supp_rx_offloads);
return ENOTSUP;
rte_errno = ENOTSUP;
return -rte_errno;
}
if (use_app_rss_key &&
(dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
rss_hash_default_key_len)) {
/* MLX5 RSS only support 40bytes key. */
return EINVAL;
rte_errno = EINVAL;
return -rte_errno;
}
priv->rss_conf.rss_key =
rte_realloc(priv->rss_conf.rss_key,
rss_hash_default_key_len, 0);
if (!priv->rss_conf.rss_key) {
ERROR("cannot allocate RSS hash key memory (%u)", rxqs_n);
return ENOMEM;
rte_errno = ENOMEM;
return -rte_errno;
}
memcpy(priv->rss_conf.rss_key,
use_app_rss_key ?
@@ -337,7 +357,8 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
}
if (rxqs_n > priv->config.ind_table_max_size) {
ERROR("cannot handle this many RX queues (%u)", rxqs_n);
return EINVAL;
rte_errno = EINVAL;
return -rte_errno;
}
if (rxqs_n == priv->rxqs_n)
return 0;
@@ -350,8 +371,9 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ?
priv->config.ind_table_max_size :
rxqs_n));
if (mlx5_rss_reta_index_resize(dev, reta_idx_n))
return ENOMEM;
ret = mlx5_rss_reta_index_resize(dev, reta_idx_n);
if (ret)
return ret;
/* When the number of RX queues is not a power of two, the remaining
* table entries are padded with reused WQs and hashes are not spread
* uniformly. */
@@ -361,7 +383,6 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
j = 0;
}
return 0;

}

/**
@@ -452,7 +473,7 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
* Pointer to Ethernet device structure.
*
* @return
* 0 on success, -1 on error.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev)
@@ -464,19 +485,22 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev)
struct ifreq ifr;
struct rte_eth_link dev_link;
int link_speed = 0;
int ret;

if (mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr)) {
WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
return -1;
ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
if (ret) {
WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(rte_errno));
return ret;
}
memset(&dev_link, 0, sizeof(dev_link));
dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
(ifr.ifr_flags & IFF_RUNNING));
ifr.ifr_data = (void *)&edata;
if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) {
ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
WARN("ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
strerror(errno));
return -1;
strerror(rte_errno));
return ret;
}
link_speed = ethtool_cmd_speed(&edata);
if (link_speed == -1)
@@ -506,7 +530,8 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev)
return 0;
}
/* Link status is still the same. */
return -1;
rte_errno = EAGAIN;
return -rte_errno;
}

/**
@@ -516,7 +541,7 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev)
* Pointer to Ethernet device structure.
*
* @return
* 0 on success, -1 on error.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev)
@@ -526,19 +551,22 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev)
struct ifreq ifr;
struct rte_eth_link dev_link;
uint64_t sc;
int ret;

if (mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr)) {
WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
return -1;
ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
if (ret) {
WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(rte_errno));
return ret;
}
memset(&dev_link, 0, sizeof(dev_link));
dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
(ifr.ifr_flags & IFF_RUNNING));
ifr.ifr_data = (void *)&gcmd;
if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) {
ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
strerror(errno));
return -1;
strerror(rte_errno));
return ret;
}
gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords;

@@ -549,10 +577,11 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev)

*ecmd = gcmd;
ifr.ifr_data = (void *)ecmd;
if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) {
ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
strerror(errno));
return -1;
strerror(rte_errno));
return ret;
}
dev_link.link_speed = ecmd->speed;
sc = ecmd->link_mode_masks[0] |
@@ -602,7 +631,8 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev)
return 0;
}
/* Link status is still the same. */
return -1;
rte_errno = EAGAIN;
return -rte_errno;
}

/**
@@ -615,18 +645,21 @@ static void
mlx5_link_start(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
int err;
int ret;

dev->tx_pkt_burst = mlx5_select_tx_function(dev);
dev->rx_pkt_burst = mlx5_select_rx_function(dev);
err = mlx5_traffic_enable(dev);
if (err)
ret = mlx5_traffic_enable(dev);
if (ret) {
ERROR("%p: error occurred while configuring control flows: %s",
(void *)dev, strerror(err));
err = mlx5_flow_start(dev, &priv->flows);
if (err)
(void *)dev, strerror(rte_errno));
return;
}
ret = mlx5_flow_start(dev, &priv->flows);
if (ret) {
ERROR("%p: error occurred while configuring flows: %s",
(void *)dev, strerror(err));
(void *)dev, strerror(rte_errno));
}
}

/**
@@ -656,7 +689,7 @@ mlx5_link_stop(struct rte_eth_dev *dev)
* Link desired status.
*
* @return
* 0 on success, negative errno value on failure.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_force_link_status_change(struct rte_eth_dev *dev, int status)
@@ -670,7 +703,8 @@ mlx5_force_link_status_change(struct rte_eth_dev *dev, int status)
try++;
sleep(1);
}
return -EAGAIN;
rte_errno = EAGAIN;
return -rte_errno;
}

/**
@@ -682,7 +716,7 @@ mlx5_force_link_status_change(struct rte_eth_dev *dev, int status)
* Wait for request completion (ignored).
*
* @return
* 0 on success, -1 on error.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
@@ -699,10 +733,12 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
ret = mlx5_link_update_unlocked_gset(dev);
else
ret = mlx5_link_update_unlocked_gs(dev);
if (ret)
return ret;
/* If lsc interrupt is disabled, should always be ready for traffic. */
if (!dev->data->dev_conf.intr_conf.lsc) {
mlx5_link_start(dev);
return ret;
return 0;
}
/* Re-select burst callbacks only if link status has been changed. */
if (!ret && dev_link.link_status != dev->data->dev_link.link_status) {
@@ -711,7 +747,7 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
else
mlx5_link_stop(dev);
}
return ret;
return 0;
}

/**
@@ -723,36 +759,32 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
* New MTU.
*
* @return
* 0 on success, negative errno value on failure.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
struct priv *priv = dev->data->dev_private;
uint16_t kern_mtu;
int ret = 0;
uint16_t kern_mtu = 0;
int ret;

ret = mlx5_get_mtu(dev, &kern_mtu);
if (ret)
goto out;
return ret;
/* Set kernel interface MTU first. */
ret = mlx5_set_mtu(dev, mtu);
if (ret)
goto out;
return ret;
ret = mlx5_get_mtu(dev, &kern_mtu);
if (ret)
goto out;
return ret;
if (kern_mtu == mtu) {
priv->mtu = mtu;
DEBUG("adapter port %u MTU set to %u", priv->port, mtu);
return 0;
}
return 0;
out:
ret = errno;
WARN("cannot set port %u MTU to %u: %s", priv->port, mtu,
strerror(ret));
assert(ret >= 0);
return -ret;
rte_errno = EAGAIN;
return -rte_errno;
}

/**
@@ -764,7 +796,7 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
* Flow control output buffer.
*
* @return
* 0 on success, negative errno value on failure.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
@@ -776,11 +808,11 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
int ret;

ifr.ifr_data = (void *)&ethpause;
if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) {
ret = errno;
ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed: %s",
strerror(ret));
goto out;
strerror(rte_errno));
return ret;
}
fc_conf->autoneg = ethpause.autoneg;
if (ethpause.rx_pause && ethpause.tx_pause)
@@ -791,10 +823,7 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
fc_conf->mode = RTE_FC_TX_PAUSE;
else
fc_conf->mode = RTE_FC_NONE;
ret = 0;
out:
assert(ret >= 0);
return -ret;
return 0;
}

/**
@@ -806,7 +835,7 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
* Flow control parameters.
*
* @return
* 0 on success, negative errno value on failure.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
@@ -830,17 +859,14 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
ethpause.tx_pause = 1;
else
ethpause.tx_pause = 0;
if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) {
ret = errno;
ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
WARN("ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
" failed: %s",
strerror(ret));
goto out;
strerror(rte_errno));
return ret;
}
ret = 0;
out:
assert(ret >= 0);
return -ret;
return 0;
}

/**
@@ -852,7 +878,7 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
* PCI bus address output buffer.
*
* @return
* 0 on success, -1 on failure and errno is set.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
@@ -863,8 +889,10 @@ mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
MKSTR(path, "%s/device/uevent", device->ibdev_path);

file = fopen(path, "rb");
if (file == NULL)
return -1;
if (file == NULL) {
rte_errno = errno;
return -rte_errno;
}
while (fgets(line, sizeof(line), file) == line) {
size_t len = strlen(line);
int ret;
@@ -900,15 +928,19 @@ mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
* Pointer to Ethernet device.
*
* @return
* Zero if the callback process can be called immediately.
* Zero if the callback process can be called immediately, negative errno
* value otherwise and rte_errno is set.
*/
static int
mlx5_link_status_update(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
struct rte_eth_link *link = &dev->data->dev_link;
int ret;

mlx5_link_update(dev, 0);
ret = mlx5_link_update(dev, 0);
if (ret)
return ret;
if (((link->link_speed == 0) && link->link_status) ||
((link->link_speed != 0) && !link->link_status)) {
/*
@@ -1062,12 +1094,13 @@ void
mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
int rc, flags;
int ret;
int flags;

assert(priv->ctx->async_fd > 0);
flags = fcntl(priv->ctx->async_fd, F_GETFL);
rc = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
if (rc < 0) {
ret = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
if (ret) {
INFO("failed to change file descriptor async event queue");
dev->data->dev_conf.intr_conf.lsc = 0;
dev->data->dev_conf.intr_conf.rmv = 0;
@@ -1079,8 +1112,10 @@ mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev)
rte_intr_callback_register(&priv->intr_handle,
mlx5_dev_interrupt_handler, dev);
}
rc = mlx5_socket_init(dev);
if (!rc && priv->primary_socket) {
ret = mlx5_socket_init(dev);
if (ret)
ERROR("cannot initialise socket: %s", strerror(rte_errno));
else if (priv->primary_socket) {
priv->intr_handle_socket.fd = priv->primary_socket;
priv->intr_handle_socket.type = RTE_INTR_HANDLE_EXT;
rte_intr_callback_register(&priv->intr_handle_socket,
@@ -1095,7 +1130,7 @@ mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev)
* Pointer to Ethernet device structure.
*
* @return
* 0 on success, errno value on failure.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_set_link_down(struct rte_eth_dev *dev)
@@ -1110,7 +1145,7 @@ mlx5_set_link_down(struct rte_eth_dev *dev)
* Pointer to Ethernet device structure.
*
* @return
* 0 on success, errno value on failure.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_set_link_up(struct rte_eth_dev *dev)

@@ -247,7 +247,8 @@ struct mlx5_flow_items {
* Internal structure to store the conversion.
*
* @return
* 0 on success, negative value otherwise.
* 0 on success, a negative errno value otherwise and rte_errno is
* set.
*/
int (*convert)(const struct rte_flow_item *item,
const void *default_mask,
@@ -460,45 +461,52 @@ struct ibv_spec_header {
* Bit-Mask size in bytes.
*
* @return
* 0 on success.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_item_validate(const struct rte_flow_item *item,
const uint8_t *mask, unsigned int size)
{
int ret = 0;

if (!item->spec && (item->mask || item->last))
return -1;
if (!item->spec && (item->mask || item->last)) {
rte_errno = EINVAL;
return -rte_errno;
}
if (item->spec && !item->mask) {
unsigned int i;
const uint8_t *spec = item->spec;

for (i = 0; i < size; ++i)
if ((spec[i] | mask[i]) != mask[i])
return -1;
if ((spec[i] | mask[i]) != mask[i]) {
rte_errno = EINVAL;
return -rte_errno;
}
}
if (item->last && !item->mask) {
unsigned int i;
const uint8_t *spec = item->last;

for (i = 0; i < size; ++i)
if ((spec[i] | mask[i]) != mask[i])
return -1;
if ((spec[i] | mask[i]) != mask[i]) {
rte_errno = EINVAL;
return -rte_errno;
}
}
if (item->mask) {
unsigned int i;
const uint8_t *spec = item->spec;

for (i = 0; i < size; ++i)
if ((spec[i] | mask[i]) != mask[i])
return -1;
if ((spec[i] | mask[i]) != mask[i]) {
rte_errno = EINVAL;
return -rte_errno;
}
}
if (item->spec && item->last) {
uint8_t spec[size];
uint8_t last[size];
const uint8_t *apply = mask;
unsigned int i;
int ret;

if (item->mask)
apply = item->mask;
@@ -507,8 +515,12 @@ mlx5_flow_item_validate(const struct rte_flow_item *item,
last[i] = ((const uint8_t *)item->last)[i] & apply[i];
}
ret = memcmp(spec, last, size);
if (ret != 0) {
rte_errno = EINVAL;
return -rte_errno;
}
}
return ret;
return 0;
}

/**
@@ -521,7 +533,7 @@ mlx5_flow_item_validate(const struct rte_flow_item *item,
* User RSS configuration to save.
*
* @return
* 0 on success, errno value on failure.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_convert_rss_conf(struct mlx5_flow_parse *parser,
@@ -533,10 +545,14 @@ mlx5_flow_convert_rss_conf(struct mlx5_flow_parse *parser,
* device default RSS configuration.
*/
if (rss_conf) {
if (rss_conf->rss_hf & MLX5_RSS_HF_MASK)
return EINVAL;
if (rss_conf->rss_key_len != 40)
return EINVAL;
if (rss_conf->rss_hf & MLX5_RSS_HF_MASK) {
rte_errno = EINVAL;
return -rte_errno;
}
if (rss_conf->rss_key_len != 40) {
rte_errno = EINVAL;
return -rte_errno;
}
if (rss_conf->rss_key_len && rss_conf->rss_key) {
parser->rss_conf.rss_key_len = rss_conf->rss_key_len;
memcpy(parser->rss_key, rss_conf->rss_key,
@@ -616,14 +632,17 @@ mlx5_flow_convert_actions(struct rte_eth_dev *dev,
struct mlx5_flow_parse *parser)
{
struct priv *priv = dev->data->dev_private;
int ret;

/*
* Add default RSS configuration necessary for Verbs to create QP even
* if no RSS is necessary.
*/
mlx5_flow_convert_rss_conf(parser,
(const struct rte_eth_rss_conf *)
&priv->rss_conf);
ret = mlx5_flow_convert_rss_conf(parser,
(const struct rte_eth_rss_conf *)
&priv->rss_conf);
if (ret)
return ret;
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
continue;
@@ -772,6 +791,7 @@ mlx5_flow_convert_items_validate(const struct rte_flow_item items[],
{
const struct mlx5_flow_items *cur_item = mlx5_flow_items;
unsigned int i;
int ret = 0;

/* Initialise the offsets to start after verbs attribute. */
for (i = 0; i != hash_rxq_init_n; ++i)
@@ -779,7 +799,6 @@ mlx5_flow_convert_items_validate(const struct rte_flow_item items[],
for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
const struct mlx5_flow_items *token = NULL;
unsigned int n;
int err;

if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
continue;
@@ -795,10 +814,10 @@ mlx5_flow_convert_items_validate(const struct rte_flow_item items[],
if (!token)
goto exit_item_not_supported;
cur_item = token;
err = mlx5_flow_item_validate(items,
ret = mlx5_flow_item_validate(items,
(const uint8_t *)cur_item->mask,
cur_item->mask_sz);
if (err)
if (ret)
goto exit_item_not_supported;
if (items->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
if (parser->inner) {
@@ -835,9 +854,8 @@ mlx5_flow_convert_items_validate(const struct rte_flow_item items[],
}
return 0;
exit_item_not_supported:
rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
items, "item not supported");
return -rte_errno;
return rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_ITEM,
items, "item not supported");
}

/**
@@ -851,7 +869,7 @@ mlx5_flow_convert_items_validate(const struct rte_flow_item items[],
* Perform verbose error reporting if not NULL.
*
* @return
* A verbs flow attribute on success, NULL otherwise.
* A verbs flow attribute on success, NULL otherwise and rte_errno is set.
*/
static struct ibv_flow_attr *
mlx5_flow_convert_allocate(unsigned int priority,
@@ -1055,7 +1073,7 @@ mlx5_flow_convert(struct rte_eth_dev *dev,
parser->queue[HASH_RXQ_ETH].ibv_attr =
mlx5_flow_convert_allocate(priority, offset, error);
if (!parser->queue[HASH_RXQ_ETH].ibv_attr)
return ENOMEM;
goto exit_enomem;
parser->queue[HASH_RXQ_ETH].offset =
sizeof(struct ibv_flow_attr);
} else {
@@ -1090,7 +1108,7 @@ mlx5_flow_convert(struct rte_eth_dev *dev,
cur_item->mask),
parser);
if (ret) {
rte_flow_error_set(error, ret,
rte_flow_error_set(error, rte_errno,
RTE_FLOW_ERROR_TYPE_ITEM,
items, "item not supported");
goto exit_free;
@@ -1132,13 +1150,13 @@ mlx5_flow_convert(struct rte_eth_dev *dev,
parser->queue[i].ibv_attr = NULL;
}
}
rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot allocate verbs spec attributes.");
return ret;
return -rte_errno;
exit_count_error:
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create counter.");
return rte_errno;
return -rte_errno;
}

/**
@@ -1184,6 +1202,9 @@ mlx5_flow_create_copy(struct mlx5_flow_parse *parser, void *src,
* Default bit-masks to use when item->mask is not provided.
* @param data[in, out]
* User structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_create_eth(const struct rte_flow_item *item,
@@ -1233,6 +1254,9 @@ mlx5_flow_create_eth(const struct rte_flow_item *item,
* Default bit-masks to use when item->mask is not provided.
* @param data[in, out]
* User structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_create_vlan(const struct rte_flow_item *item,
@@ -1273,6 +1297,9 @@ mlx5_flow_create_vlan(const struct rte_flow_item *item,
* Default bit-masks to use when item->mask is not provided.
* @param data[in, out]
* User structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_create_ipv4(const struct rte_flow_item *item,
@@ -1325,6 +1352,9 @@ mlx5_flow_create_ipv4(const struct rte_flow_item *item,
* Default bit-masks to use when item->mask is not provided.
* @param data[in, out]
* User structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_create_ipv6(const struct rte_flow_item *item,
@@ -1397,6 +1427,9 @@ mlx5_flow_create_ipv6(const struct rte_flow_item *item,
* Default bit-masks to use when item->mask is not provided.
* @param data[in, out]
* User structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_create_udp(const struct rte_flow_item *item,
@@ -1443,6 +1476,9 @@ mlx5_flow_create_udp(const struct rte_flow_item *item,
* Default bit-masks to use when item->mask is not provided.
* @param data[in, out]
* User structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_create_tcp(const struct rte_flow_item *item,
@@ -1489,6 +1525,9 @@ mlx5_flow_create_tcp(const struct rte_flow_item *item,
* Default bit-masks to use when item->mask is not provided.
* @param data[in, out]
* User structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_create_vxlan(const struct rte_flow_item *item,
@@ -1528,8 +1567,10 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,
* before will also match this rule.
* To avoid such situation, VNI 0 is currently refused.
*/
if (!vxlan.val.tunnel_id)
return EINVAL;
if (!vxlan.val.tunnel_id) {
rte_errno = EINVAL;
return -rte_errno;
}
mlx5_flow_create_copy(parser, &vxlan, size);
return 0;
}
@@ -1541,6 +1582,9 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,
* Internal parser structure.
* @param mark_id
* Mark identifier.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id)
@@ -1566,7 +1610,7 @@ mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id)
* Pointer to MLX5 flow parser structure.
*
* @return
* 0 on success, errno value on failure.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_create_count(struct rte_eth_dev *dev __rte_unused,
@@ -1584,8 +1628,10 @@ mlx5_flow_create_count(struct rte_eth_dev *dev __rte_unused,

init_attr.counter_set_id = 0;
parser->cs = mlx5_glue->create_counter_set(priv->ctx, &init_attr);
if (!parser->cs)
return EINVAL;
if (!parser->cs) {
rte_errno = EINVAL;
return -rte_errno;
}
counter.counter_set_handle = parser->cs->handle;
mlx5_flow_create_copy(parser, &counter, size);
#endif
@@ -1605,7 +1651,7 @@ mlx5_flow_create_count(struct rte_eth_dev *dev __rte_unused,
* Perform verbose error reporting if not NULL.
*
* @return
* 0 on success, errno value on failure.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev,
@@ -1616,7 +1662,6 @@ mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev,
struct priv *priv = dev->data->dev_private;
struct ibv_flow_spec_action_drop *drop;
unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
int err = 0;

assert(priv->pd);
assert(priv->ctx);
@@ -1642,7 +1687,6 @@ mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev,
if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "flow rule creation failure");
err = ENOMEM;
goto error;
}
return 0;
@@ -1662,7 +1706,7 @@ mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev,
flow->cs = NULL;
parser->cs = NULL;
}
return err;
return -rte_errno;
}

/**
@@ -1678,7 +1722,7 @@ mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev,
* Perform verbose error reporting if not NULL.
*
* @return
* 0 on success, a errno value otherwise and rte_errno is set.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev,
@@ -1716,10 +1760,10 @@ mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev,
parser->queues,
parser->queues_n);
if (!flow->frxq[i].hrxq) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "cannot create hash rxq");
return ENOMEM;
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_HANDLE,
NULL,
"cannot create hash rxq");
}
}
return 0;
@@ -1738,7 +1782,7 @@ mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev,
* Perform verbose error reporting if not NULL.
*
* @return
* 0 on success, a errno value otherwise and rte_errno is set.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
@@ -1747,15 +1791,15 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
struct rte_flow_error *error)
{
struct priv *priv = dev->data->dev_private;
int err = 0;
int ret;
unsigned int i;
unsigned int flows_n = 0;

assert(priv->pd);
assert(priv->ctx);
assert(!parser->drop);
err = mlx5_flow_create_action_queue_rss(dev, parser, flow, error);
if (err)
ret = mlx5_flow_create_action_queue_rss(dev, parser, flow, error);
if (ret)
goto error;
if (parser->count)
flow->cs = parser->cs;
@@ -1771,7 +1815,6 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "flow rule creation failure");
err = ENOMEM;
goto error;
}
++flows_n;
@@ -1793,6 +1836,7 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
}
return 0;
error:
ret = rte_errno; /* Save rte_errno before cleanup. */
assert(flow);
for (i = 0; i != hash_rxq_init_n; ++i) {
if (flow->frxq[i].ibv_flow) {
@@ -1810,7 +1854,8 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
flow->cs = NULL;
parser->cs = NULL;
}
return err;
rte_errno = ret; /* Restore rte_errno. */
return -rte_errno;
}

/**
@@ -1830,7 +1875,7 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
* Perform verbose error reporting if not NULL.
*
* @return
* A flow on success, NULL otherwise.
* A flow on success, NULL otherwise and rte_errno is set.
*/
static struct rte_flow *
mlx5_flow_list_create(struct rte_eth_dev *dev,
@@ -1843,10 +1888,10 @@ mlx5_flow_list_create(struct rte_eth_dev *dev,
struct mlx5_flow_parse parser = { .create = 1, };
struct rte_flow *flow = NULL;
unsigned int i;
int err;
int ret;

err = mlx5_flow_convert(dev, attr, items, actions, error, &parser);
if (err)
ret = mlx5_flow_convert(dev, attr, items, actions, error, &parser);
if (ret)
goto exit;
flow = rte_calloc(__func__, 1,
sizeof(*flow) + parser.queues_n * sizeof(uint16_t),
@@ -1869,11 +1914,11 @@ mlx5_flow_list_create(struct rte_eth_dev *dev,
memcpy(flow->rss_key, parser.rss_key, parser.rss_conf.rss_key_len);
/* finalise the flow. */
if (parser.drop)
err = mlx5_flow_create_action_queue_drop(dev, &parser, flow,
ret = mlx5_flow_create_action_queue_drop(dev, &parser, flow,
error);
else
err = mlx5_flow_create_action_queue(dev, &parser, flow, error);
if (err)
ret = mlx5_flow_create_action_queue(dev, &parser, flow, error);
if (ret)
goto exit;
TAILQ_INSERT_TAIL(list, flow, next);
DEBUG("Flow created %p", (void *)flow);
@@ -1901,11 +1946,9 @@ mlx5_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
int ret;
struct mlx5_flow_parse parser = { .create = 0, };

ret = mlx5_flow_convert(dev, attr, items, actions, error, &parser);
return ret;
return mlx5_flow_convert(dev, attr, items, actions, error, &parser);
}

/**
@@ -2029,7 +2072,7 @@ mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
* Pointer to Ethernet device.
*
* @return
* 0 on success.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_flow_create_drop_queue(struct rte_eth_dev *dev)
@@ -2042,11 +2085,13 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev)
fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
if (!fdq) {
WARN("cannot allocate memory for drop queue");
goto error;
rte_errno = ENOMEM;
return -rte_errno;
}
fdq->cq = mlx5_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
if (!fdq->cq) {
WARN("cannot allocate CQ for drop queue");
rte_errno = errno;
goto error;
}
fdq->wq = mlx5_glue->create_wq
@@ -2060,6 +2105,7 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev)
});
if (!fdq->wq) {
WARN("cannot allocate WQ for drop queue");
rte_errno = errno;
goto error;
}
fdq->ind_table = mlx5_glue->create_rwq_ind_table
@@ -2071,6 +2117,7 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev)
});
if (!fdq->ind_table) {
WARN("cannot allocate indirection table for drop queue");
rte_errno = errno;
goto error;
}
fdq->qp = mlx5_glue->create_qp_ex
@@ -2093,6 +2140,7 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev)
});
if (!fdq->qp) {
WARN("cannot allocate QP for drop queue");
rte_errno = errno;
goto error;
}
priv->flow_drop_queue = fdq;
@@ -2109,7 +2157,7 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev)
if (fdq)
rte_free(fdq);
priv->flow_drop_queue = NULL;
return -1;
return -rte_errno;
}

/**
@@ -2208,7 +2256,7 @@ mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
* Pointer to a TAILQ flow list.
*
* @return
* 0 on success, a errno value otherwise and rte_errno is set.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
@@ -2228,7 +2276,7 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
DEBUG("Flow %p cannot be applied",
(void *)flow);
rte_errno = EINVAL;
return rte_errno;
return -rte_errno;
}
DEBUG("Flow %p applied", (void *)flow);
/* Next flow. */
@@ -2255,7 +2303,7 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
DEBUG("Flow %p cannot be applied",
(void *)flow);
rte_errno = EINVAL;
return rte_errno;
return -rte_errno;
}
flow_create:
flow->frxq[i].ibv_flow =
@@ -2265,7 +2313,7 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
DEBUG("Flow %p cannot be applied",
(void *)flow);
rte_errno = EINVAL;
return rte_errno;
return -rte_errno;
}
DEBUG("Flow %p applied", (void *)flow);
}
@@ -2315,7 +2363,7 @@ mlx5_flow_verify(struct rte_eth_dev *dev)
* A VLAN flow mask to apply.
*
* @return
* 0 on success.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
@@ -2367,8 +2415,10 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
} local;
} action_rss;

if (!priv->reta_idx_n)
return EINVAL;
if (!priv->reta_idx_n) {
rte_errno = EINVAL;
return -rte_errno;
}
for (i = 0; i != priv->reta_idx_n; ++i)
action_rss.local.queue[i] = (*priv->reta_idx)[i];
action_rss.local.rss_conf = &priv->rss_conf;
@@ -2377,7 +2427,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
flow = mlx5_flow_list_create(dev, &priv->ctrl_flows, &attr, items,
actions, &error);
if (!flow)
return rte_errno;
return -rte_errno;
return 0;
}

@@ -2392,7 +2442,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
* An Ethernet flow mask to apply.
*
* @return
* 0 on success.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_ctrl_flow(struct rte_eth_dev *dev,
@@ -2445,7 +2495,7 @@ mlx5_flow_flush(struct rte_eth_dev *dev,
* returned data from the counter.
*
* @return
* 0 on success, a errno value otherwise and rte_errno is set.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_query_count(struct ibv_counter_set *cs,
@@ -2462,15 +2512,13 @@ mlx5_flow_query_count(struct ibv_counter_set *cs,
.out = counters,
.outlen = 2 * sizeof(uint64_t),
};
int res = mlx5_glue->query_counter_set(&query_cs_attr, &query_out);
int err = mlx5_glue->query_counter_set(&query_cs_attr, &query_out);

if (res) {
rte_flow_error_set(error, -res,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
"cannot read counter");
return -res;
}
if (err)
return rte_flow_error_set(error, err,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
"cannot read counter");
query_count->hits_set = 1;
query_count->bytes_set = 1;
query_count->hits = counters[0] - counter_stats->hits;
@@ -2495,20 +2543,22 @@ mlx5_flow_query(struct rte_eth_dev *dev __rte_unused,
void *data,
struct rte_flow_error *error)
{
int res = EINVAL;

if (flow->cs) {
res = mlx5_flow_query_count(flow->cs,
&flow->counter_stats,
(struct rte_flow_query_count *)data,
error);
int ret;

ret = mlx5_flow_query_count(flow->cs,
&flow->counter_stats,
(struct rte_flow_query_count *)data,
error);
if (ret)
return ret;
} else {
rte_flow_error_set(error, res,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
"no counter found for flow");
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
"no counter found for flow");
}
return -res;
return 0;
}
#endif

@@ -2551,7 +2601,7 @@ mlx5_flow_isolate(struct rte_eth_dev *dev,
* Generic flow parameters structure.
*
* @return
* 0 on success, errno value on error.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
@@ -2564,7 +2614,8 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
/* Validate queue number. */
if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
ERROR("invalid queue number %d", fdir_filter->action.rx_queue);
return EINVAL;
rte_errno = EINVAL;
return -rte_errno;
}
attributes->attr.ingress = 1;
attributes->items[0] = (struct rte_flow_item) {
@@ -2586,7 +2637,8 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
break;
default:
ERROR("invalid behavior %d", fdir_filter->action.behavior);
return ENOTSUP;
rte_errno = ENOTSUP;
return -rte_errno;
}
attributes->queue.index = fdir_filter->action.rx_queue;
switch (fdir_filter->input.flow_type) {
@@ -2720,9 +2772,9 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
};
break;
default:
ERROR("invalid flow type%d",
fdir_filter->input.flow_type);
return ENOTSUP;
ERROR("invalid flow type%d", fdir_filter->input.flow_type);
rte_errno = ENOTSUP;
return -rte_errno;
}
return 0;
}
@@ -2736,7 +2788,7 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
* Flow director filter to add.
*
* @return
* 0 on success, errno value on failure.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_fdir_filter_add(struct rte_eth_dev *dev,
@@ -2760,11 +2812,11 @@ mlx5_fdir_filter_add(struct rte_eth_dev *dev,

ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes);
if (ret)
return -ret;
return ret;
ret = mlx5_flow_convert(dev, &attributes.attr, attributes.items,
attributes.actions, &error, &parser);
if (ret)
return -ret;
return ret;
flow = mlx5_flow_list_create(dev, &priv->flows, &attributes.attr,
attributes.items, attributes.actions,
&error);
@@ -2772,7 +2824,7 @@ mlx5_fdir_filter_add(struct rte_eth_dev *dev,
DEBUG("FDIR created %p", (void *)flow);
return 0;
}
return ENOTSUP;
return -rte_errno;
}

/**
@@ -2784,7 +2836,7 @@ mlx5_fdir_filter_add(struct rte_eth_dev *dev,
* Filter to be deleted.
*
* @return
* 0 on success, errno value on failure.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_fdir_filter_delete(struct rte_eth_dev *dev,
@@ -2805,7 +2857,7 @@ mlx5_fdir_filter_delete(struct rte_eth_dev *dev,

ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes);
if (ret)
return -ret;
return ret;
ret = mlx5_flow_convert(dev, &attributes.attr, attributes.items,
attributes.actions, &error, &parser);
if (ret)
@@ -2863,6 +2915,7 @@ mlx5_fdir_filter_delete(struct rte_eth_dev *dev,
/* The flow does not match. */
continue;
}
ret = rte_errno; /* Save rte_errno before cleanup. */
if (flow)
mlx5_flow_list_destroy(dev, &priv->flows, flow);
exit:
@@ -2870,7 +2923,8 @@ mlx5_fdir_filter_delete(struct rte_eth_dev *dev,
if (parser.queue[i].ibv_attr)
rte_free(parser.queue[i].ibv_attr);
}
return -ret;
rte_errno = ret; /* Restore rte_errno. */
return -rte_errno;
}

/**
@@ -2882,7 +2936,7 @@ mlx5_fdir_filter_delete(struct rte_eth_dev *dev,
* Filter to be updated.
*
* @return
* 0 on success, errno value on failure.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_fdir_filter_update(struct rte_eth_dev *dev,
@@ -2893,8 +2947,7 @@ mlx5_fdir_filter_update(struct rte_eth_dev *dev,
ret = mlx5_fdir_filter_delete(dev, fdir_filter);
if (ret)
return ret;
ret = mlx5_fdir_filter_add(dev, fdir_filter);
return ret;
return mlx5_fdir_filter_add(dev, fdir_filter);
}

/**
@@ -2948,7 +3001,7 @@ mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
* Pointer to operation-specific structure.
*
* @return
* 0 on success, errno value on failure.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
@@ -2957,7 +3010,6 @@ mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
struct priv *priv = dev->data->dev_private;
enum rte_fdir_mode fdir_mode =
priv->dev->data->dev_conf.fdir_conf.mode;
int ret = 0;

if (filter_op == RTE_ETH_FILTER_NOP)
return 0;
@@ -2965,18 +3017,16 @@ mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
ERROR("%p: flow director mode %d not supported",
(void *)dev, fdir_mode);
return EINVAL;
rte_errno = EINVAL;
return -rte_errno;
}
switch (filter_op) {
case RTE_ETH_FILTER_ADD:
ret = mlx5_fdir_filter_add(dev, arg);
break;
return mlx5_fdir_filter_add(dev, arg);
case RTE_ETH_FILTER_UPDATE:
ret = mlx5_fdir_filter_update(dev, arg);
break;
return mlx5_fdir_filter_update(dev, arg);
case RTE_ETH_FILTER_DELETE:
ret = mlx5_fdir_filter_delete(dev, arg);
break;
return mlx5_fdir_filter_delete(dev, arg);
case RTE_ETH_FILTER_FLUSH:
mlx5_fdir_filter_flush(dev);
break;
@@ -2984,12 +3034,11 @@ mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
|
||||
mlx5_fdir_info_get(dev, arg);
|
||||
break;
|
||||
default:
|
||||
DEBUG("%p: unknown operation %u", (void *)dev,
|
||||
filter_op);
|
||||
ret = EINVAL;
|
||||
break;
|
||||
DEBUG("%p: unknown operation %u", (void *)dev, filter_op);
|
||||
rte_errno = EINVAL;
|
||||
return -rte_errno;
|
||||
}
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -3005,7 +3054,7 @@ mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
|
||||
* Pointer to operation-specific structure.
|
||||
*
|
||||
* @return
|
||||
* 0 on success, negative errno value on failure.
|
||||
* 0 on success, a negative errno value otherwise and rte_errno is set.
|
||||
*/
|
||||
int
|
||||
mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
|
||||
@ -3013,21 +3062,21 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
|
||||
enum rte_filter_op filter_op,
|
||||
void *arg)
|
||||
{
|
||||
int ret = EINVAL;
|
||||
|
||||
switch (filter_type) {
|
||||
case RTE_ETH_FILTER_GENERIC:
|
||||
if (filter_op != RTE_ETH_FILTER_GET)
|
||||
return -EINVAL;
|
||||
if (filter_op != RTE_ETH_FILTER_GET) {
|
||||
rte_errno = EINVAL;
|
||||
return -rte_errno;
|
||||
}
|
||||
*(const void **)arg = &mlx5_flow_ops;
|
||||
return 0;
|
||||
case RTE_ETH_FILTER_FDIR:
|
||||
ret = mlx5_fdir_ctrl_func(dev, filter_op, arg);
|
||||
break;
|
||||
return mlx5_fdir_ctrl_func(dev, filter_op, arg);
|
||||
default:
|
||||
ERROR("%p: filter type (%d) not supported",
|
||||
(void *)dev, filter_type);
|
||||
break;
|
||||
rte_errno = ENOTSUP;
|
||||
return -rte_errno;
|
||||
}
|
||||
return -ret;
|
||||
return 0;
|
||||
}
|
||||
|
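Every hunk above applies the same two-line recipe: assign the positive errno code to rte_errno at the failure site, then return -rte_errno so callers can test the sign and still read the cause. A minimal standalone sketch of that convention follows; rte_errno is modelled by a plain static variable (the real one is per-lcore and comes from rte_errno.h), and parse_positive() is a hypothetical helper, not a driver function.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for DPDK's per-lcore rte_errno (rte_errno.h). */
static int rte_errno;

/* Hypothetical helper following the convention adopted here:
 * on failure, set rte_errno and return -rte_errno. */
static int
parse_positive(const char *val, unsigned long *out)
{
	char *end;

	errno = 0;
	*out = strtoul(val, &end, 0);
	if (errno) {
		rte_errno = errno;	/* keep the C library code */
		return -rte_errno;
	}
	if (end == val || *end != '\0') {
		rte_errno = EINVAL;	/* not a valid integer */
		return -rte_errno;
	}
	return 0;
}

int
main(void)
{
	unsigned long v;
	int ret = parse_positive("12x", &v);

	if (ret < 0)	/* callers test the sign... */
		printf("failed: %s\n", strerror(rte_errno)); /* ...and read the cause */
	return 0;
}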
@ -41,15 +41,17 @@
* MAC address output buffer.
*
* @return
* 0 on success, -1 on failure and errno is set.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[ETHER_ADDR_LEN])
{
struct ifreq request;
int ret;

if (mlx5_ifreq(dev, SIOCGIFHWADDR, &request))
return -1;
ret = mlx5_ifreq(dev, SIOCGIFHWADDR, &request);
if (ret)
return ret;
memcpy(mac, request.ifr_hwaddr.sa_data, ETHER_ADDR_LEN);
return 0;
}
@ -67,8 +69,13 @@ mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
assert(index < MLX5_MAX_MAC_ADDRESSES);
memset(&dev->data->mac_addrs[index], 0, sizeof(struct ether_addr));
if (!dev->data->promiscuous)
mlx5_traffic_restart(dev);
if (!dev->data->promiscuous) {
int ret = mlx5_traffic_restart(dev);

if (ret)
ERROR("%p cannot remove mac address: %s", (void *)dev,
strerror(rte_errno));
}
}

/**
@ -84,14 +91,13 @@ mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
* VMDq pool index to associate address with (ignored).
*
* @return
* 0 on success.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
uint32_t index, uint32_t vmdq __rte_unused)
{
unsigned int i;
int ret = 0;

assert(index < MLX5_MAX_MAC_ADDRESSES);
/* First, make sure this address isn't already configured. */
@ -102,12 +108,13 @@ mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
if (memcmp(&dev->data->mac_addrs[i], mac, sizeof(*mac)))
continue;
/* Address already configured elsewhere, return with error. */
return EADDRINUSE;
rte_errno = EADDRINUSE;
return -rte_errno;
}
dev->data->mac_addrs[index] = *mac;
if (!dev->data->promiscuous)
mlx5_traffic_restart(dev);
return ret;
return mlx5_traffic_restart(dev);
return 0;
}

/**
@ -121,6 +128,10 @@ mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
void
mlx5_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
int ret;

DEBUG("%p: setting primary MAC address", (void *)dev);
mlx5_mac_addr_add(dev, mac_addr, 0, 0);
ret = mlx5_mac_addr_add(dev, mac_addr, 0, 0);
if (ret)
ERROR("cannot set mac address: %s", strerror(rte_errno));
}
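mlx5_mac_addr_set() and mlx5_mac_addr_remove() are void ethdev callbacks, so the only thing they can do with a failure bubbling up from mlx5_traffic_restart() is log it through rte_errno. A compilable sketch of that shape; the LOG_ERR macro and traffic_restart() helper are illustrative stand-ins, not the driver's names.

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int rte_errno;	/* stand-in for DPDK's per-lcore rte_errno */

#define LOG_ERR(...) fprintf(stderr, __VA_ARGS__)

/* Hypothetical fallible helper following the new convention. */
static int
traffic_restart(void)
{
	rte_errno = EBUSY;	/* simulate a failed restart */
	return -rte_errno;
}

/* A void callback cannot propagate the error, only report it. */
static void
mac_addr_set(void)
{
	int ret = traffic_restart();

	if (ret)
		LOG_ERR("cannot set mac address: %s\n", strerror(rte_errno));
}

int
main(void)
{
	mac_addr_set();
	return 0;
}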
@ -91,7 +91,7 @@ mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start,
* Index of the next available entry.
*
* @return
* mr on success, NULL on failure.
* mr on success, NULL on failure and rte_errno is set.
*/
struct mlx5_mr *
mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
@ -115,6 +115,7 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
" rte_eth_dev_start()",
(void *)mp, mp->name);
rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
rte_errno = ENOTSUP;
return NULL;
}
mr = mlx5_mr_new(dev, mp);
@ -203,7 +204,9 @@ mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg)
mlx5_mr_release(mr);
return;
}
mlx5_mr_new(priv->dev, mp);
mr = mlx5_mr_new(priv->dev, mp);
if (!mr)
ERROR("cannot create memory region: %s", strerror(rte_errno));
}

/**
@ -216,7 +219,7 @@ mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg)
* Pointer to the memory pool to register.
*
* @return
* The memory region on success.
* The memory region on success, NULL on failure and rte_errno is set.
*/
struct mlx5_mr *
mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp)
@ -231,11 +234,13 @@ mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp)
mr = rte_zmalloc_socket(__func__, sizeof(*mr), 0, mp->socket_id);
if (!mr) {
DEBUG("unable to configure MR, ibv_reg_mr() failed.");
rte_errno = ENOMEM;
return NULL;
}
if (mlx5_check_mempool(mp, &start, &end) != 0) {
ERROR("mempool %p: not virtually contiguous",
(void *)mp);
rte_errno = ENOMEM;
return NULL;
}
DEBUG("mempool %p area start=%p end=%p size=%zu",
@ -260,6 +265,10 @@ mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp)
(size_t)(end - start));
mr->mr = mlx5_glue->reg_mr(priv->pd, (void *)start, end - start,
IBV_ACCESS_LOCAL_WRITE);
if (!mr->mr) {
rte_errno = ENOMEM;
return NULL;
}
mr->mp = mp;
mr->lkey = rte_cpu_to_be_32(mr->mr->lkey);
rte_atomic32_inc(&mr->refcnt);
|
||||
* RSS configuration data.
|
||||
*
|
||||
* @return
|
||||
* 0 on success, negative errno value on failure.
|
||||
* 0 on success, a negative errno value otherwise and rte_errno is set.
|
||||
*/
|
||||
int
|
||||
mlx5_rss_hash_update(struct rte_eth_dev *dev,
|
||||
struct rte_eth_rss_conf *rss_conf)
|
||||
{
|
||||
struct priv *priv = dev->data->dev_private;
|
||||
int ret = 0;
|
||||
|
||||
if (rss_conf->rss_hf & MLX5_RSS_HF_MASK) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
rte_errno = EINVAL;
|
||||
return -rte_errno;
|
||||
}
|
||||
if (rss_conf->rss_key && rss_conf->rss_key_len) {
|
||||
priv->rss_conf.rss_key = rte_realloc(priv->rss_conf.rss_key,
|
||||
rss_conf->rss_key_len, 0);
|
||||
if (!priv->rss_conf.rss_key) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
rte_errno = ENOMEM;
|
||||
return -rte_errno;
|
||||
}
|
||||
memcpy(priv->rss_conf.rss_key, rss_conf->rss_key,
|
||||
rss_conf->rss_key_len);
|
||||
priv->rss_conf.rss_key_len = rss_conf->rss_key_len;
|
||||
}
|
||||
priv->rss_conf.rss_hf = rss_conf->rss_hf;
|
||||
out:
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -73,7 +71,7 @@ mlx5_rss_hash_update(struct rte_eth_dev *dev,
|
||||
* RSS configuration data.
|
||||
*
|
||||
* @return
|
||||
* 0 on success, negative errno value on failure.
|
||||
* 0 on success, a negative errno value otherwise and rte_errno is set.
|
||||
*/
|
||||
int
|
||||
mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
|
||||
@ -81,8 +79,10 @@ mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
|
||||
{
|
||||
struct priv *priv = dev->data->dev_private;
|
||||
|
||||
if (!rss_conf)
|
||||
return -EINVAL;
|
||||
if (!rss_conf) {
|
||||
rte_errno = EINVAL;
|
||||
return -rte_errno;
|
||||
}
|
||||
if (rss_conf->rss_key &&
|
||||
(rss_conf->rss_key_len >= priv->rss_conf.rss_key_len)) {
|
||||
memcpy(rss_conf->rss_key, priv->rss_conf.rss_key,
|
||||
@ -102,7 +102,7 @@ mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
|
||||
* The size of the array to allocate.
|
||||
*
|
||||
* @return
|
||||
* 0 on success, errno value on failure.
|
||||
* 0 on success, a negative errno value otherwise and rte_errno is set.
|
||||
*/
|
||||
int
|
||||
mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size)
|
||||
@ -116,8 +116,10 @@ mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size)
|
||||
|
||||
mem = rte_realloc(priv->reta_idx,
|
||||
reta_size * sizeof((*priv->reta_idx)[0]), 0);
|
||||
if (!mem)
|
||||
return ENOMEM;
|
||||
if (!mem) {
|
||||
rte_errno = ENOMEM;
|
||||
return -rte_errno;
|
||||
}
|
||||
priv->reta_idx = mem;
|
||||
priv->reta_idx_n = reta_size;
|
||||
if (old_size < reta_size)
|
||||
@ -138,7 +140,7 @@ mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size)
|
||||
* Size of the RETA table.
|
||||
*
|
||||
* @return
|
||||
* 0 on success, negative errno value on failure.
|
||||
* 0 on success, a negative errno value otherwise and rte_errno is set.
|
||||
*/
|
||||
int
|
||||
mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
|
||||
@ -149,8 +151,10 @@ mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
|
||||
unsigned int idx;
|
||||
unsigned int i;
|
||||
|
||||
if (!reta_size || reta_size > priv->reta_idx_n)
|
||||
return -EINVAL;
|
||||
if (!reta_size || reta_size > priv->reta_idx_n) {
|
||||
rte_errno = EINVAL;
|
||||
return -rte_errno;
|
||||
}
|
||||
/* Fill each entry of the table even if its bit is not set. */
|
||||
for (idx = 0, i = 0; (i != reta_size); ++i) {
|
||||
idx = i / RTE_RETA_GROUP_SIZE;
|
||||
@ -171,7 +175,7 @@ mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
|
||||
* Size of the RETA table.
|
||||
*
|
||||
* @return
|
||||
* 0 on success, negative errno value on failure.
|
||||
* 0 on success, a negative errno value otherwise and rte_errno is set.
|
||||
*/
|
||||
int
|
||||
mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
|
||||
@ -184,8 +188,10 @@ mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
|
||||
unsigned int i;
|
||||
unsigned int pos;
|
||||
|
||||
if (!reta_size)
|
||||
return -EINVAL;
|
||||
if (!reta_size) {
|
||||
rte_errno = EINVAL;
|
||||
return -rte_errno;
|
||||
}
|
||||
ret = mlx5_rss_reta_index_resize(dev, reta_size);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -199,7 +205,7 @@ mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
|
||||
}
|
||||
if (dev->data->dev_started) {
|
||||
mlx5_dev_stop(dev);
|
||||
mlx5_dev_start(dev);
|
||||
return mlx5_dev_start(dev);
|
||||
}
|
||||
return -ret;
|
||||
return 0;
|
||||
}
|
||||
|
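mlx5_rss_hash_update() shows the mechanical shape of most hunks in this patch: the `ret = -EINVAL; goto out;` accumulator disappears in favour of setting rte_errno and returning at the failure site. A before/after sketch of just that transformation; both functions are self-contained stand-ins and return the same value for the same input.

#include <errno.h>
#include <stddef.h>

static int rte_errno;	/* stand-in for DPDK's per-lcore rte_errno */

/* Before: a local accumulator and a single exit label. */
static int
update_old(const int *conf)
{
	int ret = 0;

	if (conf == NULL) {
		ret = -EINVAL;
		goto out;
	}
out:
	return ret;
}

/* After: set rte_errno and return -rte_errno where the check fails. */
static int
update_new(const int *conf)
{
	if (conf == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
}

int
main(void)
{
	/* Both styles report -EINVAL; only the new one records the cause. */
	return update_old(NULL) == update_new(NULL) ? 0 : 1;
}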
@ -32,8 +32,13 @@
void
mlx5_promiscuous_enable(struct rte_eth_dev *dev)
{
int ret;

dev->data->promiscuous = 1;
mlx5_traffic_restart(dev);
ret = mlx5_traffic_restart(dev);
if (ret)
ERROR("%p cannot enable promiscuous mode: %s", (void *)dev,
strerror(rte_errno));
}

/**
@ -45,8 +50,13 @@ mlx5_promiscuous_enable(struct rte_eth_dev *dev)
void
mlx5_promiscuous_disable(struct rte_eth_dev *dev)
{
int ret;

dev->data->promiscuous = 0;
mlx5_traffic_restart(dev);
ret = mlx5_traffic_restart(dev);
if (ret)
ERROR("%p cannot disable promiscuous mode: %s", (void *)dev,
strerror(rte_errno));
}

/**
@ -58,8 +68,13 @@ mlx5_promiscuous_disable(struct rte_eth_dev *dev)
void
mlx5_allmulticast_enable(struct rte_eth_dev *dev)
{
int ret;

dev->data->all_multicast = 1;
mlx5_traffic_restart(dev);
ret = mlx5_traffic_restart(dev);
if (ret)
ERROR("%p cannot enable allmulticast mode: %s", (void *)dev,
strerror(rte_errno));
}

/**
@ -71,6 +86,11 @@ mlx5_allmulticast_enable(struct rte_eth_dev *dev)
void
mlx5_allmulticast_disable(struct rte_eth_dev *dev)
{
int ret;

dev->data->all_multicast = 0;
mlx5_traffic_restart(dev);
ret = mlx5_traffic_restart(dev);
if (ret)
ERROR("%p cannot disable allmulticast mode: %s", (void *)dev,
strerror(rte_errno));
}
@ -61,7 +61,7 @@ const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key);
* Pointer to RX queue structure.
*
* @return
* 0 on success, errno value on failure.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
@ -69,7 +69,7 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
unsigned int i;
int ret = 0;
int err;

/* Iterate on segments. */
for (i = 0; (i != elts_n); ++i) {
@ -78,7 +78,7 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
if (buf == NULL) {
ERROR("%p: empty mbuf pool", (void *)rxq_ctrl);
ret = ENOMEM;
rte_errno = ENOMEM;
goto error;
}
/* Headroom is reserved by rte_pktmbuf_alloc(). */
@ -120,9 +120,9 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
}
DEBUG("%p: allocated and configured %u segments (max %u packets)",
(void *)rxq_ctrl, elts_n, elts_n / (1 << rxq_ctrl->rxq.sges_n));
assert(ret == 0);
return 0;
error:
err = rte_errno; /* Save rte_errno before cleanup. */
elts_n = i;
for (i = 0; (i != elts_n); ++i) {
if ((*rxq_ctrl->rxq.elts)[i] != NULL)
@ -130,8 +130,8 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
(*rxq_ctrl->rxq.elts)[i] = NULL;
}
DEBUG("%p: failed, freed everything", (void *)rxq_ctrl);
assert(ret > 0);
return ret;
rte_errno = err; /* Restore rte_errno. */
return -rte_errno;
}

/**
@ -271,7 +271,7 @@ mlx5_is_rx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads)
* Memory pool for buffer allocations.
*
* @return
* 0 on success, negative errno value on failure.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
@ -282,7 +282,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
int ret = 0;

if (!rte_is_power_of_2(desc)) {
desc = 1 << log2above(desc);
@ -295,37 +294,37 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
if (idx >= priv->rxqs_n) {
ERROR("%p: queue index out of range (%u >= %u)",
(void *)dev, idx, priv->rxqs_n);
return -EOVERFLOW;
rte_errno = EOVERFLOW;
return -rte_errno;
}
if (!mlx5_is_rx_queue_offloads_allowed(dev, conf->offloads)) {
ret = ENOTSUP;
ERROR("%p: Rx queue offloads 0x%" PRIx64 " don't match port "
"offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
(void *)dev, conf->offloads,
dev->data->dev_conf.rxmode.offloads,
(mlx5_get_rx_port_offloads() |
mlx5_get_rx_queue_offloads(dev)));
goto out;
rte_errno = ENOTSUP;
return -rte_errno;
}
if (!mlx5_rxq_releasable(dev, idx)) {
ret = EBUSY;
ERROR("%p: unable to release queue index %u",
(void *)dev, idx);
goto out;
rte_errno = EBUSY;
return -rte_errno;
}
mlx5_rxq_release(dev, idx);
rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
if (!rxq_ctrl) {
ERROR("%p: unable to allocate queue index %u",
(void *)dev, idx);
ret = ENOMEM;
goto out;
rte_errno = ENOMEM;
return -rte_errno;
}
DEBUG("%p: adding RX queue %p to list",
(void *)dev, (void *)rxq_ctrl);
(*priv->rxqs)[idx] = &rxq_ctrl->rxq;
out:
return -ret;
return 0;
}

/**
@ -358,7 +357,7 @@ mlx5_rx_queue_release(void *dpdk_rxq)
* Pointer to Ethernet device.
*
* @return
* 0 on success, negative on failure.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
@ -377,7 +376,8 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
if (intr_handle->intr_vec == NULL) {
ERROR("failed to allocate memory for interrupt vector,"
" Rx interrupts will not be supported");
return -ENOMEM;
rte_errno = ENOMEM;
return -rte_errno;
}
intr_handle->type = RTE_INTR_HANDLE_EXT;
for (i = 0; i != n; ++i) {
@ -400,16 +400,18 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
" (%d), Rx interrupts cannot be enabled",
RTE_MAX_RXTX_INTR_VEC_ID);
mlx5_rx_intr_vec_disable(dev);
return -1;
rte_errno = ENOMEM;
return -rte_errno;
}
fd = rxq_ibv->channel->fd;
flags = fcntl(fd, F_GETFL);
rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
if (rc < 0) {
rte_errno = errno;
ERROR("failed to make Rx interrupt file descriptor"
" %d non-blocking for queue index %d", fd, i);
mlx5_rx_intr_vec_disable(dev);
return -1;
return -rte_errno;
}
intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
intr_handle->efds[count] = fd;
@ -497,7 +499,7 @@ mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
* Rx queue number.
*
* @return
* 0 on success, negative on failure.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
@ -505,12 +507,11 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data;
struct mlx5_rxq_ctrl *rxq_ctrl;
int ret = 0;

rxq_data = (*priv->rxqs)[rx_queue_id];
if (!rxq_data) {
ret = EINVAL;
goto exit;
rte_errno = EINVAL;
return -rte_errno;
}
rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
if (rxq_ctrl->irq) {
@ -518,16 +519,13 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)

rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
if (!rxq_ibv) {
ret = EINVAL;
goto exit;
rte_errno = EINVAL;
return -rte_errno;
}
mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
mlx5_rxq_ibv_release(rxq_ibv);
}
exit:
if (ret)
WARN("unable to arm interrupt on rx queue %d", rx_queue_id);
return -ret;
return 0;
}

/**
@ -539,7 +537,7 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
* Rx queue number.
*
* @return
* 0 on success, negative on failure.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
@ -550,35 +548,36 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct mlx5_rxq_ibv *rxq_ibv = NULL;
struct ibv_cq *ev_cq;
void *ev_ctx;
int ret = 0;
int ret;

rxq_data = (*priv->rxqs)[rx_queue_id];
if (!rxq_data) {
ret = EINVAL;
goto exit;
rte_errno = EINVAL;
return -rte_errno;
}
rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
if (!rxq_ctrl->irq)
goto exit;
return 0;
rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
if (!rxq_ibv) {
ret = EINVAL;
goto exit;
rte_errno = EINVAL;
return -rte_errno;
}
ret = mlx5_glue->get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx);
if (ret || ev_cq != rxq_ibv->cq) {
ret = EINVAL;
rte_errno = EINVAL;
goto exit;
}
rxq_data->cq_arm_sn++;
mlx5_glue->ack_cq_events(rxq_ibv->cq, 1);
return 0;
exit:
ret = rte_errno; /* Save rte_errno before cleanup. */
if (rxq_ibv)
mlx5_rxq_ibv_release(rxq_ibv);
if (ret)
WARN("unable to disable interrupt on rx queue %d",
rx_queue_id);
return -ret;
WARN("unable to disable interrupt on rx queue %d", rx_queue_id);
rte_errno = ret; /* Restore rte_errno. */
return -rte_errno;
}

/**
@ -590,7 +589,7 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
* Queue index in DPDK Rx queue array
*
* @return
* The Verbs object initialised if it can be created.
* The Verbs object initialised, NULL otherwise and rte_errno is set.
*/
struct mlx5_rxq_ibv *
mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
@ -626,6 +625,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
if (!tmpl) {
ERROR("%p: cannot allocate verbs resources",
(void *)rxq_ctrl);
rte_errno = ENOMEM;
goto error;
}
tmpl->rxq_ctrl = rxq_ctrl;
@ -643,6 +643,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
if (!tmpl->channel) {
ERROR("%p: Comp Channel creation failure",
(void *)rxq_ctrl);
rte_errno = ENOMEM;
goto error;
}
}
@ -672,6 +673,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
&attr.cq.mlx5));
if (tmpl->cq == NULL) {
ERROR("%p: CQ creation failure", (void *)rxq_ctrl);
rte_errno = ENOMEM;
goto error;
}
DEBUG("priv->device_attr.max_qp_wr is %d",
@ -708,6 +710,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
tmpl->wq = mlx5_glue->create_wq(priv->ctx, &attr.wq);
if (tmpl->wq == NULL) {
ERROR("%p: WQ creation failure", (void *)rxq_ctrl);
rte_errno = ENOMEM;
goto error;
}
/*
@ -722,6 +725,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
((1 << rxq_data->elts_n) >> rxq_data->sges_n),
(1 << rxq_data->sges_n),
attr.wq.max_wr, attr.wq.max_sge);
rte_errno = EINVAL;
goto error;
}
/* Change queue state to ready. */
@ -733,6 +737,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
if (ret) {
ERROR("%p: WQ state to IBV_WQS_RDY failed",
(void *)rxq_ctrl);
rte_errno = ret;
goto error;
}
obj.cq.in = tmpl->cq;
@ -740,11 +745,14 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
obj.rwq.in = tmpl->wq;
obj.rwq.out = &rwq;
ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ);
if (ret != 0)
if (ret) {
rte_errno = ret;
goto error;
}
if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
"it should be set to %u", RTE_CACHE_LINE_SIZE);
rte_errno = EINVAL;
goto error;
}
/* Fill the rings. */
@ -788,6 +796,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
return tmpl;
error:
ret = rte_errno; /* Save rte_errno before cleanup. */
if (tmpl->wq)
claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
if (tmpl->cq)
@ -797,6 +806,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
if (tmpl->mr)
mlx5_mr_release(tmpl->mr);
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
rte_errno = ret; /* Restore rte_errno. */
return NULL;
}

@ -920,7 +930,7 @@ mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv)
* NUMA socket on which memory must be allocated.
*
* @return
* A DPDK queue object on success.
* A DPDK queue object on success, NULL otherwise and rte_errno is set.
*/
struct mlx5_rxq_ctrl *
mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
@ -942,8 +952,10 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
sizeof(*tmpl) +
desc_n * sizeof(struct rte_mbuf *),
0, socket);
if (!tmpl)
if (!tmpl) {
rte_errno = ENOMEM;
return NULL;
}
tmpl->socket = socket;
if (priv->dev->data->dev_conf.intr_conf.rxq)
tmpl->irq = 1;
@ -973,6 +985,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
(void *)dev,
1 << sges_n,
dev->data->dev_conf.rxmode.max_rx_pkt_len);
rte_errno = EOVERFLOW;
goto error;
}
} else {
@ -991,6 +1004,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
(void *)dev,
desc,
1 << tmpl->rxq.sges_n);
rte_errno = EINVAL;
goto error;
}
/* Toggle RX checksum offload if hardware supports it. */
@ -1045,7 +1059,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
* TX queue index.
*
* @return
* A pointer to the queue if it exists.
* A pointer to the queue if it exists, NULL otherwise.
*/
struct mlx5_rxq_ctrl *
mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
@ -1108,7 +1122,8 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
* TX queue index.
*
* @return
* 1 if the queue can be released.
* 1 if the queue can be released, negative errno otherwise and rte_errno is
* set.
*/
int
mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
@ -1116,8 +1131,10 @@ mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *rxq_ctrl;

if (!(*priv->rxqs)[idx])
return -1;
if (!(*priv->rxqs)[idx]) {
rte_errno = EINVAL;
return -rte_errno;
}
rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
}
@ -1157,7 +1174,7 @@ mlx5_rxq_verify(struct rte_eth_dev *dev)
* Number of queues in the array.
*
* @return
* A new indirection table.
* The Verbs object initialised, NULL otherwise and rte_errno is set.
*/
struct mlx5_ind_table_ibv *
mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, uint16_t queues[],
@ -1174,8 +1191,10 @@ mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, uint16_t queues[],

ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
queues_n * sizeof(uint16_t), 0);
if (!ind_tbl)
if (!ind_tbl) {
rte_errno = ENOMEM;
return NULL;
}
for (i = 0; i != queues_n; ++i) {
struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);

@ -1195,8 +1214,10 @@ mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, uint16_t queues[],
.ind_tbl = wq,
.comp_mask = 0,
});
if (!ind_tbl->ind_table)
if (!ind_tbl->ind_table) {
rte_errno = errno;
goto error;
}
rte_atomic32_inc(&ind_tbl->refcnt);
LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
DEBUG("%p: Indirection table %p: refcnt %d", (void *)dev,
@ -1321,7 +1342,7 @@ mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
* Number of queues.
*
* @return
* An hash Rx queue on success.
* The Verbs object initialised, NULL otherwise and rte_errno is set.
*/
struct mlx5_hrxq *
mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
@ -1331,13 +1352,16 @@ mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
struct mlx5_hrxq *hrxq;
struct mlx5_ind_table_ibv *ind_tbl;
struct ibv_qp *qp;
int err;

queues_n = hash_fields ? queues_n : 1;
ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
if (!ind_tbl)
ind_tbl = mlx5_ind_table_ibv_new(dev, queues, queues_n);
if (!ind_tbl)
if (!ind_tbl) {
rte_errno = ENOMEM;
return NULL;
}
qp = mlx5_glue->create_qp_ex
(priv->ctx,
&(struct ibv_qp_init_attr_ex){
@ -1355,8 +1379,10 @@ mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
.rwq_ind_tbl = ind_tbl->ind_table,
.pd = priv->pd,
});
if (!qp)
if (!qp) {
rte_errno = errno;
goto error;
}
hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
if (!hrxq)
goto error;
@ -1371,9 +1397,11 @@ mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
(void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
return hrxq;
error:
err = rte_errno; /* Save rte_errno before cleanup. */
mlx5_ind_table_ibv_release(dev, ind_tbl);
if (qp)
claim_zero(mlx5_glue->destroy_qp(qp));
rte_errno = err; /* Restore rte_errno. */
return NULL;
}
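mlx5_rxq_ibv_new() and mlx5_hrxq_new() both guard their error paths the same way: rte_errno is saved before cleanup calls that may themselves fail and overwrite it, then restored just before returning. A compilable sketch of that idiom; release() and second_step() are hypothetical helpers that stand in for the driver's cleanup and construction steps.

#include <errno.h>
#include <stdlib.h>

static int rte_errno;	/* stand-in for DPDK's per-lcore rte_errno */

/* Hypothetical cleanup that, like the release helpers in this
 * driver, may clobber rte_errno internally. */
static void
release(void *obj)
{
	free(obj);
	rte_errno = EBUSY;
}

/* Hypothetical second construction step that fails. */
static int
second_step(void)
{
	rte_errno = EINVAL;
	return -rte_errno;
}

static void *
object_new(void)
{
	int err;
	void *obj = malloc(16);

	if (obj == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}
	if (second_step())
		goto error;
	return obj;
error:
	err = rte_errno;	/* save rte_errno before cleanup */
	release(obj);
	rte_errno = err;	/* restore rte_errno */
	return NULL;
}

int
main(void)
{
	/* The original cause (EINVAL) survives the clobbering cleanup. */
	return (object_new() == NULL && rte_errno == EINVAL) ? 0 : 1;
}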
@ -22,7 +22,7 @@
* Pointer to Ethernet device.
*
* @return
* 0 on success, errno value on failure.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_socket_init(struct rte_eth_dev *dev)
@ -41,16 +41,21 @@ mlx5_socket_init(struct rte_eth_dev *dev)
*/
ret = socket(AF_UNIX, SOCK_STREAM, 0);
if (ret < 0) {
rte_errno = errno;
WARN("secondary process not supported: %s", strerror(errno));
return ret;
goto error;
}
priv->primary_socket = ret;
flags = fcntl(priv->primary_socket, F_GETFL, 0);
if (flags == -1)
goto out;
if (flags == -1) {
rte_errno = errno;
goto error;
}
ret = fcntl(priv->primary_socket, F_SETFL, flags | O_NONBLOCK);
if (ret < 0)
goto out;
if (ret < 0) {
rte_errno = errno;
goto error;
}
snprintf(sun.sun_path, sizeof(sun.sun_path), "/var/tmp/%s_%d",
MLX5_DRIVER_NAME, priv->primary_socket);
ret = stat(sun.sun_path, &file_stat);
@ -59,29 +64,30 @@ mlx5_socket_init(struct rte_eth_dev *dev)
ret = bind(priv->primary_socket, (const struct sockaddr *)&sun,
sizeof(sun));
if (ret < 0) {
rte_errno = errno;
WARN("cannot bind socket, secondary process not supported: %s",
strerror(errno));
goto close;
}
ret = listen(priv->primary_socket, 0);
if (ret < 0) {
rte_errno = errno;
WARN("Secondary process not supported: %s", strerror(errno));
goto close;
}
return ret;
return 0;
close:
remove(sun.sun_path);
out:
error:
claim_zero(close(priv->primary_socket));
priv->primary_socket = 0;
return -(ret);
return -rte_errno;
}

/**
* Un-Initialise the socket to communicate with the secondary process
*
* @param[in] dev
* Pointer to Ethernet device.
*/
void
mlx5_socket_uninit(struct rte_eth_dev *dev)
@ -131,19 +137,21 @@ mlx5_socket_handle(struct rte_eth_dev *dev)
ret = setsockopt(conn_sock, SOL_SOCKET, SO_PASSCRED, &(int){1},
sizeof(int));
if (ret < 0) {
WARN("cannot change socket options");
goto out;
ret = errno;
WARN("cannot change socket options: %s", strerror(rte_errno));
goto error;
}
ret = recvmsg(conn_sock, &msg, MSG_WAITALL);
if (ret < 0) {
WARN("received an empty message: %s", strerror(errno));
goto out;
ret = errno;
WARN("received an empty message: %s", strerror(rte_errno));
goto error;
}
/* Expect to receive credentials only. */
cmsg = CMSG_FIRSTHDR(&msg);
if (cmsg == NULL) {
WARN("no message");
goto out;
goto error;
}
if ((cmsg->cmsg_type == SCM_CREDENTIALS) &&
(cmsg->cmsg_len >= sizeof(*cred))) {
@ -153,13 +161,13 @@ mlx5_socket_handle(struct rte_eth_dev *dev)
cmsg = CMSG_NXTHDR(&msg, cmsg);
if (cmsg != NULL) {
WARN("Message wrongly formatted");
goto out;
goto error;
}
/* Make sure all the ancillary data was received and valid. */
if ((cred == NULL) || (cred->uid != getuid()) ||
(cred->gid != getgid())) {
WARN("wrong credentials");
goto out;
goto error;
}
/* Set-up the ancillary data. */
cmsg = CMSG_FIRSTHDR(&msg);
@ -172,7 +180,7 @@ mlx5_socket_handle(struct rte_eth_dev *dev)
ret = sendmsg(conn_sock, &msg, 0);
if (ret < 0)
WARN("cannot send response");
out:
error:
close(conn_sock);
}

@ -183,7 +191,7 @@ mlx5_socket_handle(struct rte_eth_dev *dev)
* Pointer to Ethernet structure.
*
* @return
* fd on success, negative errno value on failure.
* fd on success, negative errno value otherwise and rte_errno is set.
*/
int
mlx5_socket_connect(struct rte_eth_dev *dev)
@ -192,7 +200,7 @@ mlx5_socket_connect(struct rte_eth_dev *dev)
struct sockaddr_un sun = {
.sun_family = AF_UNIX,
};
int socket_fd;
int socket_fd = -1;
int *fd = NULL;
int ret;
struct ucred *cred;
@ -212,57 +220,67 @@ mlx5_socket_connect(struct rte_eth_dev *dev)

ret = socket(AF_UNIX, SOCK_STREAM, 0);
if (ret < 0) {
rte_errno = errno;
WARN("cannot connect to primary");
return ret;
goto error;
}
socket_fd = ret;
snprintf(sun.sun_path, sizeof(sun.sun_path), "/var/tmp/%s_%d",
MLX5_DRIVER_NAME, priv->primary_socket);
ret = connect(socket_fd, (const struct sockaddr *)&sun, sizeof(sun));
if (ret < 0) {
rte_errno = errno;
WARN("cannot connect to primary");
goto out;
goto error;
}
cmsg = CMSG_FIRSTHDR(&msg);
if (cmsg == NULL) {
rte_errno = EINVAL;
DEBUG("cannot get first message");
goto out;
goto error;
}
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_CREDENTIALS;
cmsg->cmsg_len = CMSG_LEN(sizeof(*cred));
cred = (struct ucred *)CMSG_DATA(cmsg);
if (cred == NULL) {
rte_errno = EINVAL;
DEBUG("no credentials received");
goto out;
goto error;
}
cred->pid = getpid();
cred->uid = getuid();
cred->gid = getgid();
ret = sendmsg(socket_fd, &msg, MSG_DONTWAIT);
if (ret < 0) {
rte_errno = errno;
WARN("cannot send credentials to primary: %s",
strerror(errno));
goto out;
goto error;
}
ret = recvmsg(socket_fd, &msg, MSG_WAITALL);
if (ret <= 0) {
rte_errno = errno;
WARN("no message from primary: %s", strerror(errno));
goto out;
goto error;
}
cmsg = CMSG_FIRSTHDR(&msg);
if (cmsg == NULL) {
rte_errno = EINVAL;
WARN("No file descriptor received");
goto out;
goto error;
}
fd = (int *)CMSG_DATA(cmsg);
if (*fd <= 0) {
if (*fd < 0) {
WARN("no file descriptor received: %s", strerror(errno));
ret = *fd;
goto out;
rte_errno = *fd;
goto error;
}
ret = *fd;
out:
close(socket_fd);
return ret;
return 0;
error:
if (socket_fd != -1)
close(socket_fd);
return -rte_errno;
}
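The socket code converts kernel failures at the call site: every negative return from socket(2), fcntl(2) or bind(2) is immediately followed by `rte_errno = errno;` before jumping to a single error label, so the C library code is captured before anything else can disturb errno. A standalone sketch of that shape, loosely modelled on mlx5_socket_init(); socket_init() here is a stand-in, not the driver function.

#include <errno.h>
#include <fcntl.h>
#include <sys/socket.h>
#include <unistd.h>

static int rte_errno;	/* stand-in for DPDK's per-lcore rte_errno */

/* Capture errno immediately, funnel every failure through one
 * label, return -rte_errno. */
static int
socket_init(void)
{
	int fd = -1;
	int flags;
	int ret;

	ret = socket(AF_UNIX, SOCK_STREAM, 0);
	if (ret < 0) {
		rte_errno = errno;
		goto error;
	}
	fd = ret;
	flags = fcntl(fd, F_GETFL, 0);
	if (flags == -1) {
		rte_errno = errno;
		goto error;
	}
	ret = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
	if (ret < 0) {
		rte_errno = errno;
		goto error;
	}
	return fd;
error:
	if (fd != -1)
		close(fd);
	return -rte_errno;
}

int
main(void)
{
	int fd = socket_init();

	if (fd < 0)
		return -fd;	/* rte_errno holds the cause */
	close(fd);
	return 0;
}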
@ -128,7 +128,8 @@ static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init);
* Counters table output buffer.
*
* @return
* 0 on success and stats is filled, negative on error.
* 0 on success and stats is filled, negative errno value otherwise and
* rte_errno is set.
*/
static int
mlx5_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats)
@ -140,13 +141,15 @@ mlx5_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats)
unsigned int stats_sz = xstats_ctrl->stats_n * sizeof(uint64_t);
unsigned char et_stat_buf[sizeof(struct ethtool_stats) + stats_sz];
struct ethtool_stats *et_stats = (struct ethtool_stats *)et_stat_buf;
int ret;

et_stats->cmd = ETHTOOL_GSTATS;
et_stats->n_stats = xstats_ctrl->stats_n;
ifr.ifr_data = (caddr_t)et_stats;
if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr) != 0) {
ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
WARN("unable to read statistic values from device");
return -1;
return ret;
}
for (i = 0; i != xstats_n; ++i) {
if (mlx5_counters_init[i].ib) {
@ -178,18 +181,21 @@ mlx5_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats)
* Pointer to Ethernet device.
*
* @return
* Number of statistics on success, -1 on error.
* Number of statistics on success, negative errno value otherwise and
* rte_errno is set.
*/
static int
mlx5_ethtool_get_stats_n(struct rte_eth_dev *dev) {
struct ethtool_drvinfo drvinfo;
struct ifreq ifr;
int ret;

drvinfo.cmd = ETHTOOL_GDRVINFO;
ifr.ifr_data = (caddr_t)&drvinfo;
if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr) != 0) {
ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
WARN("unable to query number of statistics");
return -1;
return ret;
}
return drvinfo.n_stats;
}
@ -211,12 +217,14 @@ mlx5_xstats_init(struct rte_eth_dev *dev)
struct ethtool_gstrings *strings = NULL;
unsigned int dev_stats_n;
unsigned int str_sz;
int ret;

dev_stats_n = mlx5_ethtool_get_stats_n(dev);
if (dev_stats_n < 1) {
ret = mlx5_ethtool_get_stats_n(dev);
if (ret < 0) {
WARN("no extended statistics available");
return;
}
dev_stats_n = ret;
xstats_ctrl->stats_n = dev_stats_n;
/* Allocate memory to grab stat names and values. */
str_sz = dev_stats_n * ETH_GSTRING_LEN;
@ -231,7 +239,8 @@ mlx5_xstats_init(struct rte_eth_dev *dev)
strings->string_set = ETH_SS_STATS;
strings->len = dev_stats_n;
ifr.ifr_data = (caddr_t)strings;
if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr) != 0) {
ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
WARN("unable to get statistic names");
goto free;
}
@ -260,7 +269,9 @@ mlx5_xstats_init(struct rte_eth_dev *dev)
}
/* Copy to base at first time. */
assert(xstats_n <= MLX5_MAX_XSTATS);
mlx5_read_dev_counters(dev, xstats_ctrl->base);
ret = mlx5_read_dev_counters(dev, xstats_ctrl->base);
if (ret)
ERROR("cannot read device counters: %s", strerror(rte_errno));
free:
rte_free(strings);
}
@ -277,7 +288,7 @@ mlx5_xstats_init(struct rte_eth_dev *dev)
*
* @return
* Number of extended stats on success and stats is filled,
* negative on error.
* negative on error and rte_errno is set.
*/
int
mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
@ -286,15 +297,15 @@ mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
struct priv *priv = dev->data->dev_private;
unsigned int i;
uint64_t counters[n];
int ret = 0;

if (n >= xstats_n && stats) {
struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
int stats_n;
int ret;

stats_n = mlx5_ethtool_get_stats_n(dev);
if (stats_n < 0)
return -1;
return stats_n;
if (xstats_ctrl->stats_n != stats_n)
mlx5_xstats_init(dev);
ret = mlx5_read_dev_counters(dev, counters);
@ -315,6 +326,10 @@ mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
* Pointer to Ethernet device structure.
* @param[out] stats
* Stats structure output buffer.
*
* @return
* 0 on success and stats is filled, negative errno value otherwise and
* rte_errno is set.
*/
int
mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
@ -419,14 +434,22 @@ mlx5_xstats_reset(struct rte_eth_dev *dev)
unsigned int i;
unsigned int n = xstats_n;
uint64_t counters[n];
int ret;

stats_n = mlx5_ethtool_get_stats_n(dev);
if (stats_n < 0)
if (stats_n < 0) {
ERROR("%p cannot get stats: %s", (void *)dev,
strerror(-stats_n));
return;
}
if (xstats_ctrl->stats_n != stats_n)
mlx5_xstats_init(dev);
if (mlx5_read_dev_counters(dev, counters) < 0)
ret = mlx5_read_dev_counters(dev, counters);
if (ret) {
ERROR("%p cannot read device counters: %s", (void *)dev,
strerror(rte_errno));
return;
}
for (i = 0; i != n; ++i)
xstats_ctrl->base[i] = counters[i];
}
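mlx5_ethtool_get_stats_n() now overloads its return value: a non-negative statistics count on success, a negative errno otherwise. Callers such as mlx5_xstats_init() therefore split the two cases through a signed temporary before committing the value to an unsigned field. A sketch of that caller-side pattern; get_stats_n() and xstats_init() are stand-ins for the driver functions.

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int rte_errno;	/* stand-in for DPDK's per-lcore rte_errno */

/* Stand-in: either a statistics count (>= 0) or -rte_errno. */
static int
get_stats_n(int fail)
{
	if (fail) {
		rte_errno = EOPNOTSUPP;
		return -rte_errno;
	}
	return 42;
}

static void
xstats_init(int fail)
{
	unsigned int dev_stats_n;
	int ret = get_stats_n(fail);	/* signed temporary first */

	if (ret < 0) {
		fprintf(stderr, "no extended statistics available: %s\n",
			strerror(rte_errno));
		return;
	}
	dev_stats_n = ret;	/* only then widen to unsigned */
	printf("%u statistics\n", dev_stats_n);
}

int
main(void)
{
	xstats_init(0);
	xstats_init(1);
	return 0;
}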
@ -37,14 +37,14 @@ mlx5_txq_stop(struct rte_eth_dev *dev)
* Pointer to Ethernet device structure.
*
* @return
* 0 on success, errno on error.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_txq_start(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
unsigned int i;
int ret = 0;
int ret;

/* Add memory regions to Tx queues. */
for (i = 0; i != priv->txqs_n; ++i) {
@ -62,17 +62,19 @@ mlx5_txq_start(struct rte_eth_dev *dev)
txq_alloc_elts(txq_ctrl);
txq_ctrl->ibv = mlx5_txq_ibv_new(dev, i);
if (!txq_ctrl->ibv) {
ret = ENOMEM;
rte_errno = ENOMEM;
goto error;
}
}
ret = mlx5_tx_uar_remap(dev, priv->ctx->cmd_fd);
if (ret)
goto error;
return ret;
return 0;
error:
ret = rte_errno; /* Save rte_errno before cleanup. */
mlx5_txq_stop(dev);
return ret;
rte_errno = ret; /* Restore rte_errno. */
return -rte_errno;
}

/**
@ -98,7 +100,7 @@ mlx5_rxq_stop(struct rte_eth_dev *dev)
* Pointer to Ethernet device structure.
*
* @return
* 0 on success, errno on error.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_rxq_start(struct rte_eth_dev *dev)
@ -116,15 +118,15 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
if (ret)
goto error;
rxq_ctrl->ibv = mlx5_rxq_ibv_new(dev, i);
if (!rxq_ctrl->ibv) {
ret = ENOMEM;
if (!rxq_ctrl->ibv)
goto error;
}
}
return -ret;
return 0;
error:
ret = rte_errno; /* Save rte_errno before cleanup. */
mlx5_rxq_stop(dev);
return -ret;
rte_errno = ret; /* Restore rte_errno. */
return -rte_errno;
}

/**
@ -136,48 +138,48 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
* Pointer to Ethernet device structure.
*
* @return
* 0 on success, negative errno value on failure.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_dev_start(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
struct mlx5_mr *mr = NULL;
int err;
int ret;

dev->data->dev_started = 1;
err = mlx5_flow_create_drop_queue(dev);
if (err) {
ret = mlx5_flow_create_drop_queue(dev);
if (ret) {
ERROR("%p: Drop queue allocation failed: %s",
(void *)dev, strerror(err));
(void *)dev, strerror(rte_errno));
goto error;
}
DEBUG("%p: allocating and configuring hash RX queues", (void *)dev);
rte_mempool_walk(mlx5_mp2mr_iter, priv);
err = mlx5_txq_start(dev);
if (err) {
ERROR("%p: TXQ allocation failed: %s",
(void *)dev, strerror(err));
ret = mlx5_txq_start(dev);
if (ret) {
ERROR("%p: Tx Queue allocation failed: %s",
(void *)dev, strerror(rte_errno));
goto error;
}
err = mlx5_rxq_start(dev);
if (err) {
ERROR("%p: RXQ allocation failed: %s",
(void *)dev, strerror(err));
ret = mlx5_rxq_start(dev);
if (ret) {
ERROR("%p: Rx Queue allocation failed: %s",
(void *)dev, strerror(rte_errno));
goto error;
}
err = mlx5_rx_intr_vec_enable(dev);
if (err) {
ERROR("%p: RX interrupt vector creation failed",
(void *)priv);
ret = mlx5_rx_intr_vec_enable(dev);
if (ret) {
ERROR("%p: Rx interrupt vector creation failed",
(void *)dev);
goto error;
}
mlx5_xstats_init(dev);
/* Update link status and Tx/Rx callbacks for the first time. */
memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
INFO("Forcing port %u link to be up", dev->data->port_id);
err = mlx5_force_link_status_change(dev, ETH_LINK_UP);
if (err) {
ret = mlx5_force_link_status_change(dev, ETH_LINK_UP);
if (ret) {
DEBUG("Failed to set port %u link to be up",
dev->data->port_id);
goto error;
@ -185,6 +187,7 @@ mlx5_dev_start(struct rte_eth_dev *dev)
mlx5_dev_interrupt_handler_install(dev);
return 0;
error:
ret = rte_errno; /* Save rte_errno before cleanup. */
/* Rollback. */
dev->data->dev_started = 0;
for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr))
@ -194,7 +197,8 @@ mlx5_dev_start(struct rte_eth_dev *dev)
mlx5_txq_stop(dev);
mlx5_rxq_stop(dev);
mlx5_flow_delete_drop_queue(dev);
return err;
rte_errno = ret; /* Restore rte_errno. */
return -rte_errno;
}

/**
@ -238,7 +242,7 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
* Pointer to Ethernet device structure.
*
* @return
* 0 on success.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_traffic_enable(struct rte_eth_dev *dev)
@ -276,8 +280,9 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
.type = 0,
};

claim_zero(mlx5_ctrl_flow(dev, &promisc, &promisc));
return 0;
ret = mlx5_ctrl_flow(dev, &promisc, &promisc);
if (ret)
goto error;
}
if (dev->data->all_multicast) {
struct rte_flow_item_eth multicast = {
@ -286,7 +291,9 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
.type = 0,
};

claim_zero(mlx5_ctrl_flow(dev, &multicast, &multicast));
ret = mlx5_ctrl_flow(dev, &multicast, &multicast);
if (ret)
goto error;
} else {
/* Add broadcast/multicast flows. */
for (i = 0; i != vlan_filter_n; ++i) {
@ -346,15 +353,17 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
goto error;
}
if (!vlan_filter_n) {
ret = mlx5_ctrl_flow(dev, &unicast,
&unicast_mask);
ret = mlx5_ctrl_flow(dev, &unicast, &unicast_mask);
if (ret)
goto error;
}
}
return 0;
error:
return rte_errno;
ret = rte_errno; /* Save rte_errno before cleanup. */
mlx5_flow_list_flush(dev, &priv->ctrl_flows);
rte_errno = ret; /* Restore rte_errno. */
return -rte_errno;
}

@ -379,14 +388,14 @@ mlx5_traffic_disable(struct rte_eth_dev *dev)
* Pointer to Ethernet device private data.
*
* @return
* 0 on success.
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_traffic_restart(struct rte_eth_dev *dev)
{
if (dev->data->dev_started) {
mlx5_traffic_disable(dev);
mlx5_traffic_enable(dev);
return mlx5_traffic_enable(dev);
}
return 0;
}
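mlx5_dev_start() composes several fallible steps, and its error label both rolls back everything already set up and preserves the first failure through the same save/restore pair seen elsewhere in the patch. A minimal sketch of a start routine built the same way; the step and stop helpers are hypothetical and deliberately clobber rte_errno during rollback.

#include <errno.h>

static int rte_errno;	/* stand-in for DPDK's per-lcore rte_errno */

/* Hypothetical fallible start-up steps. */
static int txq_start(void) { return 0; }
static int rxq_start(void) { rte_errno = ENOMEM; return -rte_errno; }

/* Hypothetical rollback helpers; may clobber rte_errno. */
static void txq_stop(void) { rte_errno = 0; }
static void rxq_stop(void) { rte_errno = 0; }

static int
dev_start(void)
{
	int ret;

	ret = txq_start();
	if (ret)
		goto error;
	ret = rxq_start();
	if (ret)
		goto error;
	return 0;
error:
	ret = rte_errno;	/* save rte_errno before cleanup */
	rxq_stop();	/* rollback in reverse order */
	txq_stop();
	rte_errno = ret;	/* restore rte_errno */
	return -rte_errno;
}

int
main(void)
{
	/* The first failure (ENOMEM) survives the rollback. */
	return (dev_start() == -ENOMEM && rte_errno == ENOMEM) ? 0 : 1;
}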
@ -161,7 +161,7 @@ mlx5_is_tx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads)
|
||||
* Thresholds parameters.
|
||||
*
|
||||
* @return
|
||||
* 0 on success, negative errno value on failure.
|
||||
* 0 on success, a negative errno value otherwise and rte_errno is set.
|
||||
*/
|
||||
int
|
||||
mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
|
||||
@ -171,7 +171,6 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
|
||||
struct mlx5_txq_data *txq = (*priv->txqs)[idx];
|
||||
struct mlx5_txq_ctrl *txq_ctrl =
|
||||
container_of(txq, struct mlx5_txq_ctrl, txq);
|
||||
int ret = 0;
|
||||
|
||||
/*
|
||||
* Don't verify port offloads for application which
|
||||
@ -179,13 +178,13 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
|
||||
*/
|
||||
if (!!(conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
|
||||
!mlx5_is_tx_queue_offloads_allowed(dev, conf->offloads)) {
|
||||
ret = ENOTSUP;
|
||||
rte_errno = ENOTSUP;
|
||||
ERROR("%p: Tx queue offloads 0x%" PRIx64 " don't match port "
|
||||
"offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
|
||||
(void *)dev, conf->offloads,
|
||||
dev->data->dev_conf.txmode.offloads,
|
||||
mlx5_get_tx_port_offloads(dev));
|
||||
goto out;
|
||||
return -rte_errno;
|
||||
}
|
||||
if (desc <= MLX5_TX_COMP_THRESH) {
|
||||
WARN("%p: number of descriptors requested for TX queue %u"
|
||||
@ -205,27 +204,26 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
|
||||
if (idx >= priv->txqs_n) {
|
||||
ERROR("%p: queue index out of range (%u >= %u)",
|
||||
(void *)dev, idx, priv->txqs_n);
|
||||
return -EOVERFLOW;
|
||||
rte_errno = EOVERFLOW;
|
||||
return -rte_errno;
|
||||
}
|
||||
if (!mlx5_txq_releasable(dev, idx)) {
|
||||
ret = EBUSY;
|
||||
rte_errno = EBUSY;
|
||||
ERROR("%p: unable to release queue index %u",
|
||||
(void *)dev, idx);
|
||||
goto out;
|
||||
return -rte_errno;
|
||||
}
|
||||
mlx5_txq_release(dev, idx);
|
||||
txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
|
||||
if (!txq_ctrl) {
|
||||
ERROR("%p: unable to allocate queue index %u",
|
||||
(void *)dev, idx);
|
||||
ret = ENOMEM;
|
||||
goto out;
|
||||
return -rte_errno;
|
||||
}
|
||||
DEBUG("%p: adding TX queue %p to list",
|
||||
(void *)dev, (void *)txq_ctrl);
|
||||
(*priv->txqs)[idx] = &txq_ctrl->txq;
|
||||
out:
|
||||
return -ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
 /**
@@ -248,9 +246,9 @@ mlx5_tx_queue_release(void *dpdk_txq)
         priv = txq_ctrl->priv;
         for (i = 0; (i != priv->txqs_n); ++i)
                 if ((*priv->txqs)[i] == txq) {
-                        mlx5_txq_release(priv->dev, i);
                         DEBUG("%p: removing TX queue %p from list",
                               (void *)priv->dev, (void *)txq_ctrl);
+                        mlx5_txq_release(priv->dev, i);
                         break;
                 }
 }
@@ -267,7 +265,7 @@ mlx5_tx_queue_release(void *dpdk_txq)
  *   Verbs file descriptor to map UAR pages.
  *
  * @return
- *   0 on success, errno value on failure.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
 mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
@@ -284,7 +282,6 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
         struct mlx5_txq_ctrl *txq_ctrl;
         int already_mapped;
         size_t page_size = sysconf(_SC_PAGESIZE);
-        int r;
 
         memset(pages, 0, priv->txqs_n * sizeof(uintptr_t));
         /*
@@ -323,8 +320,8 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
                                 /* fixed mmap has to return the same address */
                                 ERROR("call to mmap failed on UAR for txq %d\n",
                                       i);
-                                r = ENXIO;
-                                return r;
+                                rte_errno = ENXIO;
+                                return -rte_errno;
                         }
                 }
                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) /* save once */
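In the UAR hunk above, a fixed-address mmap() that does not come back at the requested address is now reported as ENXIO through the same convention. A standalone sketch of that verification, using an anonymous mapping rather than the Verbs file descriptor that mlx5_tx_uar_remap() actually remaps:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int
    main(void)
    {
            size_t page_size = (size_t)sysconf(_SC_PAGESIZE);
            void *addr;
            void *ret;

            /* Reserve one page to obtain a known address. */
            addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (addr == MAP_FAILED)
                    return 1;
            /* Remap at the same address; MAP_FIXED must return it exactly. */
            ret = mmap(addr, page_size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
            if (ret != addr) {
                    /* Also covers MAP_FAILED: report the cause as ENXIO. */
                    errno = ENXIO;
                    fprintf(stderr, "remap: %s\n", strerror(errno));
                    return 1;
            }
            munmap(addr, page_size);
            return 0;
    }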
@@ -364,7 +361,7 @@ is_empw_burst_func(eth_tx_burst_t tx_pkt_burst)
  *   Queue index in DPDK Tx queue array.
  *
  * @return
- *   The Verbs object initialised if it can be created.
+ *   The Verbs object initialised, NULL otherwise and rte_errno is set.
  */
 struct mlx5_txq_ibv *
 mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
@@ -394,7 +391,8 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
         priv->verbs_alloc_ctx.obj = txq_ctrl;
         if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
                 ERROR("MLX5_ENABLE_CQE_COMPRESSION must never be set");
-                goto error;
+                rte_errno = EINVAL;
+                return NULL;
         }
         memset(&tmpl, 0, sizeof(struct mlx5_txq_ibv));
         /* MRs will be registered in mp2mr[] later. */
@@ -408,6 +406,7 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
         tmpl.cq = mlx5_glue->create_cq(priv->ctx, cqe_n, NULL, NULL, 0);
         if (tmpl.cq == NULL) {
                 ERROR("%p: CQ creation failure", (void *)txq_ctrl);
+                rte_errno = errno;
                 goto error;
         }
         attr.init = (struct ibv_qp_init_attr_ex){
@@ -449,6 +448,7 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
         tmpl.qp = mlx5_glue->create_qp_ex(priv->ctx, &attr.init);
         if (tmpl.qp == NULL) {
                 ERROR("%p: QP creation failure", (void *)txq_ctrl);
+                rte_errno = errno;
                 goto error;
         }
         attr.mod = (struct ibv_qp_attr){
@@ -461,6 +461,7 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
                                 (IBV_QP_STATE | IBV_QP_PORT));
         if (ret) {
                 ERROR("%p: QP state to IBV_QPS_INIT failed", (void *)txq_ctrl);
+                rte_errno = errno;
                 goto error;
         }
         attr.mod = (struct ibv_qp_attr){
@@ -469,18 +470,21 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
         ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
         if (ret) {
                 ERROR("%p: QP state to IBV_QPS_RTR failed", (void *)txq_ctrl);
+                rte_errno = errno;
                 goto error;
         }
         attr.mod.qp_state = IBV_QPS_RTS;
         ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
         if (ret) {
                 ERROR("%p: QP state to IBV_QPS_RTS failed", (void *)txq_ctrl);
+                rte_errno = errno;
                 goto error;
         }
         txq_ibv = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_txq_ibv), 0,
                                     txq_ctrl->socket);
         if (!txq_ibv) {
                 ERROR("%p: cannot allocate memory", (void *)txq_ctrl);
+                rte_errno = ENOMEM;
                 goto error;
         }
         obj.cq.in = tmpl.cq;
@@ -488,11 +492,14 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
         obj.qp.in = tmpl.qp;
         obj.qp.out = &qp;
         ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
-        if (ret != 0)
+        if (ret != 0) {
+                rte_errno = errno;
                 goto error;
+        }
         if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
                 ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
                       "it should be set to %u", RTE_CACHE_LINE_SIZE);
+                rte_errno = EINVAL;
                 goto error;
         }
         txq_data->cqe_n = log2above(cq_info.cqe_cnt);
@@ -518,6 +525,7 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
                 txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
         } else {
                 ERROR("Failed to retrieve UAR info, invalid libmlx5.so version");
+                rte_errno = EINVAL;
                 goto error;
         }
         DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)dev,
@@ -526,11 +534,13 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
         priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
         return txq_ibv;
 error:
+        ret = rte_errno; /* Save rte_errno before cleanup. */
         if (tmpl.cq)
                 claim_zero(mlx5_glue->destroy_cq(tmpl.cq));
         if (tmpl.qp)
                 claim_zero(mlx5_glue->destroy_qp(tmpl.qp));
         priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
+        rte_errno = ret; /* Restore rte_errno. */
         return NULL;
 }
 
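The error: label above shows the one subtlety of routing causes through rte_errno: the cleanup calls (destroy_cq(), destroy_qp()) may themselves overwrite it, so the value is saved before cleanup and restored before returning NULL. A minimal sketch of the same save/restore idiom with plain errno and hypothetical obj_new()/obj_destroy():

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    struct obj { int fd; };

    static void
    obj_destroy(struct obj *o)
    {
            if (o->fd >= 0)
                    close(o->fd);   /* cleanup may clobber errno */
            free(o);
    }

    static struct obj *
    obj_new(const char *path)
    {
            struct obj *o = calloc(1, sizeof(*o));
            int err;

            if (o == NULL) {
                    errno = ENOMEM;
                    return NULL;
            }
            o->fd = open(path, O_RDONLY);
            if (o->fd < 0)
                    goto error;     /* open() already set errno */
            return o;
    error:
            err = errno;            /* save errno before cleanup */
            obj_destroy(o);
            errno = err;            /* restore it for the caller */
            return NULL;
    }

    int
    main(void)
    {
            struct obj *o = obj_new("/no/such/path");

            if (o == NULL) {
                    fprintf(stderr, "obj_new: %s\n", strerror(errno));
                    return 1;
            }
            obj_destroy(o);
            return 0;
    }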
@@ -743,7 +753,7 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
  *   Thresholds parameters.
  *
  * @return
- *   A DPDK queue object on success.
+ *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
  */
 struct mlx5_txq_ctrl *
 mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
@@ -756,8 +766,10 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                                  sizeof(*tmpl) +
                                  desc * sizeof(struct rte_mbuf *),
                                  0, socket);
-        if (!tmpl)
+        if (!tmpl) {
+                rte_errno = ENOMEM;
                 return NULL;
+        }
         assert(desc > MLX5_TX_COMP_THRESH);
         tmpl->txq.offloads = conf->offloads;
         tmpl->priv = priv;
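Pointer-returning constructors such as mlx5_txq_new() cannot encode an error code in the return value, so NULL plus rte_errno is the whole contract. A toy illustration with plain errno (ring_new() is a hypothetical stand-in, including a flexible array member like the trailing mbuf pointer array above):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct ring {
            size_t n;
            int slots[];            /* trailing array, sized at allocation */
    };

    static struct ring *
    ring_new(size_t n)
    {
            struct ring *r = malloc(sizeof(*r) + n * sizeof(int));

            if (r == NULL) {
                    errno = ENOMEM; /* NULL alone says nothing; errno says why */
                    return NULL;
            }
            r->n = n;
            return r;
    }

    int
    main(void)
    {
            struct ring *r = ring_new(16);

            if (r == NULL) {
                    fprintf(stderr, "ring_new: %s\n", strerror(errno));
                    return 1;
            }
            free(r);
            return 0;
    }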
@@ -37,14 +37,13 @@
  *   Toggle filter.
  *
  * @return
- *   0 on success, negative errno value on failure.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
 mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 {
         struct priv *priv = dev->data->dev_private;
         unsigned int i;
-        int ret = 0;
 
         DEBUG("%p: %s VLAN filter ID %" PRIu16,
               (void *)dev, (on ? "enable" : "disable"), vlan_id);
@@ -54,8 +53,8 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
                         break;
         /* Check if there's room for another VLAN filter. */
         if (i == RTE_DIM(priv->vlan_filter)) {
-                ret = -ENOMEM;
-                goto out;
+                rte_errno = ENOMEM;
+                return -rte_errno;
         }
         if (i < priv->vlan_filter_n) {
                 assert(priv->vlan_filter_n != 0);
@@ -78,10 +77,10 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
                 priv->vlan_filter[priv->vlan_filter_n] = vlan_id;
                 ++priv->vlan_filter_n;
         }
-        if (dev->data->dev_started)
-                mlx5_traffic_restart(dev);
-out:
-        return ret;
+        if (dev->data->dev_started)
+                return mlx5_traffic_restart(dev);
+        return 0;
 }
 
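Dropping the out: label also lets mlx5_vlan_filter_set() forward the result of mlx5_traffic_restart() unchanged, since both functions now share the same contract. A standalone sketch of that tail propagation, with hypothetical filter_set()/restart() stand-ins:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    static int
    restart(int ok)
    {
            if (!ok) {
                    errno = EAGAIN;
                    return -errno;
            }
            return 0;
    }

    static int
    filter_set(int started, int ok)
    {
            /* ... update the filter table ... */
            if (started)
                    return restart(ok);     /* same contract: forward as-is */
            return 0;
    }

    int
    main(void)
    {
            int ret = filter_set(1, 0);

            if (ret < 0)
                    fprintf(stderr, "filter_set: %s\n", strerror(-ret));
            return 0;
    }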
 /**
@@ -105,7 +104,7 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
         uint16_t vlan_offloads =
                 (on ? IBV_WQ_FLAGS_CVLAN_STRIPPING : 0) |
                 0;
-        int err;
+        int ret;
 
         /* Validate hw support */
         if (!priv->config.hw_vlan_strip) {
@@ -129,10 +128,10 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
                 .flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING,
                 .flags = vlan_offloads,
         };
-        err = mlx5_glue->modify_wq(rxq_ctrl->ibv->wq, &mod);
-        if (err) {
+        ret = mlx5_glue->modify_wq(rxq_ctrl->ibv->wq, &mod);
+        if (ret) {
                 ERROR("%p: failed to modify stripping mode: %s",
-                      (void *)dev, strerror(err));
+                      (void *)dev, strerror(rte_errno));
                 return;
         }
         /* Update related bits in RX queue. */
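mlx5_vlan_strip_queue_set() is a void callback, so it cannot propagate a failure; it logs strerror(rte_errno) instead. A toy sketch of the same pattern with plain errno, assuming the failing call has left the cause in the shared slot (toggle() and strip_queue_set() are hypothetical):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    static int
    toggle(int ok)
    {
            if (!ok) {
                    errno = EINVAL; /* leave the cause in the shared slot */
                    return -errno;
            }
            return 0;
    }

    static void
    strip_queue_set(int on)
    {
            if (toggle(on)) {
                    /* No return value to propagate: log the cause instead. */
                    fprintf(stderr, "failed to modify stripping mode: %s\n",
                            strerror(errno));
                    return;
            }
    }

    int
    main(void)
    {
            strip_queue_set(0);
            return 0;
    }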
@@ -146,6 +145,9 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
  *   Pointer to Ethernet device structure.
  * @param mask
  *   VLAN offload bit mask.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
 mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)