net/mlx5: remove control path locks

In the priv structure, only the memory region (MR) list needs to be protected
against concurrent access between the control plane and the data plane.

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
Author:    Nélio Laranjeiro <nelio.laranjeiro@6wind.com>
Date:      2018-03-05 13:21:03 +01:00
Committer: Ferruh Yigit
Commit:    7b2423cd2e (parent: 0b3456e391)
11 changed files with 9 additions and 166 deletions
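To illustrate the commit message, here is a minimal sketch (an editor's illustration, not code from the patch) of the locking model this change leaves behind: the coarse priv_lock()/priv_unlock() control-path helpers disappear, and only the MR list keeps a dedicated spinlock, mirroring what mlx5_txq_mp2mr_reg() does in the mlx5_mr.c hunk below. The stub types and the helper mr_lookup_stub() are hypothetical; everything else in priv is touched only from the control path after this change and so needs no lock.

#include <sys/queue.h>
#include <rte_spinlock.h>

/* Hypothetical stand-ins for the driver's types, for illustration only. */
struct mlx5_mr {
	LIST_ENTRY(mlx5_mr) next; /* Chained in priv->mr. */
};

struct priv {
	LIST_HEAD(, mlx5_mr) mr; /* Registered memory regions. */
	rte_spinlock_t mr_lock;  /* Sole remaining lock: protects the MR list. */
};

/*
 * Both the control path and the data path take mr_lock before touching
 * priv->mr, exactly as mlx5_txq_mp2mr_reg() does in the diff below.
 */
static struct mlx5_mr *
mr_lookup_stub(struct priv *priv)
{
	struct mlx5_mr *mr;

	rte_spinlock_lock(&priv->mr_lock);
	mr = LIST_FIRST(&priv->mr); /* Placeholder for the real lookup. */
	rte_spinlock_unlock(&priv->mr_lock);
	return mr;
}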

drivers/net/mlx5/mlx5.c

@@ -165,7 +165,6 @@ mlx5_dev_close(struct rte_eth_dev *dev)
 	unsigned int i;
 	int ret;
 
-	priv_lock(priv);
 	DEBUG("%p: closing device \"%s\"",
 	      (void *)dev,
 	      ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
@@ -227,7 +226,6 @@ mlx5_dev_close(struct rte_eth_dev *dev)
 	ret = priv_mr_verify(priv);
 	if (ret)
 		WARN("%p: some Memory Region still remain", (void *)priv);
-	priv_unlock(priv);
 	memset(priv, 0, sizeof(*priv));
 }
 

drivers/net/mlx5/mlx5.h

@@ -148,7 +148,7 @@ struct priv {
 	LIST_HEAD(ind_tables, mlx5_ind_table_ibv) ind_tbls;
 	uint32_t link_speed_capa; /* Link speed capabilities. */
 	struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */
-	rte_spinlock_t lock; /* Lock for control functions. */
+	rte_spinlock_t mr_lock; /* MR Lock. */
 	int primary_socket; /* Unix socket for primary process. */
 	void *uar_base; /* Reserved address space for UAR mapping */
 	struct rte_intr_handle intr_handle_socket; /* Interrupt handler. */
@@ -157,47 +157,6 @@ struct priv {
 	/* Context for Verbs allocator. */
 };
 
-/**
- * Lock private structure to protect it from concurrent access in the
- * control path.
- *
- * @param priv
- *   Pointer to private structure.
- */
-static inline void
-priv_lock(struct priv *priv)
-{
-	rte_spinlock_lock(&priv->lock);
-}
-
-/**
- * Try to lock private structure to protect it from concurrent access in the
- * control path.
- *
- * @param priv
- *   Pointer to private structure.
- *
- * @return
- *   1 if the lock is successfully taken; 0 otherwise.
- */
-static inline int
-priv_trylock(struct priv *priv)
-{
-	return rte_spinlock_trylock(&priv->lock);
-}
-
-/**
- * Unlock private structure.
- *
- * @param priv
- *   Pointer to private structure.
- */
-static inline void
-priv_unlock(struct priv *priv)
-{
-	rte_spinlock_unlock(&priv->lock);
-}
-
 /* mlx5.c */
 
 int mlx5_getenv_int(const char *);

drivers/net/mlx5/mlx5_ethdev.c

@@ -269,18 +269,16 @@ priv_set_flags(struct priv *priv, unsigned int keep, unsigned int flags)
 }
 
 /**
- * Ethernet device configuration.
- *
- * Prepare the driver for a given number of TX and RX queues.
+ * DPDK callback for Ethernet device configuration.
  *
  * @param dev
  *   Pointer to Ethernet device structure.
 *
 * @return
- *   0 on success, errno value on failure.
+ *   0 on success, negative errno value on failure.
 */
-static int
-dev_configure(struct rte_eth_dev *dev)
+int
+mlx5_dev_configure(struct rte_eth_dev *dev)
 {
 	struct priv *priv = dev->data->dev_private;
 	unsigned int rxqs_n = dev->data->nb_rx_queues;
@@ -362,28 +360,7 @@ dev_configure(struct rte_eth_dev *dev)
 			j = 0;
 	}
 	return 0;
 }
 
-/**
- * DPDK callback for Ethernet device configuration.
- *
- * @param dev
- *   Pointer to Ethernet device structure.
- *
- * @return
- *   0 on success, negative errno value on failure.
- */
-int
-mlx5_dev_configure(struct rte_eth_dev *dev)
-{
-	struct priv *priv = dev->data->dev_private;
-	int ret;
-
-	priv_lock(priv);
-	ret = dev_configure(dev);
-	assert(ret >= 0);
-	priv_unlock(priv);
-	return -ret;
-}
-
 /**
@@ -403,7 +380,6 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 	char ifname[IF_NAMESIZE];
 
 	info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-	priv_lock(priv);
 	/* FIXME: we should ask the device for these values. */
 	info->min_rx_bufsize = 32;
 	info->max_rx_pktlen = 65536;
@@ -431,7 +407,6 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 	info->hash_key_size = priv->rss_conf.rss_key_len;
 	info->speed_capa = priv->link_speed_capa;
 	info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK;
-	priv_unlock(priv);
 }
 
 /**
@@ -490,7 +465,6 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev)
 	struct rte_eth_link dev_link;
 	int link_speed = 0;
 
-	/* priv_lock() is not taken to allow concurrent calls. */
 	if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
 		WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
 		return -1;
@@ -756,9 +730,7 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
 	struct priv *priv = dev->data->dev_private;
 	int ret;
 
-	priv_lock(priv);
 	ret = priv_link_update(priv, wait_to_complete);
-	priv_unlock(priv);
 	return ret;
 }
@@ -780,7 +752,6 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	uint16_t kern_mtu;
 	int ret = 0;
 
-	priv_lock(priv);
 	ret = priv_get_mtu(priv, &kern_mtu);
 	if (ret)
 		goto out;
@@ -795,13 +766,11 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 		priv->mtu = mtu;
 		DEBUG("adapter port %u MTU set to %u", priv->port, mtu);
 	}
-	priv_unlock(priv);
 	return 0;
 out:
 	ret = errno;
 	WARN("cannot set port %u MTU to %u: %s", priv->port, mtu,
 	     strerror(ret));
-	priv_unlock(priv);
 	assert(ret >= 0);
 	return -ret;
 }
@@ -828,7 +797,6 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	int ret;
 
 	ifr.ifr_data = (void *)&ethpause;
-	priv_lock(priv);
 	if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
 		ret = errno;
 		WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM)"
@@ -847,7 +815,6 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		fc_conf->mode = RTE_FC_NONE;
 	ret = 0;
 out:
-	priv_unlock(priv);
 	assert(ret >= 0);
 	return -ret;
 }
@@ -886,7 +853,6 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 		ethpause.tx_pause = 1;
 	else
 		ethpause.tx_pause = 0;
-	priv_lock(priv);
 	if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
 		ret = errno;
 		WARN("ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
@@ -896,7 +862,6 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 	}
 	ret = 0;
out:
-	priv_unlock(priv);
 	assert(ret >= 0);
 	return -ret;
 }
@@ -1039,15 +1004,8 @@ mlx5_dev_link_status_handler(void *arg)
 	struct priv *priv = dev->data->dev_private;
 	int ret;
 
-	while (!priv_trylock(priv)) {
-		/* Alarm is being canceled. */
-		if (priv->pending_alarm == 0)
-			return;
-		rte_pause();
-	}
 	priv->pending_alarm = 0;
 	ret = priv_link_status_update(priv);
-	priv_unlock(priv);
 	if (!ret)
 		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 }
@@ -1067,9 +1025,7 @@ mlx5_dev_interrupt_handler(void *cb_arg)
 	struct priv *priv = dev->data->dev_private;
 	uint32_t events;
 
-	priv_lock(priv);
 	events = priv_dev_status_handler(priv);
-	priv_unlock(priv);
 	if (events & (1 << RTE_ETH_EVENT_INTR_LSC))
 		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 	if (events & (1 << RTE_ETH_EVENT_INTR_RMV))
@@ -1088,9 +1044,7 @@ mlx5_dev_handler_socket(void *cb_arg)
 	struct rte_eth_dev *dev = cb_arg;
 	struct priv *priv = dev->data->dev_private;
 
-	priv_lock(priv);
 	priv_socket_handle(priv);
-	priv_unlock(priv);
 }
 
 /**
@@ -1190,9 +1144,7 @@ mlx5_set_link_down(struct rte_eth_dev *dev)
 	struct priv *priv = dev->data->dev_private;
 	int err;
 
-	priv_lock(priv);
 	err = priv_dev_set_link(priv, 0);
-	priv_unlock(priv);
 	return err;
 }
@@ -1211,9 +1163,7 @@ mlx5_set_link_up(struct rte_eth_dev *dev)
 	struct priv *priv = dev->data->dev_private;
 	int err;
 
-	priv_lock(priv);
 	err = priv_dev_set_link(priv, 1);
-	priv_unlock(priv);
 	return err;
 }

drivers/net/mlx5/mlx5_flow.c

@@ -1919,9 +1919,7 @@ mlx5_flow_validate(struct rte_eth_dev *dev,
 	int ret;
 	struct mlx5_flow_parse parser = { .create = 0, };
 
-	priv_lock(priv);
 	ret = priv_flow_convert(priv, attr, items, actions, error, &parser);
-	priv_unlock(priv);
 	return ret;
 }
@@ -1941,10 +1939,8 @@ mlx5_flow_create(struct rte_eth_dev *dev,
 	struct priv *priv = dev->data->dev_private;
 	struct rte_flow *flow;
 
-	priv_lock(priv);
 	flow = priv_flow_create(priv, &priv->flows, attr, items, actions,
 				error);
-	priv_unlock(priv);
 	return flow;
 }
@@ -2431,9 +2427,7 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,
 {
 	struct priv *priv = dev->data->dev_private;
 
-	priv_lock(priv);
 	priv_flow_destroy(priv, &priv->flows, flow);
-	priv_unlock(priv);
 	return 0;
 }
@@ -2449,9 +2443,7 @@ mlx5_flow_flush(struct rte_eth_dev *dev,
 {
 	struct priv *priv = dev->data->dev_private;
 
-	priv_lock(priv);
 	priv_flow_flush(priv, &priv->flows);
-	priv_unlock(priv);
 	return 0;
 }
@@ -2509,16 +2501,14 @@ priv_flow_query_count(struct ibv_counter_set *cs,
 * @see rte_flow_ops
 */
 int
-mlx5_flow_query(struct rte_eth_dev *dev,
+mlx5_flow_query(struct rte_eth_dev *dev __rte_unused,
 		struct rte_flow *flow,
 		enum rte_flow_action_type action __rte_unused,
 		void *data,
 		struct rte_flow_error *error)
 {
-	struct priv *priv = dev->data->dev_private;
 	int res = EINVAL;
 
-	priv_lock(priv);
 	if (flow->cs) {
 		res = priv_flow_query_count(flow->cs,
 					    &flow->counter_stats,
@@ -2530,7 +2520,6 @@ mlx5_flow_query(struct rte_eth_dev *dev,
 				   NULL,
 				   "no counter found for flow");
 	}
-	priv_unlock(priv);
 	return -res;
 }
 #endif
@@ -2548,13 +2537,11 @@ mlx5_flow_isolate(struct rte_eth_dev *dev,
 {
 	struct priv *priv = dev->data->dev_private;
 
-	priv_lock(priv);
 	if (dev->data->dev_started) {
 		rte_flow_error_set(error, EBUSY,
 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 				   NULL,
 				   "port must be stopped first");
-		priv_unlock(priv);
 		return -rte_errno;
 	}
 	priv->isolated = !!enable;
@@ -2562,7 +2549,6 @@
 		priv->dev->dev_ops = &mlx5_dev_ops_isolate;
 	else
 		priv->dev->dev_ops = &mlx5_dev_ops;
-	priv_unlock(priv);
 	return 0;
 }
@@ -3044,9 +3030,7 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
 		*(const void **)arg = &mlx5_flow_ops;
 		return 0;
 	case RTE_ETH_FILTER_FDIR:
-		priv_lock(priv);
 		ret = priv_fdir_ctrl_func(priv, filter_op, arg);
-		priv_unlock(priv);
 		break;
 	default:
 		ERROR("%p: filter type (%d) not supported",

drivers/net/mlx5/mlx5_mr.c

@@ -164,9 +164,9 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
 		container_of(txq, struct mlx5_txq_ctrl, txq);
 	struct mlx5_mr *mr;
 
-	priv_lock(txq_ctrl->priv);
+	rte_spinlock_lock(&txq_ctrl->priv->mr_lock);
 	mr = priv_txq_mp2mr_reg(txq_ctrl->priv, txq, mp, idx);
-	priv_unlock(txq_ctrl->priv);
+	rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
 	return mr;
 }

drivers/net/mlx5/mlx5_rss.c

@@ -44,7 +44,6 @@ mlx5_rss_hash_update(struct rte_eth_dev *dev,
 	struct priv *priv = dev->data->dev_private;
 	int ret = 0;
 
-	priv_lock(priv);
 	if (rss_conf->rss_hf & MLX5_RSS_HF_MASK) {
 		ret = -EINVAL;
 		goto out;
@@ -62,7 +61,6 @@ mlx5_rss_hash_update(struct rte_eth_dev *dev,
 	}
 	priv->rss_conf.rss_hf = rss_conf->rss_hf;
 out:
-	priv_unlock(priv);
 	return ret;
 }
@@ -85,7 +83,6 @@ mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
 	if (!rss_conf)
 		return -EINVAL;
 
-	priv_lock(priv);
 	if (rss_conf->rss_key &&
 	    (rss_conf->rss_key_len >= priv->rss_conf.rss_key_len)) {
 		memcpy(rss_conf->rss_key, priv->rss_conf.rss_key,
@@ -93,7 +90,6 @@ mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
 	}
 	rss_conf->rss_key_len = priv->rss_conf.rss_key_len;
 	rss_conf->rss_hf = priv->rss_conf.rss_hf;
-	priv_unlock(priv);
 	return 0;
 }
@@ -222,9 +218,7 @@ mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
 	int ret;
 	struct priv *priv = dev->data->dev_private;
 
-	priv_lock(priv);
 	ret = priv_dev_rss_reta_query(priv, reta_conf, reta_size);
-	priv_unlock(priv);
 	return -ret;
 }
@@ -249,9 +243,7 @@ mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
 	int ret;
 	struct priv *priv = dev->data->dev_private;
 
-	priv_lock(priv);
 	ret = priv_dev_rss_reta_update(priv, reta_conf, reta_size);
-	priv_unlock(priv);
 	if (dev->data->dev_started) {
 		mlx5_dev_stop(dev);
 		mlx5_dev_start(dev);

drivers/net/mlx5/mlx5_rxq.c

@@ -286,7 +286,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 	int ret = 0;
 
-	priv_lock(priv);
 	if (!rte_is_power_of_2(desc)) {
 		desc = 1 << log2above(desc);
 		WARN("%p: increased number of descriptors in RX queue %u"
@@ -298,7 +297,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	if (idx >= priv->rxqs_n) {
 		ERROR("%p: queue index out of range (%u >= %u)",
 		      (void *)dev, idx, priv->rxqs_n);
-		priv_unlock(priv);
 		return -EOVERFLOW;
 	}
 	if (!priv_is_rx_queue_offloads_allowed(priv, conf->offloads)) {
@@ -329,7 +327,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	      (void *)dev, (void *)rxq_ctrl);
 	(*priv->rxqs)[idx] = &rxq_ctrl->rxq;
out:
-	priv_unlock(priv);
 	return -ret;
 }
@@ -350,12 +347,10 @@ mlx5_rx_queue_release(void *dpdk_rxq)
 		return;
 	rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 	priv = rxq_ctrl->priv;
-	priv_lock(priv);
 	if (!mlx5_priv_rxq_releasable(priv, rxq_ctrl->rxq.stats.idx))
 		rte_panic("Rx queue %p is still used by a flow and cannot be"
 			  " removed\n", (void *)rxq_ctrl);
 	mlx5_priv_rxq_release(priv, rxq_ctrl->rxq.stats.idx);
-	priv_unlock(priv);
 }
 
 /**
@@ -512,7 +507,6 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	struct mlx5_rxq_ctrl *rxq_ctrl;
 	int ret = 0;
 
-	priv_lock(priv);
 	rxq_data = (*priv->rxqs)[rx_queue_id];
 	if (!rxq_data) {
 		ret = EINVAL;
@@ -531,7 +525,6 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 		mlx5_priv_rxq_ibv_release(priv, rxq_ibv);
 	}
exit:
-	priv_unlock(priv);
 	if (ret)
 		WARN("unable to arm interrupt on rx queue %d", rx_queue_id);
 	return -ret;
@@ -559,7 +552,6 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	void *ev_ctx;
 	int ret = 0;
 
-	priv_lock(priv);
 	rxq_data = (*priv->rxqs)[rx_queue_id];
 	if (!rxq_data) {
 		ret = EINVAL;
@@ -583,7 +575,6 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
exit:
 	if (rxq_ibv)
 		mlx5_priv_rxq_ibv_release(priv, rxq_ibv);
-	priv_unlock(priv);
 	if (ret)
 		WARN("unable to disable interrupt on rx queue %d",
 		     rx_queue_id);

drivers/net/mlx5/mlx5_stats.c

@@ -328,7 +328,6 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 	unsigned int i;
 	unsigned int idx;
 
-	priv_lock(priv);
 	/* Add software counters. */
 	for (i = 0; (i != priv->rxqs_n); ++i) {
 		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
@@ -374,7 +373,6 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 	/* FIXME: retrieve and add hardware counters. */
 #endif
 	*stats = tmp;
-	priv_unlock(priv);
 	return 0;
 }
@@ -391,7 +389,6 @@ mlx5_stats_reset(struct rte_eth_dev *dev)
 	unsigned int i;
 	unsigned int idx;
 
-	priv_lock(priv);
 	for (i = 0; (i != priv->rxqs_n); ++i) {
 		if ((*priv->rxqs)[i] == NULL)
 			continue;
@@ -409,7 +406,6 @@ mlx5_stats_reset(struct rte_eth_dev *dev)
 #ifndef MLX5_PMD_SOFT_COUNTERS
 	/* FIXME: reset hardware counters. */
 #endif
-	priv_unlock(priv);
 }
 
 /**
@@ -436,16 +432,13 @@ mlx5_xstats_get(struct rte_eth_dev *dev,
 		struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
 		int stats_n;
 
-		priv_lock(priv);
 		stats_n = priv_ethtool_get_stats_n(priv);
 		if (stats_n < 0) {
-			priv_unlock(priv);
 			return -1;
 		}
 		if (xstats_ctrl->stats_n != stats_n)
 			priv_xstats_init(priv);
 		ret = priv_xstats_get(priv, stats);
-		priv_unlock(priv);
 	}
 	return ret;
 }
@@ -463,15 +456,12 @@ mlx5_xstats_reset(struct rte_eth_dev *dev)
 	struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
 	int stats_n;
 
-	priv_lock(priv);
 	stats_n = priv_ethtool_get_stats_n(priv);
 	if (stats_n < 0)
-		goto unlock;
+		return;
 	if (xstats_ctrl->stats_n != stats_n)
 		priv_xstats_init(priv);
 	priv_xstats_reset(priv);
-unlock:
-	priv_unlock(priv);
 }
 
 /**
@@ -491,18 +481,15 @@ int
-mlx5_xstats_get_names(struct rte_eth_dev *dev,
+mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
 		struct rte_eth_xstat_name *xstats_names, unsigned int n)
 {
-	struct priv *priv = dev->data->dev_private;
 	unsigned int i;
 
 	if (n >= xstats_n && xstats_names) {
-		priv_lock(priv);
 		for (i = 0; i != xstats_n; ++i) {
 			strncpy(xstats_names[i].name,
 				mlx5_counters_init[i].dpdk_name,
 				RTE_ETH_XSTATS_NAME_SIZE);
 			xstats_names[i].name[RTE_ETH_XSTATS_NAME_SIZE - 1] = 0;
 		}
-		priv_unlock(priv);
 	}
 	return xstats_n;
 }

drivers/net/mlx5/mlx5_trigger.c

@@ -142,7 +142,6 @@ mlx5_dev_start(struct rte_eth_dev *dev)
 	int err;
 
 	dev->data->dev_started = 1;
-	priv_lock(priv);
 	err = priv_flow_create_drop_queue(priv);
 	if (err) {
 		ERROR("%p: Drop queue allocation failed: %s",
@@ -180,7 +179,6 @@ mlx5_dev_start(struct rte_eth_dev *dev)
 		goto error;
 	}
 	priv_dev_interrupt_handler_install(priv, dev);
-	priv_unlock(priv);
 	return 0;
error:
 	/* Rollback. */
@@ -192,7 +190,6 @@ mlx5_dev_start(struct rte_eth_dev *dev)
 	priv_txq_stop(priv);
 	priv_rxq_stop(priv);
 	priv_flow_delete_drop_queue(priv);
-	priv_unlock(priv);
 	return err;
 }
@@ -210,7 +207,6 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
 	struct priv *priv = dev->data->dev_private;
 	struct mlx5_mr *mr;
 
-	priv_lock(priv);
 	dev->data->dev_started = 0;
 	/* Prevent crashes when queues are still in use. */
 	dev->rx_pkt_burst = removed_rx_burst;
@@ -227,7 +223,6 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
 	for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr))
 		priv_mr_release(priv, mr);
 	priv_flow_delete_drop_queue(priv);
-	priv_unlock(priv);
 }
 
 /**
@@ -412,8 +407,6 @@ mlx5_traffic_restart(struct rte_eth_dev *dev)
 {
 	struct priv *priv = dev->data->dev_private;
 
-	priv_lock(priv);
 	priv_dev_traffic_restart(priv, dev);
-	priv_unlock(priv);
 	return 0;
 }

drivers/net/mlx5/mlx5_txq.c

@@ -172,7 +172,6 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		container_of(txq, struct mlx5_txq_ctrl, txq);
 	int ret = 0;
 
-	priv_lock(priv);
 	/*
 	 * Don't verify port offloads for application which
 	 * use the old API.
@@ -205,7 +204,6 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	if (idx >= priv->txqs_n) {
 		ERROR("%p: queue index out of range (%u >= %u)",
 		      (void *)dev, idx, priv->txqs_n);
-		priv_unlock(priv);
 		return -EOVERFLOW;
 	}
 	if (!mlx5_priv_txq_releasable(priv, idx)) {
@@ -226,7 +224,6 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	      (void *)dev, (void *)txq_ctrl);
 	(*priv->txqs)[idx] = &txq_ctrl->txq;
out:
-	priv_unlock(priv);
 	return -ret;
 }
@@ -248,7 +245,6 @@ mlx5_tx_queue_release(void *dpdk_txq)
 		return;
 	txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
 	priv = txq_ctrl->priv;
-	priv_lock(priv);
 	for (i = 0; (i != priv->txqs_n); ++i)
 		if ((*priv->txqs)[i] == txq) {
 			DEBUG("%p: removing TX queue %p from list",
@@ -256,7 +252,6 @@ mlx5_tx_queue_release(void *dpdk_txq)
			      (void *)dev, (void *)txq);
 			mlx5_priv_txq_release(priv, i);
 			break;
 		}
-	priv_unlock(priv);
 }

drivers/net/mlx5/mlx5_vlan.c

@@ -46,7 +46,6 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 	unsigned int i;
 	int ret = 0;
 
-	priv_lock(priv);
 	DEBUG("%p: %s VLAN filter ID %" PRIu16,
 	      (void *)dev, (on ? "enable" : "disable"), vlan_id);
 	assert(priv->vlan_filter_n <= RTE_DIM(priv->vlan_filter));
@@ -82,7 +81,6 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 	if (dev->data->dev_started)
 		priv_dev_traffic_restart(priv, dev);
out:
-	priv_unlock(priv);
 	return ret;
 }
@@ -155,9 +153,7 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
 		ERROR("VLAN stripping, invalid queue number %d", queue);
 		return;
 	}
-	priv_lock(priv);
 	priv_vlan_strip_queue_set(priv, queue, on);
-	priv_unlock(priv);
 }
 
 /**
@@ -183,10 +179,8 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 			return 0;
 		}
 		/* Run on every RX queue and set/reset VLAN stripping. */
-		priv_lock(priv);
 		for (i = 0; (i != priv->rxqs_n); i++)
 			priv_vlan_strip_queue_set(priv, i, hw_vlan_strip);
-		priv_unlock(priv);
 	}
 	return 0;
 }