net/bonding: fix slave activation simultaneously

The bonding PMD decides to activate/deactivate its slaves according to
the slaves' link statuses.
Thus, it registers for the LSC events of the slave ports and
activates/deactivates them from its LSC callbacks, which are called
asynchronously by the host thread when a slave's link status changes.

In addition, the bonding PMD uses the callback for slave activation
when it tries to start the slave; this operation is typically invoked
by the master thread.

Consequently, a slave may be activated at the same time by two
different threads, which may cause various errors; for example,
recreating a slave mempool with the same name causes an error.

Synchronize the critical section in the LSC callback using a special
new spinlock.

Fixes: 414b202343 ("bonding: fix initial link status of slave")
Fixes: a45b288ef2 ("bond: support link status polling")
Cc: stable@dpdk.org

Signed-off-by: Matan Azrad <matan@mellanox.com>
Acked-by: Declan Doherty <declan.doherty@intel.com>
This commit is contained in:
Matan Azrad 2018-04-24 11:29:30 +00:00 committed by Ferruh Yigit
parent efe73c0d1d
commit 59056833cc
2 changed files with 16 additions and 2 deletions

View File

@ -2653,14 +2653,21 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
if (!valid_slave)
return rc;
/* Synchronize lsc callback parallel calls either by real link event
* from the slaves PMDs or by the bonding PMD itself.
*/
rte_spinlock_lock(&internals->lsc_lock);
/* Search for port in active port list */
active_pos = find_slave_by_id(internals->active_slaves,
internals->active_slave_count, port_id);
rte_eth_link_get_nowait(port_id, &link);
if (link.link_status) {
if (active_pos < internals->active_slave_count)
if (active_pos < internals->active_slave_count) {
rte_spinlock_unlock(&internals->lsc_lock);
return rc;
}
/* if no active slave ports then set this port to be primary port */
if (internals->active_slave_count < 1) {
@ -2679,8 +2686,10 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
internals->primary_port == port_id)
bond_ethdev_primary_set(internals, port_id);
} else {
if (active_pos == internals->active_slave_count)
if (active_pos == internals->active_slave_count) {
rte_spinlock_unlock(&internals->lsc_lock);
return rc;
}
/* Remove from active slave list */
deactivate_slave(bonded_eth_dev, port_id);
@ -2733,6 +2742,9 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
NULL);
}
}
rte_spinlock_unlock(&internals->lsc_lock);
return 0;
}
@ -2953,6 +2965,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC;
rte_spinlock_init(&internals->lock);
rte_spinlock_init(&internals->lsc_lock);
internals->port_id = eth_dev->data->port_id;
internals->mode = BONDING_MODE_INVALID;

View File

@ -105,6 +105,7 @@ struct bond_dev_private {
uint8_t mode; /**< Link Bonding Mode */
rte_spinlock_t lock;
rte_spinlock_t lsc_lock;
uint16_t primary_port; /**< Primary Slave Port */
uint16_t current_primary_port; /**< Primary Slave Port */