net/vhost: emulate device start/stop behavior
.dev_start()/.dev_stop() roughly correspond to the local device's port being ready. This is different from the remote client being connected, which roughly corresponds to link up or down. Emulate the device start/stop behavior by separately tracking the start/stop state to determine whether we should allow packets to be queued to/from the remote client. Signed-off-by: Chas Williams <ciwillia@brocade.com> Acked-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
This commit is contained in:
parent
aed0b12930
commit
1ce3c7fe14
@ -112,9 +112,11 @@ struct vhost_queue {
|
||||
};
|
||||
|
||||
/* Per-device private data for the vhost PMD. */
struct pmd_internal {
	/* Nonzero while a remote vhost client is connected; set in
	 * new_device() and cleared in destroy_device(). */
	rte_atomic32_t dev_attached;
	char *dev_name;
	char *iface_name;
	uint16_t max_queues;
	/* Nonzero between .dev_start() and .dev_stop().  Together with
	 * dev_attached this gates packet queuing — both must be set for
	 * update_queuing_status() to allow rx/tx bursts. */
	rte_atomic32_t started;
};
|
||||
|
||||
struct internal_list {
|
||||
@ -494,6 +496,38 @@ find_internal_resource(char *ifname)
|
||||
return list;
|
||||
}
|
||||
|
||||
static void
|
||||
update_queuing_status(struct rte_eth_dev *dev)
|
||||
{
|
||||
struct pmd_internal *internal = dev->data->dev_private;
|
||||
struct vhost_queue *vq;
|
||||
unsigned int i;
|
||||
int allow_queuing = 1;
|
||||
|
||||
if (rte_atomic32_read(&internal->started) == 0 ||
|
||||
rte_atomic32_read(&internal->dev_attached) == 0)
|
||||
allow_queuing = 0;
|
||||
|
||||
/* Wait until rx/tx_pkt_burst stops accessing vhost device */
|
||||
for (i = 0; i < dev->data->nb_rx_queues; i++) {
|
||||
vq = dev->data->rx_queues[i];
|
||||
if (vq == NULL)
|
||||
continue;
|
||||
rte_atomic32_set(&vq->allow_queuing, allow_queuing);
|
||||
while (rte_atomic32_read(&vq->while_queuing))
|
||||
rte_pause();
|
||||
}
|
||||
|
||||
for (i = 0; i < dev->data->nb_tx_queues; i++) {
|
||||
vq = dev->data->tx_queues[i];
|
||||
if (vq == NULL)
|
||||
continue;
|
||||
rte_atomic32_set(&vq->allow_queuing, allow_queuing);
|
||||
while (rte_atomic32_read(&vq->while_queuing))
|
||||
rte_pause();
|
||||
}
|
||||
}
|
||||
|
||||
static int
|
||||
new_device(int vid)
|
||||
{
|
||||
@ -545,18 +579,8 @@ new_device(int vid)
|
||||
|
||||
eth_dev->data->dev_link.link_status = ETH_LINK_UP;
|
||||
|
||||
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
|
||||
vq = eth_dev->data->rx_queues[i];
|
||||
if (vq == NULL)
|
||||
continue;
|
||||
rte_atomic32_set(&vq->allow_queuing, 1);
|
||||
}
|
||||
for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
|
||||
vq = eth_dev->data->tx_queues[i];
|
||||
if (vq == NULL)
|
||||
continue;
|
||||
rte_atomic32_set(&vq->allow_queuing, 1);
|
||||
}
|
||||
rte_atomic32_set(&internal->dev_attached, 1);
|
||||
update_queuing_status(eth_dev);
|
||||
|
||||
RTE_LOG(INFO, PMD, "New connection established\n");
|
||||
|
||||
@ -569,6 +593,7 @@ static void
|
||||
destroy_device(int vid)
|
||||
{
|
||||
struct rte_eth_dev *eth_dev;
|
||||
struct pmd_internal *internal;
|
||||
struct vhost_queue *vq;
|
||||
struct internal_list *list;
|
||||
char ifname[PATH_MAX];
|
||||
@ -582,24 +607,10 @@ destroy_device(int vid)
|
||||
return;
|
||||
}
|
||||
eth_dev = list->eth_dev;
|
||||
internal = eth_dev->data->dev_private;
|
||||
|
||||
/* Wait until rx/tx_pkt_burst stops accessing vhost device */
|
||||
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
|
||||
vq = eth_dev->data->rx_queues[i];
|
||||
if (vq == NULL)
|
||||
continue;
|
||||
rte_atomic32_set(&vq->allow_queuing, 0);
|
||||
while (rte_atomic32_read(&vq->while_queuing))
|
||||
rte_pause();
|
||||
}
|
||||
for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
|
||||
vq = eth_dev->data->tx_queues[i];
|
||||
if (vq == NULL)
|
||||
continue;
|
||||
rte_atomic32_set(&vq->allow_queuing, 0);
|
||||
while (rte_atomic32_read(&vq->while_queuing))
|
||||
rte_pause();
|
||||
}
|
||||
rte_atomic32_set(&internal->dev_attached, 0);
|
||||
update_queuing_status(eth_dev);
|
||||
|
||||
eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
|
||||
|
||||
@ -773,14 +784,23 @@ vhost_driver_session_stop(void)
|
||||
}
|
||||
|
||||
/*
 * .dev_start callback: mark the local port as started.  Packet queuing
 * actually resumes only if a vhost connection is also attached (see
 * update_queuing_status()).  Always succeeds.
 */
static int
eth_dev_start(struct rte_eth_dev *dev)
{
	struct pmd_internal *internal = dev->data->dev_private;

	rte_atomic32_set(&internal->started, 1);
	update_queuing_status(dev);

	return 0;
}
|
||||
|
||||
/*
 * .dev_stop callback: mark the local port as stopped and drain all
 * queues — update_queuing_status() clears allow_queuing and waits for
 * in-flight rx/tx bursts to finish before returning.
 */
static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	struct pmd_internal *internal = dev->data->dev_private;

	rte_atomic32_set(&internal->started, 0);
	update_queuing_status(dev);
}
|
||||
|
||||
static int
|
||||
|
Loading…
Reference in New Issue
Block a user