vhost: fix retrieval of numa node in driver
After some testing, it was found that retrieving NUMA information about a vhost device via a call to get_mempolicy() is more accurate when performed during the new_device callback rather than the vring_state_changed callback, in particular upon initial boot of the VM. Performing this check during new_device is also potentially more efficient, as that callback is triggered only once during device initialisation, whereas vring_state_changed may be called multiple times depending on the number of queues assigned to the device. Reorganise the code to perform this check and assign the correct socket_id to the device during the new_device callback.

Fixes: ee584e9710b9 ("vhost: add driver on top of the library")

Signed-off-by: Ciara Loftus <ciara.loftus@intel.com>
Acked-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
This commit is contained in:
parent
a7e88f1782
commit
0d9eb479e9
@ -229,6 +229,9 @@ new_device(struct virtio_net *dev)
|
|||||||
struct pmd_internal *internal;
|
struct pmd_internal *internal;
|
||||||
struct vhost_queue *vq;
|
struct vhost_queue *vq;
|
||||||
unsigned i;
|
unsigned i;
|
||||||
|
#ifdef RTE_LIBRTE_VHOST_NUMA
|
||||||
|
int newnode, ret;
|
||||||
|
#endif
|
||||||
|
|
||||||
if (dev == NULL) {
|
if (dev == NULL) {
|
||||||
RTE_LOG(INFO, PMD, "Invalid argument\n");
|
RTE_LOG(INFO, PMD, "Invalid argument\n");
|
||||||
@ -244,6 +247,17 @@ new_device(struct virtio_net *dev)
|
|||||||
eth_dev = list->eth_dev;
|
eth_dev = list->eth_dev;
|
||||||
internal = eth_dev->data->dev_private;
|
internal = eth_dev->data->dev_private;
|
||||||
|
|
||||||
|
#ifdef RTE_LIBRTE_VHOST_NUMA
|
||||||
|
ret = get_mempolicy(&newnode, NULL, 0, dev,
|
||||||
|
MPOL_F_NODE | MPOL_F_ADDR);
|
||||||
|
if (ret < 0) {
|
||||||
|
RTE_LOG(ERR, PMD, "Unknown numa node\n");
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
eth_dev->data->numa_node = newnode;
|
||||||
|
#endif
|
||||||
|
|
||||||
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
|
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
|
||||||
vq = eth_dev->data->rx_queues[i];
|
vq = eth_dev->data->rx_queues[i];
|
||||||
if (vq == NULL)
|
if (vq == NULL)
|
||||||
@ -352,9 +366,6 @@ vring_state_changed(struct virtio_net *dev, uint16_t vring, int enable)
|
|||||||
struct rte_vhost_vring_state *state;
|
struct rte_vhost_vring_state *state;
|
||||||
struct rte_eth_dev *eth_dev;
|
struct rte_eth_dev *eth_dev;
|
||||||
struct internal_list *list;
|
struct internal_list *list;
|
||||||
#ifdef RTE_LIBRTE_VHOST_NUMA
|
|
||||||
int newnode, ret;
|
|
||||||
#endif
|
|
||||||
|
|
||||||
if (dev == NULL) {
|
if (dev == NULL) {
|
||||||
RTE_LOG(ERR, PMD, "Invalid argument\n");
|
RTE_LOG(ERR, PMD, "Invalid argument\n");
|
||||||
@ -370,17 +381,6 @@ vring_state_changed(struct virtio_net *dev, uint16_t vring, int enable)
|
|||||||
eth_dev = list->eth_dev;
|
eth_dev = list->eth_dev;
|
||||||
/* won't be NULL */
|
/* won't be NULL */
|
||||||
state = vring_states[eth_dev->data->port_id];
|
state = vring_states[eth_dev->data->port_id];
|
||||||
|
|
||||||
#ifdef RTE_LIBRTE_VHOST_NUMA
|
|
||||||
ret = get_mempolicy(&newnode, NULL, 0, dev,
|
|
||||||
MPOL_F_NODE | MPOL_F_ADDR);
|
|
||||||
if (ret < 0) {
|
|
||||||
RTE_LOG(ERR, PMD, "Unknown numa node\n");
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
eth_dev->data->numa_node = newnode;
|
|
||||||
#endif
|
|
||||||
rte_spinlock_lock(&state->lock);
|
rte_spinlock_lock(&state->lock);
|
||||||
state->cur[vring] = enable;
|
state->cur[vring] = enable;
|
||||||
state->max_vring = RTE_MAX(vring, state->max_vring);
|
state->max_vring = RTE_MAX(vring, state->max_vring);
|
||||||
|
Loading…
x
Reference in New Issue
Block a user