net/mlx5: change device reference for secondary process
rte_eth_devices[] is not shared between primary and secondary processes; it is a static array private to each process, so the reverse pointer to the device (priv->dev) is invalid in a secondary process. Instead, priv now holds a pointer to the shared data of the device:

    struct rte_eth_dev_data *dev_data; /* Pointer to device data. */

Two macros are added:

    #define PORT_ID(priv) ((priv)->dev_data->port_id)
    #define ETH_DEV(priv) (&rte_eth_devices[PORT_ID(priv)])

Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
commit df428ceef4
parent ce07b1514d
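As background for the diff below, here is a minimal self-contained sketch (not driver code; the struct is reduced to the one relevant field and the helper is hypothetical) of why the indirection works: struct rte_eth_dev_data is allocated in shared memory by the primary process, while rte_eth_devices[] is a separate array in each process, so indexing it with the port ID taken from shared data always yields the caller's own device entry.

    #include <rte_ethdev.h> /* struct rte_eth_dev_data; the ethdev headers declare rte_eth_devices[] */

    /* Sketch of the mlx5 private structure, reduced to the one field that
     * matters here. dev_data points into shared memory set up by the
     * primary process, so the same pointer is meaningful in secondaries. */
    struct priv {
            struct rte_eth_dev_data *dev_data; /* Pointer to device data. */
    };

    /* The two macros added by this commit: the port ID read from shared
     * device data indexes this process's own rte_eth_devices[] array. */
    #define PORT_ID(priv) ((priv)->dev_data->port_id)
    #define ETH_DEV(priv) (&rte_eth_devices[PORT_ID(priv)])

    /* Hypothetical helper, for illustration only: a cached
     * "struct rte_eth_dev *dev" stored by the primary would point into the
     * primary's per-process array, whereas ETH_DEV() is valid anywhere. */
    static inline struct rte_eth_dev *
    priv_to_eth_dev(struct priv *priv)
    {
            return ETH_DEV(priv);
    }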
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -954,7 +954,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 			goto port_error;
 		}
 		eth_dev->data->dev_private = priv;
-		priv->dev = eth_dev;
+		priv->dev_data = eth_dev->data;
 		eth_dev->data->mac_addrs = priv->mac;
 		eth_dev->device = &pci_dev->device;
 		rte_eth_copy_pci_info(eth_dev, pci_dev);
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -121,7 +121,7 @@ struct mlx5_verbs_alloc_ctx {
 };

 struct priv {
-	struct rte_eth_dev *dev; /* Ethernet device of master process. */
+	struct rte_eth_dev_data *dev_data;  /* Pointer to device data. */
 	struct ibv_context *ctx; /* Verbs context. */
 	struct ibv_device_attr_ex device_attr; /* Device properties. */
 	struct ibv_pd *pd; /* Protection Domain. */
@@ -168,6 +168,9 @@ struct priv {
 	uint32_t nl_sn; /* Netlink message sequence number. */
 };

+#define PORT_ID(priv) ((priv)->dev_data->port_id)
+#define ETH_DEV(priv) (&rte_eth_devices[PORT_ID(priv)])
+
 /* mlx5.c */

 int mlx5_getenv_int(const char *);
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -2034,7 +2034,7 @@ mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev,
 		parser->queue[HASH_RXQ_ETH].ibv_attr;
 	if (parser->count)
 		flow->cs = parser->cs;
-	if (!priv->dev->data->dev_started)
+	if (!dev->data->dev_started)
 		return 0;
 	parser->queue[HASH_RXQ_ETH].ibv_attr = NULL;
 	flow->frxq[HASH_RXQ_ETH].ibv_flow =
@@ -2086,7 +2086,6 @@ mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev,
 			      struct rte_flow *flow,
 			      struct rte_flow_error *error)
 {
-	struct priv *priv = dev->data->dev_private;
 	unsigned int i;

 	for (i = 0; i != hash_rxq_init_n; ++i) {
@@ -2095,7 +2094,7 @@ mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev,
 		flow->frxq[i].ibv_attr = parser->queue[i].ibv_attr;
 		parser->queue[i].ibv_attr = NULL;
 		flow->frxq[i].hash_fields = parser->queue[i].hash_fields;
-		if (!priv->dev->data->dev_started)
+		if (!dev->data->dev_started)
 			continue;
 		flow->frxq[i].hrxq =
 			mlx5_hrxq_get(dev,
@@ -2241,7 +2240,7 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
 			      struct rte_flow *flow,
 			      struct rte_flow_error *error)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct priv *priv __rte_unused = dev->data->dev_private;
 	int ret;
 	unsigned int i;
 	unsigned int flows_n = 0;
@@ -2254,7 +2253,7 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
 		goto error;
 	if (parser->count)
 		flow->cs = parser->cs;
-	if (!priv->dev->data->dev_started)
+	if (!dev->data->dev_started)
 		return 0;
 	for (i = 0; i != hash_rxq_init_n; ++i) {
 		if (!flow->frxq[i].hrxq)
@@ -3097,9 +3096,9 @@ mlx5_flow_isolate(struct rte_eth_dev *dev,
 	}
 	priv->isolated = !!enable;
 	if (enable)
-		priv->dev->dev_ops = &mlx5_dev_ops_isolate;
+		dev->dev_ops = &mlx5_dev_ops_isolate;
 	else
-		priv->dev->dev_ops = &mlx5_dev_ops;
+		dev->dev_ops = &mlx5_dev_ops;
 	return 0;
 }

@@ -3487,11 +3486,10 @@ mlx5_fdir_filter_flush(struct rte_eth_dev *dev)
 static void
 mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
 {
-	struct priv *priv = dev->data->dev_private;
 	struct rte_eth_fdir_masks *mask =
-		&priv->dev->data->dev_conf.fdir_conf.mask;
+		&dev->data->dev_conf.fdir_conf.mask;

-	fdir_info->mode = priv->dev->data->dev_conf.fdir_conf.mode;
+	fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
 	fdir_info->guarant_spc = 0;
 	rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
 	fdir_info->max_flexpayload = 0;
@@ -3519,9 +3517,8 @@ static int
 mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
 		    void *arg)
 {
-	struct priv *priv = dev->data->dev_private;
 	enum rte_fdir_mode fdir_mode =
-		priv->dev->data->dev_conf.fdir_conf.mode;
+		dev->data->dev_conf.fdir_conf.mode;

 	if (filter_op == RTE_ETH_FILTER_NOP)
 		return 0;
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -105,8 +105,8 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
 	rte_spinlock_lock(&txq_ctrl->priv->mr_lock);
 	/* Add a new entry, register MR first. */
 	DRV_LOG(DEBUG, "port %u discovered new memory pool \"%s\" (%p)",
-		txq_ctrl->priv->dev->data->port_id, mp->name, (void *)mp);
-	dev = txq_ctrl->priv->dev;
+		PORT_ID(txq_ctrl->priv), mp->name, (void *)mp);
+	dev = ETH_DEV(txq_ctrl->priv);
 	mr = mlx5_mr_get(dev, mp);
 	if (mr == NULL) {
 		if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
@@ -114,8 +114,7 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
 				"port %u using unregistered mempool 0x%p(%s)"
 				" in secondary process, please create mempool"
 				" before rte_eth_dev_start()",
-				txq_ctrl->priv->dev->data->port_id,
-				(void *)mp, mp->name);
+				PORT_ID(txq_ctrl->priv), (void *)mp, mp->name);
 			rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
 			rte_errno = ENOTSUP;
 			return NULL;
@@ -126,7 +125,7 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
 		DRV_LOG(DEBUG,
 			"port %u unable to configure memory region,"
 			" ibv_reg_mr() failed.",
-			txq_ctrl->priv->dev->data->port_id);
+			PORT_ID(txq_ctrl->priv));
 		rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
 		return NULL;
 	}
@@ -135,7 +134,7 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
 		DRV_LOG(DEBUG,
 			"port %u memory region <-> memory pool table full, "
 			" dropping oldest entry",
-			txq_ctrl->priv->dev->data->port_id);
+			PORT_ID(txq_ctrl->priv));
 		--idx;
 		mlx5_mr_release(txq->mp2mr[0]);
 		memmove(&txq->mp2mr[0], &txq->mp2mr[1],
@@ -146,7 +145,7 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
 	DRV_LOG(DEBUG,
 		"port %u new memory region lkey for MP \"%s\" (%p): 0x%08"
 		PRIu32,
-		txq_ctrl->priv->dev->data->port_id, mp->name, (void *)mp,
+		PORT_ID(txq_ctrl->priv), mp->name, (void *)mp,
 		txq_ctrl->txq.mp2mr[idx]->lkey);
 	rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
 	return mr;
@@ -207,15 +206,15 @@ mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg)
 	if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 ||
 	    data.ret == -1)
 		return;
-	mr = mlx5_mr_get(priv->dev, mp);
+	mr = mlx5_mr_get(ETH_DEV(priv), mp);
 	if (mr) {
 		mlx5_mr_release(mr);
 		return;
 	}
-	mr = mlx5_mr_new(priv->dev, mp);
+	mr = mlx5_mr_new(ETH_DEV(priv), mp);
 	if (!mr)
 		DRV_LOG(ERR, "port %u cannot create memory region: %s",
-			priv->dev->data->port_id, strerror(rte_errno));
+			PORT_ID(priv), strerror(rte_errno));
 }

 /**
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -78,7 +78,7 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
 		buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
 		if (buf == NULL) {
 			DRV_LOG(ERR, "port %u empty mbuf pool",
-				rxq_ctrl->priv->dev->data->port_id);
+				PORT_ID(rxq_ctrl->priv));
 			rte_errno = ENOMEM;
 			goto error;
 		}
@@ -122,7 +122,7 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
 	DRV_LOG(DEBUG,
 		"port %u Rx queue %u allocated and configured %u segments"
 		" (max %u packets)",
-		rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx, elts_n,
+		PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx, elts_n,
 		elts_n / (1 << rxq_ctrl->rxq.sges_n));
 	return 0;
 error:
@@ -134,7 +134,7 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
 		(*rxq_ctrl->rxq.elts)[i] = NULL;
 	}
 	DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
-		rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
+		PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
 	rte_errno = err; /* Restore rte_errno. */
 	return -rte_errno;
 }
@@ -155,7 +155,7 @@ rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
 	uint16_t i;

 	DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
-		rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
+		PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
 	if (rxq->elts == NULL)
 		return;
 	/**
@@ -186,7 +186,7 @@ void
 mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
 {
 	DRV_LOG(DEBUG, "port %u cleaning up Rx queue %u",
-		rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
+		PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
 	if (rxq_ctrl->ibv)
 		mlx5_rxq_ibv_release(rxq_ctrl->ibv);
 	memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
@@ -354,11 +354,11 @@ mlx5_rx_queue_release(void *dpdk_rxq)
 		return;
 	rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 	priv = rxq_ctrl->priv;
-	if (!mlx5_rxq_releasable(priv->dev, rxq_ctrl->rxq.stats.idx))
+	if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx))
 		rte_panic("port %u Rx queue %u is still used by a flow and"
-			  " cannot be removed\n", priv->dev->data->port_id,
-			  rxq_ctrl->idx);
-	mlx5_rxq_release(priv->dev, rxq_ctrl->rxq.stats.idx);
+			  " cannot be removed\n",
+			  PORT_ID(priv), rxq_ctrl->idx);
+	mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx);
 }

 /**
@@ -378,9 +378,9 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
 	unsigned int rxqs_n = priv->rxqs_n;
 	unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
 	unsigned int count = 0;
-	struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+	struct rte_intr_handle *intr_handle = dev->intr_handle;

-	if (!priv->dev->data->dev_conf.intr_conf.rxq)
+	if (!dev->data->dev_conf.intr_conf.rxq)
 		return 0;
 	mlx5_rx_intr_vec_disable(dev);
 	intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
@@ -452,12 +452,12 @@ void
 mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
 {
 	struct priv *priv = dev->data->dev_private;
-	struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+	struct rte_intr_handle *intr_handle = dev->intr_handle;
 	unsigned int i;
 	unsigned int rxqs_n = priv->rxqs_n;
 	unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);

-	if (!priv->dev->data->dev_conf.intr_conf.rxq)
+	if (!dev->data->dev_conf.intr_conf.rxq)
 		return;
 	if (!intr_handle->intr_vec)
 		goto free;
@@ -897,7 +897,7 @@ mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
 	if (!ret)
 		rxq_ibv->mr = NULL;
 	DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
-		rxq_ibv->rxq_ctrl->priv->dev->data->port_id,
+		PORT_ID(rxq_ibv->rxq_ctrl->priv),
 		rxq_ibv->rxq_ctrl->idx, rte_atomic32_read(&rxq_ibv->refcnt));
 	if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
 		rxq_free_elts(rxq_ibv->rxq_ctrl);
@@ -990,7 +990,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		return NULL;
 	}
 	tmpl->socket = socket;
-	if (priv->dev->data->dev_conf.intr_conf.rxq)
+	if (dev->data->dev_conf.intr_conf.rxq)
 		tmpl->irq = 1;
 	/* Enable scattered packets support for this queue if necessary. */
 	assert(mb_len >= RTE_PKTMBUF_HEADROOM);
@@ -1328,8 +1328,8 @@ mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
 	unsigned int i;

 	DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
-		((struct priv *)dev->data->dev_private)->port,
-		(void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
+		dev->data->port_id, (void *)ind_tbl,
+		rte_atomic32_read(&ind_tbl->refcnt));
 	if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) {
 		claim_zero(mlx5_glue->destroy_rwq_ind_table
 			   (ind_tbl->ind_table));
@@ -1589,8 +1589,8 @@ int
 mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
 {
 	DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
-		((struct priv *)dev->data->dev_private)->port,
-		(void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
+		dev->data->port_id, (void *)hrxq,
+		rte_atomic32_read(&hrxq->refcnt));
 	if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
 		claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
 		DEBUG("port %u delete QP %p: hash: 0x%" PRIx64 ", tunnel:"
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -48,7 +48,7 @@ txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
 	for (i = 0; (i != elts_n); ++i)
 		(*txq_ctrl->txq.elts)[i] = NULL;
 	DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
-		txq_ctrl->priv->dev->data->port_id, txq_ctrl->idx, elts_n);
+		PORT_ID(txq_ctrl->priv), txq_ctrl->idx, elts_n);
 	txq_ctrl->txq.elts_head = 0;
 	txq_ctrl->txq.elts_tail = 0;
 	txq_ctrl->txq.elts_comp = 0;
@@ -70,7 +70,7 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
 	struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;

 	DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
-		txq_ctrl->priv->dev->data->port_id, txq_ctrl->idx);
+		PORT_ID(txq_ctrl->priv), txq_ctrl->idx);
 	txq_ctrl->txq.elts_head = 0;
 	txq_ctrl->txq.elts_tail = 0;
 	txq_ctrl->txq.elts_comp = 0;
@@ -255,9 +255,9 @@ mlx5_tx_queue_release(void *dpdk_txq)
 	priv = txq_ctrl->priv;
 	for (i = 0; (i != priv->txqs_n); ++i)
 		if ((*priv->txqs)[i] == txq) {
-			mlx5_txq_release(priv->dev, i);
+			mlx5_txq_release(ETH_DEV(priv), i);
 			DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
-				priv->dev->data->port_id, txq_ctrl->idx);
+				PORT_ID(priv), txq_ctrl->idx);
 			break;
 		}
 }
@@ -618,7 +618,7 @@ mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv)
 {
 	assert(txq_ibv);
 	DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
-		txq_ibv->txq_ctrl->priv->dev->data->port_id,
+		PORT_ID(txq_ibv->txq_ctrl->priv),
 		txq_ibv->txq_ctrl->idx, rte_atomic32_read(&txq_ibv->refcnt));
 	if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) {
 		claim_zero(mlx5_glue->destroy_qp(txq_ibv->qp));
@@ -685,7 +685,7 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 	unsigned int txqs_inline;
 	unsigned int inline_max_packet_sz;
 	eth_tx_burst_t tx_pkt_burst =
-		mlx5_select_tx_function(txq_ctrl->priv->dev);
+		mlx5_select_tx_function(ETH_DEV(priv));
 	int is_empw_func = is_empw_burst_func(tx_pkt_burst);
 	int tso = !!(txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
 					       DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
@@ -747,8 +747,7 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 			DRV_LOG(WARNING,
 				"port %u txq inline is too large (%d) setting"
 				" it to the maximum possible: %d\n",
-				priv->dev->data->port_id, txq_inline,
-				max_inline);
+				PORT_ID(priv), txq_inline, max_inline);
 			txq_ctrl->txq.max_inline = max_inline /
 						   RTE_CACHE_LINE_SIZE;
 		}