net/mlx4: change device reference for secondary process

rte_eth_devices[] is not shared between the primary and secondary
processes; each process has its own static copy of the array. The
back-reference to the device (priv->dev) therefore becomes invalid
once mlx4 supports secondary processes. Instead, priv holds a pointer
to the shared data of the device,
  struct rte_eth_dev_data *dev_data;

Two macros are added,
  #define PORT_ID(priv) ((priv)->dev_data->port_id)
  #define ETH_DEV(priv) (&rte_eth_devices[PORT_ID(priv)])

Cc: stable@dpdk.org

Suggested-by: Raslan Darawsheh <rasland@mellanox.com>
Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
Acked-by: Shahaf Shuler <shahafs@mellanox.com>
Commit: 099c2c5376 (parent: 2aac5b5d11)
Author: Yongseok Koh, 2019-04-01 14:15:51 -07:00; committed by Ferruh Yigit
7 changed files with 64 additions and 56 deletions
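
As context for the hunks below: dev_data lives in memory shared by both
processes, while rte_eth_devices[] is private to each process, so
ETH_DEV(priv) rebuilds the device pointer from the shared port id on
every access. A minimal sketch of the equivalent logic, assuming mlx4.h
(for struct mlx4_priv and PORT_ID) and rte_ethdev_driver.h (for
rte_eth_devices[]) are included; the helper name is hypothetical and
only for illustration:

  static inline struct rte_eth_dev *
  mlx4_priv_to_eth_dev(struct mlx4_priv *priv)
  {
          /* Only the port id stored in the shared rte_eth_dev_data is
           * meaningful across processes. */
          uint16_t port_id = PORT_ID(priv);

          /* Each process then indexes its own copy of rte_eth_devices[],
           * which is exactly what ETH_DEV(priv) expands to. */
          return &rte_eth_devices[port_id];
  }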


@@ -753,11 +753,11 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
* handled by rte_intr_rx_ctl().
*/
eth_dev->intr_handle = &priv->intr_handle;
priv->dev = eth_dev;
priv->dev_data = eth_dev->data;
eth_dev->dev_ops = &mlx4_dev_ops;
/* Bring Ethernet device up. */
DEBUG("forcing Ethernet interface up");
mlx4_dev_set_link_up(priv->dev);
mlx4_dev_set_link_up(eth_dev);
/* Update link status once if waiting for LSC. */
if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
mlx4_link_update(eth_dev, 0);


@@ -79,7 +79,7 @@ LIST_HEAD(mlx4_mr_list, mlx4_mr);
struct mlx4_priv {
LIST_ENTRY(mlx4_priv) mem_event_cb;
/**< Called by memory event callback. */
struct rte_eth_dev *dev; /**< Ethernet device. */
struct rte_eth_dev_data *dev_data; /* Pointer to device data. */
struct ibv_context *ctx; /**< Verbs context. */
struct ibv_device_attr device_attr; /**< Device properties. */
struct ibv_pd *pd; /**< Protection Domain. */
@@ -113,6 +113,9 @@ struct mlx4_priv {
/**< Configured MAC addresses. Unused entries are zeroed. */
};
#define PORT_ID(priv) ((priv)->dev_data->port_id)
#define ETH_DEV(priv) (&rte_eth_devices[PORT_ID(priv)])
/* mlx4_ethdev.c */
int mlx4_get_ifname(const struct mlx4_priv *priv, char (*ifname)[IF_NAMESIZE]);


@@ -773,7 +773,7 @@ mlx4_flow_prepare(struct mlx4_priv *priv,
if (flow->rss)
break;
queue = action->conf;
if (queue->index >= priv->dev->data->nb_rx_queues) {
if (queue->index >= ETH_DEV(priv)->data->nb_rx_queues) {
msg = "queue target index beyond number of"
" configured Rx queues";
goto exit_action_not_supported;
@@ -802,7 +802,7 @@ mlx4_flow_prepare(struct mlx4_priv *priv,
/* Sanity checks. */
for (i = 0; i < rss->queue_num; ++i)
if (rss->queue[i] >=
priv->dev->data->nb_rx_queues)
ETH_DEV(priv)->data->nb_rx_queues)
break;
if (i != rss->queue_num) {
msg = "queue index target beyond number of"
@@ -1072,8 +1072,8 @@ mlx4_flow_toggle(struct mlx4_priv *priv,
/* Stop at the first nonexistent target queue. */
for (i = 0; i != rss->queues; ++i)
if (rss->queue_id[i] >=
priv->dev->data->nb_rx_queues ||
!priv->dev->data->rx_queues[rss->queue_id[i]]) {
ETH_DEV(priv)->data->nb_rx_queues ||
!ETH_DEV(priv)->data->rx_queues[rss->queue_id[i]]) {
missing = 1;
break;
}
@@ -1258,7 +1258,7 @@ static uint16_t
mlx4_flow_internal_next_vlan(struct mlx4_priv *priv, uint16_t vlan)
{
while (vlan < 4096) {
if (priv->dev->data->vlan_filter_conf.ids[vlan / 64] &
if (ETH_DEV(priv)->data->vlan_filter_conf.ids[vlan / 64] &
(UINT64_C(1) << (vlan % 64)))
return vlan;
++vlan;
@@ -1335,7 +1335,7 @@ mlx4_flow_internal(struct mlx4_priv *priv, struct rte_flow_error *error)
* get RSS by default.
*/
uint32_t queues =
rte_align32pow2(priv->dev->data->nb_rx_queues + 1) >> 1;
rte_align32pow2(ETH_DEV(priv)->data->nb_rx_queues + 1) >> 1;
uint16_t queue[queues];
struct rte_flow_action_rss action_rss = {
.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
@@ -1357,9 +1357,9 @@ mlx4_flow_internal(struct mlx4_priv *priv, struct rte_flow_error *error)
};
struct ether_addr *rule_mac = &eth_spec.dst;
rte_be16_t *rule_vlan =
(priv->dev->data->dev_conf.rxmode.offloads &
(ETH_DEV(priv)->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_VLAN_FILTER) &&
!priv->dev->data->promiscuous ?
!ETH_DEV(priv)->data->promiscuous ?
&vlan_spec.tci :
NULL;
uint16_t vlan = 0;
@@ -1439,7 +1439,7 @@ mlx4_flow_internal(struct mlx4_priv *priv, struct rte_flow_error *error)
if (!flow || !flow->internal) {
/* Not found, create a new flow rule. */
memcpy(rule_mac, mac, sizeof(*mac));
flow = mlx4_flow_create(priv->dev, &attr, pattern,
flow = mlx4_flow_create(ETH_DEV(priv), &attr, pattern,
actions, error);
if (!flow) {
err = -rte_errno;
@@ -1455,15 +1455,16 @@ mlx4_flow_internal(struct mlx4_priv *priv, struct rte_flow_error *error)
goto next_vlan;
}
/* Take care of promiscuous and all multicast flow rules. */
if (priv->dev->data->promiscuous || priv->dev->data->all_multicast) {
if (ETH_DEV(priv)->data->promiscuous ||
ETH_DEV(priv)->data->all_multicast) {
for (flow = LIST_FIRST(&priv->flows);
flow && flow->internal;
flow = LIST_NEXT(flow, next)) {
if (priv->dev->data->promiscuous) {
if (ETH_DEV(priv)->data->promiscuous) {
if (flow->promisc)
break;
} else {
assert(priv->dev->data->all_multicast);
assert(ETH_DEV(priv)->data->all_multicast);
if (flow->allmulti)
break;
}
@@ -1477,16 +1478,16 @@ mlx4_flow_internal(struct mlx4_priv *priv, struct rte_flow_error *error)
}
if (!flow || !flow->internal) {
/* Not found, create a new flow rule. */
if (priv->dev->data->promiscuous) {
if (ETH_DEV(priv)->data->promiscuous) {
pattern[1].spec = NULL;
pattern[1].mask = NULL;
} else {
assert(priv->dev->data->all_multicast);
assert(ETH_DEV(priv)->data->all_multicast);
pattern[1].spec = &eth_allmulti;
pattern[1].mask = &eth_allmulti;
}
pattern[2] = pattern[3];
flow = mlx4_flow_create(priv->dev, &attr, pattern,
flow = mlx4_flow_create(ETH_DEV(priv), &attr, pattern,
actions, error);
if (!flow) {
err = -rte_errno;
@@ -1503,7 +1504,8 @@ mlx4_flow_internal(struct mlx4_priv *priv, struct rte_flow_error *error)
struct rte_flow *next = LIST_NEXT(flow, next);
if (!flow->select)
claim_zero(mlx4_flow_destroy(priv->dev, flow, error));
claim_zero(mlx4_flow_destroy(ETH_DEV(priv), flow,
error));
else
flow->select = 0;
flow = next;
@@ -1541,7 +1543,8 @@ mlx4_flow_sync(struct mlx4_priv *priv, struct rte_flow_error *error)
for (flow = LIST_FIRST(&priv->flows);
flow && flow->internal;
flow = LIST_FIRST(&priv->flows))
claim_zero(mlx4_flow_destroy(priv->dev, flow, error));
claim_zero(mlx4_flow_destroy(ETH_DEV(priv), flow,
error));
} else {
/* Refresh internal rules. */
ret = mlx4_flow_internal(priv, error);
@@ -1574,7 +1577,7 @@ mlx4_flow_clean(struct mlx4_priv *priv)
struct rte_flow *flow;
while ((flow = LIST_FIRST(&priv->flows)))
mlx4_flow_destroy(priv->dev, flow, NULL);
mlx4_flow_destroy(ETH_DEV(priv), flow, NULL);
assert(LIST_EMPTY(&priv->rss));
}


@@ -65,7 +65,7 @@ static int
mlx4_rx_intr_vec_enable(struct mlx4_priv *priv)
{
unsigned int i;
unsigned int rxqs_n = priv->dev->data->nb_rx_queues;
unsigned int rxqs_n = ETH_DEV(priv)->data->nb_rx_queues;
unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
unsigned int count = 0;
struct rte_intr_handle *intr_handle = &priv->intr_handle;
@@ -79,7 +79,7 @@ mlx4_rx_intr_vec_enable(struct mlx4_priv *priv)
return -rte_errno;
}
for (i = 0; i != n; ++i) {
struct rxq *rxq = priv->dev->data->rx_queues[i];
struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i];
/* Skip queues that cannot request interrupts. */
if (!rxq || !rxq->channel) {
@@ -120,12 +120,12 @@ static void
mlx4_link_status_alarm(struct mlx4_priv *priv)
{
const struct rte_intr_conf *const intr_conf =
&priv->dev->data->dev_conf.intr_conf;
&ETH_DEV(priv)->data->dev_conf.intr_conf;
assert(priv->intr_alarm == 1);
priv->intr_alarm = 0;
if (intr_conf->lsc && !mlx4_link_status_check(priv))
_rte_eth_dev_callback_process(priv->dev,
_rte_eth_dev_callback_process(ETH_DEV(priv),
RTE_ETH_EVENT_INTR_LSC,
NULL);
}
@@ -145,8 +145,8 @@ mlx4_link_status_alarm(struct mlx4_priv *priv)
static int
mlx4_link_status_check(struct mlx4_priv *priv)
{
struct rte_eth_link *link = &priv->dev->data->dev_link;
int ret = mlx4_link_update(priv->dev, 0);
struct rte_eth_link *link = &ETH_DEV(priv)->data->dev_link;
int ret = mlx4_link_update(ETH_DEV(priv), 0);
if (ret)
return ret;
@@ -185,7 +185,7 @@ mlx4_interrupt_handler(struct mlx4_priv *priv)
uint32_t caught[RTE_DIM(type)] = { 0 };
struct ibv_async_event event;
const struct rte_intr_conf *const intr_conf =
&priv->dev->data->dev_conf.intr_conf;
&ETH_DEV(priv)->data->dev_conf.intr_conf;
unsigned int i;
/* Read all message and acknowledge them. */
@@ -208,7 +208,7 @@ mlx4_interrupt_handler(struct mlx4_priv *priv)
}
for (i = 0; i != RTE_DIM(caught); ++i)
if (caught[i])
_rte_eth_dev_callback_process(priv->dev, type[i],
_rte_eth_dev_callback_process(ETH_DEV(priv), type[i],
NULL);
}
@@ -282,7 +282,7 @@ int
mlx4_intr_install(struct mlx4_priv *priv)
{
const struct rte_intr_conf *const intr_conf =
&priv->dev->data->dev_conf.intr_conf;
&ETH_DEV(priv)->data->dev_conf.intr_conf;
int rc;
mlx4_intr_uninstall(priv);
@@ -381,7 +381,7 @@ int
mlx4_rxq_intr_enable(struct mlx4_priv *priv)
{
const struct rte_intr_conf *const intr_conf =
&priv->dev->data->dev_conf.intr_conf;
&ETH_DEV(priv)->data->dev_conf.intr_conf;
if (intr_conf->rxq && mlx4_rx_intr_vec_enable(priv) < 0)
goto error;


@@ -896,7 +896,7 @@ mlx4_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
rte_rwlock_read_lock(&mlx4_mem_event_rwlock);
/* Iterate all the existing mlx4 devices. */
LIST_FOREACH(priv, &mlx4_mem_event_cb_list, mem_event_cb)
mlx4_mr_mem_event_free_cb(priv->dev, addr, len);
mlx4_mr_mem_event_free_cb(ETH_DEV(priv), addr, len);
rte_rwlock_read_unlock(&mlx4_mem_event_rwlock);
break;
case RTE_MEM_EVENT_ALLOC:
@@ -1028,7 +1028,7 @@ mlx4_rx_addr2mr_bh(struct rxq *rxq, uintptr_t addr)
DEBUG("Rx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
rxq->stats.idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
return mlx4_mr_addr2mr_bh(priv->dev, mr_ctrl, addr);
return mlx4_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
}
/**
@@ -1050,7 +1050,7 @@ mlx4_tx_addr2mr_bh(struct txq *txq, uintptr_t addr)
DEBUG("Tx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
txq->stats.idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
return mlx4_mr_addr2mr_bh(priv->dev, mr_ctrl, addr);
return mlx4_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
}
/**
@@ -1225,7 +1225,7 @@ mlx4_tx_update_ext_mp(struct txq *txq, uintptr_t addr, struct rte_mempool *mp)
struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
struct mlx4_priv *priv = txq->priv;
mlx4_mr_update_ext_mp(priv->dev, mr_ctrl, mp);
mlx4_mr_update_ext_mp(ETH_DEV(priv), mr_ctrl, mp);
return mlx4_tx_addr2mr_bh(txq, addr);
}


@@ -176,6 +176,7 @@ mlx4_rss_attach(struct mlx4_rss *rss)
struct ibv_wq *ind_tbl[rss->queues];
struct mlx4_priv *priv = rss->priv;
struct rte_eth_dev *dev = ETH_DEV(priv);
const char *msg;
unsigned int i = 0;
int ret;
@@ -189,8 +190,8 @@ mlx4_rss_attach(struct mlx4_rss *rss)
uint16_t id = rss->queue_id[i];
struct rxq *rxq = NULL;
if (id < priv->dev->data->nb_rx_queues)
rxq = priv->dev->data->rx_queues[id];
if (id < dev->data->nb_rx_queues)
rxq = dev->data->rx_queues[id];
if (!rxq) {
ret = EINVAL;
msg = "RSS target queue is not configured";
@@ -269,7 +270,7 @@ mlx4_rss_attach(struct mlx4_rss *rss)
rss->ind = NULL;
}
while (i--)
mlx4_rxq_detach(priv->dev->data->rx_queues[rss->queue_id[i]]);
mlx4_rxq_detach(dev->data->rx_queues[rss->queue_id[i]]);
ERROR("mlx4: %s", msg);
--rss->usecnt;
rte_errno = ret;
@@ -291,6 +292,7 @@ void
mlx4_rss_detach(struct mlx4_rss *rss)
{
struct mlx4_priv *priv = rss->priv;
struct rte_eth_dev *dev = ETH_DEV(priv);
unsigned int i;
assert(rss->refcnt);
@@ -303,7 +305,7 @@ mlx4_rss_detach(struct mlx4_rss *rss)
claim_zero(mlx4_glue->destroy_rwq_ind_table(rss->ind));
rss->ind = NULL;
for (i = 0; i != rss->queues; ++i)
mlx4_rxq_detach(priv->dev->data->rx_queues[rss->queue_id[i]]);
mlx4_rxq_detach(dev->data->rx_queues[rss->queue_id[i]]);
}
/**
@@ -329,7 +331,7 @@ mlx4_rss_detach(struct mlx4_rss *rss)
int
mlx4_rss_init(struct mlx4_priv *priv)
{
struct rte_eth_dev *dev = priv->dev;
struct rte_eth_dev *dev = ETH_DEV(priv);
uint8_t log2_range = rte_log2_u32(dev->data->nb_rx_queues);
uint32_t wq_num_prev = 0;
const char *msg;
@@ -338,7 +340,7 @@ mlx4_rss_init(struct mlx4_priv *priv)
if (priv->rss_init)
return 0;
if (priv->dev->data->nb_rx_queues > priv->hw_rss_max_qps) {
if (ETH_DEV(priv)->data->nb_rx_queues > priv->hw_rss_max_qps) {
ERROR("RSS does not support more than %d queues",
priv->hw_rss_max_qps);
rte_errno = EINVAL;
@@ -356,8 +358,8 @@ mlx4_rss_init(struct mlx4_priv *priv)
rte_errno = ret;
return -ret;
}
for (i = 0; i != priv->dev->data->nb_rx_queues; ++i) {
struct rxq *rxq = priv->dev->data->rx_queues[i];
for (i = 0; i != ETH_DEV(priv)->data->nb_rx_queues; ++i) {
struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i];
struct ibv_cq *cq;
struct ibv_wq *wq;
uint32_t wq_num;
@@ -432,7 +434,7 @@ mlx4_rss_init(struct mlx4_priv *priv)
ERROR("cannot initialize common RSS resources (queue %u): %s: %s",
i, msg, strerror(ret));
while (i--) {
struct rxq *rxq = priv->dev->data->rx_queues[i];
struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i];
if (rxq)
mlx4_rxq_detach(rxq);
@@ -457,8 +459,8 @@ mlx4_rss_deinit(struct mlx4_priv *priv)
if (!priv->rss_init)
return;
for (i = 0; i != priv->dev->data->nb_rx_queues; ++i) {
struct rxq *rxq = priv->dev->data->rx_queues[i];
for (i = 0; i != ETH_DEV(priv)->data->nb_rx_queues; ++i) {
struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i];
if (rxq) {
assert(rxq->usecnt == 1);
@@ -494,7 +496,7 @@ mlx4_rxq_attach(struct rxq *rxq)
}
struct mlx4_priv *priv = rxq->priv;
struct rte_eth_dev *dev = priv->dev;
struct rte_eth_dev *dev = ETH_DEV(priv);
const uint32_t elts_n = 1 << rxq->elts_n;
const uint32_t sges_n = 1 << rxq->sges_n;
struct rte_mbuf *(*elts)[elts_n] = rxq->elts;
@@ -561,7 +563,7 @@ mlx4_rxq_attach(struct rxq *rxq)
}
/* Pre-register Rx mempool. */
DEBUG("port %u Rx queue %u registering mp %s having %u chunks",
priv->dev->data->port_id, rxq->stats.idx,
ETH_DEV(priv)->data->port_id, rxq->stats.idx,
rxq->mp->name, rxq->mp->nb_mem_chunks);
mlx4_mr_update_mp(dev, &rxq->mr_ctrl, rxq->mp);
wqes = (volatile struct mlx4_wqe_data_seg (*)[])
@@ -917,11 +919,11 @@ mlx4_rx_queue_release(void *dpdk_rxq)
if (rxq == NULL)
return;
priv = rxq->priv;
for (i = 0; i != priv->dev->data->nb_rx_queues; ++i)
if (priv->dev->data->rx_queues[i] == rxq) {
for (i = 0; i != ETH_DEV(priv)->data->nb_rx_queues; ++i)
if (ETH_DEV(priv)->data->rx_queues[i] == rxq) {
DEBUG("%p: removing Rx queue %p from list",
(void *)priv->dev, (void *)rxq);
priv->dev->data->rx_queues[i] = NULL;
(void *)ETH_DEV(priv), (void *)rxq);
ETH_DEV(priv)->data->rx_queues[i] = NULL;
break;
}
assert(!rxq->cq);


@@ -357,11 +357,11 @@ mlx4_tx_queue_release(void *dpdk_txq)
if (txq == NULL)
return;
priv = txq->priv;
for (i = 0; i != priv->dev->data->nb_tx_queues; ++i)
if (priv->dev->data->tx_queues[i] == txq) {
for (i = 0; i != ETH_DEV(priv)->data->nb_tx_queues; ++i)
if (ETH_DEV(priv)->data->tx_queues[i] == txq) {
DEBUG("%p: removing Tx queue %p from list",
(void *)priv->dev, (void *)txq);
priv->dev->data->tx_queues[i] = NULL;
(void *)ETH_DEV(priv), (void *)txq);
ETH_DEV(priv)->data->tx_queues[i] = NULL;
break;
}
mlx4_txq_free_elts(txq);