net/mlx5: fix MTU update
Changing the MTU is not related to changing the number of segments;
enabling or disabling multi-segment support should be handled by the
application.
Fixes: 9964b965ad ("net/mlx5: re-add Rx scatter support")
Cc: stable@dpdk.org
Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Yongseok Koh <yskoh@mellanox.com>
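For illustration only, a minimal sketch of what application-side handling could
look like in a DPDK application of that era: scattered Rx and jumbo frames are
enabled explicitly at configure time instead of being toggled by the PMD on MTU
changes. The helper name, the single queue pair, and the 9000-byte MTU are
hypothetical, and queue setup/device start are elided.

#include <rte_ethdev.h>

/* Hypothetical helper: the application decides up front whether
 * multi-segment Rx is needed, rather than relying on the PMD. */
static int
configure_jumbo_port(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			.jumbo_frame = 1,    /* accept frames > ETHER_MAX_LEN */
			.enable_scatter = 1, /* allow multi-segment Rx mbufs */
			/* ETHER_HDR_LEN + MTU + ETHER_CRC_LEN = 14 + 9000 + 4 */
			.max_rx_pkt_len = 9018,
		},
	};
	int ret;

	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret < 0)
		return ret;
	/* With this patch the PMD only propagates the MTU to the kernel
	 * interface; Rx queue layout is left alone. */
	return rte_eth_dev_set_mtu(port_id, 9000);
}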
parent 5f4677c6ad
commit a0edafe409
@@ -924,12 +924,6 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
/**
 * DPDK callback to change the MTU.
 *
 * Setting the MTU affects hardware MRU (packets larger than the MTU cannot be
 * received). Use this as a hint to enable/disable scattered packets support
 * and improve performance when not needed.
 * Since failure is not an option, reconfiguring queues on the fly is not
 * recommended.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param in_mtu
@@ -942,122 +936,33 @@ int
mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct priv *priv = dev->data->dev_private;
	uint16_t kern_mtu;
	int ret = 0;
	unsigned int i;
	unsigned int max_frame_len;
	int rehash;
	int restart = priv->started;

	if (mlx5_is_secondary())
		return -E_RTE_SECONDARY;

	priv_lock(priv);
	/* Set kernel interface MTU first. */
	if (priv_set_mtu(priv, mtu)) {
		ret = errno;
		WARN("cannot set port %u MTU to %u: %s", priv->port, mtu,
		     strerror(ret));
		ret = priv_get_mtu(priv, &kern_mtu);
		if (ret)
			goto out;
	} else
	/* Set kernel interface MTU first. */
	ret = priv_set_mtu(priv, mtu);
	if (ret)
		goto out;
	ret = priv_get_mtu(priv, &kern_mtu);
	if (ret)
		goto out;
	if (kern_mtu == mtu) {
		priv->mtu = mtu;
		DEBUG("adapter port %u MTU set to %u", priv->port, mtu);
	/* Temporarily replace RX handler with a fake one, assuming it has not
	 * been copied elsewhere. */
	dev->rx_pkt_burst = removed_rx_burst;
	/* Make sure everyone has left dev->rx_pkt_burst() and uses
	 * removed_rx_burst() instead. */
	rte_wmb();
	usleep(1000);
	/* MTU does not include header and CRC. */
	max_frame_len = ETHER_HDR_LEN + mtu + ETHER_CRC_LEN;
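	/* For example, the standard 1500-byte MTU gives
	 * 14 + 1500 + 4 = 1518 bytes, i.e. ETHER_MAX_LEN. */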
	/* Check if at least one queue is going to need a SGE update. */
	for (i = 0; i != priv->rxqs_n; ++i) {
		struct rxq *rxq = (*priv->rxqs)[i];
		unsigned int mb_len;
		unsigned int size = RTE_PKTMBUF_HEADROOM + max_frame_len;
		unsigned int sges_n;

		if (rxq == NULL)
			continue;
		mb_len = rte_pktmbuf_data_room_size(rxq->mp);
		assert(mb_len >= RTE_PKTMBUF_HEADROOM);
		/*
		 * Determine the number of SGEs needed for a full packet
		 * and round it to the next power of two.
		 */
		sges_n = log2above((size / mb_len) + !!(size % mb_len));
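		/* Worked example with illustrative values: a 9000-byte MTU
		 * and 2176-byte mbuf data rooms give size = 128 + 9018 =
		 * 9146; 9146 / 2176 is 4 full mbufs plus a remainder, so
		 * log2above(5) = 3, i.e. 8 SGEs per packet. */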
		if (sges_n != rxq->sges_n)
			break;
	}
	/*
	 * If all queues have the right number of SGEs, a simple rehash
	 * of their buffers is enough, otherwise SGE information can only
	 * be updated in a queue by recreating it. All resources that depend
	 * on queues (flows, indirection tables) must be recreated as well in
	 * that case.
	 */
	rehash = (i == priv->rxqs_n);
	if (!rehash) {
		/* Clean up everything as with mlx5_dev_stop(). */
		priv_special_flow_disable_all(priv);
		priv_mac_addrs_disable(priv);
		priv_destroy_hash_rxqs(priv);
		priv_fdir_disable(priv);
		priv_dev_interrupt_handler_uninstall(priv, dev);
	}
recover:
	/* Reconfigure each RX queue. */
	for (i = 0; (i != priv->rxqs_n); ++i) {
		struct rxq *rxq = (*priv->rxqs)[i];
		struct rxq_ctrl *rxq_ctrl =
			container_of(rxq, struct rxq_ctrl, rxq);
		unsigned int mb_len;
		unsigned int tmp;

		if (rxq == NULL)
			continue;
		mb_len = rte_pktmbuf_data_room_size(rxq->mp);
		assert(mb_len >= RTE_PKTMBUF_HEADROOM);
		/* Provide new values to rxq_setup(). */
		dev->data->dev_conf.rxmode.jumbo_frame =
			(max_frame_len > ETHER_MAX_LEN);
		dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame_len;
		if (rehash)
			ret = rxq_rehash(dev, rxq_ctrl);
		else
			ret = rxq_ctrl_setup(dev, rxq_ctrl, 1 << rxq->elts_n,
					     rxq_ctrl->socket, NULL, rxq->mp);
		if (!ret)
			continue;
		/* Attempt to roll back in case of error. */
		tmp = (mb_len << rxq->sges_n) - RTE_PKTMBUF_HEADROOM;
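		/* tmp is the largest frame size the queue's current sges_n
		 * configuration can already hold. */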
		if (max_frame_len != tmp) {
			max_frame_len = tmp;
			goto recover;
		}
		/* Double fault, disable RX. */
		break;
	}
	/* Mimic mlx5_dev_start(). */
	if (ret) {
		ERROR("unable to reconfigure RX queues, RX disabled");
	} else if (restart &&
		   !rehash &&
		   !priv_create_hash_rxqs(priv) &&
		   !priv_rehash_flows(priv)) {
		if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_NONE)
			priv_fdir_enable(priv);
		priv_dev_interrupt_handler_install(priv, dev);
	}
	priv->mtu = mtu;
	/* Burst functions can now be called again. */
	rte_wmb();
	/*
	 * Use a safe RX burst function in case of error, otherwise select RX
	 * burst function again.
	 */
	if (!ret)
		priv_select_rx_function(priv);
	priv_unlock(priv);
	return 0;
out:
	ret = errno;
	WARN("cannot set port %u MTU to %u: %s", priv->port, mtu,
	     strerror(ret));
	priv_unlock(priv);
	assert(ret >= 0);
	return -ret;

@@ -796,73 +796,6 @@ rxq_cleanup(struct rxq_ctrl *rxq_ctrl)
	memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
}

/**
 * Reconfigure RX queue buffers.
 *
 * rxq_rehash() does not allocate mbufs, which, if not done from the right
 * thread (such as a control thread), may corrupt the pool.
 * In case of failure, the queue is left untouched.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rxq_ctrl
 *   RX queue pointer.
 *
 * @return
 *   0 on success, errno value on failure.
 */
int
rxq_rehash(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl)
{
	unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
	unsigned int i;
	struct ibv_exp_wq_attr mod;
	int err;

	DEBUG("%p: rehashing queue %p with %u SGE(s) per packet",
	      (void *)dev, (void *)rxq_ctrl, 1 << rxq_ctrl->rxq.sges_n);
	assert(!(elts_n % (1 << rxq_ctrl->rxq.sges_n)));
	/* From now on, any failure will render the queue unusable.
	 * Reinitialize WQ. */
	mod = (struct ibv_exp_wq_attr){
		.attr_mask = IBV_EXP_WQ_ATTR_STATE,
		.wq_state = IBV_EXP_WQS_RESET,
	};
	err = ibv_exp_modify_wq(rxq_ctrl->wq, &mod);
	if (err) {
		ERROR("%p: cannot reset WQ: %s", (void *)dev, strerror(err));
		assert(err > 0);
		return err;
	}
	/* Snatch mbufs from original queue. */
	claim_zero(rxq_trim_elts(&rxq_ctrl->rxq));
	claim_zero(rxq_alloc_elts(rxq_ctrl, elts_n, rxq_ctrl->rxq.elts));
	for (i = 0; i != elts_n; ++i) {
		struct rte_mbuf *buf = (*rxq_ctrl->rxq.elts)[i];

		assert(rte_mbuf_refcnt_read(buf) == 2);
		rte_pktmbuf_free_seg(buf);
	}
	/* Change queue state to ready. */
	mod = (struct ibv_exp_wq_attr){
		.attr_mask = IBV_EXP_WQ_ATTR_STATE,
		.wq_state = IBV_EXP_WQS_RDY,
	};
	err = ibv_exp_modify_wq(rxq_ctrl->wq, &mod);
	if (err) {
		ERROR("%p: WQ state to IBV_EXP_WQS_RDY failed: %s",
		      (void *)dev, strerror(err));
		goto error;
	}
	/* Update doorbell counter. */
	rxq_ctrl->rxq.rq_ci = elts_n >> rxq_ctrl->rxq.sges_n;
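	/* (Each packet spans 2^sges_n elements, hence the shift: the ring
	 * holds elts_n >> sges_n packet-sized entries.) */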
	rte_wmb();
	*rxq_ctrl->rxq.rq_db = htonl(rxq_ctrl->rxq.rq_ci);
error:
	assert(err >= 0);
	return err;
}

/**
 * Initialize RX queue.
 *
@@ -927,7 +860,7 @@ rxq_setup(struct rxq_ctrl *tmpl)
 * @return
 *   0 on success, errno value on failure.
 */
int
static int
rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
	       uint16_t desc, unsigned int socket,
	       const struct rte_eth_rxconf *conf, struct rte_mempool *mp)

@@ -307,10 +307,6 @@ void priv_destroy_hash_rxqs(struct priv *);
int priv_allow_flow_type(struct priv *, enum hash_rxq_flow_type);
int priv_rehash_flows(struct priv *);
void rxq_cleanup(struct rxq_ctrl *);
int rxq_rehash(struct rte_eth_dev *, struct rxq_ctrl *);
int rxq_ctrl_setup(struct rte_eth_dev *, struct rxq_ctrl *, uint16_t,
		   unsigned int, const struct rte_eth_rxconf *,
		   struct rte_mempool *);
int mlx5_rx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
			const struct rte_eth_rxconf *, struct rte_mempool *);
void mlx5_rx_queue_release(void *);