net/mlx: retrieve mbuf size through proper function

The mbuf data room size can be retrieved directly from the mempool through
rte_pktmbuf_data_room_size(); there is no need to allocate a temporary mbuf
just to read its buf_len.

Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
Author:    Adrien Mazarguil <adrien.mazarguil@6wind.com>
Committer: Bruce Richardson
Date:      2016-06-08 11:43:27 +02:00
Commit:    78d9c95d91 (parent: f3b492d7b4)
5 files changed, 22 insertions(+), 34 deletions(-)
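
For context, the patch replaces an allocate-and-inspect pattern with a direct
mempool query. Below is a minimal sketch of both patterns (the helper names
are hypothetical, not from the patch):

	#include <errno.h>
	#include <stdint.h>

	#include <rte_mbuf.h>
	#include <rte_mempool.h>

	/* Old pattern (removed by this commit): allocate a temporary mbuf
	 * just to read buf_len, then free it. Fails on an exhausted pool. */
	static int
	mb_len_via_alloc(struct rte_mempool *mp, uint32_t *mb_len)
	{
		struct rte_mbuf *buf = rte_pktmbuf_alloc(mp);

		if (buf == NULL)
			return ENOMEM;
		*mb_len = buf->buf_len;
		rte_pktmbuf_free(buf);
		return 0;
	}

	/* New pattern: query the mempool directly; cannot fail and does
	 * not dip into the pool. */
	static uint32_t
	mb_len_via_pool(struct rte_mempool *mp)
	{
		return rte_pktmbuf_data_room_size(mp);
	}

rte_pktmbuf_data_room_size() returns the data room size recorded in the
pool's private area at creation time (the data_room_size argument of
rte_pktmbuf_pool_create()), headroom included, which is why the new code can
assert mb_len >= RTE_PKTMBUF_HEADROOM before subtracting the headroom.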

diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c

@@ -196,7 +196,6 @@ struct rxq {
 	unsigned int sp:1; /* Use scattered RX elements. */
 	unsigned int csum:1; /* Enable checksum offloading. */
 	unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
-	uint32_t mb_len; /* Length of a mp-issued mbuf. */
 	struct mlx4_rxq_stats stats; /* RX queue counters. */
 	unsigned int socket; /* CPU socket ID for allocations. */
 	struct ibv_exp_res_domain *rd; /* Resource Domain. */
@@ -3159,7 +3158,6 @@ mlx4_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			rep->ol_flags = -1;
 #endif
 			assert(rep->buf_len == seg->buf_len);
-			assert(rep->buf_len == rxq->mb_len);
 			/* Reconfigure sge to use rep instead of seg. */
 			assert(sge->lkey == rxq->mr->lkey);
 			sge->addr = ((uintptr_t)rep->buf_addr + seg_headroom);
@@ -3580,6 +3578,7 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
 	unsigned int i, k;
 	struct ibv_exp_qp_attr mod;
 	struct ibv_recv_wr *bad_wr;
+	unsigned int mb_len;
 	int err;
 	int parent = (rxq == &priv->rxq_parent);
 
@@ -3588,6 +3587,7 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
 		      (void *)dev, (void *)rxq);
 		return EINVAL;
 	}
+	mb_len = rte_pktmbuf_data_room_size(rxq->mp);
 	DEBUG("%p: rehashing queue %p", (void *)dev, (void *)rxq);
 	/* Number of descriptors and mbufs currently allocated. */
 	desc_n = (tmpl.elts_n * (tmpl.sp ? MLX4_PMD_SGE_WR_N : 1));
@@ -3602,9 +3602,10 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
 		rxq->csum_l2tun = tmpl.csum_l2tun;
 	}
 	/* Enable scattered packets support for this queue if necessary. */
+	assert(mb_len >= RTE_PKTMBUF_HEADROOM);
 	if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
 	    (dev->data->dev_conf.rxmode.max_rx_pkt_len >
-	     (tmpl.mb_len - RTE_PKTMBUF_HEADROOM))) {
+	     (mb_len - RTE_PKTMBUF_HEADROOM))) {
 		tmpl.sp = 1;
 		desc_n /= MLX4_PMD_SGE_WR_N;
 	} else
@@ -3795,7 +3796,7 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
 	} attr;
 	enum ibv_exp_query_intf_status status;
 	struct ibv_recv_wr *bad_wr;
-	struct rte_mbuf *buf;
+	unsigned int mb_len;
 	int ret = 0;
 	int parent = (rxq == &priv->rxq_parent);
 
@@ -3811,31 +3812,22 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
 		desc = 1;
 		goto skip_mr;
 	}
+	mb_len = rte_pktmbuf_data_room_size(mp);
 	if ((desc == 0) || (desc % MLX4_PMD_SGE_WR_N)) {
 		ERROR("%p: invalid number of RX descriptors (must be a"
 		      " multiple of %d)", (void *)dev, MLX4_PMD_SGE_WR_N);
 		return EINVAL;
 	}
-	/* Get mbuf length. */
-	buf = rte_pktmbuf_alloc(mp);
-	if (buf == NULL) {
-		ERROR("%p: unable to allocate mbuf", (void *)dev);
-		return ENOMEM;
-	}
-	tmpl.mb_len = buf->buf_len;
-	assert((rte_pktmbuf_headroom(buf) +
-	        rte_pktmbuf_tailroom(buf)) == tmpl.mb_len);
-	assert(rte_pktmbuf_headroom(buf) == RTE_PKTMBUF_HEADROOM);
-	rte_pktmbuf_free(buf);
 	/* Toggle RX checksum offload if hardware supports it. */
 	if (priv->hw_csum)
 		tmpl.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
 	if (priv->hw_csum_l2tun)
 		tmpl.csum_l2tun = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
 	/* Enable scattered packets support for this queue if necessary. */
+	assert(mb_len >= RTE_PKTMBUF_HEADROOM);
 	if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
 	    (dev->data->dev_conf.rxmode.max_rx_pkt_len >
-	     (tmpl.mb_len - RTE_PKTMBUF_HEADROOM))) {
+	     (mb_len - RTE_PKTMBUF_HEADROOM))) {
 		tmpl.sp = 1;
 		desc /= MLX4_PMD_SGE_WR_N;
 	}
@@ -4872,6 +4864,7 @@ mlx4_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	/* Reconfigure each RX queue. */
 	for (i = 0; (i != priv->rxqs_n); ++i) {
 		struct rxq *rxq = (*priv->rxqs)[i];
+		unsigned int mb_len;
 		unsigned int max_frame_len;
 		int sp;
 
@@ -4881,7 +4874,9 @@ mlx4_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 		 * toggle scattered support (sp) if necessary. */
 		max_frame_len = (priv->mtu + ETHER_HDR_LEN +
				 (ETHER_MAX_VLAN_FRAME_LEN - ETHER_MAX_LEN));
-		sp = (max_frame_len > (rxq->mb_len - RTE_PKTMBUF_HEADROOM));
+		mb_len = rte_pktmbuf_data_room_size(rxq->mp);
+		assert(mb_len >= RTE_PKTMBUF_HEADROOM);
+		sp = (max_frame_len > (mb_len - RTE_PKTMBUF_HEADROOM));
 		/* Provide new values to rxq_setup(). */
 		dev->data->dev_conf.rxmode.jumbo_frame = sp;
 		dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame_len;
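
The same data-room check recurs in rxq_setup(), rxq_rehash(), and both
set_mtu handlers across the two PMDs. Condensed into one helper, it amounts
to the sketch below (hypothetical, not part of the patch; written against
the pre-18.x rxmode API used here):

	#include <stdint.h>

	#include <rte_ethdev.h>
	#include <rte_mbuf.h>

	/* A queue needs scattered RX when the largest accepted frame no
	 * longer fits in a single mbuf data room minus the headroom. */
	static int
	needs_scattered_rx(struct rte_eth_dev *dev, struct rte_mempool *mp)
	{
		uint16_t mb_len = rte_pktmbuf_data_room_size(mp);

		return dev->data->dev_conf.rxmode.jumbo_frame &&
		       (dev->data->dev_conf.rxmode.max_rx_pkt_len >
			(uint32_t)(mb_len - RTE_PKTMBUF_HEADROOM));
	}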

diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c

@@ -717,6 +717,7 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	/* Reconfigure each RX queue. */
 	for (i = 0; (i != priv->rxqs_n); ++i) {
 		struct rxq *rxq = (*priv->rxqs)[i];
+		unsigned int mb_len;
 		unsigned int max_frame_len;
 		int sp;
 
@@ -726,7 +727,9 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 		 * toggle scattered support (sp) if necessary. */
 		max_frame_len = (priv->mtu + ETHER_HDR_LEN +
				 (ETHER_MAX_VLAN_FRAME_LEN - ETHER_MAX_LEN));
-		sp = (max_frame_len > (rxq->mb_len - RTE_PKTMBUF_HEADROOM));
+		mb_len = rte_pktmbuf_data_room_size(rxq->mp);
+		assert(mb_len >= RTE_PKTMBUF_HEADROOM);
+		sp = (max_frame_len > (mb_len - RTE_PKTMBUF_HEADROOM));
 		/* Provide new values to rxq_setup(). */
 		dev->data->dev_conf.rxmode.jumbo_frame = sp;
 		dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame_len;

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c

@@ -973,6 +973,7 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
 	struct rte_mbuf **pool;
 	unsigned int i, k;
 	struct ibv_exp_wq_attr mod;
+	unsigned int mb_len = rte_pktmbuf_data_room_size(rxq->mp);
 	int err;
 
 	DEBUG("%p: rehashing queue %p", (void *)dev, (void *)rxq);
@@ -989,9 +990,10 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
 		rxq->csum_l2tun = tmpl.csum_l2tun;
 	}
 	/* Enable scattered packets support for this queue if necessary. */
+	assert(mb_len >= RTE_PKTMBUF_HEADROOM);
 	if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
 	    (dev->data->dev_conf.rxmode.max_rx_pkt_len >
-	     (tmpl.mb_len - RTE_PKTMBUF_HEADROOM))) {
+	     (mb_len - RTE_PKTMBUF_HEADROOM))) {
 		tmpl.sp = 1;
 		desc_n /= MLX5_PMD_SGE_WR_N;
 	} else
@@ -1156,7 +1158,7 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
 		struct ibv_exp_wq_init_attr wq;
 	} attr;
 	enum ibv_exp_query_intf_status status;
-	struct rte_mbuf *buf;
+	unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
 	int ret = 0;
 	unsigned int i;
 	unsigned int cq_size = desc;
@@ -1167,26 +1169,16 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
 		      " multiple of %d)", (void *)dev, MLX5_PMD_SGE_WR_N);
 		return EINVAL;
 	}
-	/* Get mbuf length. */
-	buf = rte_pktmbuf_alloc(mp);
-	if (buf == NULL) {
-		ERROR("%p: unable to allocate mbuf", (void *)dev);
-		return ENOMEM;
-	}
-	tmpl.mb_len = buf->buf_len;
-	assert((rte_pktmbuf_headroom(buf) +
-	        rte_pktmbuf_tailroom(buf)) == tmpl.mb_len);
-	assert(rte_pktmbuf_headroom(buf) == RTE_PKTMBUF_HEADROOM);
-	rte_pktmbuf_free(buf);
 	/* Toggle RX checksum offload if hardware supports it. */
 	if (priv->hw_csum)
 		tmpl.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
 	if (priv->hw_csum_l2tun)
 		tmpl.csum_l2tun = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
 	/* Enable scattered packets support for this queue if necessary. */
+	assert(mb_len >= RTE_PKTMBUF_HEADROOM);
 	if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
 	    (dev->data->dev_conf.rxmode.max_rx_pkt_len >
-	     (tmpl.mb_len - RTE_PKTMBUF_HEADROOM))) {
+	     (mb_len - RTE_PKTMBUF_HEADROOM))) {
 		tmpl.sp = 1;
 		desc /= MLX5_PMD_SGE_WR_N;
 	}

diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c

@@ -1005,7 +1005,6 @@ mlx5_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			rep->ol_flags = -1;
 #endif
 			assert(rep->buf_len == seg->buf_len);
-			assert(rep->buf_len == rxq->mb_len);
 			/* Reconfigure sge to use rep instead of seg. */
 			assert(sge->lkey == rxq->mr->lkey);
 			sge->addr = ((uintptr_t)rep->buf_addr + seg_headroom);

diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h

@@ -121,7 +121,6 @@ struct rxq {
 		struct rxq_elt_sp (*sp)[]; /* Scattered RX elements. */
 		struct rxq_elt (*no_sp)[]; /* RX elements. */
 	} elts;
-	uint32_t mb_len; /* Length of a mp-issued mbuf. */
 	unsigned int socket; /* CPU socket ID for allocations. */
 	struct mlx5_rxq_stats stats; /* RX queue counters. */
 	struct ibv_exp_res_domain *rd; /* Resource Domain. */