net/enic: add simple Rx handler

Add an optimized Rx handler for non-scattered Rx. The new handler assumes
one mbuf per packet, restocks the receive ring from a per-queue reserve of
pre-allocated mbufs (ENIC_RX_BURST_MAX deep), and detects completions by
testing the CQ descriptor color bit in place.
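
As a rough sketch of the completion-polling scheme (simplified pseudocode,
not the committed code; ring, budget, desc_count and consume() are
placeholders): the NIC inverts the color bit it writes each time it wraps
the completion ring, so software spots new descriptors without reading a
hardware index:

    /* Hypothetical color-bit poll loop; structure mirrors the handler. */
    while (budget--) {
            struct cq_enet_rq_desc *cqd = &ring[cq_idx];

            /* Same color as our last pass: not yet written by hardware. */
            if ((cqd->type_color & CQ_DESC_COLOR_MASK_NOSHIFT) == last_color)
                    break;
            consume(cqd);
            if (++cq_idx == desc_count) {
                    cq_idx = 0;
                    last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT;
            }
    }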

Signed-off-by: Hyong Youb Kim <hyonkim@cisco.com>
Signed-off-by: John Daley <johndale@cisco.com>
Author: John Daley <johndale@cisco.com>
Authored: 2018-06-29 02:29:42 -07:00
Committed by: Ferruh Yigit
Commit: 35e2cb6a17 (parent: 5a12c38740)
7 changed files with 156 additions and 3 deletions

drivers/net/enic/base/cq_desc.h

@@ -38,6 +38,7 @@ struct cq_desc {
#define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1)
#define CQ_DESC_COLOR_MASK 1
#define CQ_DESC_COLOR_SHIFT 7
#define CQ_DESC_COLOR_MASK_NOSHIFT 0x80
#define CQ_DESC_Q_NUM_BITS 10
#define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1)
#define CQ_DESC_COMP_NDX_BITS 12
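
The existing CQ_DESC_COLOR_MASK/CQ_DESC_COLOR_SHIFT pair extracts the color
by shifting it down from bit 7; the new mask lets the hot path test the bit
where it lies. An illustrative equivalence (not part of the patch):

    uint8_t tc = cqd->type_color;
    int shifted = (tc >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
    int in_place = tc & CQ_DESC_COLOR_MASK_NOSHIFT;  /* bit 7 = 0x80 */
    /* shifted != 0 exactly when in_place != 0, one shift cheaper. */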

drivers/net/enic/base/vnic_rq.h

@@ -52,6 +52,8 @@ struct vnic_rq {
struct vnic_dev *vdev;
struct vnic_rq_ctrl __iomem *ctrl; /* memory-mapped */
struct vnic_dev_ring ring;
struct rte_mbuf **free_mbufs; /* reserve of free mbufs */
int num_free_mbufs;
struct rte_mbuf **mbuf_ring; /* array of allocated mbufs */
unsigned int mbuf_next_idx; /* next mb to consume */
void *os_buf_head;
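
The reserve is refilled in bulk from the mempool and drained from the front,
so the num_free_mbufs entries still unused always sit at the tail of the
array. A sketch of the invariant that the Rx path and enic_free_rq() both
rely on (illustrative only):

    /* Unused reserve entries occupy indices
     * [ENIC_RX_BURST_MAX - num_free_mbufs, ENIC_RX_BURST_MAX).
     */
    struct rte_mbuf **first_unused =
            rq->free_mbufs + ENIC_RX_BURST_MAX - rq->num_free_mbufs;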

drivers/net/enic/enic.h

@@ -313,6 +313,8 @@ int enic_clsf_init(struct enic *enic);
void enic_clsf_destroy(struct enic *enic);
uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
uint16_t enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
uint16_t enic_dummy_recv_pkts(void *rx_queue,
struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);

drivers/net/enic/enic_ethdev.c

@@ -524,7 +524,8 @@ static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
RTE_PTYPE_UNKNOWN
};
- if (dev->rx_pkt_burst == enic_recv_pkts)
+ if (dev->rx_pkt_burst == enic_recv_pkts ||
+     dev->rx_pkt_burst == enic_noscatter_recv_pkts)
return ptypes;
return NULL;
}

drivers/net/enic/enic_main.c

@@ -571,6 +571,14 @@ int enic_enable(struct enic *enic)
eth_dev->tx_pkt_burst = &enic_xmit_pkts;
}
/*
 * Use the non-scatter, simplified RX handler if possible. The data RQ
 * is enabled only when Rx scatter is in use, so data_queue_enable == 0
 * means every packet fits in a single mbuf.
 */
if (enic->rq_count > 0 && enic->rq[0].data_queue_enable == 0) {
PMD_INIT_LOG(DEBUG, " use the non-scatter Rx handler");
eth_dev->rx_pkt_burst = &enic_noscatter_recv_pkts;
} else {
PMD_INIT_LOG(DEBUG, " use the normal Rx handler");
}
for (index = 0; index < enic->wq_count; index++)
enic_start_wq(enic, index);
for (index = 0; index < enic->rq_count; index++)
@@ -623,6 +631,19 @@ void enic_free_rq(void *rxq)
enic = vnic_dev_priv(rq_sop->vdev);
rq_data = &enic->rq[rq_sop->data_queue_idx];
if (rq_sop->free_mbufs) {
struct rte_mbuf **mb;
int i;
mb = rq_sop->free_mbufs;
for (i = ENIC_RX_BURST_MAX - rq_sop->num_free_mbufs;
i < ENIC_RX_BURST_MAX; i++)
rte_pktmbuf_free(mb[i]);
rte_free(rq_sop->free_mbufs);
rq_sop->free_mbufs = NULL;
rq_sop->num_free_mbufs = 0;
}
enic_rxmbuf_queue_release(enic, rq_sop);
if (rq_data->in_use)
enic_rxmbuf_queue_release(enic, rq_data);
@@ -786,13 +807,13 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
rq_data->max_mbufs_per_pkt = mbufs_per_pkt;
if (mbufs_per_pkt > 1) {
- min_sop = 64;
+ min_sop = ENIC_RX_BURST_MAX;
max_sop = ((enic->config.rq_desc_count /
(mbufs_per_pkt - 1)) & ENIC_ALIGN_DESCS_MASK);
min_data = min_sop * (mbufs_per_pkt - 1);
max_data = enic->config.rq_desc_count;
} else {
- min_sop = 64;
+ min_sop = ENIC_RX_BURST_MAX;
max_sop = enic->config.rq_desc_count;
min_data = 0;
max_data = 0;
@@ -863,10 +884,21 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
goto err_free_sop_mbuf;
}
rq_sop->free_mbufs = (struct rte_mbuf **)
rte_zmalloc_socket("rq->free_mbufs",
sizeof(struct rte_mbuf *) *
ENIC_RX_BURST_MAX,
RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
if (rq_sop->free_mbufs == NULL)
goto err_free_data_mbuf;
rq_sop->num_free_mbufs = 0;
rq_sop->tot_nb_desc = nb_desc; /* squirrel away for MTU update function */
return 0;
err_free_data_mbuf:
rte_free(rq_data->mbuf_ring);
err_free_sop_mbuf:
rte_free(rq_sop->mbuf_ring);
err_free_cq:
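
The new err_free_data_mbuf label slots into the usual kernel-style unwind
chain: a failure at any allocation step jumps to the label that releases
the most recent allocation and falls through the earlier ones.
Schematically (hypothetical, condensed):

    a = alloc_a();
    if (a == NULL)
            goto err;
    b = alloc_b();
    if (b == NULL)
            goto err_free_a;
    return 0;

    err_free_a:
            free_a(a);
    err:
            return -ENOMEM;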

drivers/net/enic/enic_res.h

@@ -37,6 +37,7 @@
#define ENIC_NON_TSO_MAX_DESC 16
#define ENIC_DEFAULT_RX_FREE_THRESH 32
#define ENIC_TX_XMIT_MAX 64
#define ENIC_RX_BURST_MAX 64
/* Defaults for dev_info.default_{rx,tx}portconf */
#define ENIC_DEFAULT_RX_BURST 32

drivers/net/enic/enic_rxtx.c

@@ -471,6 +471,120 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
return nb_rx;
}
uint16_t
enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
struct rte_mbuf *mb, **rx, **rxmb;
uint16_t cq_idx, nb_rx, max_rx;
struct cq_enet_rq_desc *cqd;
struct rq_enet_desc *rqd;
unsigned int port_id;
struct vnic_cq *cq;
struct vnic_rq *rq;
struct enic *enic;
uint8_t color;
bool overlay;
bool tnl;
rq = rx_queue;
enic = vnic_dev_priv(rq->vdev);
cq = &enic->cq[enic_cq_rq(enic, rq->index)];
cq_idx = cq->to_clean;
/*
* Fill up the reserve of free mbufs. Below, we restock the receive
* ring with these mbufs to avoid allocation failures.
*/
if (rq->num_free_mbufs == 0) {
if (rte_mempool_get_bulk(rq->mp, (void **)rq->free_mbufs,
ENIC_RX_BURST_MAX))
return 0;
rq->num_free_mbufs = ENIC_RX_BURST_MAX;
}
/* Receive until the end of the ring, at most. */
max_rx = RTE_MIN(nb_pkts, rq->num_free_mbufs);
max_rx = RTE_MIN(max_rx, cq->ring.desc_count - cq_idx);
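/*
 * Stopping at the ring end means the loop below never crosses the
 * wrap; the index reset and color flip are handled once, after it.
 */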
cqd = (struct cq_enet_rq_desc *)(cq->ring.descs) + cq_idx;
color = cq->last_color;
rxmb = rq->mbuf_ring + cq_idx;
port_id = enic->port_id;
overlay = enic->overlay_offload;
rx = rx_pkts;
while (max_rx) {
max_rx--;
if ((cqd->type_color & CQ_DESC_COLOR_MASK_NOSHIFT) == color)
break;
if (unlikely(cqd->bytes_written_flags &
CQ_ENET_RQ_DESC_FLAGS_TRUNCATED)) {
rte_pktmbuf_free(*rxmb++);
rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
cqd++;
continue;
}
mb = *rxmb++;
/* prefetch mbuf data for caller */
rte_packet_prefetch(RTE_PTR_ADD(mb->buf_addr,
RTE_PKTMBUF_HEADROOM));
mb->data_len = cqd->bytes_written_flags &
CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
mb->pkt_len = mb->data_len;
mb->port = port_id;
tnl = overlay && (cqd->completed_index_flags &
CQ_ENET_RQ_DESC_FLAGS_FCOE) != 0;
mb->packet_type =
enic_cq_rx_flags_to_pkt_type((struct cq_desc *)cqd,
tnl);
enic_cq_rx_to_pkt_flags((struct cq_desc *)cqd, mb);
/* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */
if (tnl) {
mb->packet_type &= ~(RTE_PTYPE_L3_MASK |
RTE_PTYPE_L4_MASK);
}
cqd++;
*rx++ = mb;
}
/* Number of descriptors visited, including dropped (truncated) packets */
nb_rx = cqd - (struct cq_enet_rq_desc *)(cq->ring.descs) - cq_idx;
if (nb_rx == 0)
return 0;
rqd = ((struct rq_enet_desc *)rq->ring.descs) + cq_idx;
rxmb = rq->mbuf_ring + cq_idx;
cq_idx += nb_rx;
rq->rx_nb_hold += nb_rx;
if (unlikely(cq_idx == cq->ring.desc_count)) {
cq_idx = 0;
cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT;
}
cq->to_clean = cq_idx;
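/*
 * Restock the just-consumed ring slots with mbufs taken from the front
 * of the remaining reserve; the still-unused entries sit at the tail
 * of free_mbufs.
 */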
memcpy(rxmb, rq->free_mbufs + ENIC_RX_BURST_MAX - rq->num_free_mbufs,
sizeof(struct rte_mbuf *) * nb_rx);
rq->num_free_mbufs -= nb_rx;
while (nb_rx) {
nb_rx--;
mb = *rxmb++;
mb->data_off = RTE_PKTMBUF_HEADROOM;
rqd->address = mb->buf_iova + RTE_PKTMBUF_HEADROOM;
rqd++;
}
if (rq->rx_nb_hold > rq->rx_free_thresh) {
rq->posted_index = enic_ring_add(rq->ring.desc_count,
rq->posted_index,
rq->rx_nb_hold);
rq->rx_nb_hold = 0;
rte_wmb();
iowrite32_relaxed(rq->posted_index,
&rq->ctrl->posted_index);
}
return rx - rx_pkts;
}
static void enic_fast_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
{
unsigned int desc_count, n, nb_to_free, tail_idx;
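
Applications do not call the new burst function directly; it is reached
through rte_eth_rx_burst() once the driver installs it as rx_pkt_burst.
A minimal hypothetical receive loop (port_id and queue 0 are placeholders):

    struct rte_mbuf *pkts[ENIC_RX_BURST_MAX];
    uint16_t i, nb;

    nb = rte_eth_rx_burst(port_id, 0, pkts, ENIC_RX_BURST_MAX);
    for (i = 0; i < nb; i++) {
            /* ... application processing ... */
            rte_pktmbuf_free(pkts[i]);
    }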