enic: improve Tx packet rate

For every packet sent, a completion was being requested and the
posted_index register on the NIC was being updated. Instead, request a
completion and update the posted index only once per burst, after all
packets have been queued by the burst function.

Signed-off-by: John Daley <johndale@cisco.com>
Acked-by: Sujith Sankar <ssujith@cisco.com>
John Daley 2015-10-29 11:45:16 -07:00 committed by Thomas Monjalon
parent 4009b369e7
commit d739ba4c6a
4 changed files with 115 additions and 26 deletions
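
The change is easiest to see in isolation. The sketch below is a minimal,
self-contained illustration of the idea and not the enic code: fake_wq,
fake_pkt and fake_xmit_burst are made-up names, and the doorbell is modelled
as a plain MMIO word. Instead of requesting a completion and writing
posted_index for every packet, the completion is requested only on the last
descriptor of the burst and the doorbell is written once, after all
descriptors have been queued.

#include <stdint.h>

/* Hypothetical, simplified work queue; names do not match the vNIC code. */
struct fake_wq {
	volatile uint32_t *posted_index;	/* MMIO doorbell register */
	uint32_t head;				/* next free descriptor slot */
	uint32_t ring_size;
};

struct fake_pkt {
	void *data;
	uint16_t len;
};

/* Stub: pretend to fill one descriptor for pkt; cq_entry would ask the
 * NIC for a completion on that descriptor. */
static void fake_queue_desc(struct fake_wq *wq, const struct fake_pkt *pkt,
			    int cq_entry)
{
	(void)pkt;
	(void)cq_entry;
	wq->head = (wq->head + 1) % wq->ring_size;
}

/* One burst: request a completion only for the last packet and write the
 * doorbell (posted_index) once, after every descriptor has been queued,
 * rather than once per packet. */
uint16_t fake_xmit_burst(struct fake_wq *wq, struct fake_pkt *pkts,
			 uint16_t nb_pkts)
{
	uint16_t i;

	for (i = 0; i < nb_pkts; i++)
		fake_queue_desc(wq, &pkts[i], i + 1 == nb_pkts);

	if (nb_pkts != 0) {
		/* A write barrier belongs here so the NIC cannot observe the
		 * new posted_index before the descriptors are written out. */
		*wq->posted_index = wq->head;
	}
	return nb_pkts;
}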

@@ -0,0 +1,79 @@
/*
* Copyright 2008-2015 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* Copyright (c) 2015, Cisco Systems, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef _ENIC_VNIC_WQ_H_
#define _ENIC_VNIC_WQ_H_
#include "vnic_dev.h"
#include "vnic_cq.h"

static inline void enic_vnic_post_wq_index(struct vnic_wq *wq)
{
	struct vnic_wq_buf *buf = wq->to_use;

	/* Adding write memory barrier prevents compiler and/or CPU
	 * reordering, thus avoiding descriptor posting before
	 * descriptor is initialized. Otherwise, hardware can read
	 * stale descriptor fields.
	 */
	wmb();
	iowrite32(buf->index, &wq->ctrl->posted_index);
}

static inline void enic_vnic_post_wq(struct vnic_wq *wq,
				     void *os_buf, dma_addr_t dma_addr,
				     unsigned int len, int sop,
				     uint8_t desc_skip_cnt, uint8_t cq_entry,
				     uint8_t compressed_send, uint64_t wrid)
{
	struct vnic_wq_buf *buf = wq->to_use;

	buf->sop = sop;
	buf->cq_entry = cq_entry;
	buf->compressed_send = compressed_send;
	buf->desc_skip_cnt = desc_skip_cnt;
	buf->os_buf = os_buf;
	buf->dma_addr = dma_addr;
	buf->len = len;
	buf->wr_id = wrid;

	buf = buf->next;
	if (cq_entry)
		enic_vnic_post_wq_index(wq);
	wq->to_use = buf;
	wq->ring.desc_avail -= desc_skip_cnt;
}
#endif /* _ENIC_VNIC_WQ_H_ */
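
For reference, a hedged sketch of how a caller might drive these helpers.
The example_post_burst() wrapper and its bufs/addrs/lens parameters are
illustrative and not part of the driver, and it omits the encoding of the
hardware descriptors themselves (done with wq_enet_desc_enc() in
enic_send_pkt()). The point is that cq_entry is non-zero only for the last
descriptor of the burst, so enic_vnic_post_wq() performs the posted_index
write exactly once per burst.

#include "enic_vnic_wq.h"

/* Illustrative only: queue nb_desc single-segment buffers as one burst. */
static void example_post_burst(struct vnic_wq *wq, void **bufs,
			       dma_addr_t *addrs, unsigned int *lens,
			       unsigned int nb_desc)
{
	unsigned int i;

	for (i = 0; i < nb_desc; i++) {
		uint8_t last = (i + 1 == nb_desc);

		/* cq_entry is set only on the last descriptor, so exactly one
		 * completion is requested and enic_vnic_post_wq_index() runs
		 * (and writes posted_index) exactly once for the burst. */
		enic_vnic_post_wq(wq, bufs[i], addrs[i], lens[i],
				  1 /* sop */,
				  1 /* desc_skip_cnt */,
				  last /* cq_entry */,
				  0 /* compressed_send */,
				  0 /* wrid */);
	}
}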

@@ -48,7 +48,7 @@
#define DRV_NAME "enic_pmd"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Poll-mode Driver"
#define DRV_VERSION "1.0.0.5"
#define DRV_VERSION "1.0.0.6"
#define DRV_COPYRIGHT "Copyright 2008-2015 Cisco Systems, Inc"
#define ENIC_WQ_MAX 8
@@ -187,10 +187,12 @@ extern void enic_add_packet_filter(struct enic *enic);
extern void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr);
extern void enic_del_mac_address(struct enic *enic);
extern unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq);
extern int enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
struct rte_mbuf *tx_pkt, unsigned short len,
uint8_t sop, uint8_t eop,
uint16_t ol_flags, uint16_t vlan_tag);
extern void enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
struct rte_mbuf *tx_pkt, unsigned short len,
uint8_t sop, uint8_t eop, uint8_t cq_entry,
uint16_t ol_flags, uint16_t vlan_tag);
extern void enic_post_wq_index(struct vnic_wq *wq);
extern int enic_poll(struct vnic_rq *rq, struct rte_mbuf **rx_pkts,
unsigned int budget, unsigned int *work_done);
extern int enic_probe(struct enic *enic);

@@ -488,21 +488,26 @@ static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
unsigned int seg_len;
unsigned int inc_len;
unsigned int nb_segs;
struct rte_mbuf *tx_pkt;
struct rte_mbuf *tx_pkt, *next_tx_pkt;
struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
struct enic *enic = vnic_dev_priv(wq->vdev);
unsigned short vlan_id;
unsigned short ol_flags;
uint8_t last_seg, eop;
for (index = 0; index < nb_pkts; index++) {
tx_pkt = *tx_pkts++;
inc_len = 0;
nb_segs = tx_pkt->nb_segs;
if (nb_segs > vnic_wq_desc_avail(wq)) {
if (index > 0)
enic_post_wq_index(wq);
/* wq cleanup and try again */
if (!enic_cleanup_wq(enic, wq) ||
(nb_segs > vnic_wq_desc_avail(wq)))
(nb_segs > vnic_wq_desc_avail(wq))) {
return index;
}
}
pkt_len = tx_pkt->pkt_len;
vlan_id = tx_pkt->vlan_tci;
@@ -510,14 +515,15 @@ static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
for (frags = 0; inc_len < pkt_len; frags++) {
if (!tx_pkt)
break;
next_tx_pkt = tx_pkt->next;
seg_len = tx_pkt->data_len;
inc_len += seg_len;
if (enic_send_pkt(enic, wq, tx_pkt,
(unsigned short)seg_len, !frags,
(pkt_len == inc_len), ol_flags, vlan_id)) {
break;
}
tx_pkt = tx_pkt->next;
eop = (pkt_len == inc_len) || (!next_tx_pkt);
last_seg = eop &&
(index == ((unsigned int)nb_pkts - 1));
enic_send_pkt(enic, wq, tx_pkt, (unsigned short)seg_len,
!frags, eop, last_seg, ol_flags, vlan_id);
tx_pkt = next_tx_pkt;
}
}

@@ -58,6 +58,7 @@
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_nic.h"
#include "enic_vnic_wq.h"
static inline int enic_is_sriov_vf(struct enic *enic)
{
@@ -151,15 +152,18 @@ unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq)
-1 /*wq_work_to_do*/, enic_wq_service, NULL);
}
void enic_post_wq_index(struct vnic_wq *wq)
{
enic_vnic_post_wq_index(wq);
}
int enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
struct rte_mbuf *tx_pkt, unsigned short len,
uint8_t sop, uint8_t eop,
uint16_t ol_flags, uint16_t vlan_tag)
void enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
struct rte_mbuf *tx_pkt, unsigned short len,
uint8_t sop, uint8_t eop, uint8_t cq_entry,
uint16_t ol_flags, uint16_t vlan_tag)
{
struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
uint16_t mss = 0;
uint8_t cq_entry = eop;
uint8_t vlan_tag_insert = 0;
uint64_t bus_addr = (dma_addr_t)
(tx_pkt->buf_physaddr + RTE_PKTMBUF_HEADROOM);
@@ -190,14 +194,12 @@ int enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
vlan_tag,
0 /* loopback */);
vnic_wq_post(wq, (void *)tx_pkt, bus_addr, len,
sop, eop,
1 /*desc_skip_cnt*/,
cq_entry,
0 /*compressed send*/,
0 /*wrid*/);
return 0;
enic_vnic_post_wq(wq, (void *)tx_pkt, bus_addr, len,
sop,
1 /*desc_skip_cnt*/,
cq_entry,
0 /*compressed send*/,
0 /*wrid*/);
}
void enic_dev_stats_clear(struct enic *enic)