Improve TX path of the VNIC driver

- Avoid a memory leak when nicvf_tx_mbuf_locked() fails
- Introduce a nicvf_xmit_locked() routine that uses drbr_peek(),
  drbr_advance() or drbr_putback() for a specific ifnet.
  This gives a cleaner and more efficient design and prevents
  dropping mbufs that were not sent due to a temporary lack of
  descriptors (see the sketch after the change summary below).
- Add the missing ETHER_BPF_MTAP() hook

Pointed out by: yongari
Reviewed by:    wma
Obtained from:  Semihalf
Sponsored by:   Cavium
Differential Revision: https://reviews.freebsd.org/D5534
Author: Zbigniew Bodek
Date:   2016-03-31 13:10:29 +00:00
Commit: 7c617ace70 (parent 8efb5aa133)

3 changed files with 78 additions and 30 deletions
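The new nicvf_xmit_locked() is built around the drbr_peek()/drbr_advance()/drbr_putback()
drain pattern named in the summary above. The following minimal sketch shows the pattern in
isolation; it is not the committed code, and drv_tx_one() is a hypothetical stand-in for a
single-frame transmit routine that, like the reworked nicvf_tx_mbuf_locked(), frees the mbuf
and NULLs the pointer on a fatal error but leaves it intact on a transient one:

#include <sys/param.h>
#include <sys/mbuf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/ifq.h>

/* Hypothetical single-frame transmit routine (assumption, not driver code). */
static int drv_tx_one(struct mbuf **mp);

static int
drv_txq_drain(struct ifnet *ifp, struct buf_ring *br)
{
        struct mbuf *m;
        int err = 0;

        while ((m = drbr_peek(ifp, br)) != NULL) {
                err = drv_tx_one(&m);
                if (err != 0) {
                        if (m == NULL) {
                                /* Fatal: mbuf already freed, drop it from the ring. */
                                drbr_advance(ifp, br);
                        } else {
                                /* Transient (e.g. no descriptors): keep the frame. */
                                drbr_putback(ifp, br, m);
                        }
                        break;
                }
                /* Frame accepted by the hardware queue: commit its removal. */
                drbr_advance(ifp, br);
        }
        return (err);
}

Because drbr_peek() does not remove the mbuf from the buf_ring, a frame that cannot be sent
right now is simply left (or put back) at the head of the ring and retried later, instead of
being dequeued and dropped as the old drbr_dequeue() loop did.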

sys/dev/vnic/nicvf_main.c

@@ -661,8 +661,8 @@ nicvf_if_transmit(struct ifnet *ifp, struct mbuf *mbuf)
 	sq = &qs->sq[qidx];
-	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
-	    IFF_DRV_RUNNING) {
+	if (((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
+	    IFF_DRV_RUNNING) || !nic->link_up) {
 		err = drbr_enqueue(ifp, sq->br, mbuf);
 		return (err);
 	}
@@ -679,17 +679,16 @@ nicvf_if_transmit(struct ifnet *ifp, struct mbuf *mbuf)
 		}
 	}
 
+	err = drbr_enqueue(ifp, sq->br, mbuf);
+	if (err != 0)
+		return (err);
+
 	if (NICVF_TX_TRYLOCK(sq) != 0) {
-		err = nicvf_tx_mbuf_locked(sq, mbuf);
+		err = nicvf_xmit_locked(sq);
 		NICVF_TX_UNLOCK(sq);
 		return (err);
-	} else {
-		err = drbr_enqueue(ifp, sq->br, mbuf);
-		if (err != 0)
-			return (err);
-
+	} else
 		taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);
-	}
 
 	return (0);
 }
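The reordering in nicvf_if_transmit() above is deliberate: the mbuf is now always placed on
the buf_ring first, and the TX lock is merely tried afterwards to drain the ring. Previously
a successful trylock sent the mbuf directly, so a failure inside nicvf_tx_mbuf_locked()
leaked a frame that had never been enqueued; now a contended lock just defers the drain to
the send taskqueue and nothing is dropped.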

sys/dev/vnic/nicvf_queues.c

@@ -60,11 +60,12 @@ __FBSDID("$FreeBSD$");
 #include <machine/bus.h>
 #include <machine/vmparam.h>
 
+#include <net/ethernet.h>
 #include <net/if.h>
 #include <net/if_var.h>
 #include <net/if_media.h>
 #include <net/ifq.h>
+#include <net/bpf.h>
 
-#include <net/ethernet.h>
 #include <netinet/in_systm.h>
 #include <netinet/in.h>
@@ -105,6 +106,8 @@ static void nicvf_cmp_queue_config(struct nicvf *, struct queue_set *, int,
 	    boolean_t);
 static void nicvf_sq_free_used_descs(struct nicvf *, struct snd_queue *, int);
+static int nicvf_tx_mbuf_locked(struct snd_queue *, struct mbuf **);
+
 static void nicvf_rbdr_task(void *, int);
 static void nicvf_rbdr_task_nowait(void *, int);
@@ -739,6 +742,7 @@ nicvf_cq_intr_handler(struct nicvf *nic, uint8_t cq_idx)
 	int cqe_count, cqe_head;
 	struct queue_set *qs = nic->qs;
 	struct cmp_queue *cq = &qs->cq[cq_idx];
+	struct snd_queue *sq = &qs->sq[cq_idx];
 	struct rcv_queue *rq;
 	struct cqe_rx_t *cq_desc;
 	struct lro_ctrl *lro;
@@ -818,6 +822,7 @@ nicvf_cq_intr_handler(struct nicvf *nic, uint8_t cq_idx)
 	    ((if_getdrvflags(nic->ifp) & IFF_DRV_RUNNING) != 0)) {
 		/* Reenable TXQ if its stopped earlier due to SQ full */
 		if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
+		taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);
 	}
 out:
 	/*
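Since unsent frames now wait on the buf_ring instead of being freed, the completion handler
must also restart transmission: when it clears IFF_DRV_OACTIVE after reclaiming SQ
descriptors, the added taskqueue_enqueue() kicks the send task so the queued frames are
drained without waiting for the next if_transmit() call.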
@@ -991,25 +996,62 @@ nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
 	memset(cq->mtx_name, 0, sizeof(cq->mtx_name));
 }
 
+int
+nicvf_xmit_locked(struct snd_queue *sq)
+{
+	struct nicvf *nic;
+	struct ifnet *ifp;
+	struct mbuf *next;
+	int err;
+
+	NICVF_TX_LOCK_ASSERT(sq);
+
+	nic = sq->nic;
+	ifp = nic->ifp;
+	err = 0;
+
+	while ((next = drbr_peek(ifp, sq->br)) != NULL) {
+		err = nicvf_tx_mbuf_locked(sq, &next);
+		if (err != 0) {
+			if (next == NULL)
+				drbr_advance(ifp, sq->br);
+			else
+				drbr_putback(ifp, sq->br, next);
+
+			break;
+		}
+		drbr_advance(ifp, sq->br);
+		/* Send a copy of the frame to the BPF listener */
+		ETHER_BPF_MTAP(ifp, next);
+	}
+	return (err);
+}
+
 static void
 nicvf_snd_task(void *arg, int pending)
 {
 	struct snd_queue *sq = (struct snd_queue *)arg;
-	struct mbuf *mbuf;
+	struct nicvf *nic;
+	struct ifnet *ifp;
+	int err;
+
+	nic = sq->nic;
+	ifp = nic->ifp;
+	/*
+	 * Skip sending anything if the driver is not running,
+	 * SQ full or link is down.
+	 */
+	if (((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
+	    IFF_DRV_RUNNING) || !nic->link_up)
+		return;
 
 	NICVF_TX_LOCK(sq);
-	while (1) {
-		mbuf = drbr_dequeue(NULL, sq->br);
-		if (mbuf == NULL)
-			break;
-
-		if (nicvf_tx_mbuf_locked(sq, mbuf) != 0) {
-			/* XXX ARM64TODO: Increase Tx drop counter */
-			m_freem(mbuf);
-			break;
-		}
-	}
+	err = nicvf_xmit_locked(sq);
 	NICVF_TX_UNLOCK(sq);
+	/* Try again */
+	if (err != 0)
+		taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);
 }
 
 /* Initialize transmit queue */
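The contract between the two routines does the heavy lifting here: when
nicvf_tx_mbuf_locked() fails, it reports through the mbuf pointer whether the frame
survived. A NULL pointer means the mbuf was freed (unrecoverable), so nicvf_xmit_locked()
advances past it; otherwise the frame is put back at the head of the ring, the error is
propagated, and nicvf_snd_task() reschedules itself to try again. Note also that
ETHER_BPF_MTAP() taps the frame only after it has been accepted for transmission, while the
mbuf is still live under the TX lock.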
/* Initialize transmit queue */
@@ -1862,8 +1904,8 @@ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
 }
 
 /* Put an mbuf to a SQ for packet transfer. */
-int
-nicvf_tx_mbuf_locked(struct snd_queue *sq, struct mbuf *mbuf)
+static int
+nicvf_tx_mbuf_locked(struct snd_queue *sq, struct mbuf **mbufp)
 {
 	bus_dma_segment_t segs[256];
 	struct nicvf *nic;
@@ -1881,15 +1923,17 @@ nicvf_tx_mbuf_locked(struct snd_queue *sq, struct mbuf *mbuf)
 	snd_buff = &sq->snd_buff[sq->tail];
 
 	err = bus_dmamap_load_mbuf_sg(sq->snd_buff_dmat, snd_buff->dmap,
-	    mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
-	if (err != 0) {
+	    *mbufp, segs, &nsegs, BUS_DMA_NOWAIT);
+	if (__predict_false(err != 0)) {
 		/* ARM64TODO: Add mbuf defragmenting if we lack maps */
+		m_freem(*mbufp);
+		*mbufp = NULL;
 		return (err);
 	}
 
 	/* Set how many subdescriptors is required */
 	nic = sq->nic;
-	if (mbuf->m_pkthdr.tso_segsz != 0 && nic->hw_tso)
+	if ((*mbufp)->m_pkthdr.tso_segsz != 0 && nic->hw_tso)
 		subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
 	else
 		subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT + nsegs - 1;
@@ -1903,10 +1947,15 @@ nicvf_tx_mbuf_locked(struct snd_queue *sq, struct mbuf *mbuf)
 	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
 
 	/* Add SQ header subdesc */
-	err = nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, mbuf,
-	    mbuf->m_pkthdr.len);
+	err = nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, *mbufp,
+	    (*mbufp)->m_pkthdr.len);
 	if (err != 0) {
 		nicvf_put_sq_desc(sq, subdesc_cnt);
+		bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
+		if (err == ENOBUFS) {
+			m_freem(*mbufp);
+			*mbufp = NULL;
+		}
 		return (err);
 	}
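With these changes the error paths of nicvf_tx_mbuf_locked() have unambiguous mbuf
ownership: a DMA load failure or an ENOBUFS from nicvf_sq_add_hdr_subdesc() frees the mbuf
and NULLs *mbufp, signalling the caller that the frame is gone, while any other
header-subdescriptor error unloads the DMA map, releases the reserved descriptors and leaves
*mbufp intact so the caller can put the frame back on the ring.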

sys/dev/vnic/nicvf_queues.h

@@ -388,7 +388,7 @@ void nicvf_disable_intr(struct nicvf *, int, int);
 void nicvf_clear_intr(struct nicvf *, int, int);
 int nicvf_is_intr_enabled(struct nicvf *, int, int);
-int nicvf_tx_mbuf_locked(struct snd_queue *, struct mbuf *);
+int nicvf_xmit_locked(struct snd_queue *sq);
 
 /* Register access APIs */
 void nicvf_reg_write(struct nicvf *, uint64_t, uint64_t);