Improve VNIC performance on Tx path by immediate packet transmission

Don't postpone Tx if the Tx lock can be acquired now.
This gives 3x better performance on egress.

Reviewed by:   wma
Obtained from: Semihalf
Sponsored by:  Cavium
Differential Revision: https://reviews.freebsd.org/D5325
Zbigniew Bodek 2016-02-25 14:23:02 +00:00
parent 6dc234599f
commit 332c869727
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=296035
3 changed files with 11 additions and 3 deletions

@@ -663,11 +663,18 @@ nicvf_if_transmit(struct ifnet *ifp, struct mbuf *mbuf)
 			mbuf = mtmp;
 		}
 	}
-	err = drbr_enqueue(ifp, sq->br, mbuf);
-	if (err != 0)
-		return (err);
-
-	taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);
+	if (NICVF_TX_TRYLOCK(sq) != 0) {
+		err = nicvf_tx_mbuf_locked(sq, mbuf);
+		NICVF_TX_UNLOCK(sq);
+		return (err);
+	} else {
+		err = drbr_enqueue(ifp, sq->br, mbuf);
+		if (err != 0)
+			return (err);
+		taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);
+	}
 
 	return (0);
 }
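
The hunk above is the core of the change: a common if_transmit() fast-path idiom where the send-queue lock is taken opportunistically with a trylock and the mbuf is pushed to the hardware right away, while a contended lock falls back to the old behaviour of queueing on the buf_ring and scheduling the taskqueue that drains it. A rough userspace sketch of the same scheme, with pthreads standing in for NICVF_TX_TRYLOCK/NICVF_TX_UNLOCK, a toy array for the drbr buf_ring, and a worker thread for the snd_task handler (an illustration only, not driver code):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define QLEN 64

static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;   /* plays the role of the SQ Tx lock */
static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER; /* drbr is lock-free; this toy queue is not */
static int ring[QLEN];
static int head, tail;

/* Push one packet to "hardware"; caller holds tx_lock (cf. nicvf_tx_mbuf_locked()). */
static void
send_locked(int pkt)
{
	printf("sent packet %d\n", pkt);
}

/* Deferred path: drain queued packets under the Tx lock (cf. the snd_task handler). */
static void *
drain_worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&tx_lock);
	pthread_mutex_lock(&ring_lock);
	while (head != tail) {
		send_locked(ring[head]);
		head = (head + 1) % QLEN;
	}
	pthread_mutex_unlock(&ring_lock);
	pthread_mutex_unlock(&tx_lock);
	return (NULL);
}

/* Fast path first: transmit inline whenever the Tx lock is uncontended. */
static int
transmit(int pkt)
{
	if (pthread_mutex_trylock(&tx_lock) == 0) {
		send_locked(pkt);
		pthread_mutex_unlock(&tx_lock);
		return (0);
	}
	/* Lock is busy: enqueue and let the worker send the packet later. */
	pthread_mutex_lock(&ring_lock);
	if ((tail + 1) % QLEN == head) {
		pthread_mutex_unlock(&ring_lock);
		return (ENOBUFS);
	}
	ring[tail] = pkt;
	tail = (tail + 1) % QLEN;
	pthread_mutex_unlock(&ring_lock);
	return (0);
}

int
main(void)
{
	pthread_t worker;

	transmit(1);			/* lock free: sent inline */
	pthread_mutex_lock(&tx_lock);	/* pretend another CPU holds the Tx lock */
	transmit(2);			/* lock busy: deferred to the worker */
	pthread_mutex_unlock(&tx_lock);
	pthread_create(&worker, NULL, drain_worker, NULL);
	pthread_join(worker, NULL);
	return (0);
}

Under light load a packet never takes the taskqueue detour, which is presumably where the 3x egress improvement quoted in the commit message comes from; under contention the behaviour degrades to exactly the old enqueue-and-defer path.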

@@ -98,7 +98,6 @@ __FBSDID("$FreeBSD$");
 MALLOC_DECLARE(M_NICVF);
 
 static void nicvf_free_snd_queue(struct nicvf *, struct snd_queue *);
-static int nicvf_tx_mbuf_locked(struct snd_queue *, struct mbuf *);
 static struct mbuf * nicvf_get_rcv_mbuf(struct nicvf *, struct cqe_rx_t *);
 static void nicvf_sq_disable(struct nicvf *, int);
 static void nicvf_sq_enable(struct nicvf *, struct snd_queue *, int);
@@ -1856,7 +1855,7 @@ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
 }
 
 /* Put an mbuf to a SQ for packet transfer. */
-static int
+int
 nicvf_tx_mbuf_locked(struct snd_queue *sq, struct mbuf *mbuf)
 {
 	bus_dma_segment_t segs[256];

@@ -385,6 +385,8 @@ void nicvf_disable_intr(struct nicvf *, int, int);
 void nicvf_clear_intr(struct nicvf *, int, int);
 int nicvf_is_intr_enabled(struct nicvf *, int, int);
 
+int nicvf_tx_mbuf_locked(struct snd_queue *, struct mbuf *);
+
 /* Register access APIs */
 void nicvf_reg_write(struct nicvf *, uint64_t, uint64_t);
 uint64_t nicvf_reg_read(struct nicvf *, uint64_t);
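
The remaining two hunks are the plumbing for that fast path: nicvf_tx_mbuf_locked() loses its static qualifier and its file-local prototype, and the prototype is exported from the shared header instead, so nicvf_if_transmit() can call it from outside the file that defines it.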