if_vtnet: Move the Tx interrupt threshold into the Txq structure
Reviewed by:	grehan (mentor)
Differential Revision:	https://reviews.freebsd.org/D27911
commit baa5234fbe
parent 05041794d0
--- a/sys/dev/virtio/network/if_vtnet.c
+++ b/sys/dev/virtio/network/if_vtnet.c
@@ -140,6 +140,7 @@ static void vtnet_rx_vq_process(struct vtnet_rxq *rxq, int tries);
 static void vtnet_rx_vq_intr(void *);
 static void vtnet_rxq_tq_intr(void *, int);
 
+static int vtnet_txq_intr_threshold(struct vtnet_txq *);
 static int vtnet_txq_below_threshold(struct vtnet_txq *);
 static int vtnet_txq_notify(struct vtnet_txq *);
 static void vtnet_txq_free_mbufs(struct vtnet_txq *);
@@ -219,7 +220,6 @@ static void vtnet_set_macaddr(struct vtnet_softc *);
 static void vtnet_attached_set_macaddr(struct vtnet_softc *);
 static void vtnet_vlan_tag_remove(struct mbuf *);
 static void vtnet_set_rx_process_limit(struct vtnet_softc *);
-static void vtnet_set_tx_intr_threshold(struct vtnet_softc *);
 
 static void vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *,
             struct sysctl_oid_list *, struct vtnet_rxq *);
@@ -1090,7 +1090,6 @@ vtnet_setup_interface(struct vtnet_softc *sc)
 	}
 
 	vtnet_set_rx_process_limit(sc);
-	vtnet_set_tx_intr_threshold(sc);
 
 	DEBUGNET_SET(ifp, vtnet);
 
@@ -2024,15 +2023,42 @@ vtnet_rxq_tq_intr(void *xrxq, int pending)
 }
 
 static int
-vtnet_txq_below_threshold(struct vtnet_txq *txq)
+vtnet_txq_intr_threshold(struct vtnet_txq *txq)
 {
 	struct vtnet_softc *sc;
-	struct virtqueue *vq;
+	int threshold;
 
 	sc = txq->vtntx_sc;
+
+	/*
+	 * The Tx interrupt is disabled until the queue free count falls
+	 * below our threshold. Completed frames are drained from the Tx
+	 * virtqueue before transmitting new frames and in the watchdog
+	 * callout, so the frequency of Tx interrupts is greatly reduced,
+	 * at the cost of not freeing mbufs as quickly as they otherwise
+	 * would be.
+	 */
+	threshold = virtqueue_size(txq->vtntx_vq) / 4;
+
+	/*
+	 * Without indirect descriptors, leave enough room for the most
+	 * segments we handle.
+	 */
+	if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) == 0 &&
+	    threshold < sc->vtnet_tx_nsegs)
+		threshold = sc->vtnet_tx_nsegs;
+
+	return (threshold);
+}
+
+static int
+vtnet_txq_below_threshold(struct vtnet_txq *txq)
+{
+	struct virtqueue *vq;
+
 	vq = txq->vtntx_vq;
 
-	return (virtqueue_nfree(vq) <= sc->vtnet_tx_intr_thresh);
+	return (virtqueue_nfree(vq) <= txq->vtntx_intr_threshold);
 }
 
 static int
@@ -3058,6 +3084,7 @@ vtnet_init_tx_queues(struct vtnet_softc *sc)
 	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
 		txq = &sc->vtnet_txqs[i];
 		txq->vtntx_watchdog = 0;
+		txq->vtntx_intr_threshold = vtnet_txq_intr_threshold(txq);
 #ifdef DEV_NETMAP
 		netmap_reset(NA(sc->vtnet_ifp), NR_TX, i, 0);
 #endif /* DEV_NETMAP */
@@ -3749,36 +3776,6 @@ vtnet_set_rx_process_limit(struct vtnet_softc *sc)
 	sc->vtnet_rx_process_limit = limit;
 }
 
-static void
-vtnet_set_tx_intr_threshold(struct vtnet_softc *sc)
-{
-	int size, thresh;
-
-	size = virtqueue_size(sc->vtnet_txqs[0].vtntx_vq);
-
-	/*
-	 * The Tx interrupt is disabled until the queue free count falls
-	 * below our threshold. Completed frames are drained from the Tx
-	 * virtqueue before transmitting new frames and in the watchdog
-	 * callout, so the frequency of Tx interrupts is greatly reduced,
-	 * at the cost of not freeing mbufs as quickly as they otherwise
-	 * would be.
-	 *
-	 * N.B. We assume all the Tx queues are the same size.
-	 */
-	thresh = size / 4;
-
-	/*
-	 * Without indirect descriptors, leave enough room for the most
-	 * segments we handle.
-	 */
-	if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) == 0 &&
-	    thresh < sc->vtnet_tx_nsegs)
-		thresh = sc->vtnet_tx_nsegs;
-
-	sc->vtnet_tx_intr_thresh = thresh;
-}
-
 static void
 vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx,
     struct sysctl_oid_list *child, struct vtnet_rxq *rxq)
--- a/sys/dev/virtio/network/if_vtnetvar.h
+++ b/sys/dev/virtio/network/if_vtnetvar.h
@@ -112,6 +112,7 @@ struct vtnet_txq {
 #endif
 	int			 vtntx_id;
 	int			 vtntx_watchdog;
+	int			 vtntx_intr_threshold;
 	struct vtnet_txq_stats	 vtntx_stats;
 	struct taskqueue	*vtntx_tq;
 	struct task		 vtntx_intrtask;
@@ -161,7 +162,6 @@ struct vtnet_softc {
 	int			 vtnet_rx_nsegs;
 	int			 vtnet_rx_nmbufs;
 	int			 vtnet_rx_clustersz;
-	int			 vtnet_tx_intr_thresh;
 	int			 vtnet_tx_nsegs;
 	int			 vtnet_if_flags;
 	int			 vtnet_max_mtu;
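Note: the comment in vtnet_txq_intr_threshold() above states the mitigation policy in prose: keep the Tx interrupt disabled until free descriptors run low, draining completed frames opportunistically in the meantime. As a rough, self-contained illustration only, the userspace C program below simulates that policy with plain integers. Every sim_* name is hypothetical; it merely mirrors the roles of virtqueue_size(), virtqueue_nfree(), and the new vtntx_intr_threshold field, and it omits the no-indirect-descriptor clamp to vtnet_tx_nsegs for brevity. It is a sketch of the idea, not driver code.

#include <stdio.h>

/* Hypothetical stand-ins for the driver state (illustration only). */
struct sim_txq {
        int vq_size;            /* total descriptors in the Tx virtqueue */
        int vq_nfree;           /* descriptors currently free */
        int intr_threshold;     /* plays the role of vtntx_intr_threshold */
};

/* Mirrors the idea of vtnet_txq_intr_threshold(): 1/4 of the queue size. */
static int
sim_intr_threshold(const struct sim_txq *txq)
{
        return (txq->vq_size / 4);
}

/* Mirrors vtnet_txq_below_threshold(): interrupt wanted once free space is scarce. */
static int
sim_below_threshold(const struct sim_txq *txq)
{
        return (txq->vq_nfree <= txq->intr_threshold);
}

int
main(void)
{
        struct sim_txq txq = { .vq_size = 256, .vq_nfree = 256 };
        int nfree;

        /* Per-queue initialization, as vtnet_init_tx_queues() now does it. */
        txq.intr_threshold = sim_intr_threshold(&txq);

        /* Fill the queue and show where the Tx interrupt would be re-enabled. */
        for (nfree = 256; nfree >= 0; nfree -= 32) {
                txq.vq_nfree = nfree;
                printf("nfree=%3d -> %s\n", nfree,
                    sim_below_threshold(&txq) ?
                    "below threshold: enable Tx interrupt" :
                    "above threshold: leave Tx interrupt disabled");
        }
        return (0);
}

With a 256-entry queue the threshold comes out to 64, so in this simulation the interrupt would only be re-enabled once 64 or fewer descriptors remain free; storing that value per queue (rather than once in the softc) is what lets differently sized Tx queues each get their own threshold.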