cxgbe: Add tunables to control the number of LRO entries and the number
of rx mbufs that should be presorted before LRO.  There is no change in
default behavior.

MFC after:	1 week
Sponsored by:	Chelsio Communications
np 2017-04-17 09:00:20 +00:00
parent 1edfaa0706
commit 9ad04f7ac0
2 changed files with 58 additions and 10 deletions
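
Both knobs are boot-time loader tunables (TUNABLE_INT), so they would normally be
set from /boot/loader.conf before the driver attaches.  A minimal sketch with
purely illustrative values; the defaults (TCP_LRO_ENTRIES and 0) keep the
current behavior:

# /boot/loader.conf -- illustrative values, not recommendations
hw.cxgbe.lro_entries="16"	# LRO entries in each rx queue's lro_ctrl
hw.cxgbe.lro_mbufs="512"	# presort up to 512 rx mbufs before LRO; 0 disables presorting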

sys/dev/cxgbe/adapter.h

@@ -322,6 +322,7 @@ enum {
 	IQ_HAS_FL = (1 << 1), /* iq associated with a freelist */
 	IQ_INTR = (1 << 2), /* iq takes direct interrupt */
 	IQ_LRO_ENABLED = (1 << 3), /* iq is an eth rxq with LRO enabled */
+	IQ_ADJ_CREDIT = (1 << 4), /* hw is off by 1 credit for this iq */
 	/* iq state */
 	IQS_DISABLED = 0,

sys/dev/cxgbe/t4_sge.c

@@ -157,6 +157,18 @@ TUNABLE_INT("hw.cxgbe.safest_rx_cluster", &safest_rx_cluster);
 static int tscale = 1;
 TUNABLE_INT("hw.cxgbe.tscale", &tscale);
+/*
+ * Number of LRO entries in the lro_ctrl structure per rx queue.
+ */
+static int lro_entries = TCP_LRO_ENTRIES;
+TUNABLE_INT("hw.cxgbe.lro_entries", &lro_entries);
+/*
+ * This enables presorting of frames before they're fed into tcp_lro_rx.
+ */
+static int lro_mbufs = 0;
+TUNABLE_INT("hw.cxgbe.lro_mbufs", &lro_mbufs);
 struct txpkts {
 	u_int wr_type; /* type 0 or type 1 */
 	u_int npkt; /* # of packets in this work request */
@@ -1380,6 +1392,13 @@ t4_vi_intr(void *arg)
 	t4_intr(irq->rxq);
 }
+static inline int
+sort_before_lro(struct lro_ctrl *lro)
+{
+	return (lro->lro_mbuf_max != 0);
+}
 /*
  * Deals with anything and everything on the given ingress queue.
  */
@@ -1399,6 +1418,7 @@ service_iq(struct sge_iq *iq, int budget)
 	STAILQ_HEAD(, sge_iq) iql = STAILQ_HEAD_INITIALIZER(iql);
 #if defined(INET) || defined(INET6)
 	const struct timeval lro_timeout = {0, sc->lro_timeout};
+	struct lro_ctrl *lro = &rxq->lro;
 #endif
 	KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq));
@@ -1413,6 +1433,23 @@ service_iq(struct sge_iq *iq, int budget)
 		fl_hw_cidx = 0; /* to silence gcc warning */
 	}
+#if defined(INET) || defined(INET6)
+	if (iq->flags & IQ_ADJ_CREDIT) {
+		MPASS(sort_before_lro(lro));
+		iq->flags &= ~IQ_ADJ_CREDIT;
+		if ((d->rsp.u.type_gen & F_RSPD_GEN) != iq->gen) {
+			tcp_lro_flush_all(lro);
+			t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(1) |
+			    V_INGRESSQID((u32)iq->cntxt_id) |
+			    V_SEINTARM(iq->intr_params));
+			return (0);
+		}
+		ndescs = 1;
+	}
+#else
+	MPASS((iq->flags & IQ_ADJ_CREDIT) == 0);
+#endif
 	/*
 	 * We always come back and check the descriptor ring for new indirect
 	 * interrupts and other responses after running a single handler.
@@ -1524,8 +1561,9 @@ service_iq(struct sge_iq *iq, int budget)
 #if defined(INET) || defined(INET6)
 				if (iq->flags & IQ_LRO_ENABLED &&
+				    !sort_before_lro(lro) &&
 				    sc->lro_timeout != 0) {
-					tcp_lro_flush_inactive(&rxq->lro,
+					tcp_lro_flush_inactive(lro,
 					    &lro_timeout);
 				}
 #endif
@@ -1565,9 +1603,14 @@ service_iq(struct sge_iq *iq, int budget)
 #if defined(INET) || defined(INET6)
 	if (iq->flags & IQ_LRO_ENABLED) {
-		struct lro_ctrl *lro = &rxq->lro;
-		tcp_lro_flush_all(lro);
+		if (ndescs > 0 && lro->lro_mbuf_count > 8) {
+			MPASS(sort_before_lro(lro));
+			/* hold back one credit and don't flush LRO state */
+			iq->flags |= IQ_ADJ_CREDIT;
+			ndescs--;
+		} else {
+			tcp_lro_flush_all(lro);
+		}
 	}
 #endif
@@ -1856,10 +1899,14 @@ t4_eth_rx(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m0)
 	}
 #if defined(INET) || defined(INET6)
-	if (iq->flags & IQ_LRO_ENABLED &&
-	    tcp_lro_rx(lro, m0, 0) == 0) {
-		/* queued for LRO */
-	} else
+	if (iq->flags & IQ_LRO_ENABLED) {
+		if (sort_before_lro(lro)) {
+			tcp_lro_queue_mbuf(lro, m0);
+			return (0); /* queued for sort, then LRO */
+		}
+		if (tcp_lro_rx(lro, m0, 0) == 0)
+			return (0); /* queued for LRO */
+	}
 #endif
 	ifp->if_input(ifp, m0);
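
For context, the t4_eth_rx() hunk above chooses between the two LRO entry points
in <netinet/tcp_lro.h>.  A minimal stand-alone sketch of that choice, assuming a
hypothetical helper name and caller (this is only the pattern, not cxgbe code):

#include <sys/param.h>
#include <sys/mbuf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <netinet/tcp_lro.h>

/*
 * Hand one received frame to LRO.  With presorting enabled (lro_mbuf_max != 0,
 * i.e. the queue was initialized with a nonzero lro_mbufs) the mbuf is only
 * queued here; it is sorted and aggregated later when tcp_lro_flush_all() runs.
 * Without presorting, aggregation is attempted immediately via tcp_lro_rx().
 */
static void
rx_deliver_sketch(struct lro_ctrl *lro, struct ifnet *ifp, struct mbuf *m)
{

	if (lro->lro_mbuf_max != 0) {
		tcp_lro_queue_mbuf(lro, m);	/* queued for sort, then LRO */
		return;
	}
	if (tcp_lro_rx(lro, m, 0) == 0)
		return;				/* queued for LRO */
	ifp->if_input(ifp, m);			/* LRO declined the frame */
}

Deferring frames via tcp_lro_queue_mbuf() lets tcp_lro_flush_all() sort them so
that packets of the same flow reach the aggregation code back to back, which is
the point of presorting.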
@@ -3050,10 +3097,10 @@ alloc_rxq(struct vi_info *vi, struct sge_rxq *rxq, int intr_idx, int idx,
 	FL_UNLOCK(&rxq->fl);
 #if defined(INET) || defined(INET6)
-	rc = tcp_lro_init(&rxq->lro);
+	rc = tcp_lro_init_args(&rxq->lro, vi->ifp, lro_entries, lro_mbufs);
 	if (rc != 0)
 		return (rc);
-	rxq->lro.ifp = vi->ifp; /* also indicates LRO init'ed */
+	MPASS(rxq->lro.ifp == vi->ifp); /* also indicates LRO init'ed */
 	if (vi->ifp->if_capenable & IFCAP_LRO)
 		rxq->iq.flags |= IQ_LRO_ENABLED;
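
The alloc_rxq() hunk switches queue setup to tcp_lro_init_args() so the
per-queue limits come from the new tunables.  A minimal sketch of the
init/teardown pairing, with hypothetical function names; the teardown side is
not part of this diff:

#include <sys/param.h>
#include <net/if.h>
#include <net/if_var.h>
#include <netinet/tcp_lro.h>

static int lro_entries = TCP_LRO_ENTRIES;	/* hw.cxgbe.lro_entries */
static int lro_mbufs = 0;			/* hw.cxgbe.lro_mbufs, 0 = no presort */

/* Set up LRO state for one rx queue; lro_mbufs == 0 keeps the old behavior. */
static int
rxq_lro_setup_sketch(struct lro_ctrl *lro, struct ifnet *ifp)
{
	int rc;

	rc = tcp_lro_init_args(lro, ifp, lro_entries, lro_mbufs);
	if (rc != 0)
		return (rc);
	/* tcp_lro_init_args() records ifp, which also marks LRO as initialized. */
	return (0);
}

/* Matching teardown (done elsewhere in the driver). */
static void
rxq_lro_teardown_sketch(struct lro_ctrl *lro)
{

	tcp_lro_free(lro);
}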