tcp/lro: Use tcp_lro_flush_all in device drivers to avoid code duplication
And factor out tcp_lro_rx_done, which deduplicates the same logic in
netinet/tcp_lro.c.

Reviewed by:	gallatin (1st version), hps, zbb, np, Dexuan Cui <decui microsoft com>
Sponsored by:	Microsoft OSTC
Differential Revision:	https://reviews.freebsd.org/D5725
commit d0428dd51c
parent cff4748967
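The change is mechanical across all drivers: each one used to drain the
lro_active list by hand, and that loop is now a single call. A minimal
sketch of the before/after pattern (here `lc` stands for any driver's
struct lro_ctrl pointer; this is not taken verbatim from one driver):

        /* Before: each driver drained the active-session list itself. */
        struct lro_entry *queued;

        while ((queued = SLIST_FIRST(&lc->lro_active)) != NULL) {
                SLIST_REMOVE_HEAD(&lc->lro_active, next);
                tcp_lro_flush(lc, queued);
        }

        /* After: one call does the same drain; per the tcp_lro.c hunk
         * below, it also first flushes any mbufs queued for sorting. */
        tcp_lro_flush_all(lc);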
@@ -2976,11 +2976,7 @@ process_responses(adapter_t *adap, struct sge_qset *qs, int budget)
 #if defined(INET6) || defined(INET)
        /* Flush LRO */
-       while (!SLIST_EMPTY(&lro_ctrl->lro_active)) {
-               struct lro_entry *queued = SLIST_FIRST(&lro_ctrl->lro_active);
-               SLIST_REMOVE_HEAD(&lro_ctrl->lro_active, next);
-               tcp_lro_flush(lro_ctrl, queued);
-       }
+       tcp_lro_flush_all(lro_ctrl);
 #endif
 
        if (sleeping)
@@ -1397,13 +1397,8 @@ process_iql:
 #if defined(INET) || defined(INET6)
        if (iq->flags & IQ_LRO_ENABLED) {
                struct lro_ctrl *lro = &rxq->lro;
-               struct lro_entry *l;
 
-               while (!SLIST_EMPTY(&lro->lro_active)) {
-                       l = SLIST_FIRST(&lro->lro_active);
-                       SLIST_REMOVE_HEAD(&lro->lro_active, next);
-                       tcp_lro_flush(lro, l);
-               }
+               tcp_lro_flush_all(lro);
        }
 #endif
 
@@ -4974,7 +4974,6 @@ igb_rxeof(struct igb_queue *que, int count, int *done)
        struct rx_ring          *rxr = que->rxr;
        struct ifnet            *ifp = adapter->ifp;
        struct lro_ctrl         *lro = &rxr->lro;
-       struct lro_entry        *queued;
        int                     i, processed = 0, rxdone = 0;
        u32                     ptype, staterr = 0;
        union e1000_adv_rx_desc *cur;
@@ -5202,10 +5201,7 @@ next_desc:
        /*
         * Flush any outstanding LRO work
         */
-       while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
-               SLIST_REMOVE_HEAD(&lro->lro_active, next);
-               tcp_lro_flush(lro, queued);
-       }
+       tcp_lro_flush_all(lro);
 
        if (done != NULL)
                *done += rxdone;
@@ -778,13 +778,8 @@ netvsc_channel_rollup(struct hv_vmbus_channel *chan)
        struct hn_tx_ring *txr = chan->hv_chan_txr;
 #if defined(INET) || defined(INET6)
        struct hn_rx_ring *rxr = chan->hv_chan_rxr;
-       struct lro_ctrl *lro = &rxr->hn_lro;
-       struct lro_entry *queued;
 
-       while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
-               SLIST_REMOVE_HEAD(&lro->lro_active, next);
-               tcp_lro_flush(lro, queued);
-       }
+       tcp_lro_flush_all(&rxr->hn_lro);
 #endif
 
        /*
@@ -1753,7 +1753,6 @@ ixgbe_rxeof(struct ix_queue *que)
        struct rx_ring          *rxr = que->rxr;
        struct ifnet            *ifp = adapter->ifp;
        struct lro_ctrl         *lro = &rxr->lro;
-       struct lro_entry        *queued;
        int                     i, nextp, processed = 0;
        u32                     staterr = 0;
        u32                     count = adapter->rx_process_limit;
@@ -2003,10 +2002,7 @@ next_desc:
        /*
         * Flush any outstanding LRO work
         */
-       while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
-               SLIST_REMOVE_HEAD(&lro->lro_active, next);
-               tcp_lro_flush(lro, queued);
-       }
+       tcp_lro_flush_all(lro);
 
        IXGBE_RX_UNLOCK(rxr);
 
@@ -1511,7 +1511,6 @@ ixl_rxeof(struct ixl_queue *que, int count)
        struct ifnet            *ifp = vsi->ifp;
 #if defined(INET6) || defined(INET)
        struct lro_ctrl         *lro = &rxr->lro;
-       struct lro_entry        *queued;
 #endif
        int                     i, nextp, processed = 0;
        union i40e_rx_desc      *cur;
@@ -1735,10 +1734,7 @@ next_desc:
        /*
         * Flush any outstanding LRO work
         */
-       while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
-               SLIST_REMOVE_HEAD(&lro->lro_active, next);
-               tcp_lro_flush(lro, queued);
-       }
+       tcp_lro_flush_all(lro);
 #endif
 
        IXL_RX_UNLOCK(rxr);
@@ -322,9 +322,6 @@ mlx5e_decompress_cqes(struct mlx5e_cq *cq)
 static int
 mlx5e_poll_rx_cq(struct mlx5e_rq *rq, int budget)
 {
-#ifndef HAVE_TURBO_LRO
-       struct lro_entry *queued;
-#endif
        int i;
 
        for (i = 0; i < budget; i++) {
@@ -399,10 +396,7 @@ wq_ll_pop:
        /* ensure cq space is freed before enabling more cqes */
        wmb();
 #ifndef HAVE_TURBO_LRO
-       while ((queued = SLIST_FIRST(&rq->lro.lro_active)) != NULL) {
-               SLIST_REMOVE_HEAD(&rq->lro.lro_active, next);
-               tcp_lro_flush(&rq->lro, queued);
-       }
+       tcp_lro_flush_all(&rq->lro);
 #endif
        return (i);
 }
@@ -2819,11 +2819,7 @@ mxge_clean_rx_done(struct mxge_slice_state *ss)
                break;
        }
 #if defined(INET) || defined (INET6)
-       while (!SLIST_EMPTY(&ss->lc.lro_active)) {
-               struct lro_entry *lro = SLIST_FIRST(&ss->lc.lro_active);
-               SLIST_REMOVE_HEAD(&ss->lc.lro_active, next);
-               tcp_lro_flush(&ss->lc, lro);
-       }
+       tcp_lro_flush_all(&ss->lc);
 #endif
 }
 
@@ -1497,16 +1497,12 @@ static void
 oce_rx_flush_lro(struct oce_rq *rq)
 {
        struct lro_ctrl *lro = &rq->lro;
-       struct lro_entry *queued;
        POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
 
        if (!IF_LRO_ENABLED(sc))
                return;
 
-       while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
-               SLIST_REMOVE_HEAD(&lro->lro_active, next);
-               tcp_lro_flush(lro, queued);
-       }
+       tcp_lro_flush_all(lro);
        rq->lro_pkts_queued = 0;
 
        return;
@@ -267,7 +267,6 @@ qla_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
        uint32_t        comp_idx, desc_count;
        q80_stat_desc_t *sdesc;
        struct lro_ctrl *lro;
-       struct lro_entry *queued;
        uint32_t ret = 0;
 
        dev = ha->pci_dev;
@@ -324,11 +323,7 @@ qla_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
                }
        }
 
-       while((!SLIST_EMPTY(&lro->lro_active))) {
-               queued = SLIST_FIRST(&lro->lro_active);
-               SLIST_REMOVE_HEAD(&lro->lro_active, next);
-               tcp_lro_flush(lro, queued);
-       }
+       tcp_lro_flush_all(lro);
 
        if (hw->sds[sds_idx].sdsr_next != comp_idx) {
                QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx, comp_idx);
@@ -232,7 +232,6 @@ qls_cq_isr(qla_host_t *ha, uint32_t cq_idx)
        uint32_t        i, cq_comp_idx;
        int             ret = 0, tx_comp_done = 0;
        struct lro_ctrl *lro;
-       struct lro_entry *queued;
 
        cq_b = ha->rx_ring[cq_idx].cq_base_vaddr;
        lro = &ha->rx_ring[cq_idx].lro;
@@ -287,11 +286,7 @@ qls_cq_isr(qla_host_t *ha, uint32_t cq_idx)
                }
        }
 
-       while((!SLIST_EMPTY(&lro->lro_active))) {
-               queued = SLIST_FIRST(&lro->lro_active);
-               SLIST_REMOVE_HEAD(&lro->lro_active, next);
-               tcp_lro_flush(lro, queued);
-       }
+       tcp_lro_flush_all(lro);
 
        ha->rx_ring[cq_idx].cq_next = cq_comp_idx;
 
@@ -746,7 +746,6 @@ nicvf_cq_intr_handler(struct nicvf *nic, uint8_t cq_idx)
        struct rcv_queue *rq;
        struct cqe_rx_t *cq_desc;
        struct lro_ctrl *lro;
-       struct lro_entry *queued;
        int rq_idx;
        int cmp_err;
 
@@ -831,10 +830,7 @@ out:
        rq_idx = cq_idx;
        rq = &nic->qs->rq[rq_idx];
        lro = &rq->lro;
-       while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
-               SLIST_REMOVE_HEAD(&lro->lro_active, next);
-               tcp_lro_flush(lro, queued);
-       }
+       tcp_lro_flush_all(lro);
 
        NICVF_CMP_UNLOCK(cq);
 
@@ -996,7 +996,6 @@ vxge_rx_compl(vxge_hal_vpath_h vpath_handle, vxge_hal_rxd_h rxdh,
        vxge_vpath_t *vpath = (vxge_vpath_t *) userdata;
        vxge_dev_t *vdev = vpath->vdev;
 
-       struct lro_entry *queued = NULL;
        struct lro_ctrl *lro = &vpath->lro;
 
        /* get the interface pointer */
@@ -1083,12 +1082,8 @@ vxge_rx_compl(vxge_hal_vpath_h vpath_handle, vxge_hal_rxd_h rxdh,
            &dtr_priv, &t_code) == VXGE_HAL_OK);
 
        /* Flush any outstanding LRO work */
-       if (vpath->lro_enable && vpath->lro.lro_cnt) {
-               while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
-                       SLIST_REMOVE_HEAD(&lro->lro_active, next);
-                       tcp_lro_flush(lro, queued);
-               }
-       }
+       if (vpath->lro_enable && vpath->lro.lro_cnt)
+               tcp_lro_flush_all(lro);
 
        return (status);
 }
@@ -1202,7 +1202,6 @@ xn_rxeof(struct netfront_rxq *rxq)
        struct netfront_info *np = rxq->info;
 #if (defined(INET) || defined(INET6))
        struct lro_ctrl *lro = &rxq->lro;
-       struct lro_entry *queued;
 #endif
        struct netfront_rx_info rinfo;
        struct netif_rx_response *rx = &rinfo.rx;
@@ -1296,11 +1295,7 @@ xn_rxeof(struct netfront_rxq *rxq)
        /*
         * Flush any outstanding LRO work
         */
-       while (!SLIST_EMPTY(&lro->lro_active)) {
-               queued = SLIST_FIRST(&lro->lro_active);
-               SLIST_REMOVE_HEAD(&lro->lro_active, next);
-               tcp_lro_flush(lro, queued);
-       }
+       tcp_lro_flush_all(lro);
 #endif
 
        xn_alloc_rx_buffers(rxq);
@@ -67,6 +67,8 @@ static MALLOC_DEFINE(M_LRO, "LRO", "LRO control structures");
 #define TCP_LRO_INVALID_CSUM   0x0000
 #endif
 
+static void    tcp_lro_rx_done(struct lro_ctrl *lc);
+
 int
 tcp_lro_init(struct lro_ctrl *lc)
 {
@@ -226,6 +228,17 @@ tcp_lro_rx_csum_fixup(struct lro_entry *le, void *l3hdr, struct tcphdr *th,
 }
 #endif
 
+static void
+tcp_lro_rx_done(struct lro_ctrl *lc)
+{
+       struct lro_entry *le;
+
+       while ((le = SLIST_FIRST(&lc->lro_active)) != NULL) {
+               SLIST_REMOVE_HEAD(&lc->lro_active, next);
+               tcp_lro_flush(lc, le);
+       }
+}
+
 void
 tcp_lro_flush_inactive(struct lro_ctrl *lc, const struct timeval *timeout)
 {
@@ -362,13 +375,12 @@ done:
 void
 tcp_lro_flush_all(struct lro_ctrl *lc)
 {
-       struct lro_entry *le;
        uint32_t hashtype;
        uint32_t flowid;
        unsigned x;
 
        /* check if no mbufs to flush */
-       if (__predict_false(lc->lro_mbuf_count == 0))
+       if (lc->lro_mbuf_count == 0)
                goto done;
 
        /* sort all mbufs according to stream */
@@ -390,10 +402,7 @@ tcp_lro_flush_all(struct lro_ctrl *lc)
                        hashtype = M_HASHTYPE_GET(mb);
 
                        /* flush active streams */
-                       while ((le = SLIST_FIRST(&lc->lro_active)) != NULL) {
-                               SLIST_REMOVE_HEAD(&lc->lro_active, next);
-                               tcp_lro_flush(lc, le);
-                       }
+                       tcp_lro_rx_done(lc);
                }
 #ifdef TCP_LRO_RESET_SEQUENCE
                /* reset sequence number */
@@ -409,10 +418,8 @@ tcp_lro_flush_all(struct lro_ctrl *lc)
        }
 done:
        /* flush active streams */
-       while ((le = SLIST_FIRST(&lc->lro_active)) != NULL) {
-               SLIST_REMOVE_HEAD(&lc->lro_active, next);
-               tcp_lro_flush(lc, le);
-       }
+       tcp_lro_rx_done(lc);
+
        lc->lro_mbuf_count = 0;
 }
 
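With the flush loop centralized, a driver RX path reduces to the shape
below. This is an illustrative sketch only, not code from this commit:
the foo_* names, the queue layout, and foo_next_rx_mbuf() are
hypothetical; tcp_lro_rx() and tcp_lro_flush_all() are the real
netinet/tcp_lro.c entry points.

        static void
        foo_rxeof(struct foo_queue *que)
        {
                struct lro_ctrl *lro = &que->lro;       /* hypothetical layout */
                struct mbuf *m;

                while ((m = foo_next_rx_mbuf(que)) != NULL) {
                        /* Try LRO aggregation; on failure, input directly. */
                        if (tcp_lro_rx(lro, m, 0) != 0)
                                (*que->ifp->if_input)(que->ifp, m);
                }
                /* One call drains queued mbufs and all active sessions. */
                tcp_lro_flush_all(lro);
        }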
@@ -561,9 +561,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
        struct mbuf *mb;
        struct mlx4_cq *mcq = &cq->mcq;
        struct mlx4_cqe *buf = cq->buf;
-#ifdef INET
-       struct lro_entry *queued;
-#endif
        int index;
        unsigned int length;
        int polled = 0;
@@ -669,10 +666,7 @@ next:
 /* Flush all pending IP reassembly sessions */
 out:
 #ifdef INET
-       while ((queued = SLIST_FIRST(&ring->lro.lro_active)) != NULL) {
-               SLIST_REMOVE_HEAD(&ring->lro.lro_active, next);
-               tcp_lro_flush(&ring->lro, queued);
-       }
+       tcp_lro_flush_all(&ring->lro);
 #endif
        AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
        mcq->cons_index = cons_index;