From 7127e6acf095488ca05c7f79eae1a3a35b43dcdf Mon Sep 17 00:00:00 2001
From: Navdeep Parhar <np@FreeBSD.org>
Date: Wed, 28 Aug 2013 23:00:34 +0000
Subject: [PATCH] Merge r254336 from user/np/cxl_tuning.

Add a last-modified timestamp to each LRO entry and provide an interface
to flush all inactive entries.  Drivers decide when to flush and what the
inactivity threshold should be.

Network drivers that process an rx queue to completion can enter a
livelock-type situation when the rate at which packets are received
reaches equilibrium with the rate at which the rx thread is processing
them.  When this happens, the final LRO flush (normally done when the rx
routine finishes) does not occur, and pure ACKs and segments with total
payload < 64K can get stuck in an LRO entry.  The symptom is that the
performance of tx-mostly TCP connections falls off a cliff during heavy,
unrelated rx on the interface.

Flushing only the inactive LRO entries works better than any of these
alternatives that I tried:
- don't LRO pure ACKs
- flush _all_ LRO entries periodically (every 'x' microseconds or every
  'y' descriptors)
- stop rx processing in the driver periodically and schedule the
  remaining work for later.

Reviewed by:	andre
---
 sys/netinet/tcp_lro.c | 23 ++++++++++++++++++++++-
 sys/netinet/tcp_lro.h |  4 ++++
 2 files changed, 26 insertions(+), 1 deletion(-)

diff --git a/sys/netinet/tcp_lro.c b/sys/netinet/tcp_lro.c
index 032d47c2fe7f..63a6bbafc878 100644
--- a/sys/netinet/tcp_lro.c
+++ b/sys/netinet/tcp_lro.c
@@ -193,6 +193,25 @@ tcp_lro_rx_csum_fixup(struct lro_entry *le, void *l3hdr, struct tcphdr *th,
 }
 #endif
 
+void
+tcp_lro_flush_inactive(struct lro_ctrl *lc, const struct timeval *timeout)
+{
+	struct lro_entry *le, *le_tmp;
+	struct timeval tv;
+
+	if (SLIST_EMPTY(&lc->lro_active))
+		return;
+
+	getmicrotime(&tv);
+	timevalsub(&tv, timeout);
+	SLIST_FOREACH_SAFE(le, &lc->lro_active, next, le_tmp) {
+		if (timevalcmp(&tv, &le->mtime, >=)) {
+			SLIST_REMOVE(&lc->lro_active, le, lro_entry, next);
+			tcp_lro_flush(lc, le);
+		}
+	}
+}
+
 void
 tcp_lro_flush(struct lro_ctrl *lc, struct lro_entry *le)
 {
@@ -543,7 +562,8 @@ tcp_lro_rx(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum)
 		if (le->p_len > (65535 - lc->ifp->if_mtu)) {
 			SLIST_REMOVE(&lc->lro_active, le, lro_entry, next);
 			tcp_lro_flush(lc, le);
-		}
+		} else
+			getmicrotime(&le->mtime);
 
 		return (0);
 	}
@@ -556,6 +576,7 @@ tcp_lro_rx(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum)
 	le = SLIST_FIRST(&lc->lro_free);
 	SLIST_REMOVE_HEAD(&lc->lro_free, next);
 	SLIST_INSERT_HEAD(&lc->lro_active, le, next);
+	getmicrotime(&le->mtime);
 
 	/* Start filling in details. */
 	switch (eh_type) {
diff --git a/sys/netinet/tcp_lro.h b/sys/netinet/tcp_lro.h
index b3a501798aa5..ab6d74ac900b 100644
--- a/sys/netinet/tcp_lro.h
+++ b/sys/netinet/tcp_lro.h
@@ -30,6 +30,8 @@
 #ifndef _TCP_LRO_H_
 #define _TCP_LRO_H_
 
+#include <sys/time.h>
+
 struct lro_entry
 {
 	SLIST_ENTRY(lro_entry) next;
@@ -59,6 +61,7 @@ struct lro_entry
 	uint32_t		tsecr;
 	uint16_t		window;
 	uint16_t		timestamp;	/* flag, not a TCP hdr field. */
+	struct timeval		mtime;
 };
 SLIST_HEAD(lro_head, lro_entry);
 
@@ -83,6 +86,7 @@ struct lro_ctrl {
 
 int tcp_lro_init(struct lro_ctrl *);
 void tcp_lro_free(struct lro_ctrl *);
+void tcp_lro_flush_inactive(struct lro_ctrl *, const struct timeval *);
 void tcp_lro_flush(struct lro_ctrl *, struct lro_entry *);
 int tcp_lro_rx(struct lro_ctrl *, struct mbuf *, uint32_t);
 
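
Usage sketch (illustrative, not part of the patch): one way a driver's rx
routine might combine tcp_lro_rx(), the new tcp_lro_flush_inactive(), and
the usual end-of-run flush.  Every mydrv_* name, the rxq layout, the
64-packet cadence, and the 100us threshold are hypothetical placeholders
chosen for the example; only the tcp_lro_* interface and the lro_active
list come from the code above.

/*
 * Hypothetical driver rx path showing where tcp_lro_flush_inactive()
 * could be called.  The mydrv_* helpers and the constants are
 * placeholders, not part of any real driver.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/queue.h>
#include <sys/time.h>
#include <sys/mbuf.h>
#include <netinet/tcp_lro.h>

struct mydrv_rxq {
	struct lro_ctrl	lro;	/* per-queue LRO state, set up with tcp_lro_init() */
	/* ... descriptor ring, ifnet pointer, etc. ... */
};

/* Hypothetical helpers supplied elsewhere by the driver. */
struct mbuf *mydrv_next_rx_mbuf(struct mydrv_rxq *);
void mydrv_deliver(struct mydrv_rxq *, struct mbuf *);

/* Entries not updated within this interval are treated as inactive. */
static const struct timeval mydrv_lro_timeout = { 0, 100 };	/* 100us */

void
mydrv_service_rxq(struct mydrv_rxq *rxq)
{
	struct lro_entry *le;
	struct mbuf *m;
	int budget = 64;

	while ((m = mydrv_next_rx_mbuf(rxq)) != NULL) {
		/* Try to aggregate; deliver directly if LRO declines it. */
		if (tcp_lro_rx(&rxq->lro, m, 0) != 0)
			mydrv_deliver(rxq, m);

		/*
		 * Under sustained rx this loop may never drain the queue,
		 * so periodically evict entries that have gone stale
		 * instead of relying solely on the final flush below.
		 */
		if (--budget == 0) {
			tcp_lro_flush_inactive(&rxq->lro, &mydrv_lro_timeout);
			budget = 64;
		}
	}

	/* Ordinary end-of-processing flush of everything still active. */
	while ((le = SLIST_FIRST(&rxq->lro.lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&rxq->lro.lro_active, next);
		tcp_lro_flush(&rxq->lro, le);
	}
}

The cadence (here every 64 packets) and the threshold are deliberately
left up to the driver, matching the note above that drivers decide when
to flush and what the inactivity threshold should be.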