Merge r254336 from user/np/cxl_tuning.

Add a last-modified timestamp to each LRO entry and provide an interface
to flush all inactive entries.  Drivers decide when to flush and what
the inactivity threshold should be.
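
A minimal driver-side sketch of the new interface (assumed, not taken from this commit): the xx_* names and the 100-microsecond threshold are hypothetical, and each driver picks its own inactivity threshold and call site.

/* Sketch only: xx_* names and the 100us threshold are hypothetical. */
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/time.h>
#include <sys/mbuf.h>

#include <netinet/tcp_lro.h>

static struct timeval xx_lro_timeout = { 0, 100 };      /* 100us of inactivity */

void
xx_lro_reap_idle(struct lro_ctrl *lro)
{
        /*
         * Flush only the active LRO entries whose last-modified
         * timestamp (le->mtime) is at least xx_lro_timeout in the past;
         * entries that are still being appended to are left alone.
         */
        tcp_lro_flush_inactive(lro, &xx_lro_timeout);
}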

Network drivers that process an rx queue to completion can enter a
livelock-type situation when the rate at which packets arrive reaches
equilibrium with the rate at which the rx thread processes them.  When
this happens, the final LRO flush (which normally takes place when the
rx routine is done) never occurs.  Pure ACKs and segments with total
payload < 64K can get stuck in an LRO entry.  The symptom is that the
performance of tx-mostly TCP connections falls off a cliff during
heavy, unrelated rx on the same interface.

Flushing only inactive LRO entries (see the sketch after this list)
works better than any of these alternatives that I tried:
- don't LRO pure ACKs
- flush _all_ LRO entries periodically (every 'x' microseconds or every
  'y' descriptors)
- stop rx processing in the driver periodically and schedule remaining
  work for later.
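
For illustration, here is a sketch of where the periodic call might sit in a run-to-completion rx loop (assumed, not from this commit or any particular driver); struct xx_rxq, xx_next_desc(), xx_rx_one(), and the every-32-descriptors cadence are hypothetical.

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/time.h>
#include <sys/mbuf.h>

#include <netinet/tcp_lro.h>

struct xx_rxq {                                 /* hypothetical per-queue state */
        struct lro_ctrl lro;
        /* ... descriptor ring state ... */
};

int     xx_next_desc(struct xx_rxq *);          /* hypothetical: more rx work? */
void    xx_rx_one(struct xx_rxq *);             /* hypothetical: may call tcp_lro_rx() */

static struct timeval xx_lro_timeout = { 0, 100 };      /* driver-chosen threshold */

void
xx_service_rxq(struct xx_rxq *rxq)
{
        struct lro_entry *le;
        int ndesc = 0;

        while (xx_next_desc(rxq)) {
                xx_rx_one(rxq);

                /*
                 * Under sustained rx this loop may never exit, so the final
                 * flush below never runs; reap only the idle entries on a
                 * coarse cadence instead of flushing everything.
                 */
                if ((++ndesc & 31) == 0)
                        tcp_lro_flush_inactive(&rxq->lro, &xx_lro_timeout);
        }

        /* The usual final flush once the queue actually drains. */
        while ((le = SLIST_FIRST(&rxq->lro.lro_active)) != NULL) {
                SLIST_REMOVE_HEAD(&rxq->lro.lro_active, next);
                tcp_lro_flush(&rxq->lro, le);
        }
}

Unlike the rejected alternatives, entries that are still growing are left alone, so aggregation is not cut short for connections that are actively receiving.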

Reviewed by:	andre
Author:	Navdeep Parhar	2013-08-28 23:00:34 +00:00
Parent:	2d0196c405
Commit:	7127e6acf0
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=255010
2 changed files with 26 additions and 1 deletion

sys/netinet/tcp_lro.c

@@ -193,6 +193,25 @@ tcp_lro_rx_csum_fixup(struct lro_entry *le, void *l3hdr, struct tcphdr *th,
 }
 #endif
 
+void
+tcp_lro_flush_inactive(struct lro_ctrl *lc, const struct timeval *timeout)
+{
+        struct lro_entry *le, *le_tmp;
+        struct timeval tv;
+
+        if (SLIST_EMPTY(&lc->lro_active))
+                return;
+
+        getmicrotime(&tv);
+        timevalsub(&tv, timeout);
+        SLIST_FOREACH_SAFE(le, &lc->lro_active, next, le_tmp) {
+                if (timevalcmp(&tv, &le->mtime, >=)) {
+                        SLIST_REMOVE(&lc->lro_active, le, lro_entry, next);
+                        tcp_lro_flush(lc, le);
+                }
+        }
+}
+
 void
 tcp_lro_flush(struct lro_ctrl *lc, struct lro_entry *le)
 {
@@ -543,7 +562,8 @@ tcp_lro_rx(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum)
                 if (le->p_len > (65535 - lc->ifp->if_mtu)) {
                         SLIST_REMOVE(&lc->lro_active, le, lro_entry, next);
                         tcp_lro_flush(lc, le);
-                }
+                } else
+                        getmicrotime(&le->mtime);
 
                 return (0);
         }
@@ -556,6 +576,7 @@ tcp_lro_rx(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum)
         le = SLIST_FIRST(&lc->lro_free);
         SLIST_REMOVE_HEAD(&lc->lro_free, next);
         SLIST_INSERT_HEAD(&lc->lro_active, le, next);
+        getmicrotime(&le->mtime);
 
         /* Start filling in details. */
         switch (eh_type) {

sys/netinet/tcp_lro.h

@@ -30,6 +30,8 @@
 #ifndef _TCP_LRO_H_
 #define _TCP_LRO_H_
 
+#include <sys/time.h>
+
 struct lro_entry
 {
         SLIST_ENTRY(lro_entry) next;
@@ -59,6 +61,7 @@ struct lro_entry
         uint32_t        tsecr;
         uint16_t        window;
         uint16_t        timestamp;      /* flag, not a TCP hdr field. */
+        struct timeval  mtime;
 };
 
 SLIST_HEAD(lro_head, lro_entry);
@@ -83,6 +86,7 @@ struct lro_ctrl {
 
 int tcp_lro_init(struct lro_ctrl *);
 void tcp_lro_free(struct lro_ctrl *);
+void tcp_lro_flush_inactive(struct lro_ctrl *, const struct timeval *);
 void tcp_lro_flush(struct lro_ctrl *, struct lro_entry *);
 int tcp_lro_rx(struct lro_ctrl *, struct mbuf *, uint32_t);
 