tcp/lro: Refactor the active list operations.

Ease upcoming work on the active list, e.g. moving it into a hash table.

Reviewed by:	gallatin, rrs (earlier version)
Sponsored by:	Microsoft OSTC
Differential Revision:	https://reviews.freebsd.org/D6137
commit 51e3c20d36
parent 71718d3c73
Author: Sepherosa Ziehau
Date:   2016-05-03 08:13:25 +00:00
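
The change itself is mechanical: every direct LIST_REMOVE() of an active-list entry becomes tcp_lro_active_remove(), and the single LIST_INSERT_HEAD() onto lc->lro_active becomes tcp_lro_active_insert(). With all call sites funneled through the two __inline helpers, a later change to the backing data structure, such as the hash table the commit message alludes to, only has to touch the helpers. A hypothetical sketch of such a follow-up (LRO_HASH_SIZE, the lro_active_hash[] field, and the port-based bucket choice are invented here for illustration and are not part of this commit):

	/* Hypothetical follow-up sketch, not part of this commit. */
	#define LRO_HASH_SIZE	8		/* invented bucket count */

	static __inline void
	tcp_lro_active_insert(struct lro_ctrl *lc, struct lro_entry *le)
	{
		/* Assumes lro_ctrl grew an lro_active_hash[] array of LIST_HEADs. */
		uint32_t bucket = le->source_port % LRO_HASH_SIZE;

		LIST_INSERT_HEAD(&lc->lro_active_hash[bucket], le, next);
	}

	static __inline void
	tcp_lro_active_remove(struct lro_entry *le)
	{
		/* LIST_REMOVE() never needs the list head, so this stays the same. */
		LIST_REMOVE(le, next);
	}

Note the asymmetry the wrappers preserve: insertion needs the lro_ctrl to find the list head, while queue(3)'s LIST_REMOVE() unlinks an element without one, which is why tcp_lro_active_remove() takes only the entry.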

sys/netinet/tcp_lro.c

@@ -69,6 +69,20 @@ static MALLOC_DEFINE(M_LRO, "LRO", "LRO control structures");
 
 static void	tcp_lro_rx_done(struct lro_ctrl *lc);
 
+static __inline void
+tcp_lro_active_insert(struct lro_ctrl *lc, struct lro_entry *le)
+{
+
+	LIST_INSERT_HEAD(&lc->lro_active, le, next);
+}
+
+static __inline void
+tcp_lro_active_remove(struct lro_entry *le)
+{
+
+	LIST_REMOVE(le, next);
+}
+
 int
 tcp_lro_init(struct lro_ctrl *lc)
 {
@@ -129,7 +143,7 @@ tcp_lro_free(struct lro_ctrl *lc)
 
 	/* free active mbufs, if any */
 	while ((le = LIST_FIRST(&lc->lro_active)) != NULL) {
-		LIST_REMOVE(le, next);
+		tcp_lro_active_remove(le);
 		m_freem(le->m_head);
 	}
@@ -234,7 +248,7 @@ tcp_lro_rx_done(struct lro_ctrl *lc)
 	struct lro_entry *le;
 
 	while ((le = LIST_FIRST(&lc->lro_active)) != NULL) {
-		LIST_REMOVE(le, next);
+		tcp_lro_active_remove(le);
 		tcp_lro_flush(lc, le);
 	}
 }
@@ -252,7 +266,7 @@ tcp_lro_flush_inactive(struct lro_ctrl *lc, const struct timeval *timeout)
 	timevalsub(&tv, timeout);
 	LIST_FOREACH_SAFE(le, &lc->lro_active, next, le_tmp) {
 		if (timevalcmp(&tv, &le->mtime, >=)) {
-			LIST_REMOVE(le, next);
+			tcp_lro_active_remove(le);
 			tcp_lro_flush(lc, le);
 		}
 	}
@@ -620,7 +634,7 @@ tcp_lro_rx(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum)
 
 		/* Flush now if appending will result in overflow. */
 		if (le->p_len > (lc->lro_length_lim - tcp_data_len)) {
-			LIST_REMOVE(le, next);
+			tcp_lro_active_remove(le);
 			tcp_lro_flush(lc, le);
 			break;
 		}
@@ -629,7 +643,7 @@ tcp_lro_rx(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum)
 		if (__predict_false(seq != le->next_seq ||
 		    (tcp_data_len == 0 && le->ack_seq == th->th_ack))) {
 			/* Out of order packet or duplicate ACK. */
-			LIST_REMOVE(le, next);
+			tcp_lro_active_remove(le);
 			tcp_lro_flush(lc, le);
 			return (TCP_LRO_CANNOT);
 		}
@@ -662,7 +676,7 @@ tcp_lro_rx(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum)
 			 * be further delayed.
 			 */
 			if (le->append_cnt >= lc->lro_ackcnt_lim) {
-				LIST_REMOVE(le, next);
+				tcp_lro_active_remove(le);
 				tcp_lro_flush(lc, le);
 			}
 			return (0);
@@ -686,7 +700,7 @@ tcp_lro_rx(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum)
 		 * overflow, pro-actively flush now.
 		 */
 		if (le->p_len > (lc->lro_length_lim - lc->ifp->if_mtu)) {
-			LIST_REMOVE(le, next);
+			tcp_lro_active_remove(le);
 			tcp_lro_flush(lc, le);
 		} else
 			getmicrotime(&le->mtime);
@@ -701,7 +715,7 @@ tcp_lro_rx(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum)
 	/* Start a new segment chain. */
 	le = LIST_FIRST(&lc->lro_free);
 	LIST_REMOVE(le, next);
-	LIST_INSERT_HEAD(&lc->lro_active, le, next);
+	tcp_lro_active_insert(lc, le);
 	getmicrotime(&le->mtime);
 
 	/* Start filling in details. */
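
For readers unfamiliar with the queue(3) LIST macros being wrapped above, here is a small self-contained userland sketch of the same insert/drain pattern; struct entry, active_insert(), and active_remove() are invented names, and the drain loop mirrors the one in tcp_lro_rx_done():

	#include <sys/queue.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct entry {
		int id;
		LIST_ENTRY(entry) next;		/* linkage, like lro_entry's "next" */
	};

	static LIST_HEAD(, entry) active = LIST_HEAD_INITIALIZER(active);

	/* Thin wrappers in the same spirit as tcp_lro_active_insert()/_remove(). */
	static inline void
	active_insert(struct entry *e)
	{
		LIST_INSERT_HEAD(&active, e, next);
	}

	static inline void
	active_remove(struct entry *e)
	{
		LIST_REMOVE(e, next);	/* no list head needed */
	}

	int
	main(void)
	{
		struct entry *e;

		for (int i = 0; i < 3; i++) {
			e = malloc(sizeof(*e));
			e->id = i;
			active_insert(e);
		}

		/* Drain the list the way tcp_lro_rx_done() drains lro_active. */
		while ((e = LIST_FIRST(&active)) != NULL) {
			active_remove(e);
			printf("flushed entry %d\n", e->id);
			free(e);
		}
		return (0);
	}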