ip_frag: add function to delete expired entries

A fragmented packet is supposed to live no longer than max_cycles,
but the library deletes an expired packet only occasionally, when it
scans a bucket for an empty slot while adding a new packet.
Therefore a fragment might sit in the table forever.

Signed-off-by: Alex Kiselev <alex@therouter.net>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Alex Kiselev 2018-06-04 13:13:02 +03:00 committed by Thomas Monjalon
parent e480688dce
commit d5946eef6a
5 changed files with 63 additions and 19 deletions
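
A usage sketch, not part of this commit: an application that owns a fragmentation table and a death row can now flush stale reassembly state from its main loop instead of waiting for a hash collision to evict it. The helper name, the once-per-second trigger and the prefetch argument below are illustrative assumptions.

#include <rte_cycles.h>
#include <rte_ip_frag.h>

/* hypothetical housekeeping helper, called from the application's main loop */
static void
frag_tbl_housekeeping(struct rte_ip_frag_tbl *tbl,
	struct rte_ip_frag_death_row *dr)
{
	static uint64_t next_flush;
	uint64_t now = rte_rdtsc();

	if (now < next_flush)
		return;
	next_flush = now + rte_get_tsc_hz();	/* roughly once per second */

	/* move expired reassembly contexts onto the death row ... */
	rte_frag_table_del_expired_entries(tbl, dr, now);

	/* ... and actually free their mbufs */
	rte_ip_frag_free_death_row(dr, 0);
}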


@@ -25,6 +25,12 @@
#define IPv6_KEY_BYTES_FMT \
"%08" PRIx64 "%08" PRIx64 "%08" PRIx64 "%08" PRIx64
#ifdef RTE_LIBRTE_IP_FRAG_TBL_STAT
#define IP_FRAG_TBL_STAT_UPDATE(s, f, v) ((s)->f += (v))
#else
#define IP_FRAG_TBL_STAT_UPDATE(s, f, v) do {} while (0)
#endif /* IP_FRAG_TBL_STAT */
/* internal functions declarations */
struct rte_mbuf * ip_frag_process(struct ip_frag_pkt *fp,
struct rte_ip_frag_death_row *dr, struct rte_mbuf *mb,
@@ -149,4 +155,16 @@ ip_frag_reset(struct ip_frag_pkt *fp, uint64_t tms)
	fp->frags[IP_FIRST_FRAG_IDX] = zero_frag;
}

/* local frag table helper functions */
static inline void
ip_frag_tbl_del(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
	struct ip_frag_pkt *fp)
{
	ip_frag_free(fp, dr);
	ip_frag_key_invalidate(&fp->key);
	TAILQ_REMOVE(&tbl->lru, fp, lru);
	tbl->use_entries--;
	IP_FRAG_TBL_STAT_UPDATE(&tbl->stat, del_num, 1);
}
#endif /* _IP_FRAG_COMMON_H_ */
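
(The ip_frag_tbl_del() helper added above is the same one the next hunk removes: its definition moves out of the library's internal source file into this shared header so that the new public flush routine further down can reuse it.)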


@@ -14,24 +14,6 @@
#define IP_FRAG_TBL_POS(tbl, sig) \
	((tbl)->pkt + ((sig) & (tbl)->entry_mask))

#ifdef RTE_LIBRTE_IP_FRAG_TBL_STAT
#define IP_FRAG_TBL_STAT_UPDATE(s, f, v) ((s)->f += (v))
#else
#define IP_FRAG_TBL_STAT_UPDATE(s, f, v) do {} while (0)
#endif /* IP_FRAG_TBL_STAT */

/* local frag table helper functions */
static inline void
ip_frag_tbl_del(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
	struct ip_frag_pkt *fp)
{
	ip_frag_free(fp, dr);
	ip_frag_key_invalidate(&fp->key);
	TAILQ_REMOVE(&tbl->lru, fp, lru);
	tbl->use_entries--;
	IP_FRAG_TBL_STAT_UPDATE(&tbl->stat, del_num, 1);
}

static inline void
ip_frag_tbl_add(struct rte_ip_frag_tbl *tbl, struct ip_frag_pkt *fp,
	const struct ip_frag_key *key, uint64_t tms)


@@ -65,10 +65,13 @@ struct ip_frag_pkt {
#define IP_FRAG_DEATH_ROW_LEN 32 /**< death row size (in packets) */

/* death row size in mbufs */
#define IP_FRAG_DEATH_ROW_MBUF_LEN (IP_FRAG_DEATH_ROW_LEN * (IP_MAX_FRAG_NUM + 1))

/** mbuf death row (packets to be freed) */
struct rte_ip_frag_death_row {
	uint32_t cnt;          /**< number of mbufs currently on death row */
	struct rte_mbuf *row[IP_FRAG_DEATH_ROW_LEN * (IP_MAX_FRAG_NUM + 1)];
	struct rte_mbuf *row[IP_FRAG_DEATH_ROW_MBUF_LEN];
	/**< mbufs to be freed */
};
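
(For scale: assuming the default RTE_LIBRTE_IP_FRAG_MAX_FRAG value of 4, so that IP_MAX_FRAG_NUM is 4, the new IP_FRAG_DEATH_ROW_MBUF_LEN works out to 32 * (4 + 1) = 160 mbuf slots.)
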
@@ -325,6 +328,20 @@ void rte_ip_frag_free_death_row(struct rte_ip_frag_death_row *dr,
void
rte_ip_frag_table_statistics_dump(FILE * f, const struct rte_ip_frag_tbl *tbl);

/**
 * Delete expired fragments
 *
 * @param tbl
 *   Table to delete expired fragments from
 * @param dr
 *   Death row to free buffers to
 * @param tms
 *   Current timestamp
 */
void __rte_experimental
rte_frag_table_del_expired_entries(struct rte_ip_frag_tbl *tbl,
	struct rte_ip_frag_death_row *dr, uint64_t tms);
#ifdef __cplusplus
}
#endif


@@ -121,3 +121,24 @@ rte_ip_frag_table_statistics_dump(FILE *f, const struct rte_ip_frag_tbl *tbl)
		fail_nospace,
		fail_total - fail_nospace);
}

/* Delete expired fragments */
void __rte_experimental
rte_frag_table_del_expired_entries(struct rte_ip_frag_tbl *tbl,
	struct rte_ip_frag_death_row *dr, uint64_t tms)
{
	uint64_t max_cycles;
	struct ip_frag_pkt *fp;

	max_cycles = tbl->max_cycles;

	TAILQ_FOREACH(fp, &tbl->lru, lru)
		if (max_cycles + fp->start < tms) {
			/* check that death row has enough space */
			if (IP_FRAG_DEATH_ROW_MBUF_LEN - dr->cnt >= fp->last_idx)
				ip_frag_tbl_del(tbl, dr, fp);
			else
				return;
		} else
			return;
}
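
(Note on the loop above: entries are appended to the LRU list when they are created or reused, so the list head holds the oldest packets; as soon as the walk meets an entry that has not yet expired, or the death row runs out of room, every later entry is at least as fresh and the function simply returns. Deleted packets only land on the death row; the caller still frees their mbufs with rte_ip_frag_free_death_row(), as in the sketch near the top.)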


@@ -18,3 +18,9 @@ DPDK_17.08 {
	rte_ip_frag_table_destroy;
} DPDK_2.0;

EXPERIMENTAL {
	global:

	rte_frag_table_del_expired_entries;
};
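
(Because the new symbol is exported only from the EXPERIMENTAL block and the function itself is tagged __rte_experimental, applications of this DPDK generation normally have to build with ALLOW_EXPERIMENTAL_API defined before they may call rte_frag_table_del_expired_entries().)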