ip_frag: remove unneeded rte prefixes

Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
Acked-by: Thomas Monjalon <thomas.monjalon@6wind.com>
Author: Anatoly Burakov, 2014-06-18 15:50:28 +01:00, committed by Thomas Monjalon
parent 495ea3d9c6
commit 4ae2a4f8cf
7 changed files with 35 additions and 35 deletions
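
For context, the rename only touches identifiers internal to librte_ip_frag (struct ip_frag_pkt, ip_pkt_list, ip_frag_tbl_stat, the IP_FRAG_ASSERT macro); everything an application links against keeps its rte_ prefix, as the hunks below show. A minimal caller-side sketch, assuming the public reassembly API of this release (rte_ip_frag_table_create(), rte_ipv4_frag_reassemble_packet(), rte_ip_frag_free_death_row()); the wrapper name and the prefetch value are illustrative, and parsing the headers out of the mbuf is left to the caller:

#include <rte_cycles.h>
#include <rte_ip.h>
#include <rte_ip_frag.h>
#include <rte_mbuf.h>

/* Caller-side view: only rte_-prefixed public types and functions appear,
 * so application code is unaffected by the internal renames in this commit.
 * The fragmentation table itself would have been created once at init time,
 * e.g. with rte_ip_frag_table_create(). */
static struct rte_mbuf *
reassemble_v4(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
	struct rte_mbuf *m, struct ipv4_hdr *ip_hdr)
{
	uint64_t tms = rte_rdtsc();	/* arrival timestamp, in TSC cycles */
	struct rte_mbuf *out;

	/* NULL until all fragments of this packet have been seen (or on error) */
	out = rte_ipv4_frag_reassemble_packet(tbl, dr, m, tms, ip_hdr);

	/* release whatever reassembly put on the death row */
	rte_ip_frag_free_death_row(dr, 3 /* prefetch depth, illustrative */);
	return out;
}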

lib/librte_ip_frag/ip_frag_common.h

@@ -41,14 +41,14 @@
 #define IP_FRAG_LOG(lvl, fmt, args...) RTE_LOG(lvl, USER1, fmt, ##args)
-#define RTE_IP_FRAG_ASSERT(exp) \
+#define IP_FRAG_ASSERT(exp) \
 if (!(exp)) { \
 	rte_panic("function %s, line%d\tassert \"" #exp "\" failed\n", \
 		__func__, __LINE__); \
 }
 #else
 #define IP_FRAG_LOG(lvl, fmt, args...) do {} while(0)
-#define RTE_IP_FRAG_ASSERT(exp) do { } while(0)
+#define IP_FRAG_ASSERT(exp) do {} while (0)
 #endif /* IP_FRAG_DEBUG */
 #define IPV4_KEYLEN 1
@@ -63,21 +63,21 @@ if (!(exp)) { \
 	"%08" PRIx64 "%08" PRIx64 "%08" PRIx64 "%08" PRIx64
 /* internal functions declarations */
-struct rte_mbuf * ip_frag_process(struct rte_ip_frag_pkt *fp,
+struct rte_mbuf * ip_frag_process(struct ip_frag_pkt *fp,
 	struct rte_ip_frag_death_row *dr, struct rte_mbuf *mb,
 	uint16_t ofs, uint16_t len, uint16_t more_frags);
-struct rte_ip_frag_pkt * ip_frag_find(struct rte_ip_frag_tbl *tbl,
+struct ip_frag_pkt * ip_frag_find(struct rte_ip_frag_tbl *tbl,
 	struct rte_ip_frag_death_row *dr,
 	const struct ip_frag_key *key, uint64_t tms);
-struct rte_ip_frag_pkt * ip_frag_lookup(struct rte_ip_frag_tbl *tbl,
+struct ip_frag_pkt * ip_frag_lookup(struct rte_ip_frag_tbl *tbl,
 	const struct ip_frag_key *key, uint64_t tms,
-	struct rte_ip_frag_pkt **free, struct rte_ip_frag_pkt **stale);
+	struct ip_frag_pkt **free, struct ip_frag_pkt **stale);
 /* these functions need to be declared here as ip_frag_process relies on them */
-struct rte_mbuf * ipv4_frag_reassemble(const struct rte_ip_frag_pkt *fp);
-struct rte_mbuf * ipv6_frag_reassemble(const struct rte_ip_frag_pkt *fp);
+struct rte_mbuf * ipv4_frag_reassemble(const struct ip_frag_pkt *fp);
+struct rte_mbuf * ipv6_frag_reassemble(const struct ip_frag_pkt *fp);
@@ -122,7 +122,7 @@ ip_frag_key_cmp(const struct ip_frag_key * k1, const struct ip_frag_key * k2)
 /* put fragment on death row */
 static inline void
-ip_frag_free(struct rte_ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr)
+ip_frag_free(struct ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr)
 {
 	uint32_t i, k;
@@ -140,7 +140,7 @@ ip_frag_free(struct rte_ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr)
 /* if key is empty, mark key as in use */
 static inline void
-ip_frag_inuse(struct rte_ip_frag_tbl *tbl, const struct rte_ip_frag_pkt *fp)
+ip_frag_inuse(struct rte_ip_frag_tbl *tbl, const struct ip_frag_pkt *fp)
 {
 	if (ip_frag_key_is_empty(&fp->key)) {
 		TAILQ_REMOVE(&tbl->lru, fp, lru);
@@ -150,7 +150,7 @@ ip_frag_inuse(struct rte_ip_frag_tbl *tbl, const struct rte_ip_frag_pkt *fp)
 /* reset the fragment */
 static inline void
-ip_frag_reset(struct rte_ip_frag_pkt *fp, uint64_t tms)
+ip_frag_reset(struct ip_frag_pkt *fp, uint64_t tms)
 {
 	static const struct ip_frag zero_frag = {
 		.ofs = 0,

lib/librte_ip_frag/ip_frag_internal.c

@@ -54,7 +54,7 @@
 /* local frag table helper functions */
 static inline void
 ip_frag_tbl_del(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
-	struct rte_ip_frag_pkt *fp)
+	struct ip_frag_pkt *fp)
 {
 	ip_frag_free(fp, dr);
 	ip_frag_key_invalidate(&fp->key);
@@ -64,7 +64,7 @@ ip_frag_tbl_del(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
 }
 static inline void
-ip_frag_tbl_add(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_pkt *fp,
+ip_frag_tbl_add(struct rte_ip_frag_tbl *tbl, struct ip_frag_pkt *fp,
 	const struct ip_frag_key *key, uint64_t tms)
 {
 	fp->key = key[0];
@@ -76,7 +76,7 @@ ip_frag_tbl_add(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_pkt *fp,
 static inline void
 ip_frag_tbl_reuse(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
-	struct rte_ip_frag_pkt *fp, uint64_t tms)
+	struct ip_frag_pkt *fp, uint64_t tms)
 {
 	ip_frag_free(fp, dr);
 	ip_frag_reset(fp, tms);
@@ -137,7 +137,7 @@ ipv6_frag_hash(const struct ip_frag_key *key, uint32_t *v1, uint32_t *v2)
 }
 struct rte_mbuf *
-ip_frag_process(struct rte_ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr,
+ip_frag_process(struct ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr,
 	struct rte_mbuf *mb, uint16_t ofs, uint16_t len, uint16_t more_frags)
 {
 	uint32_t idx;
@@ -268,11 +268,11 @@ ip_frag_process(struct rte_ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr,
  * If such entry is not present, then allocate a new one.
  * If the entry is stale, then free and reuse it.
  */
-struct rte_ip_frag_pkt *
+struct ip_frag_pkt *
 ip_frag_find(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
 	const struct ip_frag_key *key, uint64_t tms)
 {
-	struct rte_ip_frag_pkt *pkt, *free, *stale, *lru;
+	struct ip_frag_pkt *pkt, *free, *stale, *lru;
 	uint64_t max_cycles;
 	/*
@@ -330,13 +330,13 @@ ip_frag_find(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
 	return (pkt);
 }
-struct rte_ip_frag_pkt *
+struct ip_frag_pkt *
 ip_frag_lookup(struct rte_ip_frag_tbl *tbl,
 	const struct ip_frag_key *key, uint64_t tms,
-	struct rte_ip_frag_pkt **free, struct rte_ip_frag_pkt **stale)
+	struct ip_frag_pkt **free, struct ip_frag_pkt **stale)
 {
-	struct rte_ip_frag_pkt *p1, *p2;
-	struct rte_ip_frag_pkt *empty, *old;
+	struct ip_frag_pkt *p1, *p2;
+	struct ip_frag_pkt *empty, *old;
 	uint64_t max_cycles;
 	uint32_t i, assoc, sig1, sig2;
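
The comment block above ip_frag_find() sums up the find-or-create policy: look the key up, allocate a fresh entry if it is absent, and reclaim a stale (timed-out) one when possible. A simplified sketch of that flow, written as it would sit inside ip_frag_internal.c next to the helpers above; it is not the actual code — the statistics counters, the reuse of a matching-but-expired entry, and the forced eviction of the LRU entry when the table is full are all omitted:

/* Simplified find-or-create sketch using the internal helpers shown above
 * (ip_frag_lookup/ip_frag_tbl_del/ip_frag_tbl_add); illustration only. */
static struct ip_frag_pkt *
frag_find_sketch(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
	const struct ip_frag_key *key, uint64_t tms)
{
	struct ip_frag_pkt *pkt, *free, *stale;

	pkt = ip_frag_lookup(tbl, key, tms, &free, &stale);
	if (pkt != NULL)
		return pkt;			/* this flow already has an entry */

	if (stale != NULL) {			/* expired entry: free it, take its slot */
		ip_frag_tbl_del(tbl, dr, stale);
		free = stale;
	}
	if (free != NULL) {			/* empty (or just freed) slot: claim it */
		ip_frag_tbl_add(tbl, free, key, tms);
		return free;
	}
	return NULL;				/* table full and nothing has expired */
}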

lib/librte_ip_frag/rte_ip_frag.h

@@ -75,8 +75,8 @@ struct ip_frag_key {
  * @internal Fragmented packet to reassemble.
  * First two entries in the frags[] array are for the last and first fragments.
  */
-struct rte_ip_frag_pkt {
-	TAILQ_ENTRY(rte_ip_frag_pkt) lru; /**< LRU list */
+struct ip_frag_pkt {
+	TAILQ_ENTRY(ip_frag_pkt) lru; /**< LRU list */
 	struct ip_frag_key key; /**< fragmentation key */
 	uint64_t start; /**< creation timestamp */
 	uint32_t total_size; /**< expected reassembled size */
@@ -94,10 +94,10 @@ struct rte_ip_frag_death_row {
 	/**< mbufs to be freed */
 };
-TAILQ_HEAD(rte_ip_pkt_list, rte_ip_frag_pkt); /**< @internal fragments tailq */
+TAILQ_HEAD(ip_pkt_list, ip_frag_pkt); /**< @internal fragments tailq */
 /** fragmentation table statistics */
-struct rte_ip_frag_tbl_stat {
+struct ip_frag_tbl_stat {
 	uint64_t find_num; /**< total # of find/insert attempts. */
 	uint64_t add_num; /**< # of add ops. */
 	uint64_t del_num; /**< # of del ops. */
@@ -115,10 +115,10 @@ struct rte_ip_frag_tbl {
 	uint32_t bucket_entries; /**< hash assocaitivity. */
 	uint32_t nb_entries; /**< total size of the table. */
 	uint32_t nb_buckets; /**< num of associativity lines. */
-	struct rte_ip_frag_pkt *last; /**< last used entry. */
-	struct rte_ip_pkt_list lru; /**< LRU list for table entries. */
-	struct rte_ip_frag_tbl_stat stat; /**< statistics counters. */
-	struct rte_ip_frag_pkt pkt[0]; /**< hash table. */
+	struct ip_frag_pkt *last; /**< last used entry. */
+	struct ip_pkt_list lru; /**< LRU list for table entries. */
+	struct ip_frag_tbl_stat stat; /**< statistics counters. */
+	struct ip_frag_pkt pkt[0]; /**< hash table. */
 };
 /** IPv6 fragment extension header */
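
Note how struct rte_ip_frag_tbl ends with the zero-length array pkt[0]: the table header and its nb_entries bucket slots live in a single allocation, while the frags[] array inside each ip_frag_pkt reserves its first two slots for the last and first fragments, per the comment above. A small sketch of the resulting memory footprint, assuming the one-block layout implied by pkt[0] (the real sizing and allocation happen inside rte_ip_frag_table_create()):

#include <stddef.h>
#include <stdint.h>
#include <rte_ip_frag.h>

/* Illustration only: one contiguous block holds the table header followed by
 * nb_entries hash-table slots of type struct ip_frag_pkt. */
static size_t
frag_tbl_mem_size(uint32_t nb_entries)
{
	return sizeof(struct rte_ip_frag_tbl) +
		(size_t)nb_entries * sizeof(struct ip_frag_pkt);
}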

lib/librte_ip_frag/rte_ipv4_fragmentation.c

@@ -107,7 +107,7 @@ rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
 	frag_size = (uint16_t)(mtu_size - sizeof(struct ipv4_hdr));
 	/* Fragment size should be a multiply of 8. */
-	RTE_IP_FRAG_ASSERT((frag_size & IPV4_HDR_FO_MASK) == 0);
+	IP_FRAG_ASSERT((frag_size & IPV4_HDR_FO_MASK) == 0);
 	in_hdr = (struct ipv4_hdr *) pkt_in->pkt.data;
 	flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset);

lib/librte_ip_frag/rte_ipv4_reassembly.c

@@ -42,7 +42,7 @@
  * Reassemble fragments into one packet.
  */
 struct rte_mbuf *
-ipv4_frag_reassemble(const struct rte_ip_frag_pkt *fp)
+ipv4_frag_reassemble(const struct ip_frag_pkt *fp)
 {
 	struct ipv4_hdr *ip_hdr;
 	struct rte_mbuf *m, *prev;
@@ -119,7 +119,7 @@ rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
 	struct rte_ip_frag_death_row *dr, struct rte_mbuf *mb, uint64_t tms,
 	struct ipv4_hdr *ip_hdr)
 {
-	struct rte_ip_frag_pkt *fp;
+	struct ip_frag_pkt *fp;
 	struct ip_frag_key key;
 	const uint64_t *psd;
 	uint16_t ip_len;

lib/librte_ip_frag/rte_ipv6_fragmentation.c

@@ -118,7 +118,7 @@ rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in,
 	frag_size = (uint16_t)(mtu_size - sizeof(struct ipv6_hdr));
 	/* Fragment size should be a multiple of 8. */
-	RTE_IP_FRAG_ASSERT((frag_size & IPV6_HDR_FO_MASK) == 0);
+	IP_FRAG_ASSERT((frag_size & IPV6_HDR_FO_MASK) == 0);
 	/* Check that pkts_out is big enough to hold all fragments */
 	if (unlikely (frag_size * nb_pkts_out <

lib/librte_ip_frag/rte_ipv6_reassembly.c

@@ -49,7 +49,7 @@
  * Reassemble fragments into one packet.
  */
 struct rte_mbuf *
-ipv6_frag_reassemble(const struct rte_ip_frag_pkt *fp)
+ipv6_frag_reassemble(const struct ip_frag_pkt *fp)
 {
 	struct ipv6_hdr *ip_hdr;
 	struct ipv6_extension_fragment *frag_hdr;
@@ -148,7 +148,7 @@ rte_ipv6_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
 	struct rte_ip_frag_death_row *dr, struct rte_mbuf *mb, uint64_t tms,
 	struct ipv6_hdr *ip_hdr, struct ipv6_extension_fragment *frag_hdr)
 {
-	struct rte_ip_frag_pkt *fp;
+	struct ip_frag_pkt *fp;
 	struct ip_frag_key key;
 	uint16_t ip_len, ip_ofs;