ip_frag: hide internal structures

Move internal reassembly structures into new private header 'ip_reassembly.h'.

Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
parent 694d61b8e7
commit 060ef29dc0
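
Since this change makes the fragment table, key and per-packet bookkeeping structures private, applications are expected to rely only on the public API declared in rte_ip_frag.h. The sketch below is illustrative and not part of this commit: the reassembly_ctx wrapper, the table sizing values and the per-packet death-row flush are assumptions, while the rte_ip_frag_* / rte_ipv4_frag_* calls are the library's public entry points.

#include <string.h>
#include <rte_ip_frag.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_cycles.h>

/* Hypothetical per-lcore reassembly state; only public types are used. */
struct reassembly_ctx {
	struct rte_ip_frag_tbl *tbl;
	struct rte_ip_frag_death_row dr;
};

static int
reassembly_ctx_init(struct reassembly_ctx *ctx, int socket_id)
{
	uint32_t nb_buckets = 4096;      /* illustrative sizing */
	uint32_t bucket_entries = 16;    /* hash associativity */
	uint64_t ttl = rte_get_tsc_hz(); /* entries expire after ~1 second */

	ctx->tbl = rte_ip_frag_table_create(nb_buckets, bucket_entries,
			nb_buckets * bucket_entries, ttl, socket_id);
	memset(&ctx->dr, 0, sizeof(ctx->dr));
	return ctx->tbl == NULL ? -1 : 0;
}

/*
 * Returns the reassembled packet, the original mbuf if it was not fragmented,
 * or NULL while further fragments are still awaited.
 */
static struct rte_mbuf *
reassemble_ipv4(struct reassembly_ctx *ctx, struct rte_mbuf *m)
{
	struct rte_ether_hdr *eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	struct rte_ipv4_hdr *ip = (struct rte_ipv4_hdr *)(eth + 1);

	if (!rte_ipv4_frag_pkt_is_fragmented(ip))
		return m;

	/* the library expects l2/l3 lengths of the incoming fragment */
	m->l2_len = sizeof(*eth);
	m->l3_len = rte_ipv4_hdr_len(ip);

	m = rte_ipv4_frag_reassemble_packet(ctx->tbl, &ctx->dr, m,
			rte_rdtsc(), ip);

	/* free mbufs of expired or dropped flows queued on the death row */
	rte_ip_frag_free_death_row(&ctx->dr, 3);
	return m;
}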
lib/ip_frag/ip_frag_common.h

@@ -6,6 +6,7 @@
#define _IP_FRAG_COMMON_H_

#include "rte_ip_frag.h"
#include "ip_reassembly.h"

/* logging macros. */
#ifdef RTE_LIBRTE_IP_FRAG_DEBUG
lib/ip_frag/ip_reassembly.h (new file, 89 lines)

@@ -0,0 +1,89 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2021 Intel Corporation
 */

#ifndef _IP_REASSEMBLY_H_
#define _IP_REASSEMBLY_H_

/*
 * IP Fragmentation and Reassembly
 * Implementation of IP packet fragmentation and reassembly.
 */

#include <rte_ip_frag.h>

enum {
	IP_LAST_FRAG_IDX,    /* index of last fragment */
	IP_FIRST_FRAG_IDX,   /* index of first fragment */
	IP_MIN_FRAG_NUM,     /* minimum number of fragments */
	IP_MAX_FRAG_NUM = RTE_LIBRTE_IP_FRAG_MAX_FRAG,
	/* maximum number of fragments per packet */
};

/* fragmented mbuf */
struct ip_frag {
	uint16_t ofs;        /* offset into the packet */
	uint16_t len;        /* length of fragment */
	struct rte_mbuf *mb; /* fragment mbuf */
};

/*
 * key: <src addr, dst_addr, id> to uniquely identify fragmented datagram.
 */
struct ip_frag_key {
	uint64_t src_dst[4];
	/* src and dst address, only first 8 bytes used for IPv4 */
	RTE_STD_C11
	union {
		uint64_t id_key_len; /* combined for easy fetch */
		__extension__
		struct {
			uint32_t id;      /* packet id */
			uint32_t key_len; /* src/dst key length */
		};
	};
};

/*
 * Fragmented packet to reassemble.
 * First two entries in the frags[] array are for the last and first fragments.
 */
struct ip_frag_pkt {
	RTE_TAILQ_ENTRY(ip_frag_pkt) lru;      /* LRU list */
	struct ip_frag_key key;                /* fragmentation key */
	uint64_t start;                        /* creation timestamp */
	uint32_t total_size;                   /* expected reassembled size */
	uint32_t frag_size;                    /* size of fragments received */
	uint32_t last_idx;                     /* index of next entry to fill */
	struct ip_frag frags[IP_MAX_FRAG_NUM]; /* fragments */
} __rte_cache_aligned;

/* fragments tailq */
RTE_TAILQ_HEAD(ip_pkt_list, ip_frag_pkt);

/* fragmentation table statistics */
struct ip_frag_tbl_stat {
	uint64_t find_num;     /* total # of find/insert attempts. */
	uint64_t add_num;      /* # of add ops. */
	uint64_t del_num;      /* # of del ops. */
	uint64_t reuse_num;    /* # of reuse (del/add) ops. */
	uint64_t fail_total;   /* total # of add failures. */
	uint64_t fail_nospace; /* # of 'no space' add failures. */
} __rte_cache_aligned;

/* fragmentation table */
struct rte_ip_frag_tbl {
	uint64_t max_cycles;      /* ttl for table entries. */
	uint32_t entry_mask;      /* hash value mask. */
	uint32_t max_entries;     /* max entries allowed. */
	uint32_t use_entries;     /* entries in use. */
	uint32_t bucket_entries;  /* hash associativity. */
	uint32_t nb_entries;      /* total size of the table. */
	uint32_t nb_buckets;      /* num of associativity lines. */
	struct ip_frag_pkt *last; /* last used entry. */
	struct ip_pkt_list lru;   /* LRU list for table entries. */
	struct ip_frag_tbl_stat stat; /* statistics counters. */
	__extension__ struct ip_frag_pkt pkt[0]; /* hash table. */
};

#endif /* _IP_REASSEMBLY_H_ */
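
As context for the frags[] convention noted above (slots 0 and 1 reserved for the last and first fragments, middle fragments presumably appended from last_idx starting at IP_MIN_FRAG_NUM), the following is a hypothetical sketch of that slot selection; it is not the library's actual insertion code, and pick_frag_slot is an invented helper name.

#include <stdint.h>
#include "ip_reassembly.h"

/* Hypothetical helper, for illustration only. */
static uint32_t
pick_frag_slot(struct ip_frag_pkt *fp, uint16_t ofs, int more_frags)
{
	if (ofs == 0)
		return IP_FIRST_FRAG_IDX;  /* fragment carrying offset 0 */
	if (!more_frags)
		return IP_LAST_FRAG_IDX;   /* fragment without the MF bit */
	if (fp->last_idx < IP_MAX_FRAG_NUM)
		return fp->last_idx++;     /* next free middle-fragment slot */
	return UINT32_MAX;                 /* too many fragments for this packet */
}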
lib/ip_frag/rte_ip_frag.h

@@ -27,54 +27,11 @@ extern "C" {

struct rte_mbuf;

enum {
	IP_LAST_FRAG_IDX,    /**< index of last fragment */
	IP_FIRST_FRAG_IDX,   /**< index of first fragment */
	IP_MIN_FRAG_NUM,     /**< minimum number of fragments */
	IP_MAX_FRAG_NUM = RTE_LIBRTE_IP_FRAG_MAX_FRAG,
	/**< maximum number of fragments per packet */
};

/** @internal fragmented mbuf */
struct ip_frag {
	uint16_t ofs;        /**< offset into the packet */
	uint16_t len;        /**< length of fragment */
	struct rte_mbuf *mb; /**< fragment mbuf */
};

/** @internal <src addr, dst_addr, id> to uniquely identify fragmented datagram. */
struct ip_frag_key {
	uint64_t src_dst[4];
	/**< src and dst address, only first 8 bytes used for IPv4 */
	RTE_STD_C11
	union {
		uint64_t id_key_len; /**< combined for easy fetch */
		__extension__
		struct {
			uint32_t id;      /**< packet id */
			uint32_t key_len; /**< src/dst key length */
		};
	};
};

/**
 * @internal Fragmented packet to reassemble.
 * First two entries in the frags[] array are for the last and first fragments.
 */
struct ip_frag_pkt {
	RTE_TAILQ_ENTRY(ip_frag_pkt) lru;      /**< LRU list */
	struct ip_frag_key key;                /**< fragmentation key */
	uint64_t start;                        /**< creation timestamp */
	uint32_t total_size;                   /**< expected reassembled size */
	uint32_t frag_size;                    /**< size of fragments received */
	uint32_t last_idx;                     /**< index of next entry to fill */
	struct ip_frag frags[IP_MAX_FRAG_NUM]; /**< fragments */
} __rte_cache_aligned;

#define IP_FRAG_DEATH_ROW_LEN 32 /**< death row size (in packets) */

/* death row size in mbufs */
#define IP_FRAG_DEATH_ROW_MBUF_LEN (IP_FRAG_DEATH_ROW_LEN * (IP_MAX_FRAG_NUM + 1))
#define IP_FRAG_DEATH_ROW_MBUF_LEN \
	(IP_FRAG_DEATH_ROW_LEN * (RTE_LIBRTE_IP_FRAG_MAX_FRAG + 1))

/** mbuf death row (packets to be freed) */
struct rte_ip_frag_death_row {

@@ -83,33 +40,6 @@ struct rte_ip_frag_death_row {

	/**< mbufs to be freed */
};

RTE_TAILQ_HEAD(ip_pkt_list, ip_frag_pkt); /**< @internal fragments tailq */

/** fragmentation table statistics */
struct ip_frag_tbl_stat {
	uint64_t find_num;     /**< total # of find/insert attempts. */
	uint64_t add_num;      /**< # of add ops. */
	uint64_t del_num;      /**< # of del ops. */
	uint64_t reuse_num;    /**< # of reuse (del/add) ops. */
	uint64_t fail_total;   /**< total # of add failures. */
	uint64_t fail_nospace; /**< # of 'no space' add failures. */
} __rte_cache_aligned;

/** fragmentation table */
struct rte_ip_frag_tbl {
	uint64_t max_cycles;      /**< ttl for table entries. */
	uint32_t entry_mask;      /**< hash value mask. */
	uint32_t max_entries;     /**< max entries allowed. */
	uint32_t use_entries;     /**< entries in use. */
	uint32_t bucket_entries;  /**< hash associativity. */
	uint32_t nb_entries;      /**< total size of the table. */
	uint32_t nb_buckets;      /**< num of associativity lines. */
	struct ip_frag_pkt *last; /**< last used entry. */
	struct ip_pkt_list lru;   /**< LRU list for table entries. */
	struct ip_frag_tbl_stat stat; /**< statistics counters. */
	__extension__ struct ip_frag_pkt pkt[0]; /**< hash table. */
};

/* struct ipv6_extension_fragment moved to librte_net/rte_ip.h and renamed. */
#define ipv6_extension_fragment rte_ipv6_fragment_ext