mbuf: reorder fields by time of use

*  Reorder the fields in the mbuf so that we have fields that are used
together side-by-side in the structure. This means that we have a
contiguous block of 8-bytes in the mbuf which are used to reset an mbuf
on descriptor rearm, and a block of 16-bytes of data (excluding flags)
which are set on RX from the received packet descriptor.
* Use dummy fields as appropriate to ensure alignment or to reserve gaps
for later field additions.
* Place most items which are not used by fast-path RX separately at the end
of the structure so they can later be moved to a separate cache line.
[The l2/l3 length fields are not moved at this stage as doing so will
cause overflow to the next cache line].

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Olivier Matz <olivier.matz@6wind.com>
Commit: 21dc08a991 (parent 08b563ffb1)
Author: Bruce Richardson, 2014-09-11 14:15:36 +01:00
Committed by: Thomas Monjalon
2 changed files with 21 additions and 16 deletions

View File

@@ -108,15 +108,17 @@ struct rte_kni_fifo {
* Padding is necessary to assure the offsets of these fields
*/
/*
 * NOTE(review): this span is a rendered diff hunk with the +/- markers
 * stripped, so the pre-change and post-change field lists are interleaved
 * (pool, next, pad0, pad2 and ol_flags each appear twice). It is NOT valid
 * standalone C. Per the commit message above, the post-change layout is the
 * one that ends with pool/next, placing the slow-path fields at the end of
 * the structure -- confirm against the upstream commit (21dc08a991) before
 * reusing this text as source.
 */
struct rte_kni_mbuf {
void *pool;
void *buf_addr;
char pad0[16];
void *next;
char pad0[10];
uint16_t data_off; /**< Start address of data in segment buffer. */
char pad1[4];
uint16_t ol_flags; /**< Offload features. */
char pad2[8];
uint16_t data_len; /**< Amount of data in segment buffer. */
uint32_t pkt_len; /**< Total pkt len: sum of all segment data_len. */
char pad2[4];
uint16_t ol_flags; /**< Offload features. */
char pad3[8];
void *pool;
void *next;
} __attribute__((__aligned__(64))); /* aligned to a 64-byte cache line */
/*

View File

@@ -115,16 +115,12 @@ extern "C" {
* The generic rte_mbuf, containing a packet mbuf.
*/
struct rte_mbuf {
struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */
void *buf_addr; /**< Virtual address of segment buffer. */
phys_addr_t buf_physaddr; /**< Physical address of segment buffer. */
uint16_t buf_len; /**< Length of segment buffer. */
/* valid for any segment */
struct rte_mbuf *next; /**< Next segment of scattered packet. */
/* next 8 bytes are initialised on RX descriptor rearm */
uint16_t buf_len; /**< Length of segment buffer. */
uint16_t data_off;
uint16_t data_len; /**< Amount of data in segment buffer. */
uint32_t pkt_len; /**< Total pkt len: sum of all segments. */
#ifdef RTE_MBUF_REFCNT
/**
@@ -142,14 +138,17 @@ struct rte_mbuf {
#else
uint16_t refcnt_reserved; /**< Do not use this field */
#endif
uint16_t reserved; /**< Unused field. Required for padding */
uint16_t ol_flags; /**< Offload features. */
/* these fields are valid for first segment only */
uint8_t nb_segs; /**< Number of segments. */
uint8_t port; /**< Input port. */
/* offload features, valid for first segment only */
uint16_t ol_flags; /**< Offload features. */
uint16_t reserved0; /**< Unused field. Required for padding */
uint32_t reserved1; /**< Unused field. Required for padding */
/* remaining bytes are set on RX when pulling packet from descriptor */
uint16_t reserved2; /**< Unused field. Required for padding */
uint16_t data_len; /**< Amount of data in segment buffer. */
uint32_t pkt_len; /**< Total pkt len: sum of all segments. */
union {
uint16_t l2_l3_len; /**< combined l2/l3 lengths as single var */
struct {
@@ -167,6 +166,10 @@ struct rte_mbuf {
uint32_t sched; /**< Hierarchical scheduler */
} hash; /**< hash information */
/* fields only used in slow path or on TX */
struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */
struct rte_mbuf *next; /**< Next segment of scattered packet. */
union {
uint8_t metadata[0];
uint16_t metadata16[0];