/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright 2014 6WIND S.A.
 */

#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdarg.h>
#include <inttypes.h>
#include <errno.h>
#include <ctype.h>
#include <sys/queue.h>

#include <rte_compat.h>
#include <rte_debug.h>
#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_string_fns.h>
#include <rte_hexdump.h>
#include <rte_errno.h>
#include <rte_memcpy.h>

/*
 * pktmbuf pool constructor, given as a callback function to
 * rte_mempool_create(), or called directly if using
 * rte_mempool_create_empty()/rte_mempool_populate()
 */
void
rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg)
{
	struct rte_pktmbuf_pool_private *user_mbp_priv, *mbp_priv;
	struct rte_pktmbuf_pool_private default_mbp_priv;
	uint16_t roomsz;

	RTE_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf));

	/* if no structure is provided, assume no mbuf private area */
	user_mbp_priv = opaque_arg;
	if (user_mbp_priv == NULL) {
		default_mbp_priv.mbuf_priv_size = 0;
		if (mp->elt_size > sizeof(struct rte_mbuf))
			roomsz = mp->elt_size - sizeof(struct rte_mbuf);
		else
			roomsz = 0;
		default_mbp_priv.mbuf_data_room_size = roomsz;
		user_mbp_priv = &default_mbp_priv;
	}

	RTE_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf) +
		user_mbp_priv->mbuf_data_room_size +
		user_mbp_priv->mbuf_priv_size);

	mbp_priv = rte_mempool_get_priv(mp);
	memcpy(mbp_priv, user_mbp_priv, sizeof(*mbp_priv));
}
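
/*
 * Illustrative sketch (not from the original sources): when a pool is
 * created with rte_mempool_create() directly, this constructor is passed
 * as the mp_init callback and a struct rte_pktmbuf_pool_private as its
 * opaque argument. The pool name and sizes below are examples only.
 *
 *	struct rte_pktmbuf_pool_private priv = {
 *		.mbuf_data_room_size = RTE_MBUF_DEFAULT_BUF_SIZE,
 *		.mbuf_priv_size = 0,
 *	};
 *	struct rte_mempool *mp = rte_mempool_create("pkt_pool", 8192,
 *		sizeof(struct rte_mbuf) + priv.mbuf_priv_size +
 *			priv.mbuf_data_room_size,
 *		256, sizeof(priv), rte_pktmbuf_pool_init, &priv,
 *		rte_pktmbuf_init, NULL, rte_socket_id(), 0);
 */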

/*
 * pktmbuf constructor, given as a callback function to
 * rte_mempool_obj_iter() or rte_mempool_create().
 * Set the fields of a packet mbuf to their default values.
 */
void
rte_pktmbuf_init(struct rte_mempool *mp,
		 __attribute__((unused)) void *opaque_arg,
		 void *_m,
		 __attribute__((unused)) unsigned i)
{
	struct rte_mbuf *m = _m;
	uint32_t mbuf_size, buf_len, priv_size;

	priv_size = rte_pktmbuf_priv_size(mp);
	mbuf_size = sizeof(struct rte_mbuf) + priv_size;
	buf_len = rte_pktmbuf_data_room_size(mp);

	RTE_ASSERT(RTE_ALIGN(priv_size, RTE_MBUF_PRIV_ALIGN) == priv_size);
	RTE_ASSERT(mp->elt_size >= mbuf_size);
	RTE_ASSERT(buf_len <= UINT16_MAX);

	memset(m, 0, mbuf_size);
	/* start of buffer is after mbuf structure and priv data */
	m->priv_size = priv_size;
	m->buf_addr = (char *)m + mbuf_size;
	m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
	m->buf_len = (uint16_t)buf_len;

	/* keep some headroom between start of buffer and data */
	m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, (uint16_t)m->buf_len);

	/* init some constant fields */
	m->pool = mp;
	m->nb_segs = 1;
	m->port = MBUF_INVALID_PORT;
	rte_mbuf_refcnt_set(m, 1);
	m->next = NULL;
}
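
/*
 * Layout sketch (not from the original sources): for an mbuf m taken from
 * a pool whose objects were initialized by this constructor, and given the
 * priv_size and data_room_size supplied at pool creation, the following
 * relations hold:
 *
 *	m->buf_addr == (char *)m + sizeof(struct rte_mbuf) + priv_size;
 *	m->buf_len  == data_room_size;
 *	m->data_off == RTE_MIN(RTE_PKTMBUF_HEADROOM, m->buf_len);
 *	rte_pktmbuf_mtod(m, char *) == (char *)m->buf_addr + m->data_off;
 */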

/* Helper to create a mbuf pool with a given mempool ops name */
struct rte_mempool * __rte_experimental
rte_pktmbuf_pool_create_by_ops(const char *name, unsigned int n,
	unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
	int socket_id, const char *ops_name)
{
	struct rte_mempool *mp;
	struct rte_pktmbuf_pool_private mbp_priv;
	const char *mp_ops_name = ops_name;
	unsigned elt_size;
	int ret;

	if (RTE_ALIGN(priv_size, RTE_MBUF_PRIV_ALIGN) != priv_size) {
		RTE_LOG(ERR, MBUF, "mbuf priv_size=%u is not aligned\n",
			priv_size);
		rte_errno = EINVAL;
		return NULL;
	}
	elt_size = sizeof(struct rte_mbuf) + (unsigned)priv_size +
		(unsigned)data_room_size;
	mbp_priv.mbuf_data_room_size = data_room_size;
	mbp_priv.mbuf_priv_size = priv_size;

	mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
		 sizeof(struct rte_pktmbuf_pool_private), socket_id, 0);
	if (mp == NULL)
		return NULL;

	if (mp_ops_name == NULL)
		mp_ops_name = rte_mbuf_best_mempool_ops();
	ret = rte_mempool_set_ops_byname(mp, mp_ops_name, NULL);
	if (ret != 0) {
		RTE_LOG(ERR, MBUF, "error setting mempool handler\n");
		rte_mempool_free(mp);
		rte_errno = -ret;
		return NULL;
	}
	rte_pktmbuf_pool_init(mp, &mbp_priv);

	ret = rte_mempool_populate_default(mp);
	if (ret < 0) {
		rte_mempool_free(mp);
		rte_errno = -ret;
		return NULL;
	}

	rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL);

	return mp;
}
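
/*
 * Usage sketch (illustrative, not from the original sources): create a
 * pool whose objects are handled by an explicitly named mempool driver.
 * The "ring_mp_mc" handler name and the sizes used here are examples only.
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_pktmbuf_pool_create_by_ops("mbuf_pool_ring", 8192, 256,
 *		0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id(), "ring_mp_mc");
 *	if (mp == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot create mbuf pool: %s\n",
 *			rte_strerror(rte_errno));
 */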

/* helper to create a mbuf pool */
struct rte_mempool *
rte_pktmbuf_pool_create(const char *name, unsigned int n,
	unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
	int socket_id)
{
	return rte_pktmbuf_pool_create_by_ops(name, n, cache_size, priv_size,
			data_room_size, socket_id, NULL);
}
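
/*
 * Typical usage sketch (illustrative, not from the original sources):
 * applications usually create one pool per socket at init time, sized for
 * the default data room plus headroom.
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_pktmbuf_pool_create("mbuf_pool", 8192, 256, 0,
 *		RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *	if (mp == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot create mbuf pool\n");
 */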

/* do some sanity checks on a mbuf: panic if it fails */
void
rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header)
{
	unsigned int nb_segs, pkt_len;

	if (m == NULL)
		rte_panic("mbuf is NULL\n");

	/* generic checks */
	if (m->pool == NULL)
		rte_panic("bad mbuf pool\n");
	if (m->buf_iova == 0)
		rte_panic("bad IO addr\n");
	if (m->buf_addr == NULL)
		rte_panic("bad virt addr\n");

	uint16_t cnt = rte_mbuf_refcnt_read(m);
	if ((cnt == 0) || (cnt == UINT16_MAX))
		rte_panic("bad ref cnt\n");

	/* nothing to check for sub-segments */
	if (is_header == 0)
		return;

	/* data_len must not exceed pkt_len */
	if (m->data_len > m->pkt_len)
		rte_panic("bad data_len\n");

	nb_segs = m->nb_segs;
	pkt_len = m->pkt_len;

	do {
		nb_segs -= 1;
		pkt_len -= m->data_len;
	} while ((m = m->next) != NULL);

	if (nb_segs)
		rte_panic("bad nb_segs\n");
	if (pkt_len)
		rte_panic("bad pkt_len\n");
}
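
/*
 * Debug sketch (illustrative, not from the original sources): callers
 * normally use the __rte_mbuf_sanity_check() macro, which compiles to a
 * no-op unless RTE_LIBRTE_MBUF_DEBUG is enabled, but the check can also
 * be invoked directly:
 *
 *	rte_mbuf_sanity_check(m, 1);	whole chain, m is the head
 *	rte_mbuf_sanity_check(seg, 0);	a single segment
 */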

/* dump a mbuf on console */
void
rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len)
{
	unsigned int len;
	unsigned int nb_segs;

	__rte_mbuf_sanity_check(m, 1);

	fprintf(f, "dump mbuf at %p, iova=%"PRIx64", buf_len=%u\n",
		m, (uint64_t)m->buf_iova, (unsigned)m->buf_len);
	fprintf(f, " pkt_len=%"PRIu32", ol_flags=%"PRIx64", nb_segs=%u, "
		"in_port=%u\n", m->pkt_len, m->ol_flags,
		(unsigned)m->nb_segs, (unsigned)m->port);
	nb_segs = m->nb_segs;

	while (m && nb_segs != 0) {
		__rte_mbuf_sanity_check(m, 0);

		fprintf(f, " segment at %p, data=%p, data_len=%u\n",
			m, rte_pktmbuf_mtod(m, void *), (unsigned)m->data_len);
		len = dump_len;
		if (len > m->data_len)
			len = m->data_len;
		if (len != 0)
			rte_hexdump(f, NULL, rte_pktmbuf_mtod(m, void *), len);
		dump_len -= len;
		m = m->next;
		nb_segs--;
	}
}
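
/*
 * Usage sketch (illustrative, not from the original sources): dump the
 * metadata of a received packet and hexdump its first 64 data bytes.
 *
 *	rte_pktmbuf_dump(stdout, m, 64);
 */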

/* read len data bytes in a mbuf at specified offset (internal) */
const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
	uint32_t len, void *buf)
{
	const struct rte_mbuf *seg = m;
	uint32_t buf_off = 0, copy_len;

	if (off + len > rte_pktmbuf_pkt_len(m))
		return NULL;

	while (off >= rte_pktmbuf_data_len(seg)) {
		off -= rte_pktmbuf_data_len(seg);
		seg = seg->next;
	}

	if (off + len <= rte_pktmbuf_data_len(seg))
		return rte_pktmbuf_mtod_offset(seg, char *, off);

	/* rare case: header is split among several segments */
	while (len > 0) {
		copy_len = rte_pktmbuf_data_len(seg) - off;
		if (copy_len > len)
			copy_len = len;
		rte_memcpy((char *)buf + buf_off,
			rte_pktmbuf_mtod_offset(seg, char *, off), copy_len);
		off = 0;
		buf_off += copy_len;
		len -= copy_len;
		seg = seg->next;
	}

	return buf;
}
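
/*
 * Usage sketch (illustrative, not from the original sources): applications
 * call the rte_pktmbuf_read() inline wrapper rather than this internal
 * helper. The wrapper returns a pointer into the mbuf when the requested
 * range is contiguous and only copies into the supplied buffer when the
 * range spans segments:
 *
 *	struct ether_hdr hdr_copy;
 *	const struct ether_hdr *eth;
 *
 *	eth = rte_pktmbuf_read(m, 0, sizeof(*eth), &hdr_copy);
 *
 * A NULL return means the packet is shorter than the requested range.
 */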

/*
 * Get the name of an RX offload flag. Must be kept synchronized with flag
 * definitions in rte_mbuf.h.
 */
const char *rte_get_rx_ol_flag_name(uint64_t mask)
{
	switch (mask) {
	case PKT_RX_VLAN: return "PKT_RX_VLAN";
	case PKT_RX_RSS_HASH: return "PKT_RX_RSS_HASH";
	case PKT_RX_FDIR: return "PKT_RX_FDIR";
	case PKT_RX_L4_CKSUM_BAD: return "PKT_RX_L4_CKSUM_BAD";
	case PKT_RX_L4_CKSUM_GOOD: return "PKT_RX_L4_CKSUM_GOOD";
	case PKT_RX_L4_CKSUM_NONE: return "PKT_RX_L4_CKSUM_NONE";
	case PKT_RX_IP_CKSUM_BAD: return "PKT_RX_IP_CKSUM_BAD";
	case PKT_RX_IP_CKSUM_GOOD: return "PKT_RX_IP_CKSUM_GOOD";
	case PKT_RX_IP_CKSUM_NONE: return "PKT_RX_IP_CKSUM_NONE";
	case PKT_RX_EIP_CKSUM_BAD: return "PKT_RX_EIP_CKSUM_BAD";
	case PKT_RX_VLAN_STRIPPED: return "PKT_RX_VLAN_STRIPPED";
	case PKT_RX_IEEE1588_PTP: return "PKT_RX_IEEE1588_PTP";
	case PKT_RX_IEEE1588_TMST: return "PKT_RX_IEEE1588_TMST";
	case PKT_RX_QINQ_STRIPPED: return "PKT_RX_QINQ_STRIPPED";
	case PKT_RX_LRO: return "PKT_RX_LRO";
	case PKT_RX_TIMESTAMP: return "PKT_RX_TIMESTAMP";
	case PKT_RX_SEC_OFFLOAD: return "PKT_RX_SEC_OFFLOAD";
	case PKT_RX_SEC_OFFLOAD_FAILED: return "PKT_RX_SEC_OFFLOAD_FAILED";
	default: return NULL;
	}
}

struct flag_mask {
	uint64_t flag;
	uint64_t mask;
	const char *default_name;
};

/* write the list of rx ol flags in buffer buf */
int
rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
{
	const struct flag_mask rx_flags[] = {
		{ PKT_RX_VLAN, PKT_RX_VLAN, NULL },
		{ PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, NULL },
		{ PKT_RX_FDIR, PKT_RX_FDIR, NULL },
		{ PKT_RX_L4_CKSUM_BAD, PKT_RX_L4_CKSUM_MASK, NULL },
		{ PKT_RX_L4_CKSUM_GOOD, PKT_RX_L4_CKSUM_MASK, NULL },
		{ PKT_RX_L4_CKSUM_NONE, PKT_RX_L4_CKSUM_MASK, NULL },
		{ PKT_RX_L4_CKSUM_UNKNOWN, PKT_RX_L4_CKSUM_MASK,
		  "PKT_RX_L4_CKSUM_UNKNOWN" },
		{ PKT_RX_IP_CKSUM_BAD, PKT_RX_IP_CKSUM_MASK, NULL },
		{ PKT_RX_IP_CKSUM_GOOD, PKT_RX_IP_CKSUM_MASK, NULL },
		{ PKT_RX_IP_CKSUM_NONE, PKT_RX_IP_CKSUM_MASK, NULL },
		{ PKT_RX_IP_CKSUM_UNKNOWN, PKT_RX_IP_CKSUM_MASK,
		  "PKT_RX_IP_CKSUM_UNKNOWN" },
		{ PKT_RX_EIP_CKSUM_BAD, PKT_RX_EIP_CKSUM_BAD, NULL },
		{ PKT_RX_VLAN_STRIPPED, PKT_RX_VLAN_STRIPPED, NULL },
		{ PKT_RX_IEEE1588_PTP, PKT_RX_IEEE1588_PTP, NULL },
		{ PKT_RX_IEEE1588_TMST, PKT_RX_IEEE1588_TMST, NULL },
		{ PKT_RX_QINQ_STRIPPED, PKT_RX_QINQ_STRIPPED, NULL },
		{ PKT_RX_LRO, PKT_RX_LRO, NULL },
		{ PKT_RX_TIMESTAMP, PKT_RX_TIMESTAMP, NULL },
		{ PKT_RX_SEC_OFFLOAD, PKT_RX_SEC_OFFLOAD, NULL },
		{ PKT_RX_SEC_OFFLOAD_FAILED, PKT_RX_SEC_OFFLOAD_FAILED, NULL },
		{ PKT_RX_QINQ, PKT_RX_QINQ, NULL },
	};
	const char *name;
	unsigned int i;
	int ret;

	if (buflen == 0)
		return -1;

	buf[0] = '\0';
	for (i = 0; i < RTE_DIM(rx_flags); i++) {
		if ((mask & rx_flags[i].mask) != rx_flags[i].flag)
			continue;
		name = rte_get_rx_ol_flag_name(rx_flags[i].flag);
		if (name == NULL)
			name = rx_flags[i].default_name;
		ret = snprintf(buf, buflen, "%s ", name);
		if (ret < 0)
			return -1;
		if ((size_t)ret >= buflen)
			return -1;
		buf += ret;
		buflen -= ret;
	}

	return 0;
}
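
/*
 * Usage sketch (illustrative, not from the original sources): decode the
 * offload flags of a received mbuf for logging. The TX counterpart,
 * rte_get_tx_ol_flag_list() below, is used the same way.
 *
 *	char flags_str[256];
 *
 *	if (rte_get_rx_ol_flag_list(m->ol_flags, flags_str,
 *			sizeof(flags_str)) == 0)
 *		printf("rx ol_flags: %s\n", flags_str);
 */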

/*
 * Get the name of a TX offload flag. Must be kept synchronized with flag
 * definitions in rte_mbuf.h.
 */
const char *rte_get_tx_ol_flag_name(uint64_t mask)
{
	switch (mask) {
	case PKT_TX_VLAN_PKT: return "PKT_TX_VLAN_PKT";
	case PKT_TX_IP_CKSUM: return "PKT_TX_IP_CKSUM";
	case PKT_TX_TCP_CKSUM: return "PKT_TX_TCP_CKSUM";
	case PKT_TX_SCTP_CKSUM: return "PKT_TX_SCTP_CKSUM";
	case PKT_TX_UDP_CKSUM: return "PKT_TX_UDP_CKSUM";
	case PKT_TX_IEEE1588_TMST: return "PKT_TX_IEEE1588_TMST";
	case PKT_TX_TCP_SEG: return "PKT_TX_TCP_SEG";
	case PKT_TX_IPV4: return "PKT_TX_IPV4";
	case PKT_TX_IPV6: return "PKT_TX_IPV6";
	case PKT_TX_OUTER_IP_CKSUM: return "PKT_TX_OUTER_IP_CKSUM";
	case PKT_TX_OUTER_IPV4: return "PKT_TX_OUTER_IPV4";
	case PKT_TX_OUTER_IPV6: return "PKT_TX_OUTER_IPV6";
	case PKT_TX_TUNNEL_VXLAN: return "PKT_TX_TUNNEL_VXLAN";
	case PKT_TX_TUNNEL_GRE: return "PKT_TX_TUNNEL_GRE";
	case PKT_TX_TUNNEL_IPIP: return "PKT_TX_TUNNEL_IPIP";
	case PKT_TX_TUNNEL_GENEVE: return "PKT_TX_TUNNEL_GENEVE";
	case PKT_TX_TUNNEL_MPLSINUDP: return "PKT_TX_TUNNEL_MPLSINUDP";
	case PKT_TX_TUNNEL_VXLAN_GPE: return "PKT_TX_TUNNEL_VXLAN_GPE";
	case PKT_TX_TUNNEL_IP: return "PKT_TX_TUNNEL_IP";
	case PKT_TX_TUNNEL_UDP: return "PKT_TX_TUNNEL_UDP";
	case PKT_TX_MACSEC: return "PKT_TX_MACSEC";
	case PKT_TX_SEC_OFFLOAD: return "PKT_TX_SEC_OFFLOAD";
	default: return NULL;
	}
}

/* write the list of tx ol flags in buffer buf */
int
rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
{
	const struct flag_mask tx_flags[] = {
		{ PKT_TX_VLAN_PKT, PKT_TX_VLAN_PKT, NULL },
		{ PKT_TX_IP_CKSUM, PKT_TX_IP_CKSUM, NULL },
		{ PKT_TX_TCP_CKSUM, PKT_TX_L4_MASK, NULL },
		{ PKT_TX_SCTP_CKSUM, PKT_TX_L4_MASK, NULL },
		{ PKT_TX_UDP_CKSUM, PKT_TX_L4_MASK, NULL },
		{ PKT_TX_L4_NO_CKSUM, PKT_TX_L4_MASK, "PKT_TX_L4_NO_CKSUM" },
		{ PKT_TX_IEEE1588_TMST, PKT_TX_IEEE1588_TMST, NULL },
		{ PKT_TX_TCP_SEG, PKT_TX_TCP_SEG, NULL },
		{ PKT_TX_IPV4, PKT_TX_IPV4, NULL },
		{ PKT_TX_IPV6, PKT_TX_IPV6, NULL },
		{ PKT_TX_OUTER_IP_CKSUM, PKT_TX_OUTER_IP_CKSUM, NULL },
		{ PKT_TX_OUTER_IPV4, PKT_TX_OUTER_IPV4, NULL },
		{ PKT_TX_OUTER_IPV6, PKT_TX_OUTER_IPV6, NULL },
		{ PKT_TX_TUNNEL_VXLAN, PKT_TX_TUNNEL_MASK,
		  "PKT_TX_TUNNEL_NONE" },
		{ PKT_TX_TUNNEL_GRE, PKT_TX_TUNNEL_MASK,
		  "PKT_TX_TUNNEL_NONE" },
		{ PKT_TX_TUNNEL_IPIP, PKT_TX_TUNNEL_MASK,
		  "PKT_TX_TUNNEL_NONE" },
		{ PKT_TX_TUNNEL_GENEVE, PKT_TX_TUNNEL_MASK,
		  "PKT_TX_TUNNEL_NONE" },
		{ PKT_TX_TUNNEL_MPLSINUDP, PKT_TX_TUNNEL_MASK,
		  "PKT_TX_TUNNEL_NONE" },
		{ PKT_TX_TUNNEL_VXLAN_GPE, PKT_TX_TUNNEL_MASK,
		  "PKT_TX_TUNNEL_NONE" },
		{ PKT_TX_TUNNEL_IP, PKT_TX_TUNNEL_MASK,
		  "PKT_TX_TUNNEL_NONE" },
		{ PKT_TX_TUNNEL_UDP, PKT_TX_TUNNEL_MASK,
		  "PKT_TX_TUNNEL_NONE" },
		{ PKT_TX_MACSEC, PKT_TX_MACSEC, NULL },
		{ PKT_TX_SEC_OFFLOAD, PKT_TX_SEC_OFFLOAD, NULL },
	};
	const char *name;
	unsigned int i;
	int ret;

	if (buflen == 0)
		return -1;

	buf[0] = '\0';
	for (i = 0; i < RTE_DIM(tx_flags); i++) {
		if ((mask & tx_flags[i].mask) != tx_flags[i].flag)
			continue;
		name = rte_get_tx_ol_flag_name(tx_flags[i].flag);
		if (name == NULL)
			name = tx_flags[i].default_name;
		ret = snprintf(buf, buflen, "%s ", name);
		if (ret < 0)
			return -1;
		if ((size_t)ret >= buflen)
			return -1;
		buf += ret;
		buflen -= ret;
	}

	return 0;
}