diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md index c1f69cdd3d..bf6edb0276 100644 --- a/doc/api/doxy-api-index.md +++ b/doc/api/doxy-api-index.md @@ -45,6 +45,7 @@ The public API headers are grouped by topics: [ixgbe] (@ref rte_pmd_ixgbe.h), [i40e] (@ref rte_pmd_i40e.h), [ice] (@ref rte_pmd_ice.h), + [iavf] (@ref rte_pmd_iavf.h), [ioat] (@ref rte_ioat_rawdev.h), [bnxt] (@ref rte_pmd_bnxt.h), [dpaa] (@ref rte_pmd_dpaa.h), diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in index aba187b547..5c883b613b 100644 --- a/doc/api/doxy-api.conf.in +++ b/doc/api/doxy-api.conf.in @@ -16,6 +16,7 @@ INPUT = @TOPDIR@/doc/api/doxy-api-index.md \ @TOPDIR@/drivers/net/dpaa \ @TOPDIR@/drivers/net/dpaa2 \ @TOPDIR@/drivers/net/i40e \ + @TOPDIR@/drivers/net/iavf \ @TOPDIR@/drivers/net/ice \ @TOPDIR@/drivers/net/ixgbe \ @TOPDIR@/drivers/net/mlx5 \ diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst index 723a9c0fa2..529ff4a955 100644 --- a/doc/guides/nics/intel_vf.rst +++ b/doc/guides/nics/intel_vf.rst @@ -88,6 +88,10 @@ For more detail on SR-IOV, please refer to the following documents: assignment in hypervisor. Take qemu for example, the device assignment should carry the IAVF device id (0x1889) like ``-device vfio-pci,x-pci-device-id=0x1889,host=03:0a.0``. + When IAVF is backed by an Intel® E810 device, the "Protocol Extraction" feature supported by the ice PMD is also + available for the IAVF PMD. The same devargs with the same parameters can be applied to the IAVF PMD; for details, please + refer to the section ``Protocol extraction for per queue`` of ice.rst. + The PCIE host-interface of Intel Ethernet Switch FM10000 Series VF infrastructure ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/doc/guides/rel_notes/release_20_11.rst b/doc/guides/rel_notes/release_20_11.rst index b5cfcb78d1..35f37c45e8 100644 --- a/doc/guides/rel_notes/release_20_11.rst +++ b/doc/guides/rel_notes/release_20_11.rst @@ -174,6 +174,9 @@ New Features * **Updated Intel iavf driver.** + Updated iavf PMD with new features and improvements, including: + + * Added support for flexible descriptor metadata extraction. * Added support of AVX512 instructions in Rx and Tx path. * **Updated Intel ice driver.** diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h index 3d3b0da5dd..6d5912d8c1 100644 --- a/drivers/net/iavf/iavf.h +++ b/drivers/net/iavf/iavf.h @@ -133,7 +133,7 @@ struct iavf_info { struct virtchnl_vf_resource *vf_res; /* VF resource */ struct virtchnl_vsi_resource *vsi_res; /* LAN VSI */ uint64_t supported_rxdid; - + uint8_t *proto_xtr; /* proto xtr type for all queues */ volatile enum virtchnl_ops pend_cmd; /* pending command not finished */ uint32_t cmd_retval; /* return value of the cmd response from PF */ uint8_t *aq_resp; /* buffer to store the adminq response from PF */ @@ -169,6 +169,27 @@ struct iavf_info { #define IAVF_MAX_PKT_TYPE 1024 +#define IAVF_MAX_QUEUE_NUM 2048 + +enum iavf_proto_xtr_type { + IAVF_PROTO_XTR_NONE, + IAVF_PROTO_XTR_VLAN, + IAVF_PROTO_XTR_IPV4, + IAVF_PROTO_XTR_IPV6, + IAVF_PROTO_XTR_IPV6_FLOW, + IAVF_PROTO_XTR_TCP, + IAVF_PROTO_XTR_IP_OFFSET, + IAVF_PROTO_XTR_MAX, +}; + +/** + * Cache devargs parse result. + */ +struct iavf_devargs { + uint8_t proto_xtr_dflt; + uint8_t proto_xtr[IAVF_MAX_QUEUE_NUM]; +}; + /* Structure to store private data for each VF instance.
*/ struct iavf_adapter { struct iavf_hw hw; @@ -182,6 +203,7 @@ struct iavf_adapter { const uint32_t *ptype_tbl; bool stopped; uint16_t fdir_ref_cnt; + struct iavf_devargs devargs; }; /* IAVF_DEV_PRIVATE_TO */ diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c index 9eea8bf90c..7e3c26a94e 100644 --- a/drivers/net/iavf/iavf_ethdev.c +++ b/drivers/net/iavf/iavf_ethdev.c @@ -28,6 +28,49 @@ #include "iavf.h" #include "iavf_rxtx.h" #include "iavf_generic_flow.h" +#include "rte_pmd_iavf.h" + +/* devargs */ +#define IAVF_PROTO_XTR_ARG "proto_xtr" + +static const char * const iavf_valid_args[] = { + IAVF_PROTO_XTR_ARG, + NULL +}; + +static const struct rte_mbuf_dynfield iavf_proto_xtr_metadata_param = { + .name = "intel_pmd_dynfield_proto_xtr_metadata", + .size = sizeof(uint32_t), + .align = __alignof__(uint32_t), + .flags = 0, +}; + +struct iavf_proto_xtr_ol { + const struct rte_mbuf_dynflag param; + uint64_t *ol_flag; + bool required; +}; + +static struct iavf_proto_xtr_ol iavf_proto_xtr_params[] = { + [IAVF_PROTO_XTR_VLAN] = { + .param = { .name = "intel_pmd_dynflag_proto_xtr_vlan" }, + .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_vlan_mask }, + [IAVF_PROTO_XTR_IPV4] = { + .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv4" }, + .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask }, + [IAVF_PROTO_XTR_IPV6] = { + .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6" }, + .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask }, + [IAVF_PROTO_XTR_IPV6_FLOW] = { + .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6_flow" }, + .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask }, + [IAVF_PROTO_XTR_TCP] = { + .param = { .name = "intel_pmd_dynflag_proto_xtr_tcp" }, + .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_tcp_mask }, + [IAVF_PROTO_XTR_IP_OFFSET] = { + .param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" }, + .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask }, +}; static int iavf_dev_configure(struct rte_eth_dev *dev); static int iavf_dev_start(struct rte_eth_dev *dev); @@ -1394,6 +1437,349 @@ iavf_check_vf_reset_done(struct iavf_hw *hw) return 0; } +static int +iavf_lookup_proto_xtr_type(const char *flex_name) +{ + static struct { + const char *name; + enum iavf_proto_xtr_type type; + } xtr_type_map[] = { + { "vlan", IAVF_PROTO_XTR_VLAN }, + { "ipv4", IAVF_PROTO_XTR_IPV4 }, + { "ipv6", IAVF_PROTO_XTR_IPV6 }, + { "ipv6_flow", IAVF_PROTO_XTR_IPV6_FLOW }, + { "tcp", IAVF_PROTO_XTR_TCP }, + { "ip_offset", IAVF_PROTO_XTR_IP_OFFSET }, + }; + uint32_t i; + + for (i = 0; i < RTE_DIM(xtr_type_map); i++) { + if (strcmp(flex_name, xtr_type_map[i].name) == 0) + return xtr_type_map[i].type; + } + + PMD_DRV_LOG(ERR, "invalid proto_xtr type, " + "it should be: vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset"); + + return -1; +} + +/** + * Parse an element; it can be a single number, a range, or a '(' ')' group: + * 1) A single number element is just a digit, e.g. 9 + * 2) A single range element is two digits joined by a '-', e.g. 2-6 + * 3) A group element combines multiple 1) or 2) with '( )', e.g. (0,2-4,6) + * Within a group element, '-' is used as a range separator and + * ',' separates single numbers.
+ */ +static int +iavf_parse_queue_set(const char *input, int xtr_type, + struct iavf_devargs *devargs) +{ + const char *str = input; + char *end = NULL; + uint32_t min, max; + uint32_t idx; + + while (isblank(*str)) + str++; + + if (!isdigit(*str) && *str != '(') + return -1; + + /* process a single number or a single range of numbers */ + if (*str != '(') { + errno = 0; + idx = strtoul(str, &end, 10); + if (errno || !end || idx >= IAVF_MAX_QUEUE_NUM) + return -1; + + while (isblank(*end)) + end++; + + min = idx; + max = idx; + + /* process a single '-' */ + if (*end == '-') { + end++; + while (isblank(*end)) + end++; + if (!isdigit(*end)) + return -1; + + errno = 0; + idx = strtoul(end, &end, 10); + if (errno || !end || idx >= IAVF_MAX_QUEUE_NUM) + return -1; + + max = idx; + while (isblank(*end)) + end++; + } + + if (*end != ':') + return -1; + + for (idx = RTE_MIN(min, max); + idx <= RTE_MAX(min, max); idx++) + devargs->proto_xtr[idx] = xtr_type; + + return 0; + } + + /* process a set within brackets */ + str++; + while (isblank(*str)) + str++; + if (*str == '\0') + return -1; + + min = IAVF_MAX_QUEUE_NUM; + do { + /* advance to the first digit */ + while (isblank(*str)) + str++; + if (!isdigit(*str)) + return -1; + + /* get the digit value */ + errno = 0; + idx = strtoul(str, &end, 10); + if (errno || !end || idx >= IAVF_MAX_QUEUE_NUM) + return -1; + + /* advance to a separator '-', ',' or ')' */ + while (isblank(*end)) + end++; + if (*end == '-') { + if (min == IAVF_MAX_QUEUE_NUM) + min = idx; + else /* reject consecutive '-' */ + return -1; + } else if (*end == ',' || *end == ')') { + max = idx; + if (min == IAVF_MAX_QUEUE_NUM) + min = idx; + + for (idx = RTE_MIN(min, max); + idx <= RTE_MAX(min, max); idx++) + devargs->proto_xtr[idx] = xtr_type; + + min = IAVF_MAX_QUEUE_NUM; + } else { + return -1; + } + + str = end + 1; + } while (*end != ')' && *end != '\0'); + + return 0; +} + +static int +iavf_parse_queue_proto_xtr(const char *queues, struct iavf_devargs *devargs) +{ + const char *queue_start; + uint32_t idx; + int xtr_type; + char flex_name[32]; + + while (isblank(*queues)) + queues++; + + if (*queues != '[') { + xtr_type = iavf_lookup_proto_xtr_type(queues); + if (xtr_type < 0) + return -1; + + devargs->proto_xtr_dflt = xtr_type; + + return 0; + } + + queues++; + do { + while (isblank(*queues)) + queues++; + if (*queues == '\0') + return -1; + + queue_start = queues; + + /* skip over a complete bracket group */ + if (*queue_start == '(') { + queues += strcspn(queues, ")"); + if (*queues != ')') + return -1; + } + + /* scan the separator ':' */ + queues += strcspn(queues, ":"); + if (*queues++ != ':') + return -1; + while (isblank(*queues)) + queues++; + + for (idx = 0; ; idx++) { + if (isblank(queues[idx]) || + queues[idx] == ',' || + queues[idx] == ']' || + queues[idx] == '\0') + break; + + if (idx > sizeof(flex_name) - 2) + return -1; + + flex_name[idx] = queues[idx]; + } + flex_name[idx] = '\0'; + xtr_type = iavf_lookup_proto_xtr_type(flex_name); + if (xtr_type < 0) + return -1; + + queues += idx; + + while (isblank(*queues) || *queues == ',' || *queues == ']') + queues++; + + if (iavf_parse_queue_set(queue_start, xtr_type, devargs) < 0) + return -1; + } while (*queues != '\0'); + + return 0; +} + +static int +iavf_handle_proto_xtr_arg(__rte_unused const char *key, const char *value, + void *extra_args) +{ + struct iavf_devargs *devargs = extra_args; + + if (!value || !extra_args) + return -EINVAL; + + if (iavf_parse_queue_proto_xtr(value, devargs) < 0) { + PMD_DRV_LOG(ERR, "invalid proto_xtr
parameter: '%s'", + value); + return -1; + } + + return 0; +} + +static int iavf_parse_devargs(struct rte_eth_dev *dev) +{ + struct iavf_adapter *ad = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct rte_devargs *devargs = dev->device->devargs; + struct rte_kvargs *kvlist; + int ret; + + if (!devargs) + return 0; + + kvlist = rte_kvargs_parse(devargs->args, iavf_valid_args); + if (!kvlist) { + PMD_INIT_LOG(ERR, "invalid kvargs key"); + return -EINVAL; + } + + ad->devargs.proto_xtr_dflt = IAVF_PROTO_XTR_NONE; + memset(ad->devargs.proto_xtr, IAVF_PROTO_XTR_NONE, + sizeof(ad->devargs.proto_xtr)); + + ret = rte_kvargs_process(kvlist, IAVF_PROTO_XTR_ARG, + &iavf_handle_proto_xtr_arg, &ad->devargs); + if (ret) + goto bail; + +bail: + rte_kvargs_free(kvlist); + return ret; +} + +static void +iavf_init_proto_xtr(struct rte_eth_dev *dev) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct iavf_adapter *ad = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + const struct iavf_proto_xtr_ol *xtr_ol; + bool proto_xtr_enable = false; + int offset; + uint16_t i; + + vf->proto_xtr = rte_zmalloc("vf proto xtr", + vf->vsi_res->num_queue_pairs, 0); + if (unlikely(!(vf->proto_xtr))) { + PMD_DRV_LOG(ERR, "no memory for setting up the proto_xtr table"); + return; + } + + for (i = 0; i < vf->vsi_res->num_queue_pairs; i++) { + vf->proto_xtr[i] = ad->devargs.proto_xtr[i] != + IAVF_PROTO_XTR_NONE ? + ad->devargs.proto_xtr[i] : + ad->devargs.proto_xtr_dflt; + + if (vf->proto_xtr[i] != IAVF_PROTO_XTR_NONE) { + uint8_t type = vf->proto_xtr[i]; + + iavf_proto_xtr_params[type].required = true; + proto_xtr_enable = true; + } + } + + if (likely(!proto_xtr_enable)) + return; + + offset = rte_mbuf_dynfield_register(&iavf_proto_xtr_metadata_param); + if (unlikely(offset == -1)) { + PMD_DRV_LOG(ERR, + "failed to register proto_xtr metadata dynfield, error %d", + -rte_errno); + return; + } + + PMD_DRV_LOG(DEBUG, + "proto_xtr metadata offset in mbuf: %d", + offset); + rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = offset; + + for (i = 0; i < RTE_DIM(iavf_proto_xtr_params); i++) { + xtr_ol = &iavf_proto_xtr_params[i]; + + uint8_t rxdid = iavf_proto_xtr_type_to_rxdid((uint8_t)i); + + if (!xtr_ol->required) + continue; + + if (!(vf->supported_rxdid & BIT(rxdid))) { + PMD_DRV_LOG(ERR, + "rxdid[%u] is not supported in hardware", + rxdid); + rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1; + break; + } + + offset = rte_mbuf_dynflag_register(&xtr_ol->param); + if (unlikely(offset == -1)) { + PMD_DRV_LOG(ERR, + "failed to register proto_xtr offload '%s', error %d", + xtr_ol->param.name, -rte_errno); + + rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1; + break; + } + + PMD_DRV_LOG(DEBUG, + "proto_xtr offload '%s' offset in mbuf: %d", + xtr_ol->param.name, offset); + *xtr_ol->ol_flag = 1ULL << offset; + } +} + static int iavf_init_vf(struct rte_eth_dev *dev) { @@ -1403,6 +1789,12 @@ iavf_init_vf(struct rte_eth_dev *dev) struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + err = iavf_parse_devargs(dev); + if (err) { + PMD_INIT_LOG(ERR, "Failed to parse devargs"); + goto err; + } + err = iavf_set_mac_type(hw); if (err) { PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err); @@ -1466,6 +1858,8 @@ iavf_init_vf(struct rte_eth_dev *dev) } } + iavf_init_proto_xtr(dev); + return 0; err_rss: rte_free(vf->rss_key); diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c index
160d81b761..baac5d65c8 100644 --- a/drivers/net/iavf/iavf_rxtx.c +++ b/drivers/net/iavf/iavf_rxtx.c @@ -27,6 +27,35 @@ #include "iavf.h" #include "iavf_rxtx.h" +#include "rte_pmd_iavf.h" + +/* Offset of mbuf dynamic field for protocol extraction's metadata */ +int rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1; + +/* Mask of mbuf dynamic flags for protocol extraction's type */ +uint64_t rte_pmd_ifd_dynflag_proto_xtr_vlan_mask; +uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask; +uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask; +uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask; +uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask; +uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask; + +uint8_t +iavf_proto_xtr_type_to_rxdid(uint8_t flex_type) +{ + static uint8_t rxdid_map[] = { + [IAVF_PROTO_XTR_NONE] = IAVF_RXDID_COMMS_OVS_1, + [IAVF_PROTO_XTR_VLAN] = IAVF_RXDID_COMMS_AUX_VLAN, + [IAVF_PROTO_XTR_IPV4] = IAVF_RXDID_COMMS_AUX_IPV4, + [IAVF_PROTO_XTR_IPV6] = IAVF_RXDID_COMMS_AUX_IPV6, + [IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW, + [IAVF_PROTO_XTR_TCP] = IAVF_RXDID_COMMS_AUX_TCP, + [IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET, + }; + + return flex_type < RTE_DIM(rxdid_map) ? + rxdid_map[flex_type] : IAVF_RXDID_COMMS_OVS_1; +} static inline int check_rx_thresh(uint16_t nb_desc, uint16_t thresh) @@ -295,6 +324,160 @@ static const struct iavf_txq_ops def_txq_ops = { .release_mbufs = release_txq_mbufs, }; +static inline void +iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue *rxq, + struct rte_mbuf *mb, + volatile union iavf_rx_flex_desc *rxdp) +{ + volatile struct iavf_32b_rx_flex_desc_comms_ovs *desc = + (volatile struct iavf_32b_rx_flex_desc_comms_ovs *)rxdp; +#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC + uint16_t stat_err; +#endif + + if (desc->flow_id != 0xFFFFFFFF) { + mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID; + mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id); + } + +#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC + stat_err = rte_le_to_cpu_16(desc->status_error0); + if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) { + mb->ol_flags |= PKT_RX_RSS_HASH; + mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash); + } +#endif +} + +static inline void +iavf_rxd_to_pkt_fields_by_comms_aux_v1(struct iavf_rx_queue *rxq, + struct rte_mbuf *mb, + volatile union iavf_rx_flex_desc *rxdp) +{ + volatile struct iavf_32b_rx_flex_desc_comms *desc = + (volatile struct iavf_32b_rx_flex_desc_comms *)rxdp; + uint16_t stat_err; + + stat_err = rte_le_to_cpu_16(desc->status_error0); + if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) { + mb->ol_flags |= PKT_RX_RSS_HASH; + mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash); + } + +#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC + if (desc->flow_id != 0xFFFFFFFF) { + mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID; + mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id); + } + + if (rxq->xtr_ol_flag) { + uint32_t metadata = 0; + + stat_err = rte_le_to_cpu_16(desc->status_error1); + + if (stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S)) + metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0); + + if (stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S)) + metadata |= + rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16; + + if (metadata) { + mb->ol_flags |= rxq->xtr_ol_flag; + + *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata; + } + } +#endif +} + +static inline void +iavf_rxd_to_pkt_fields_by_comms_aux_v2(struct iavf_rx_queue *rxq, + struct rte_mbuf *mb, + volatile union 
iavf_rx_flex_desc *rxdp) +{ + volatile struct iavf_32b_rx_flex_desc_comms *desc = + (volatile struct iavf_32b_rx_flex_desc_comms *)rxdp; + uint16_t stat_err; + + stat_err = rte_le_to_cpu_16(desc->status_error0); + if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) { + mb->ol_flags |= PKT_RX_RSS_HASH; + mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash); + } + +#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC + if (desc->flow_id != 0xFFFFFFFF) { + mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID; + mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id); + } + + if (rxq->xtr_ol_flag) { + uint32_t metadata = 0; + + if (desc->flex_ts.flex.aux0 != 0xFFFF) + metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0); + else if (desc->flex_ts.flex.aux1 != 0xFFFF) + metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1); + + if (metadata) { + mb->ol_flags |= rxq->xtr_ol_flag; + + *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata; + } + } +#endif +} + +static void +iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid) +{ + switch (rxdid) { + case IAVF_RXDID_COMMS_AUX_VLAN: + rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_vlan_mask; + rxq->rxd_to_pkt_fields = + iavf_rxd_to_pkt_fields_by_comms_aux_v1; + break; + case IAVF_RXDID_COMMS_AUX_IPV4: + rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask; + rxq->rxd_to_pkt_fields = + iavf_rxd_to_pkt_fields_by_comms_aux_v1; + break; + case IAVF_RXDID_COMMS_AUX_IPV6: + rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask; + rxq->rxd_to_pkt_fields = + iavf_rxd_to_pkt_fields_by_comms_aux_v1; + break; + case IAVF_RXDID_COMMS_AUX_IPV6_FLOW: + rxq->xtr_ol_flag = + rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask; + rxq->rxd_to_pkt_fields = + iavf_rxd_to_pkt_fields_by_comms_aux_v1; + break; + case IAVF_RXDID_COMMS_AUX_TCP: + rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_tcp_mask; + rxq->rxd_to_pkt_fields = + iavf_rxd_to_pkt_fields_by_comms_aux_v1; + break; + case IAVF_RXDID_COMMS_AUX_IP_OFFSET: + rxq->xtr_ol_flag = + rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask; + rxq->rxd_to_pkt_fields = + iavf_rxd_to_pkt_fields_by_comms_aux_v2; + break; + case IAVF_RXDID_COMMS_OVS_1: + rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs; + break; + default: + /* update this according to the RXDID for FLEX_DESC_NONE */ + rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs; + break; + } + + if (!rte_pmd_ifd_dynf_proto_xtr_metadata_avail()) + rxq->xtr_ol_flag = 0; +} + int iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t nb_desc, unsigned int socket_id, @@ -310,6 +493,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, struct iavf_rx_queue *rxq; const struct rte_memzone *mz; uint32_t ring_size; + uint8_t proto_xtr; uint16_t len; uint16_t rx_free_thresh; @@ -347,14 +531,18 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, return -ENOMEM; } - if (vf->vf_res->vf_cap_flags & - VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC && - vf->supported_rxdid & BIT(IAVF_RXDID_COMMS_OVS_1)) { - rxq->rxdid = IAVF_RXDID_COMMS_OVS_1; + if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) { + proto_xtr = vf->proto_xtr ? 
vf->proto_xtr[queue_idx] : + IAVF_PROTO_XTR_NONE; + rxq->rxdid = iavf_proto_xtr_type_to_rxdid(proto_xtr); + rxq->proto_xtr = proto_xtr; } else { rxq->rxdid = IAVF_RXDID_LEGACY_1; + rxq->proto_xtr = IAVF_PROTO_XTR_NONE; } + iavf_select_rxd_to_pkt_fields_handler(rxq, rxq->rxdid); + rxq->mp = mp; rxq->nb_rx_desc = nb_desc; rxq->rx_free_thresh = rx_free_thresh; @@ -735,6 +923,14 @@ iavf_stop_queues(struct rte_eth_dev *dev) } } +#define IAVF_RX_FLEX_ERR0_BITS \ + ((1 << IAVF_RX_FLEX_DESC_STATUS0_HBO_S) | \ + (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \ + (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \ + (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \ + (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \ + (1 << IAVF_RX_FLEX_DESC_STATUS0_RXE_S)) + static inline void iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp) { @@ -760,6 +956,21 @@ iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb, } else { mb->vlan_tci = 0; } + +#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC + if (rte_le_to_cpu_16(rxdp->wb.status_error1) & + (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) { + mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ | + PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN; + mb->vlan_tci_outer = mb->vlan_tci; + mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd); + PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u", + rte_le_to_cpu_16(rxdp->wb.l2tag2_1st), + rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd)); + } else { + mb->vlan_tci_outer = 0; + } +#endif } /* Translate the rx descriptor status and error fields to pkt flags */ @@ -824,30 +1035,6 @@ iavf_rxd_build_fdir(volatile union iavf_rx_desc *rxdp, struct rte_mbuf *mb) return flags; } - -/* Translate the rx flex descriptor status to pkt flags */ -static inline void -iavf_rxd_to_pkt_fields(struct rte_mbuf *mb, - volatile union iavf_rx_flex_desc *rxdp) -{ - volatile struct iavf_32b_rx_flex_desc_comms_ovs *desc = - (volatile struct iavf_32b_rx_flex_desc_comms_ovs *)rxdp; -#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC - uint16_t stat_err; - - stat_err = rte_le_to_cpu_16(desc->status_error0); - if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) { - mb->ol_flags |= PKT_RX_RSS_HASH; - mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash); - } -#endif - - if (desc->flow_id != 0xFFFFFFFF) { - mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID; - mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id); - } -} - #define IAVF_RX_FLEX_ERR0_BITS \ ((1 << IAVF_RX_FLEX_DESC_STATUS0_HBO_S) | \ (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \ @@ -1102,7 +1289,7 @@ iavf_recv_pkts_flex_rxd(void *rx_queue, rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M & rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)]; iavf_flex_rxd_to_vlan_tci(rxm, &rxd); - iavf_rxd_to_pkt_fields(rxm, &rxd); + rxq->rxd_to_pkt_fields(rxq, rxm, &rxd); pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0); rxm->ol_flags |= pkt_flags; @@ -1243,7 +1430,7 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts, first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M & rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)]; iavf_flex_rxd_to_vlan_tci(first_seg, &rxd); - iavf_rxd_to_pkt_fields(first_seg, &rxd); + rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd); pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0); first_seg->ol_flags |= pkt_flags; @@ -1480,7 +1667,7 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq) mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M & rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)]; iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]); - 
iavf_rxd_to_pkt_fields(mb, &rxdp[j]); + rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]); stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0); pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0); @@ -1672,7 +1859,7 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) if (rxq->rx_nb_avail) return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts); - if (rxq->rxdid == IAVF_RXDID_COMMS_OVS_1) + if (rxq->rxdid >= IAVF_RXDID_FLEX_NIC && rxq->rxdid <= IAVF_RXDID_LAST) nb_rx = (uint16_t)iavf_rx_scan_hw_ring_flex_rxd(rxq); else nb_rx = (uint16_t)iavf_rx_scan_hw_ring(rxq); @@ -2119,6 +2306,7 @@ iavf_set_rx_function(struct rte_eth_dev *dev) struct iavf_adapter *adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + #ifdef RTE_ARCH_X86 struct iavf_rx_queue *rxq; int i; diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h index b22ccc42eb..d4b4935be6 100644 --- a/drivers/net/iavf/iavf_rxtx.h +++ b/drivers/net/iavf/iavf_rxtx.h @@ -57,115 +57,8 @@ #define IAVF_TX_OFFLOAD_NOTSUP_MASK \ (PKT_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK) -/* HW desc structure, both 16-byte and 32-byte types are supported */ -#ifdef RTE_LIBRTE_IAVF_16BYTE_RX_DESC -#define iavf_rx_desc iavf_16byte_rx_desc -#define iavf_rx_flex_desc iavf_16b_rx_flex_desc -#else -#define iavf_rx_desc iavf_32byte_rx_desc -#define iavf_rx_flex_desc iavf_32b_rx_flex_desc -#endif - -struct iavf_rxq_ops { - void (*release_mbufs)(struct iavf_rx_queue *rxq); -}; - -struct iavf_txq_ops { - void (*release_mbufs)(struct iavf_tx_queue *txq); -}; - -/* Structure associated with each Rx queue. */ -struct iavf_rx_queue { - struct rte_mempool *mp; /* mbuf pool to populate Rx ring */ - const struct rte_memzone *mz; /* memzone for Rx ring */ - volatile union iavf_rx_desc *rx_ring; /* Rx ring virtual address */ - uint64_t rx_ring_phys_addr; /* Rx ring DMA address */ - struct rte_mbuf **sw_ring; /* address of SW ring */ - uint16_t nb_rx_desc; /* ring length */ - uint16_t rx_tail; /* current value of tail */ - volatile uint8_t *qrx_tail; /* register address of tail */ - uint16_t rx_free_thresh; /* max free RX desc to hold */ - uint16_t nb_rx_hold; /* number of held free RX desc */ - struct rte_mbuf *pkt_first_seg; /* first segment of current packet */ - struct rte_mbuf *pkt_last_seg; /* last segment of current packet */ - struct rte_mbuf fake_mbuf; /* dummy mbuf */ - uint8_t rxdid; - - /* used for VPMD */ - uint16_t rxrearm_nb; /* number of remaining to be re-armed */ - uint16_t rxrearm_start; /* the idx we start the re-arming from */ - uint64_t mbuf_initializer; /* value to init mbufs */ - - /* for rx bulk */ - uint16_t rx_nb_avail; /* number of staged packets ready */ - uint16_t rx_next_avail; /* index of next staged packets */ - uint16_t rx_free_trigger; /* triggers rx buffer allocation */ - struct rte_mbuf *rx_stage[IAVF_RX_MAX_BURST * 2]; /* store mbuf */ - - uint16_t port_id; /* device port ID */ - uint8_t crc_len; /* 0 if CRC stripped, 4 otherwise */ - uint8_t fdir_enabled; /* 0 if FDIR disabled, 1 when enabled */ - uint16_t queue_id; /* Rx queue index */ - uint16_t rx_buf_len; /* The packet buffer size */ - uint16_t rx_hdr_len; /* The header buffer size */ - uint16_t max_pkt_len; /* Maximum packet length */ - struct iavf_vsi *vsi; /**< the VSI this queue belongs to */ - - bool q_set; /* if rx queue has been configured */ - bool rx_deferred_start; /* don't start this queue in dev start */ - const struct iavf_rxq_ops *ops; -}; - -struct 
iavf_tx_entry { - struct rte_mbuf *mbuf; - uint16_t next_id; - uint16_t last_id; -}; - -struct iavf_tx_vec_entry { - struct rte_mbuf *mbuf; -}; - -/* Structure associated with each TX queue. */ -struct iavf_tx_queue { - const struct rte_memzone *mz; /* memzone for Tx ring */ - volatile struct iavf_tx_desc *tx_ring; /* Tx ring virtual address */ - uint64_t tx_ring_phys_addr; /* Tx ring DMA address */ - struct iavf_tx_entry *sw_ring; /* address array of SW ring */ - uint16_t nb_tx_desc; /* ring length */ - uint16_t tx_tail; /* current value of tail */ - volatile uint8_t *qtx_tail; /* register address of tail */ - /* number of used desc since RS bit set */ - uint16_t nb_used; - uint16_t nb_free; - uint16_t last_desc_cleaned; /* last desc have been cleaned*/ - uint16_t free_thresh; - uint16_t rs_thresh; - - uint16_t port_id; - uint16_t queue_id; - uint64_t offloads; - uint16_t next_dd; /* next to set RS, for VPMD */ - uint16_t next_rs; /* next to check DD, for VPMD */ - - bool q_set; /* if rx queue has been configured */ - bool tx_deferred_start; /* don't start this queue in dev start */ - const struct iavf_txq_ops *ops; -}; - -/* Offload features */ -union iavf_tx_offload { - uint64_t data; - struct { - uint64_t l2_len:7; /* L2 (MAC) Header Length. */ - uint64_t l3_len:9; /* L3 (IP) Header Length. */ - uint64_t l4_len:8; /* L4 Header Length. */ - uint64_t tso_segsz:16; /* TCP TSO segment size */ - /* uint64_t unused : 24; */ - }; -}; - -/* Rx Flex Descriptors +/** + * Rx Flex Descriptors * These descriptors are used instead of the legacy version descriptors */ union iavf_16b_rx_flex_desc { @@ -236,6 +129,123 @@ union iavf_32b_rx_flex_desc { } wb; /* writeback */ }; +/* HW desc structure, both 16-byte and 32-byte types are supported */ +#ifdef RTE_LIBRTE_IAVF_16BYTE_RX_DESC +#define iavf_rx_desc iavf_16byte_rx_desc +#define iavf_rx_flex_desc iavf_16b_rx_flex_desc +#else +#define iavf_rx_desc iavf_32byte_rx_desc +#define iavf_rx_flex_desc iavf_32b_rx_flex_desc +#endif + +typedef void (*iavf_rxd_to_pkt_fields_t)(struct iavf_rx_queue *rxq, + struct rte_mbuf *mb, + volatile union iavf_rx_flex_desc *rxdp); + +struct iavf_rxq_ops { + void (*release_mbufs)(struct iavf_rx_queue *rxq); +}; + +struct iavf_txq_ops { + void (*release_mbufs)(struct iavf_tx_queue *txq); +}; + +/* Structure associated with each Rx queue. 
*/ +struct iavf_rx_queue { + struct rte_mempool *mp; /* mbuf pool to populate Rx ring */ + const struct rte_memzone *mz; /* memzone for Rx ring */ + volatile union iavf_rx_desc *rx_ring; /* Rx ring virtual address */ + uint64_t rx_ring_phys_addr; /* Rx ring DMA address */ + struct rte_mbuf **sw_ring; /* address of SW ring */ + uint16_t nb_rx_desc; /* ring length */ + uint16_t rx_tail; /* current value of tail */ + volatile uint8_t *qrx_tail; /* register address of tail */ + uint16_t rx_free_thresh; /* max free RX desc to hold */ + uint16_t nb_rx_hold; /* number of held free RX desc */ + struct rte_mbuf *pkt_first_seg; /* first segment of current packet */ + struct rte_mbuf *pkt_last_seg; /* last segment of current packet */ + struct rte_mbuf fake_mbuf; /* dummy mbuf */ + uint8_t rxdid; + + /* used for VPMD */ + uint16_t rxrearm_nb; /* number of remaining to be re-armed */ + uint16_t rxrearm_start; /* the idx we start the re-arming from */ + uint64_t mbuf_initializer; /* value to init mbufs */ + + /* for rx bulk */ + uint16_t rx_nb_avail; /* number of staged packets ready */ + uint16_t rx_next_avail; /* index of next staged packets */ + uint16_t rx_free_trigger; /* triggers rx buffer allocation */ + struct rte_mbuf *rx_stage[IAVF_RX_MAX_BURST * 2]; /* store mbuf */ + + uint16_t port_id; /* device port ID */ + uint8_t crc_len; /* 0 if CRC stripped, 4 otherwise */ + uint8_t fdir_enabled; /* 0 if FDIR disabled, 1 when enabled */ + uint16_t queue_id; /* Rx queue index */ + uint16_t rx_buf_len; /* The packet buffer size */ + uint16_t rx_hdr_len; /* The header buffer size */ + uint16_t max_pkt_len; /* Maximum packet length */ + struct iavf_vsi *vsi; /**< the VSI this queue belongs to */ + + bool q_set; /* if rx queue has been configured */ + bool rx_deferred_start; /* don't start this queue in dev start */ + const struct iavf_rxq_ops *ops; + uint8_t proto_xtr; /* protocol extraction type */ + uint64_t xtr_ol_flag; + /* flexible descriptor metadata extraction offload flag */ + iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields; + /* handle flexible descriptor by RXDID */ +}; + +struct iavf_tx_entry { + struct rte_mbuf *mbuf; + uint16_t next_id; + uint16_t last_id; +}; + +struct iavf_tx_vec_entry { + struct rte_mbuf *mbuf; +}; + +/* Structure associated with each TX queue. */ +struct iavf_tx_queue { + const struct rte_memzone *mz; /* memzone for Tx ring */ + volatile struct iavf_tx_desc *tx_ring; /* Tx ring virtual address */ + uint64_t tx_ring_phys_addr; /* Tx ring DMA address */ + struct iavf_tx_entry *sw_ring; /* address array of SW ring */ + uint16_t nb_tx_desc; /* ring length */ + uint16_t tx_tail; /* current value of tail */ + volatile uint8_t *qtx_tail; /* register address of tail */ + /* number of used desc since RS bit set */ + uint16_t nb_used; + uint16_t nb_free; + uint16_t last_desc_cleaned; /* last desc that has been cleaned */ + uint16_t free_thresh; + uint16_t rs_thresh; + + uint16_t port_id; + uint16_t queue_id; + uint64_t offloads; + uint16_t next_dd; /* next to set RS, for VPMD */ + uint16_t next_rs; /* next to check DD, for VPMD */ + + bool q_set; /* if tx queue has been configured */ + bool tx_deferred_start; /* don't start this queue in dev start */ + const struct iavf_txq_ops *ops; +}; + +/* Offload features */ +union iavf_tx_offload { + uint64_t data; + struct { + uint64_t l2_len:7; /* L2 (MAC) Header Length. */ + uint64_t l3_len:9; /* L3 (IP) Header Length. */ + uint64_t l4_len:8; /* L4 Header Length.
*/ + uint64_t tso_segsz:16; /* TCP TSO segment size */ + /* uint64_t unused : 24; */ + }; +}; + /* Rx Flex Descriptor * RxDID Profile ID 16-21 * Flex-field 0: RSS hash lower 16-bits @@ -335,6 +345,7 @@ enum iavf_rxdid { IAVF_RXDID_COMMS_AUX_TCP = 21, IAVF_RXDID_COMMS_OVS_1 = 22, IAVF_RXDID_COMMS_OVS_2 = 23, + IAVF_RXDID_COMMS_AUX_IP_OFFSET = 25, IAVF_RXDID_LAST = 63, }; @@ -359,6 +370,20 @@ enum iavf_rx_flex_desc_status_error_0_bits { IAVF_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */ }; +enum iavf_rx_flex_desc_status_error_1_bits { + /* Note: These are predefined bit offsets */ + IAVF_RX_FLEX_DESC_STATUS1_CPM_S = 0, /* 4 bits */ + IAVF_RX_FLEX_DESC_STATUS1_NAT_S = 4, + IAVF_RX_FLEX_DESC_STATUS1_CRYPTO_S = 5, + /* [10:6] reserved */ + IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11, + IAVF_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12, + IAVF_RX_FLEX_DESC_STATUS1_XTRMD3_VALID_S = 13, + IAVF_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S = 14, + IAVF_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S = 15, + IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */ +}; + /* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */ #define IAVF_RX_FLEX_DESC_PTYPE_M (0x3FF) /* 10-bits */ @@ -457,6 +482,8 @@ uint16_t iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); int iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq); +uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type); + const uint32_t *iavf_get_default_ptype_table(void); static inline diff --git a/drivers/net/iavf/iavf_rxtx_vec_common.h b/drivers/net/iavf/iavf_rxtx_vec_common.h index 25bb502de2..7ad1e0f68a 100644 --- a/drivers/net/iavf/iavf_rxtx_vec_common.h +++ b/drivers/net/iavf/iavf_rxtx_vec_common.h @@ -224,6 +224,9 @@ iavf_rx_vec_queue_default(struct iavf_rx_queue *rxq) if (rxq->nb_rx_desc % rxq->rx_free_thresh) return -1; + if (rxq->proto_xtr != IAVF_PROTO_XTR_NONE) + return -1; + return 0; } diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c index 54d9917c0a..64d194670b 100644 --- a/drivers/net/iavf/iavf_vchnl.c +++ b/drivers/net/iavf/iavf_vchnl.c @@ -850,25 +850,27 @@ iavf_configure_queues(struct iavf_adapter *adapter, #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC if (vf->vf_res->vf_cap_flags & - VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC && - vf->supported_rxdid & BIT(IAVF_RXDID_COMMS_OVS_1)) { - vc_qp->rxq.rxdid = IAVF_RXDID_COMMS_OVS_1; - PMD_DRV_LOG(NOTICE, "request RXDID == %d in " - "Queue[%d]", vc_qp->rxq.rxdid, i); + VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC && + vf->supported_rxdid & BIT(rxq[i]->rxdid)) { + vc_qp->rxq.rxdid = rxq[i]->rxdid; + PMD_DRV_LOG(NOTICE, "request RXDID[%d] in Queue[%d]", + vc_qp->rxq.rxdid, i); } else { + PMD_DRV_LOG(NOTICE, "RXDID[%d] is not supported, " + "request default RXDID[%d] in Queue[%d]", + rxq[i]->rxdid, IAVF_RXDID_LEGACY_1, i); vc_qp->rxq.rxdid = IAVF_RXDID_LEGACY_1; - PMD_DRV_LOG(NOTICE, "request RXDID == %d in " - "Queue[%d]", vc_qp->rxq.rxdid, i); } #else if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC && vf->supported_rxdid & BIT(IAVF_RXDID_LEGACY_0)) { vc_qp->rxq.rxdid = IAVF_RXDID_LEGACY_0; - PMD_DRV_LOG(NOTICE, "request RXDID == %d in " - "Queue[%d]", vc_qp->rxq.rxdid, i); + PMD_DRV_LOG(NOTICE, "request RXDID[%d] in Queue[%d]", + vc_qp->rxq.rxdid, i); } else { - PMD_DRV_LOG(ERR, "RXDID == 0 is not supported"); + PMD_DRV_LOG(ERR, "RXDID[%d] is not supported", + IAVF_RXDID_LEGACY_0); return -1; } #endif diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build index dcd028530c..26c02c4401 100644 --- a/drivers/net/iavf/meson.build 
+++ b/drivers/net/iavf/meson.build @@ -59,3 +59,5 @@ if arch_subdir == 'x86' objs += iavf_avx512_lib.extract_objects('iavf_rxtx_vec_avx512.c') endif endif + +headers = files('rte_pmd_iavf.h') diff --git a/drivers/net/iavf/rte_pmd_iavf.h b/drivers/net/iavf/rte_pmd_iavf.h new file mode 100644 index 0000000000..955084e197 --- /dev/null +++ b/drivers/net/iavf/rte_pmd_iavf.h @@ -0,0 +1,250 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Intel Corporation + */ + +#ifndef _RTE_PMD_IAVF_H_ +#define _RTE_PMD_IAVF_H_ + +/** + * @file rte_pmd_iavf.h + * + * iavf PMD specific functions. + * + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice + * + */ + +#include <stdio.h> +#include <rte_mbuf.h> +#include <rte_mbuf_dyn.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * The supported network flexible descriptor's extraction metadata format. + */ +union rte_pmd_ifd_proto_xtr_metadata { + uint32_t metadata; + + struct { + uint16_t data0; + uint16_t data1; + } raw; + + struct { + uint16_t stag_vid:12, + stag_dei:1, + stag_pcp:3; + uint16_t ctag_vid:12, + ctag_dei:1, + ctag_pcp:3; + } vlan; + + struct { + uint16_t protocol:8, + ttl:8; + uint16_t tos:8, + ihl:4, + version:4; + } ipv4; + + struct { + uint16_t hoplimit:8, + nexthdr:8; + uint16_t flowhi4:4, + tc:8, + version:4; + } ipv6; + + struct { + uint16_t flowlo16; + uint16_t flowhi4:4, + tc:8, + version:4; + } ipv6_flow; + + struct { + uint16_t fin:1, + syn:1, + rst:1, + psh:1, + ack:1, + urg:1, + ece:1, + cwr:1, + res1:4, + doff:4; + uint16_t rsvd; + } tcp; + + uint32_t ip_ofs; +}; + +/* Offset of mbuf dynamic field for flexible descriptor's extraction data */ +extern int rte_pmd_ifd_dynfield_proto_xtr_metadata_offs; + +/* Mask of mbuf dynamic flags for flexible descriptor's extraction type */ +extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_vlan_mask; +extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask; +extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask; +extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask; +extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask; +extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask; + +/** + * The mbuf dynamic field pointer for flexible descriptor's extraction metadata. + */ +#define RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(m) \ + RTE_MBUF_DYNFIELD((m), \ + rte_pmd_ifd_dynfield_proto_xtr_metadata_offs, \ + uint32_t *) + +/** + * The mbuf dynamic flag for VLAN protocol extraction metadata; it is valid + * when dev_args 'proto_xtr' has 'vlan' specified. + */ +#define RTE_IAVF_PKT_RX_DYNF_PROTO_XTR_VLAN \ + (rte_pmd_ifd_dynflag_proto_xtr_vlan_mask) + +/** + * The mbuf dynamic flag for IPv4 protocol extraction metadata; it is valid + * when dev_args 'proto_xtr' has 'ipv4' specified. + */ +#define RTE_IAVF_PKT_RX_DYNF_PROTO_XTR_IPV4 \ + (rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask) + +/** + * The mbuf dynamic flag for IPv6 protocol extraction metadata; it is valid + * when dev_args 'proto_xtr' has 'ipv6' specified. + */ +#define RTE_IAVF_PKT_RX_DYNF_PROTO_XTR_IPV6 \ + (rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask) + +/** + * The mbuf dynamic flag for IPv6 with flow protocol extraction metadata; it is + * valid when dev_args 'proto_xtr' has 'ipv6_flow' specified. + */ +#define RTE_IAVF_PKT_RX_DYNF_PROTO_XTR_IPV6_FLOW \ + (rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask) + +/** + * The mbuf dynamic flag for TCP protocol extraction metadata; it is valid + * when dev_args 'proto_xtr' has 'tcp' specified.
+ */ +#define RTE_IAVF_PKT_RX_DYNF_PROTO_XTR_TCP \ + (rte_pmd_ifd_dynflag_proto_xtr_tcp_mask) + +/** + * The mbuf dynamic flag for IP_OFFSET extraction metadata; it is valid + * when dev_args 'proto_xtr' has 'ip_offset' specified. + */ +#define RTE_IAVF_PKT_RX_DYNF_PROTO_XTR_IP_OFFSET \ + (rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask) + +/** + * Check if mbuf dynamic field for flexible descriptor's extraction metadata + * is registered. + * + * @return + * True if registered, false otherwise. + */ +__rte_experimental +static __rte_always_inline int +rte_pmd_ifd_dynf_proto_xtr_metadata_avail(void) +{ + return rte_pmd_ifd_dynfield_proto_xtr_metadata_offs != -1; +} + +/** + * Get the mbuf dynamic field for flexible descriptor's extraction metadata. + * + * @param m + * The pointer to the mbuf. + * @return + * The saved protocol extraction metadata. + */ +__rte_experimental +static __rte_always_inline uint32_t +rte_pmd_ifd_dynf_proto_xtr_metadata_get(struct rte_mbuf *m) +{ + return *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(m); +} + +/** + * Dump the mbuf dynamic field for flexible descriptor's extraction metadata. + * + * @param m + * The pointer to the mbuf. + */ +__rte_experimental +static inline void +rte_pmd_ifd_dump_proto_xtr_metadata(struct rte_mbuf *m) +{ + union rte_pmd_ifd_proto_xtr_metadata data; + + if (!rte_pmd_ifd_dynf_proto_xtr_metadata_avail()) + return; + + data.metadata = rte_pmd_ifd_dynf_proto_xtr_metadata_get(m); + + if (m->ol_flags & RTE_IAVF_PKT_RX_DYNF_PROTO_XTR_VLAN) + printf(" - Flexible descriptor's Metadata: [0x%04x:0x%04x]," "vlan,stag=%u:%u:%u,ctag=%u:%u:%u", + data.raw.data0, data.raw.data1, + data.vlan.stag_pcp, + data.vlan.stag_dei, + data.vlan.stag_vid, + data.vlan.ctag_pcp, + data.vlan.ctag_dei, + data.vlan.ctag_vid); + else if (m->ol_flags & RTE_IAVF_PKT_RX_DYNF_PROTO_XTR_IPV4) + printf(" - Flexible descriptor's Metadata: [0x%04x:0x%04x]," "ipv4,ver=%u,hdrlen=%u,tos=%u,ttl=%u,proto=%u", + data.raw.data0, data.raw.data1, + data.ipv4.version, + data.ipv4.ihl, + data.ipv4.tos, + data.ipv4.ttl, + data.ipv4.protocol); + else if (m->ol_flags & RTE_IAVF_PKT_RX_DYNF_PROTO_XTR_IPV6) + printf(" - Flexible descriptor's Metadata: [0x%04x:0x%04x]," "ipv6,ver=%u,tc=%u,flow_hi4=0x%x,nexthdr=%u,hoplimit=%u", + data.raw.data0, data.raw.data1, + data.ipv6.version, + data.ipv6.tc, + data.ipv6.flowhi4, + data.ipv6.nexthdr, + data.ipv6.hoplimit); + else if (m->ol_flags & RTE_IAVF_PKT_RX_DYNF_PROTO_XTR_IPV6_FLOW) + printf(" - Flexible descriptor's Metadata: [0x%04x:0x%04x]," "ipv6_flow,ver=%u,tc=%u,flow=0x%x%04x", + data.raw.data0, data.raw.data1, + data.ipv6_flow.version, + data.ipv6_flow.tc, + data.ipv6_flow.flowhi4, + data.ipv6_flow.flowlo16); + else if (m->ol_flags & RTE_IAVF_PKT_RX_DYNF_PROTO_XTR_TCP) + printf(" - Flexible descriptor's Metadata: [0x%04x:0x%04x]," "tcp,doff=%u,flags=%s%s%s%s%s%s%s%s", + data.raw.data0, data.raw.data1, + data.tcp.doff, + data.tcp.cwr ? "C" : "", + data.tcp.ece ? "E" : "", + data.tcp.urg ? "U" : "", + data.tcp.ack ? "A" : "", + data.tcp.psh ? "P" : "", + data.tcp.rst ? "R" : "", + data.tcp.syn ? "S" : "", + data.tcp.fin ?
"F" : ""); + else if (m->ol_flags & RTE_IAVF_PKT_RX_DYNF_PROTO_XTR_IP_OFFSET) + printf(" - Flexible descriptor's Extraction: ip_offset=%u", + data.ip_ofs); +} + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_PMD_IAVF_H_ */ diff --git a/drivers/net/iavf/version.map b/drivers/net/iavf/version.map index 4a76d1d52d..2a411da2e9 100644 --- a/drivers/net/iavf/version.map +++ b/drivers/net/iavf/version.map @@ -1,3 +1,16 @@ DPDK_21 { local: *; }; + +EXPERIMENTAL { + global: + + # added in 20.11 + rte_pmd_ifd_dynfield_proto_xtr_metadata_offs; + rte_pmd_ifd_dynflag_proto_xtr_vlan_mask; + rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask; + rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask; + rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask; + rte_pmd_ifd_dynflag_proto_xtr_tcp_mask; + rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask; +};