/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */

#ifndef _ENIC_H_
#define _ENIC_H_

#include <rte_vxlan.h>
#include <rte_ether.h>
#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_flowman.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_nic.h"
#include "vnic_rss.h"
#include "enic_res.h"
#include "cq_enet_desc.h"
#include <stdbool.h>
#include <sys/queue.h>
#include <rte_spinlock.h>

#define DRV_NAME "enic_pmd"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Poll-mode Driver"
#define DRV_COPYRIGHT "Copyright 2008-2015 Cisco Systems, Inc"

#define VLAN_ETH_HLEN 18

#define ENICPMD_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)

#define ENICPMD_BDF_LENGTH 13 /* 0000:00:00.0'\0' */

#define ENIC_CALC_IP_CKSUM 1
#define ENIC_CALC_TCP_UDP_CKSUM 2
#define ENIC_MAX_MTU 9000
#define ENIC_PAGE_SIZE 4096
#define PAGE_ROUND_UP(x) \
	((((unsigned long)(x)) + ENIC_PAGE_SIZE-1) & (~(ENIC_PAGE_SIZE-1)))
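
/* For example, PAGE_ROUND_UP(1) == 4096, PAGE_ROUND_UP(4096) == 4096, and
 * PAGE_ROUND_UP(5000) == 8192: sizes are rounded up to the next
 * ENIC_PAGE_SIZE (4 KB) boundary.
 */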

#define ENICPMD_VFIO_PATH "/dev/vfio/vfio"
/*#define ENIC_DESC_COUNT_MAKE_ODD (x) do{if ((~(x)) & 1) { (x)--; } }while(0)*/

#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */
/* enet SRIOV Standalone vNic VF */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_SN 0x02B7

/* Special Filter id for non-specific packet flagging. Don't change value */
#define ENIC_MAGIC_FILTER_ID 0xffff

#define ENICPMD_FDIR_MAX 64

/*
 * Interrupt 0: LSC and errors
 * Interrupt 1: rx queue 0
 * Interrupt 2: rx queue 1
 * ...
 */
#define ENICPMD_LSC_INTR_OFFSET 0
#define ENICPMD_RXQ_INTR_OFFSET 1
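
/* For example, with two rx queues and rx interrupts enabled, vector
 * ENICPMD_LSC_INTR_OFFSET (0) carries link state changes and errors, and
 * vectors ENICPMD_RXQ_INTR_OFFSET + 0 and + 1 serve rx queues 0 and 1.
 */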

struct enic_fdir_node {
	struct rte_eth_fdir_filter filter;
	uint16_t fltr_id;
	uint16_t rq_index;
};

struct enic_fdir {
	struct rte_eth_fdir_stats stats;
	struct rte_hash *hash;
	struct enic_fdir_node *nodes[ENICPMD_FDIR_MAX];
	uint32_t modes;
	uint32_t types_mask;
	void (*copy_fltr_fn)(struct filter_v2 *filt,
			     const struct rte_eth_fdir_input *input,
			     const struct rte_eth_fdir_masks *masks);
};

struct enic_soft_stats {
	rte_atomic64_t rx_nombuf;
	rte_atomic64_t rx_packet_errors;
	rte_atomic64_t tx_oversized;
};

struct enic_memzone_entry {
	const struct rte_memzone *rz;
	LIST_ENTRY(enic_memzone_entry) entries;
};

/* Defined in enic_fm_flow.c */
struct enic_flowman;
struct enic_fm_flow;

struct rte_flow {
	LIST_ENTRY(rte_flow) next;
	/* Data for filter API based flow (enic_flow.c) */
	uint16_t enic_filter_id;
	struct filter_v2 enic_filter;
	/* Data for flow manager based flow (enic_fm_flow.c) */
	struct enic_fm_flow *fm;
};

/* Per-instance private data structure */
struct enic {
	struct enic *next;
	struct rte_pci_device *pdev;
	struct vnic_enet_config config;
	struct vnic_dev_bar bar0;
	struct vnic_dev *vdev;

	/*
	 * mbuf_initializer contains 64 bits of mbuf rearm_data, used by
	 * the avx2 handler at this time.
	 */
	uint64_t mbuf_initializer;
	unsigned int port_id;
	bool overlay_offload;
	struct rte_eth_dev *rte_dev;
	struct rte_eth_dev_data *dev_data;
	struct enic_fdir fdir;
	char bdf_name[ENICPMD_BDF_LENGTH];
	int dev_fd;
	int iommu_group_fd;
	int iommu_groupid;
	int eventfd;
	uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
	pthread_t err_intr_thread;
	int promisc;
	int allmulti;
	uint8_t ig_vlan_strip_en;
	int link_status;
	uint8_t hw_ip_checksum;
	uint16_t max_mtu;
	uint8_t adv_filters;
	uint32_t flow_filter_mode;
	uint8_t filter_actions; /* HW supported actions */
	bool vxlan;
	bool disable_overlay; /* devargs disable_overlay=1 */
	uint8_t enable_avx2_rx; /* devargs enable-avx2-rx=1 */
	uint8_t geneve_opt_avail;   /* Geneve with options offload available */
	uint8_t geneve_opt_enabled; /* Geneve with options offload enabled */
	uint8_t geneve_opt_request; /* devargs geneve-opt=1 */
	bool nic_cfg_chk;     /* NIC_CFG_CHK available */
	bool udp_rss_weak;    /* Bodega style UDP RSS */
	uint8_t ig_vlan_rewrite_mode; /* devargs ig-vlan-rewrite */
	uint16_t vxlan_port;  /* current vxlan port pushed to NIC */
	int use_simple_tx_handler;
	int use_noscatter_vec_rx_handler;

	unsigned int flags;
	unsigned int priv_flags;

	/* work queue (len = conf_wq_count) */
	struct vnic_wq *wq;
	unsigned int wq_count; /* equals eth_dev nb_tx_queues */

	/* receive queue (len = conf_rq_count) */
	struct vnic_rq *rq;
	unsigned int rq_count; /* equals eth_dev nb_rx_queues */

	/* completion queue (len = conf_cq_count) */
	struct vnic_cq *cq;
	unsigned int cq_count; /* equals rq_count + wq_count */

	/* interrupt vectors (len = conf_intr_count) */
	struct vnic_intr *intr;
	unsigned int intr_count; /* equals enabled interrupts (lsc + rxqs) */

	/* software counters */
	struct enic_soft_stats soft_stats;

	/* configured resources on vic */
	unsigned int conf_rq_count;
	unsigned int conf_wq_count;
	unsigned int conf_cq_count;
	unsigned int conf_intr_count;

	/* linked list storing memory allocations */
	LIST_HEAD(enic_memzone_list, enic_memzone_entry) memzone_list;
	rte_spinlock_t memzone_list_lock;
	rte_spinlock_t mtu_lock;

	LIST_HEAD(enic_flows, rte_flow) flows;

	/* RSS */
	uint16_t reta_size;
	uint8_t hash_key_size;
	uint64_t flow_type_rss_offloads; /* 0 indicates RSS not supported */
	/*
	 * Keep a copy of current RSS config for queries, as we cannot retrieve
	 * it from the NIC.
	 */
	uint8_t rss_hash_type; /* NIC_CFG_RSS_HASH_TYPE flags */
	uint8_t rss_enable;
	uint64_t rss_hf; /* ETH_RSS flags */
	union vnic_rss_key rss_key;
	union vnic_rss_cpu rss_cpu;

	uint64_t rx_offload_capa; /* DEV_RX_OFFLOAD flags */
	uint64_t tx_offload_capa; /* DEV_TX_OFFLOAD flags */
	uint64_t tx_queue_offload_capa; /* DEV_TX_OFFLOAD flags */
	uint64_t tx_offload_mask; /* PKT_TX flags accepted */

	/* Multicast MAC addresses added to the NIC */
	uint32_t mc_count;
	struct rte_ether_addr mc_addrs[ENIC_MULTICAST_PERFECT_FILTERS];

	/* Flow manager API */
	struct enic_flowman *fm;

	/* switchdev */
	uint8_t switchdev_mode;
	uint16_t switch_domain_id;
	uint16_t max_vf_id;

	/* Number of queues needed for VF representor paths */
	uint32_t vf_required_wq;
	uint32_t vf_required_cq;
	uint32_t vf_required_rq;

	/*
	 * Lock to serialize devcmds from PF, VF representors as they all share
	 * the same PF devcmd instance in firmware.
	 */
	rte_spinlock_t devcmd_lock;
};

struct enic_vf_representor {
	struct enic enic;
	struct vnic_enet_config config;
	struct rte_eth_dev *eth_dev;
	struct rte_ether_addr mac_addr;
	struct rte_pci_addr bdf;
	struct enic *pf;
	uint16_t switch_domain_id;
	uint16_t vf_id;
	int allmulti;
	int promisc;
	/* Representor path uses PF queues. These are reserved during init */
	uint16_t pf_wq_idx;      /* WQ dedicated to VF rep */
	uint16_t pf_wq_cq_idx;   /* CQ for WQ */
	uint16_t pf_rq_sop_idx;  /* SOP RQ dedicated to VF rep */
	uint16_t pf_rq_data_idx; /* Data RQ */
};

#define VF_ENIC_TO_VF_REP(vf_enic) \
	container_of(vf_enic, struct enic_vf_representor, enic)

static inline int enic_is_vf_rep(struct enic *enic)
{
	return !!(enic->rte_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR);
}

/* Compute ethdev's max packet size from MTU */
static inline uint32_t enic_mtu_to_max_rx_pktlen(uint32_t mtu)
{
	/* ethdev max size includes eth whereas NIC MTU does not */
	return mtu + RTE_ETHER_HDR_LEN;
}
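
/* For example, the standard MTU of 1500 maps to a max rx packet length of
 * 1514 bytes (1500 + 14-byte Ethernet header).
 */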

/* Get the CQ index from a Start of Packet(SOP) RQ index */
static inline unsigned int enic_sop_rq_idx_to_cq_idx(unsigned int sop_idx)
{
	return sop_idx;
}

/* Get the RTE RQ index from a Start of Packet(SOP) RQ index */
static inline unsigned int enic_sop_rq_idx_to_rte_idx(unsigned int sop_idx)
{
	return sop_idx;
}

/* Get the Start of Packet(SOP) RQ index from a RTE RQ index */
static inline unsigned int enic_rte_rq_idx_to_sop_idx(unsigned int rte_idx)
{
	return rte_idx;
}

/* Get the Data RQ index from a RTE RQ index */
static inline unsigned int enic_rte_rq_idx_to_data_idx(unsigned int rte_idx,
							struct enic *enic)
{
	return enic->rq_count + rte_idx;
}

static inline unsigned int enic_vnic_rq_count(struct enic *enic)
{
	return enic->rq_count * 2;
}
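
/* For example, with two RTE rx queues (rq_count == 2) the vnic uses four
 * hardware RQs: SOP RQs at indexes 0 and 1, and their data RQs at indexes
 * 2 and 3 (enic_rte_rq_idx_to_data_idx(0, enic) == 2 and
 * enic_rte_rq_idx_to_data_idx(1, enic) == 3).
 */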

static inline unsigned int enic_cq_rq(__rte_unused struct enic *enic, unsigned int rq)
{
	/* Scatter rx uses two receive queues together with one
	 * completion queue, so the completion queue number is no
	 * longer the same as the rq number.
	 */
	return rq;
}

static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
{
	return enic->rq_count + wq;
}

/*
 * WQ, RQ, CQ allocation scheme. Firmware gives the driver an array of
 * WQs, an array of RQs, and an array of CQs. For now, these are
 * statically allocated between PF app send/receive queues and VF
 * representor app send/receive queues. VF representor supports only 1
 * send and 1 receive queue. The number of PF app queues is not known
 * until queue setup time.
 *
 * R = number of receive queues for PF app
 * S = number of send queues for PF app
 * V = number of VF representors
 *
 * wI = WQ for PF app send queue I
 * rI = SOP RQ for PF app receive queue I
 * dI = Data RQ for rI
 * cwI = CQ for wI
 * crI = CQ for rI
 * vwI = WQ for VF representor send queue I
 * vrI = SOP RQ for VF representor receive queue I
 * vdI = Data RQ for vrI
 * vcwI = CQ for vwI
 * vcrI = CQ for vrI
 *
 * WQ array: | w0 |..| wS-1 |..| vwV-1 |..| vw0 |
 *    index:   0      S-1       W-V        W-1   W=len(WQ array)
 *
 * RQ array: | r0 |..| rR-1 |d0 |..|dR-1| ..|vdV-1 |..| vd0 |vrV-1 |..|vr0 |
 *    index:   0      R-1    R     2R-1     X-2V     X-(V+1) X-V       X-1
 *                                                         X=len(RQ array)
 *
 * CQ array: | cr0 |..| crR-1 |cw0|..|cwS-1|..|vcwV-1|..| vcw0|vcrV-1|..|vcr0|..
 *    index:   0        R-1    R     R+S-1    X-2V     X-(V+1) X-V       X-1
 * X is not a typo. It really is len(RQ array) to accommodate enic_cq_rq() used
 * throughout RX handlers. The current scheme requires
 * len(CQ array) >= len(RQ array).
 */
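
/*
 * A concrete illustration (values chosen only as an example): with S = 2 and
 * R = 2 PF app queues, V = 1 representor, len(WQ array) = 8 and
 * len(RQ array) = 8, the PF app uses WQs 0-1, SOP RQs 0-1 and data RQs 2-3,
 * while the representor's vw0 is WQ 7, vr0 is RQ 7, vd0 is RQ 6, and its CQs
 * are vcw0 = CQ 6 and vcr0 = CQ 7, matching vf_wq_idx() and friends below.
 */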

static inline unsigned int vf_wq_cq_idx(struct enic_vf_representor *vf)
{
	/* rq is not a typo. index(vcwI) coincides with index(vdI) */
	return vf->pf->conf_rq_count - (vf->pf->max_vf_id + vf->vf_id + 2);
}

static inline unsigned int vf_wq_idx(struct enic_vf_representor *vf)
{
	return vf->pf->conf_wq_count - vf->vf_id - 1;
}

static inline unsigned int vf_rq_sop_idx(struct enic_vf_representor *vf)
{
	return vf->pf->conf_rq_count - vf->vf_id - 1;
}

static inline unsigned int vf_rq_data_idx(struct enic_vf_representor *vf)
{
	return vf->pf->conf_rq_count - (vf->pf->max_vf_id + vf->vf_id + 2);
}

static inline struct enic *pmd_priv(struct rte_eth_dev *eth_dev)
{
	return eth_dev->data->dev_private;
}

static inline uint32_t
enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
{
	uint32_t d = i0 + i1;
	d -= (d >= n_descriptors) ? n_descriptors : 0;
	return d;
}

static inline uint32_t
enic_ring_sub(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
{
	int32_t d = i1 - i0;
	return (uint32_t)((d < 0) ? ((int32_t)n_descriptors + d) : d);
}

static inline uint32_t
enic_ring_incr(uint32_t n_descriptors, uint32_t idx)
{
	idx++;
	if (unlikely(idx == n_descriptors))
		idx = 0;
	return idx;
}
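
/* For example, on a 256-entry ring, enic_ring_add(256, 250, 10) == 4 and
 * enic_ring_sub(256, 250, 4) == 10: the helpers wrap descriptor indexes
 * modulo the ring size.
 */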

int dev_is_enic(struct rte_eth_dev *dev);
void enic_fdir_stats_get(struct enic *enic,
			 struct rte_eth_fdir_stats *stats);
int enic_fdir_add_fltr(struct enic *enic,
		       struct rte_eth_fdir_filter *params);
int enic_fdir_del_fltr(struct enic *enic,
		       struct rte_eth_fdir_filter *params);
void enic_free_wq(void *txq);
int enic_alloc_intr_resources(struct enic *enic);
int enic_setup_finish(struct enic *enic);
int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
		  unsigned int socket_id, uint16_t nb_desc);
void enic_start_wq(struct enic *enic, uint16_t queue_idx);
int enic_stop_wq(struct enic *enic, uint16_t queue_idx);
void enic_start_rq(struct enic *enic, uint16_t queue_idx);
int enic_stop_rq(struct enic *enic, uint16_t queue_idx);
void enic_free_rq(void *rxq);
int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
		  unsigned int socket_id, struct rte_mempool *mp,
		  uint16_t nb_desc, uint16_t free_thresh);
int enic_set_vnic_res(struct enic *enic);
int enic_init_rss_nic_cfg(struct enic *enic);
int enic_set_rss_conf(struct enic *enic,
		      struct rte_eth_rss_conf *rss_conf);
int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu);
int enic_set_vlan_strip(struct enic *enic);
int enic_enable(struct enic *enic);
int enic_disable(struct enic *enic);
void enic_remove(struct enic *enic);
int enic_get_link_status(struct enic *enic);
int enic_dev_stats_get(struct enic *enic,
		       struct rte_eth_stats *r_stats);
int enic_dev_stats_clear(struct enic *enic);
int enic_add_packet_filter(struct enic *enic);
int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr);
int enic_del_mac_address(struct enic *enic, int mac_index);
unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq);
void enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
		   struct rte_mbuf *tx_pkt, unsigned short len,
		   uint8_t sop, uint8_t eop, uint8_t cq_entry,
		   uint16_t ol_flags, uint16_t vlan_tag);

void enic_post_wq_index(struct vnic_wq *wq);
int enic_probe(struct enic *enic);
int enic_clsf_init(struct enic *enic);
void enic_clsf_destroy(struct enic *enic);
int enic_fm_init(struct enic *enic);
void enic_fm_destroy(struct enic *enic);
void *enic_alloc_consistent(void *priv, size_t size, dma_addr_t *dma_handle,
			    uint8_t *name);
void enic_free_consistent(void *priv, size_t size, void *vaddr,
			  dma_addr_t dma_handle);
uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts);
uint16_t enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts);
uint16_t enic_dummy_recv_pkts(void *rx_queue,
			      struct rte_mbuf **rx_pkts,
			      uint16_t nb_pkts);
uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts);
uint16_t enic_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			       uint16_t nb_pkts);
uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts);
int enic_set_mtu(struct enic *enic, uint16_t new_mtu);
int enic_link_update(struct rte_eth_dev *eth_dev);
bool enic_use_vector_rx_handler(struct rte_eth_dev *eth_dev);
void enic_pick_rx_handler(struct rte_eth_dev *eth_dev);
void enic_pick_tx_handler(struct rte_eth_dev *eth_dev);
void enic_fdir_info(struct enic *enic);
void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *stats);
int enic_vf_representor_init(struct rte_eth_dev *eth_dev, void *init_params);
int enic_vf_representor_uninit(struct rte_eth_dev *ethdev);
int enic_fm_allocate_switch_domain(struct enic *pf);
int enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq);
void enic_rxmbuf_queue_release(struct enic *enic, struct vnic_rq *rq);
void enic_free_wq_buf(struct rte_mbuf **buf);
void enic_free_rq_buf(struct rte_mbuf **mbuf);
extern const struct rte_flow_ops enic_flow_ops;
extern const struct rte_flow_ops enic_fm_flow_ops;

#endif /* _ENIC_H_ */