2018-07-13 17:06:43 +00:00
|
|
|
/* SPDX-License-Identifier: BSD-3-Clause
|
|
|
|
* Copyright (c) 2009-2018 Microsoft Corp.
|
|
|
|
* Copyright (c) 2016 Brocade Communications Systems, Inc.
|
|
|
|
* Copyright (c) 2012 NetApp Inc.
|
|
|
|
* Copyright (c) 2012 Citrix Inc.
|
|
|
|
* All rights reserved.
|
|
|
|
*/
|
|
|
|
|
2021-02-25 00:07:56 +00:00
|
|
|
#include <rte_eal_paging.h>
|
|
|
|
|
2018-07-13 17:06:43 +00:00
|
|
|
/*
 * Tunable ethdev params
 */
#define HN_MIN_RX_BUF_SIZE	1024	/* smallest acceptable Rx mbuf data room */
#define HN_MAX_XFER_LEN		2048
#define HN_MAX_MAC_ADDRS	1	/* host NIC exposes a single MAC */
#define HN_MAX_CHANNELS		64	/* upper bound on VMBus subchannels */

/* Claimed to be 12232B */
#define HN_MTU_MAX		(9 * 1024)

/* Retry interval */
#define HN_CHAN_INTERVAL_US	100

/* Host monitor interval */
#define HN_CHAN_LATENCY_NS	50000

/* Default copy/zero-copy crossover sizes in bytes
 * (NOTE(review): presumably overridable via devargs — confirm in probe code)
 */
#define HN_TXCOPY_THRESHOLD	512
#define HN_RXCOPY_THRESHOLD	256

/* External (attached) mbufs for Rx are disabled by default */
#define HN_RX_EXTMBUF_ENABLE	0

#ifndef PAGE_MASK
/* Mask of the low (offset-within-page) bits, derived at runtime */
#define PAGE_MASK (rte_mem_page_size() - 1)
#endif
|
|
|
|
|
|
|
|
/* Opaque forward declarations; full definitions live elsewhere in the PMD */
struct hn_data;
struct hn_txdesc;

/* Per-queue traffic counters, kept separately for Rx and Tx queues */
struct hn_stats {
	uint64_t	packets;
	uint64_t	bytes;
	uint64_t	errors;
	uint64_t	ring_full;	/* drops because staging ring was full */
	uint64_t	channel_full;	/* sends deferred: VMBus channel full */
	uint64_t	multicast;
	uint64_t	broadcast;
	/* Size bins in array as RFC 2819, undersized [0], 64 [1], etc */
	uint64_t	size_bins[8];
};
|
|
|
|
|
|
|
|
/* Transmit queue: one per configured Tx queue, bound to a VMBus channel */
struct hn_tx_queue {
	struct hn_data  *hv;		/* back-pointer to device private data */
	struct vmbus_channel *chan;	/* VMBus channel carrying this queue */
	uint16_t	port_id;	/* owning ethdev port id */
	uint16_t	queue_id;	/* queue index within the port */
	uint32_t	free_thresh;	/* free-descriptor threshold that
					 * triggers completion cleanup
					 * (NOTE(review): confirm exact use)
					 */
	struct rte_mempool *txdesc_pool; /* pool of struct hn_txdesc */
	const struct rte_memzone *tx_rndis_mz; /* memzone backing tx_rndis */
	void		*tx_rndis;	/* RNDIS packet headers, per descriptor */
	rte_iova_t	tx_rndis_iova;	/* IOVA of the tx_rndis region */

	/* Applied packet transmission aggregation limits. */
	uint32_t	agg_szmax;	/* max bytes per aggregated send */
	uint32_t	agg_pktmax;	/* max packets per aggregated send */
	uint32_t	agg_align;	/* alignment between aggregated packets */

	/* Packet transmission aggregation states */
	struct hn_txdesc *agg_txd;	/* descriptor being filled, if any */
	uint32_t	agg_pktleft;	/* packets still allowed in agg_txd */
	uint32_t	agg_szleft;	/* bytes still available in agg_txd */
	struct rndis_packet_msg *agg_prevpkt; /* last packet appended */

	struct hn_stats	stats;		/* per-queue Tx counters */
};
|
|
|
|
|
|
|
|
/* Receive queue: one per configured Rx queue, bound to a VMBus channel */
struct hn_rx_queue {
	struct hn_data  *hv;		/* back-pointer to device private data */
	struct vmbus_channel *chan;	/* VMBus channel carrying this queue */
	struct rte_mempool *mb_pool;	/* mbuf pool for received packets */
	struct rte_ring *rx_ring;	/* staging ring of completed mbufs */

	rte_spinlock_t ring_lock;	/* serializes rx_ring producers */
	uint32_t event_sz;		/* allocated size of event_buf */
	uint16_t port_id;		/* owning ethdev port id */
	uint16_t queue_id;		/* queue index within the port */
	struct hn_stats stats;		/* per-queue Rx counters */

	void *event_buf;		/* scratch buffer for channel events */
	struct hn_rx_bufinfo *rxbuf_info; /* tracking info for host Rx buffer */
	rte_atomic32_t  rxbuf_outstanding; /* host Rx-buffer sections still
					    * referenced by in-flight mbufs
					    */
};
|
|
|
|
|
|
|
|
|
|
|
|
/* multi-packet data from host */
struct hn_rx_bufinfo {
	struct vmbus_channel *chan;	/* channel the buffer arrived on */
	struct hn_rx_queue *rxq;	/* queue the buffer belongs to */
	uint64_t	xactid;		/* transaction id to ack back to host */
	/* callback info used when the host buffer is attached as an
	 * external mbuf; its free callback releases the section
	 */
	struct rte_mbuf_ext_shared_info shinfo;
} __rte_cache_aligned;
|
|
|
|
|
2019-02-08 03:44:02 +00:00
|
|
|
/* Sentinel: no VF port currently associated */
#define HN_INVALID_PORT	UINT16_MAX

/* Lifecycle state of the companion VF (SR-IOV) device, if any */
enum vf_device_state {
	vf_unknown = 0,		/* no VF information yet */
	vf_removed,		/* VF device was removed */
	vf_configured,		/* VF configured but not started */
	vf_started,		/* VF up and carrying traffic */
	vf_stopped,		/* VF stopped, still configured */
};
|
|
|
|
|
|
|
|
/* Tracking of the companion VF port and who controls it */
struct hn_vf_ctx {
	uint16_t	vf_port;	/* DPDK port id of VF, or HN_INVALID_PORT */

	/* We have taken ownership of this VF port from DPDK */
	bool	vf_attached;

	/* VSC has requested to switch data path to VF */
	bool	vf_vsc_switched;

	/* VSP has reported the VF is present for this NIC */
	bool	vf_vsp_reported;

	enum vf_device_state	vf_state;	/* current VF lifecycle state */
};
|
|
|
|
|
2018-07-13 17:06:43 +00:00
|
|
|
/* Per-device private data for the netvsc PMD */
struct hn_data {
	struct rte_vmbus_device *vmbus;	/* underlying VMBus device */
	struct hn_rx_queue *primary;	/* Rx queue on the primary channel */
	rte_rwlock_t    vf_lock;	/* guards access to the VF port */
	uint16_t	port_id;	/* this device's ethdev port id */

	struct hn_vf_ctx	vf_ctx;	/* state of the companion VF, if any */

	uint8_t		closed;		/* device has been closed */
	uint8_t		vlan_strip;	/* VLAN strip offload requested */

	uint32_t	link_status;	/* cached link state */
	uint32_t	link_speed;	/* cached link speed */

	struct rte_mem_resource *rxbuf_res;	/* UIO resource for Rx */
	uint32_t	rxbuf_section_cnt;	/* # of Rx sections */
	uint32_t	rx_copybreak;	/* copy (not attach) Rx data below this size */
	uint32_t	rx_extmbuf_enable;	/* allow external mbufs on Rx */
	uint16_t	max_queues;		/* Max available queues */
	uint16_t	num_queues;		/* queues currently configured */
	uint64_t	rss_offloads;		/* RSS hash types supported */

	rte_spinlock_t	chim_lock;	/* guards chimney (send) buffer map */
	struct rte_mem_resource *chim_res;	/* UIO resource for Tx */
	struct rte_bitmap *chim_bmap;		/* Send buffer map */
	void		*chim_bmem;		/* memory backing chim_bmap */
	uint32_t	tx_copybreak;	/* copy into send buffer below this size */
	uint32_t	chim_szmax;		/* Max size per buffer */
	uint32_t	chim_cnt;		/* Max packets per buffer */

	uint32_t	latency;	/* requested host monitor latency (ns) */
	uint32_t	nvs_ver;	/* negotiated NVS protocol version */
	uint32_t	ndis_ver;	/* negotiated NDIS version */
	uint32_t	rndis_agg_size;	/* host aggregation limits (RNDIS) */
	uint32_t	rndis_agg_pkts;
	uint32_t	rndis_agg_align;

	volatile uint32_t  rndis_pending;	/* id of in-flight RNDIS request */
	rte_atomic32_t	rndis_req_id;		/* RNDIS request id generator */
	uint8_t		rndis_resp[256];	/* buffer for RNDIS response */

	uint32_t	rss_hash;	/* configured RSS hash functions */
	uint8_t		rss_key[40];	/* RSS hash key */
	uint16_t	rss_ind[128];	/* RSS indirection table */

	struct rte_eth_dev_owner owner;	/* ownership record for the VF port */

	struct vmbus_channel *channels[HN_MAX_CHANNELS];

	/* saved devargs and retry budget for VF hot-plug recovery */
	struct rte_devargs devargs;
	int		eal_hot_plug_retry;
};
|
|
|
|
|
|
|
|
static inline struct vmbus_channel *
|
|
|
|
hn_primary_chan(const struct hn_data *hv)
|
|
|
|
{
|
|
|
|
return hv->channels[0];
|
|
|
|
}
|
|
|
|
|
2018-08-09 17:50:08 +00:00
|
|
|
/* Drain pending events on queue's channel; tx_limit caps Tx completions.
 * NOTE(review): presumably returns # of Rx packets processed — confirm
 * against callers.
 */
uint32_t hn_process_events(struct hn_data *hv, uint16_t queue_id,
		       uint32_t tx_limit);

/* Burst-mode transmit/receive entry points */
uint16_t hn_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		      uint16_t nb_pkts);
uint16_t hn_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		      uint16_t nb_pkts);

/* Chimney (Tx send buffer) bitmap setup/teardown */
int	hn_chim_init(struct rte_eth_dev *dev);
void	hn_chim_uninit(struct rte_eth_dev *dev);
int	hn_dev_link_update(struct rte_eth_dev *dev, int wait);

/* Tx queue ethdev ops */
int	hn_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			      uint16_t nb_desc, unsigned int socket_id,
			      const struct rte_eth_txconf *tx_conf);
void	hn_dev_tx_queue_release(void *arg);
void	hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
			     struct rte_eth_txq_info *qinfo);
int	hn_dev_tx_done_cleanup(void *arg, uint32_t free_cnt);
int	hn_dev_tx_descriptor_status(void *arg, uint16_t offset);

/* Rx queue ethdev ops */
struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv,
				      uint16_t queue_id,
				      unsigned int socket_id);
int	hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
			      uint16_t queue_idx, uint16_t nb_desc,
			      unsigned int socket_id,
			      const struct rte_eth_rxconf *rx_conf,
			      struct rte_mempool *mp);
void	hn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_id,
			     struct rte_eth_rxq_info *qinfo);
void	hn_dev_rx_queue_release(void *arg);
uint32_t hn_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_id);
int	hn_dev_rx_queue_status(void *rxq, uint16_t offset);
void	hn_dev_free_queues(struct rte_eth_dev *dev);
|
2018-08-30 22:35:12 +00:00
|
|
|
|
2020-04-30 19:08:51 +00:00
|
|
|
/*
|
|
|
|
* Get VF device for existing netvsc device
|
|
|
|
* Assumes vf_lock is held.
|
|
|
|
*/
|
2019-02-08 03:44:02 +00:00
|
|
|
static inline struct rte_eth_dev *
|
|
|
|
hn_get_vf_dev(const struct hn_data *hv)
|
|
|
|
{
|
2020-12-21 21:33:22 +00:00
|
|
|
if (hv->vf_ctx.vf_attached)
|
|
|
|
return &rte_eth_devices[hv->vf_ctx.vf_port];
|
2019-02-08 03:44:02 +00:00
|
|
|
else
|
2020-12-21 21:33:22 +00:00
|
|
|
return NULL;
|
2019-02-08 03:44:02 +00:00
|
|
|
}
|
|
|
|
|
2019-09-12 16:42:26 +00:00
|
|
|
/* VF passthrough helpers: each mirrors an ethdev op onto the companion
 * VF port when one is attached.
 */
int	hn_vf_info_get(struct hn_data *hv,
		       struct rte_eth_dev_info *info);
int	hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv);
/* Caller must hold vf_lock (per the _locked suffix) */
int	hn_vf_configure_locked(struct rte_eth_dev *dev,
			       const struct rte_eth_conf *dev_conf);
const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev);
int	hn_vf_start(struct rte_eth_dev *dev);
void	hn_vf_reset(struct rte_eth_dev *dev);
int	hn_vf_close(struct rte_eth_dev *dev);
int	hn_vf_stop(struct rte_eth_dev *dev);

/* Multicast/promiscuous mode forwarding */
int	hn_vf_allmulticast_enable(struct rte_eth_dev *dev);
int	hn_vf_allmulticast_disable(struct rte_eth_dev *dev);
int	hn_vf_promiscuous_enable(struct rte_eth_dev *dev);
int	hn_vf_promiscuous_disable(struct rte_eth_dev *dev);
int	hn_vf_mc_addr_list(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mc_addr_set,
			   uint32_t nb_mc_addr);

/* Queue setup/teardown forwarding */
int	hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
			     uint16_t queue_idx, uint16_t nb_desc,
			     unsigned int socket_id,
			     const struct rte_eth_txconf *tx_conf);
void	hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id);
int	hn_vf_tx_queue_status(struct hn_data *hv, uint16_t queue_id, uint16_t offset);

int	hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
			     uint16_t queue_idx, uint16_t nb_desc,
			     unsigned int socket_id,
			     const struct rte_eth_rxconf *rx_conf,
			     struct rte_mempool *mp);
void	hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id);

/* Statistics forwarding */
int	hn_vf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
int	hn_vf_stats_reset(struct rte_eth_dev *dev);
int	hn_vf_xstats_get_names(struct rte_eth_dev *dev,
			       struct rte_eth_xstat_name *xstats_names,
			       unsigned int size);
int	hn_vf_xstats_get(struct rte_eth_dev *dev,
			 struct rte_eth_xstat *xstats,
			 unsigned int offset, unsigned int n);
int	hn_vf_xstats_reset(struct rte_eth_dev *dev);

/* RSS configuration forwarding */
int	hn_vf_rss_hash_update(struct rte_eth_dev *dev,
			      struct rte_eth_rss_conf *rss_conf);
int	hn_vf_reta_hash_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size);
/* Callback for RTE_ETH_EVENT_INTR_RMV on the VF port */
int	hn_eth_rmv_event_callback(uint16_t port_id,
				  enum rte_eth_event_type event __rte_unused,
				  void *cb_arg, void *out __rte_unused);
|