This patch improves Tx offload feature selection depending on whether the application requests Tx offloads. When the application doesn't request any Tx offload, the corresponding feature bits aren't negotiated.

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Reviewed-by: Tiwei Bie <tiwei.bie@intel.com>
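As context for the change described above, the negotiation boils down to adding host offload feature bits to the requested feature set only when the application enabled the matching Tx offloads at configure time. Below is a minimal sketch of that idea, assuming the pre-21.11 DEV_TX_OFFLOAD_* flag names (matching the DEV_RX_OFFLOAD_* names used in this header) and the VIRTIO_NET_F_CSUM / VIRTIO_NET_F_HOST_TSO4 / VIRTIO_NET_F_HOST_TSO6 bits from virtio_pci.h; the helper name is hypothetical and this is not the patch's literal code.

/* Hypothetical helper (not from the patch): assumes <rte_ethdev.h> for the
 * DEV_TX_OFFLOAD_* flags and "virtio_pci.h" for the VIRTIO_NET_F_* bits.
 */
static uint64_t
virtio_tx_offloads_to_features(uint64_t req_features, uint64_t tx_offloads)
{
	/* Checksum offload requested: negotiate the host CSUM feature. */
	if (tx_offloads & (DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM))
		req_features |= 1ULL << VIRTIO_NET_F_CSUM;

	/* TSO requested: negotiate the host TSO4/TSO6 features. */
	if (tx_offloads & DEV_TX_OFFLOAD_TCP_TSO)
		req_features |= (1ULL << VIRTIO_NET_F_HOST_TSO4) |
				(1ULL << VIRTIO_NET_F_HOST_TSO6);

	/* Offloads the application didn't ask for leave their bits clear. */
	return req_features;
}

The header file follows.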
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#ifndef _VIRTIO_ETHDEV_H_
#define _VIRTIO_ETHDEV_H_

#include <stdint.h>

#include "virtio_pci.h"

#ifndef PAGE_SIZE
#define PAGE_SIZE 4096
#endif

#define VIRTIO_MAX_RX_QUEUES 128U
#define VIRTIO_MAX_TX_QUEUES 128U
#define VIRTIO_MAX_MAC_ADDRS 64
#define VIRTIO_MIN_RX_BUFSIZE 64
#define VIRTIO_MAX_RX_PKTLEN  9728U

/* Features desired/implemented by this driver. */
#define VIRTIO_PMD_DEFAULT_GUEST_FEATURES	\
	(1u << VIRTIO_NET_F_MAC		  |	\
	 1u << VIRTIO_NET_F_STATUS	  |	\
	 1u << VIRTIO_NET_F_MQ		  |	\
	 1u << VIRTIO_NET_F_CTRL_MAC_ADDR |	\
	 1u << VIRTIO_NET_F_CTRL_VQ	  |	\
	 1u << VIRTIO_NET_F_CTRL_RX	  |	\
	 1u << VIRTIO_NET_F_CTRL_VLAN	  |	\
	 1u << VIRTIO_NET_F_MRG_RXBUF	  |	\
	 1u << VIRTIO_NET_F_MTU		  |	\
	 1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE |	\
	 1u << VIRTIO_RING_F_INDIRECT_DESC |	\
	 1ULL << VIRTIO_F_VERSION_1	  |	\
	 1ULL << VIRTIO_F_IN_ORDER	  |	\
	 1ULL << VIRTIO_F_IOMMU_PLATFORM)

#define VIRTIO_PMD_SUPPORTED_GUEST_FEATURES	\
	(VIRTIO_PMD_DEFAULT_GUEST_FEATURES |	\
	 1u << VIRTIO_NET_F_GUEST_CSUM	   |	\
	 1u << VIRTIO_NET_F_GUEST_TSO4	   |	\
	 1u << VIRTIO_NET_F_GUEST_TSO6)

#define VIRTIO_PMD_PER_DEVICE_RX_OFFLOADS	\
	(DEV_RX_OFFLOAD_TCP_CKSUM |		\
	 DEV_RX_OFFLOAD_UDP_CKSUM |		\
	 DEV_RX_OFFLOAD_TCP_LRO |		\
	 DEV_RX_OFFLOAD_VLAN_FILTER |		\
	 DEV_RX_OFFLOAD_VLAN_STRIP)
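
/*
 * Illustrative sketch, not part of the original header: the guest offload
 * bits kept out of VIRTIO_PMD_DEFAULT_GUEST_FEATURES are intended to be
 * added to the requested feature set only when the application enables the
 * matching per-device Rx offloads above. The helper below is hypothetical.
 */
static inline uint64_t
virtio_rx_offloads_to_features(uint64_t req_features, uint64_t rx_offloads)
{
	/* Rx checksum offload maps to the guest CSUM feature. */
	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM))
		req_features |= 1ULL << VIRTIO_NET_F_GUEST_CSUM;

	/* LRO maps to the guest TSO4/TSO6 features. */
	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
		req_features |= (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
				(1ULL << VIRTIO_NET_F_GUEST_TSO6);

	return req_features;
}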

/*
 * CQ function prototype
 */
void virtio_dev_cq_start(struct rte_eth_dev *dev);

/*
 * RX/TX function prototypes
 */

int virtio_dev_rx_queue_done(void *rxq, uint16_t offset);

int virtio_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool);

int virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev,
		uint16_t rx_queue_id);

int virtio_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc, unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf);

int virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
		uint16_t tx_queue_id);

uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts);

uint16_t virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts);

uint16_t virtio_recv_mergeable_pkts_inorder(void *rx_queue,
		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);

uint16_t virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts);

uint16_t virtio_xmit_pkts_inorder(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts);

uint16_t virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts);

uint16_t virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts);

int eth_virtio_dev_init(struct rte_eth_dev *eth_dev);

void virtio_interrupt_handler(void *param);

int virtio_dev_pause(struct rte_eth_dev *dev);
void virtio_dev_resume(struct rte_eth_dev *dev);
int virtio_inject_pkts(struct rte_eth_dev *dev, struct rte_mbuf **tx_pkts,
		int nb_pkts);

#endif /* _VIRTIO_ETHDEV_H_ */