Import multiqueue VirtIO net driver from my user/bryanv/vtnetmq branch

This is a significant rewrite of much of the previous driver; a lot of
miscellaneous cleanup was performed, and support for a few other minor
features was added.
This commit is contained in:
Bryan Venteicher 2013-09-01 04:33:47 +00:00
parent cfc28a5bf7
commit 8f3600b108
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=255112
4 changed files with 2795 additions and 1550 deletions

View File

@@ -69,14 +69,30 @@ prompt before booting the kernel or stored in
.Xr loader.conf 5 .
.Bl -tag -width "xxxxxx"
.It Va hw.vtnet.csum_disable
.It Va hw.vtnet. Ns Ar X Ns Va .csum_disable
This tunable disables receive and send checksum offload.
The default value is 0.
.It Va hw.vtnet.tso_disable
.It Va hw.vtnet. Ns Ar X Ns Va .tso_disable
This tunable disables TSO.
The default value is 0.
.It Va hw.vtnet.lro_disable
.It Va hw.vtnet. Ns Ar X Ns Va .lro_disable
This tunable disables LRO.
The default value is 0.
.It Va hw.vtnet.mq_disable
.It Va hw.vtnet. Ns Ar X Ns Va .mq_disable
This tunable disables multiqueue.
The default value is 0.
.It Va hw.vtnet.mq_max_pairs
.It Va hw.vtnet. Ns Ar X Ns Va .mq_max_pairs
This tunable sets the maximum number of transmit and receive queue pairs.
Multiple queues are only supported when the Multiqueue feature is negotiated.
This driver supports a maximum of 8 queue pairs.
The number of queue pairs used is the lesser of the maximum supported by the
driver and the hypervisor, the number of CPUs present in the guest, and this
tunable if not zero.
The default value is 0.
.El
.Sh SEE ALSO
.Xr arp 4 ,

File diff suppressed because it is too large Load Diff

View File

@@ -29,84 +29,166 @@
#ifndef _IF_VTNETVAR_H
#define _IF_VTNETVAR_H
struct vtnet_softc;
struct vtnet_statistics {
unsigned long mbuf_alloc_failed;
uint64_t mbuf_alloc_failed;
unsigned long rx_frame_too_large;
unsigned long rx_enq_replacement_failed;
unsigned long rx_mergeable_failed;
unsigned long rx_csum_bad_ethtype;
unsigned long rx_csum_bad_start;
unsigned long rx_csum_bad_ipproto;
unsigned long rx_csum_bad_offset;
unsigned long rx_csum_failed;
unsigned long rx_csum_offloaded;
unsigned long rx_task_rescheduled;
uint64_t rx_frame_too_large;
uint64_t rx_enq_replacement_failed;
uint64_t rx_mergeable_failed;
uint64_t rx_csum_bad_ethtype;
uint64_t rx_csum_bad_ipproto;
uint64_t rx_csum_bad_offset;
uint64_t rx_csum_bad_proto;
uint64_t tx_csum_bad_ethtype;
uint64_t tx_tso_bad_ethtype;
uint64_t tx_tso_not_tcp;
unsigned long tx_csum_offloaded;
unsigned long tx_tso_offloaded;
unsigned long tx_csum_bad_ethtype;
unsigned long tx_tso_bad_ethtype;
unsigned long tx_task_rescheduled;
/*
* These are accumulated from each Rx/Tx queue.
*/
uint64_t rx_csum_failed;
uint64_t rx_csum_offloaded;
uint64_t rx_task_rescheduled;
uint64_t tx_csum_offloaded;
uint64_t tx_tso_offloaded;
uint64_t tx_task_rescheduled;
};
struct vtnet_rxq_stats {
uint64_t vrxs_ipackets; /* if_ipackets */
uint64_t vrxs_ibytes; /* if_ibytes */
uint64_t vrxs_iqdrops; /* if_iqdrops */
uint64_t vrxs_ierrors; /* if_ierrors */
uint64_t vrxs_csum;
uint64_t vrxs_csum_failed;
uint64_t vrxs_rescheduled;
};
struct vtnet_rxq {
struct mtx vtnrx_mtx;
struct vtnet_softc *vtnrx_sc;
struct virtqueue *vtnrx_vq;
int vtnrx_id;
int vtnrx_process_limit;
struct vtnet_rxq_stats vtnrx_stats;
struct taskqueue *vtnrx_tq;
struct task vtnrx_intrtask;
char vtnrx_name[16];
} __aligned(CACHE_LINE_SIZE);
#define VTNET_RXQ_LOCK(_rxq) mtx_lock(&(_rxq)->vtnrx_mtx)
#define VTNET_RXQ_UNLOCK(_rxq) mtx_unlock(&(_rxq)->vtnrx_mtx)
#define VTNET_RXQ_LOCK_ASSERT(_rxq) \
mtx_assert(&(_rxq)->vtnrx_mtx, MA_OWNED)
#define VTNET_RXQ_LOCK_ASSERT_NOTOWNED(_rxq) \
mtx_assert(&(_rxq)->vtnrx_mtx, MA_NOTOWNED)
struct vtnet_txq_stats {
uint64_t vtxs_opackets; /* if_opackets */
uint64_t vtxs_obytes; /* if_obytes */
uint64_t vtxs_omcasts; /* if_omcasts */
uint64_t vtxs_csum;
uint64_t vtxs_tso;
uint64_t vtxs_collapsed;
uint64_t vtxs_rescheduled;
};
struct vtnet_txq {
struct mtx vtntx_mtx;
struct vtnet_softc *vtntx_sc;
struct virtqueue *vtntx_vq;
#ifndef VTNET_LEGACY_TX
struct buf_ring *vtntx_br;
#endif
int vtntx_id;
int vtntx_watchdog;
struct vtnet_txq_stats vtntx_stats;
struct taskqueue *vtntx_tq;
struct task vtntx_intrtask;
#ifndef VTNET_LEGACY_TX
struct task vtntx_defrtask;
#endif
char vtntx_name[16];
} __aligned(CACHE_LINE_SIZE);
#define VTNET_TXQ_LOCK(_txq) mtx_lock(&(_txq)->vtntx_mtx)
#define VTNET_TXQ_TRYLOCK(_txq) mtx_trylock(&(_txq)->vtntx_mtx)
#define VTNET_TXQ_UNLOCK(_txq) mtx_unlock(&(_txq)->vtntx_mtx)
#define VTNET_TXQ_LOCK_ASSERT(_txq) \
mtx_assert(&(_txq)->vtntx_mtx, MA_OWNED)
#define VTNET_TXQ_LOCK_ASSERT_NOTOWNED(_txq) \
mtx_assert(&(_txq)->vtntx_mtx, MA_NOTOWNED)
struct vtnet_softc {
device_t vtnet_dev;
struct ifnet *vtnet_ifp;
struct mtx vtnet_mtx;
struct vtnet_rxq *vtnet_rxqs;
struct vtnet_txq *vtnet_txqs;
uint32_t vtnet_flags;
#define VTNET_FLAG_LINK 0x0001
#define VTNET_FLAG_SUSPENDED 0x0002
#define VTNET_FLAG_SUSPENDED 0x0001
#define VTNET_FLAG_MAC 0x0002
#define VTNET_FLAG_CTRL_VQ 0x0004
#define VTNET_FLAG_CTRL_RX 0x0008
#define VTNET_FLAG_VLAN_FILTER 0x0010
#define VTNET_FLAG_TSO_ECN 0x0020
#define VTNET_FLAG_MRG_RXBUFS 0x0040
#define VTNET_FLAG_LRO_NOMRG 0x0080
struct virtqueue *vtnet_rx_vq;
struct virtqueue *vtnet_tx_vq;
struct virtqueue *vtnet_ctrl_vq;
#define VTNET_FLAG_CTRL_MAC 0x0010
#define VTNET_FLAG_VLAN_FILTER 0x0020
#define VTNET_FLAG_TSO_ECN 0x0040
#define VTNET_FLAG_MRG_RXBUFS 0x0080
#define VTNET_FLAG_LRO_NOMRG 0x0100
#define VTNET_FLAG_MULTIQ 0x0200
int vtnet_link_active;
int vtnet_hdr_size;
int vtnet_tx_size;
int vtnet_rx_size;
int vtnet_rx_process_limit;
int vtnet_rx_mbuf_size;
int vtnet_rx_mbuf_count;
int vtnet_rx_nmbufs;
int vtnet_rx_clsize;
int vtnet_rx_new_clsize;
int vtnet_if_flags;
int vtnet_watchdog_timer;
int vtnet_act_vq_pairs;
int vtnet_max_vq_pairs;
struct virtqueue *vtnet_ctrl_vq;
struct vtnet_mac_filter *vtnet_mac_filter;
uint32_t *vtnet_vlan_filter;
uint64_t vtnet_features;
struct vtnet_statistics vtnet_stats;
struct callout vtnet_tick_ch;
struct ifmedia vtnet_media;
eventhandler_tag vtnet_vlan_attach;
eventhandler_tag vtnet_vlan_detach;
struct ifmedia vtnet_media;
/*
* Fake media type; the host does not provide us with
* any real media information.
*/
#define VTNET_MEDIATYPE (IFM_ETHER | IFM_1000_T | IFM_FDX)
char vtnet_hwaddr[ETHER_ADDR_LEN];
struct vtnet_mac_filter *vtnet_mac_filter;
/*
* During reset, the host's VLAN filtering table is lost. The
* array below is used to restore all the VLANs configured on
* this interface after a reset.
*/
#define VTNET_VLAN_SHADOW_SIZE (4096 / 32)
int vtnet_nvlans;
uint32_t vtnet_vlan_shadow[VTNET_VLAN_SHADOW_SIZE];
struct mtx vtnet_mtx;
char vtnet_mtx_name[16];
char vtnet_hwaddr[ETHER_ADDR_LEN];
};
/*
* Maximum number of queue pairs we will autoconfigure to.
*/
#define VTNET_MAX_QUEUE_PAIRS 8
/*
* Additional completed entries can appear in a virtqueue before we can
* reenable interrupts. Number of times to retry before scheduling the
* taskqueue to process the completed entries.
*/
#define VTNET_INTR_DISABLE_RETRIES 4
/*
* Fake the media type. The host does not provide us with any real media
* information.
*/
#define VTNET_MEDIATYPE (IFM_ETHER | IFM_10G_T | IFM_FDX)
/*
* Number of words to allocate for the VLAN shadow table. There is one
* bit for each VLAN.
*/
#define VTNET_VLAN_FILTER_NWORDS (4096 / 32)
/*
* When mergeable buffers are not negotiated, the vtnet_rx_header structure
* below is placed at the beginning of the mbuf data. Use 4 bytes of pad to
@@ -161,8 +243,12 @@ struct vtnet_mac_filter {
*/
CTASSERT(sizeof(struct vtnet_mac_filter) <= PAGE_SIZE);
#define VTNET_WATCHDOG_TIMEOUT 5
#define VTNET_TX_TIMEOUT 5
#define VTNET_CSUM_OFFLOAD (CSUM_TCP | CSUM_UDP | CSUM_SCTP)
#define VTNET_CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_SCTP_IPV6)
#define VTNET_CSUM_ALL_OFFLOAD \
(VTNET_CSUM_OFFLOAD | VTNET_CSUM_OFFLOAD_IPV6 | CSUM_TSO)
/* Features desired/implemented by this driver. */
#define VTNET_FEATURES \
@@ -170,8 +256,10 @@ CTASSERT(sizeof(struct vtnet_mac_filter) <= PAGE_SIZE);
VIRTIO_NET_F_STATUS | \
VIRTIO_NET_F_CTRL_VQ | \
VIRTIO_NET_F_CTRL_RX | \
VIRTIO_NET_F_CTRL_MAC_ADDR | \
VIRTIO_NET_F_CTRL_VLAN | \
VIRTIO_NET_F_CSUM | \
VIRTIO_NET_F_GSO | \
VIRTIO_NET_F_HOST_TSO4 | \
VIRTIO_NET_F_HOST_TSO6 | \
VIRTIO_NET_F_HOST_ECN | \
@@ -180,8 +268,17 @@ CTASSERT(sizeof(struct vtnet_mac_filter) <= PAGE_SIZE);
VIRTIO_NET_F_GUEST_TSO6 | \
VIRTIO_NET_F_GUEST_ECN | \
VIRTIO_NET_F_MRG_RXBUF | \
VIRTIO_NET_F_MQ | \
VIRTIO_RING_F_EVENT_IDX | \
VIRTIO_RING_F_INDIRECT_DESC)
/*
* The VIRTIO_NET_F_HOST_TSO[46] features permit us to send the host
* frames larger than 1514 bytes.
*/
#define VTNET_TSO_FEATURES (VIRTIO_NET_F_GSO | VIRTIO_NET_F_HOST_TSO4 | \
VIRTIO_NET_F_HOST_TSO6 | VIRTIO_NET_F_HOST_ECN)
/*
* The VIRTIO_NET_F_GUEST_TSO[46] features permit the host to send us
* frames larger than 1514 bytes. We do not yet support software LRO
@@ -208,28 +305,35 @@ CTASSERT(sizeof(struct vtnet_mac_filter) <= PAGE_SIZE);
CTASSERT(((VTNET_MAX_RX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_RX_SIZE);
CTASSERT(((VTNET_MAX_TX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_MTU);
/*
* Number of slots in the Tx bufrings. This value matches most other
* multiqueue drivers.
*/
#define VTNET_DEFAULT_BUFRING_SIZE 4096
/*
* Determine how many mbufs are in each receive buffer. For LRO without
* mergeable descriptors, we must allocate an mbuf chain large enough to
* hold both the vtnet_rx_header and the maximum receivable data.
*/
#define VTNET_NEEDED_RX_MBUFS(_sc) \
#define VTNET_NEEDED_RX_MBUFS(_sc, _clsize) \
((_sc)->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0 ? 1 : \
howmany(sizeof(struct vtnet_rx_header) + VTNET_MAX_RX_SIZE, \
(_sc)->vtnet_rx_mbuf_size)
(_clsize))
#define VTNET_MTX(_sc) &(_sc)->vtnet_mtx
#define VTNET_LOCK(_sc) mtx_lock(VTNET_MTX((_sc)))
#define VTNET_UNLOCK(_sc) mtx_unlock(VTNET_MTX((_sc)))
#define VTNET_LOCK_DESTROY(_sc) mtx_destroy(VTNET_MTX((_sc)))
#define VTNET_LOCK_ASSERT(_sc) mtx_assert(VTNET_MTX((_sc)), MA_OWNED)
#define VTNET_LOCK_ASSERT_NOTOWNED(_sc) \
mtx_assert(VTNET_MTX((_sc)), MA_NOTOWNED)
#define VTNET_CORE_MTX(_sc) &(_sc)->vtnet_mtx
#define VTNET_CORE_LOCK(_sc) mtx_lock(VTNET_CORE_MTX((_sc)))
#define VTNET_CORE_UNLOCK(_sc) mtx_unlock(VTNET_CORE_MTX((_sc)))
#define VTNET_CORE_LOCK_DESTROY(_sc) mtx_destroy(VTNET_CORE_MTX((_sc)))
#define VTNET_CORE_LOCK_ASSERT(_sc) \
mtx_assert(VTNET_CORE_MTX((_sc)), MA_OWNED)
#define VTNET_CORE_LOCK_ASSERT_NOTOWNED(_sc) \
mtx_assert(VTNET_CORE_MTX((_sc)), MA_NOTOWNED)
#define VTNET_LOCK_INIT(_sc) do { \
#define VTNET_CORE_LOCK_INIT(_sc) do { \
snprintf((_sc)->vtnet_mtx_name, sizeof((_sc)->vtnet_mtx_name), \
"%s", device_get_nameunit((_sc)->vtnet_dev)); \
mtx_init(VTNET_MTX((_sc)), (_sc)->vtnet_mtx_name, \
mtx_init(VTNET_CORE_MTX((_sc)), (_sc)->vtnet_mtx_name, \
"VTNET Core Lock", MTX_DEF); \
} while (0)

View File

@@ -23,14 +23,29 @@
# SUCH DAMAGE.
#
.include <bsd.own.mk>
.PATH: ${.CURDIR}/../../../dev/virtio/network
KMOD= if_vtnet
SRCS= if_vtnet.c
SRCS+= virtio_bus_if.h virtio_if.h
SRCS+= bus_if.h device_if.h
SRCS+= opt_inet.h opt_inet6.h
MFILES= kern/bus_if.m kern/device_if.m \
dev/virtio/virtio_bus_if.m dev/virtio/virtio_if.m
.if !defined(KERNBUILDDIR)
.if ${MK_INET_SUPPORT} != "no"
opt_inet.h:
@echo "#define INET 1" > ${.TARGET}
.endif
.if ${MK_INET6_SUPPORT} != "no"
opt_inet6.h:
@echo "#define INET6 1" > ${.TARGET}
.endif
.endif
.include <bsd.kmod.mk>