From 8492797ec6bdf2bbdde97e9a4bfb073552bc93ba Mon Sep 17 00:00:00 2001
From: luigi <luigi@FreeBSD.org>
Date: Sun, 16 Oct 2016 15:22:17 +0000
Subject: [PATCH] add two missing files for the netmap import

---
 sys/dev/netmap/if_ptnet.c  | 2277 ++++++++++++++++++++++++++++++++++++
 sys/dev/netmap/netmap_pt.c | 1462 +++++++++++++++++++++++
 2 files changed, 3739 insertions(+)
 create mode 100644 sys/dev/netmap/if_ptnet.c
 create mode 100644 sys/dev/netmap/netmap_pt.c

diff --git a/sys/dev/netmap/if_ptnet.c b/sys/dev/netmap/if_ptnet.c
new file mode 100644
index 000000000000..86b500e36e18
--- /dev/null
+++ b/sys/dev/netmap/if_ptnet.c
@@ -0,0 +1,2277 @@
+/*-
+ * Copyright (c) 2016, Vincenzo Maffione
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/* Driver for ptnet paravirtualized network device.
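+ * The device exposes the rings of a host netmap port to the guest
+ * (netmap passthrough). Configuration and notifications ("kicks") go
+ * through an I/O PCI BAR, while ring state is exchanged with the
+ * hypervisor through a shared Communication Status Block (CSB), as can
+ * be seen in the PTNET_IO_* accesses and ptring manipulations below.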
+ */
+
+#include <sys/cdefs.h>
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/sockio.h>
+#include <sys/mbuf.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/taskqueue.h>
+#include <sys/smp.h>
+#include <sys/time.h>
+#include <machine/smp.h>
+
+#include <vm/uma.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_arp.h>
+#include <net/if_dl.h>
+#include <net/if_types.h>
+#include <net/if_media.h>
+#include <net/if_vlan_var.h>
+#include <net/bpf.h>
+
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet6/ip6_var.h>
+#include <netinet/udp.h>
+#include <netinet/tcp.h>
+#include <netinet/sctp.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+
+#include "opt_inet.h"
+#include "opt_inet6.h"
+
+#include <sys/selinfo.h>
+#include <net/netmap.h>
+#include <dev/netmap/netmap_kern.h>
+#include <net/netmap_virt.h>
+#include <dev/netmap/netmap_mem2.h>
+#include <dev/virtio/network/virtio_net.h>
+
+#ifndef PTNET_CSB_ALLOC
+#error "No support for on-device CSB"
+#endif
+
+#ifndef INET
+#error "INET not defined, cannot support offloadings"
+#endif
+
+#if __FreeBSD_version >= 1100000
+static uint64_t ptnet_get_counter(if_t, ift_counter);
+#else
+typedef struct ifnet *if_t;
+#define if_getsoftc(_ifp)   (_ifp)->if_softc
+#endif
+
+//#define PTNETMAP_STATS
+//#define DEBUG
+#ifdef DEBUG
+#define DBG(x) x
+#else   /* !DEBUG */
+#define DBG(x)
+#endif  /* !DEBUG */
+
+extern int ptnet_vnet_hdr; /* Tunable parameter */
+
+struct ptnet_softc;
+
+struct ptnet_queue_stats {
+        uint64_t packets; /* if_[io]packets */
+        uint64_t bytes;   /* if_[io]bytes */
+        uint64_t errors;  /* if_[io]errors */
+        uint64_t iqdrops; /* if_iqdrops */
+        uint64_t mcasts;  /* if_[io]mcasts */
+#ifdef PTNETMAP_STATS
+        uint64_t intrs;
+        uint64_t kicks;
+#endif /* PTNETMAP_STATS */
+};
+
+struct ptnet_queue {
+        struct ptnet_softc *sc;
+        struct resource *irq;
+        void *cookie;
+        int kring_id;
+        struct ptnet_ring *ptring;
+        unsigned int kick;
+        struct mtx lock;
+        struct buf_ring *bufring; /* for TX queues */
+        struct ptnet_queue_stats stats;
+#ifdef PTNETMAP_STATS
+        struct ptnet_queue_stats last_stats;
+#endif /* PTNETMAP_STATS */
+        struct taskqueue *taskq;
+        struct task task;
+        char lock_name[16];
+};
+
+#define PTNET_Q_LOCK(_pq)    mtx_lock(&(_pq)->lock)
+#define PTNET_Q_TRYLOCK(_pq) mtx_trylock(&(_pq)->lock)
+#define PTNET_Q_UNLOCK(_pq)  mtx_unlock(&(_pq)->lock)
+
+struct ptnet_softc {
+        device_t dev;
+        if_t ifp;
+        struct ifmedia media;
+        struct mtx lock;
+        char lock_name[16];
+        char hwaddr[ETHER_ADDR_LEN];
+
+        /* Mirror of PTFEAT register. */
+        uint32_t ptfeatures;
+        unsigned int vnet_hdr_len;
+
+        /* PCI BARs support.
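+         * iomem maps the register BAR (PTNETMAP_IO_PCI_BAR), used below
+         * for configuration and for the per-queue kick registers, while
+         * msix_mem maps the MSI-X table BAR (PTNETMAP_MSIX_PCI_BAR).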
*/ + struct resource *iomem; + struct resource *msix_mem; + + unsigned int num_rings; + unsigned int num_tx_rings; + struct ptnet_queue *queues; + struct ptnet_queue *rxqueues; + struct ptnet_csb *csb; + + unsigned int min_tx_space; + + struct netmap_pt_guest_adapter *ptna; + + struct callout tick; +#ifdef PTNETMAP_STATS + struct timeval last_ts; +#endif /* PTNETMAP_STATS */ +}; + +#define PTNET_CORE_LOCK(_sc) mtx_lock(&(_sc)->lock) +#define PTNET_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->lock) + +static int ptnet_probe(device_t); +static int ptnet_attach(device_t); +static int ptnet_detach(device_t); +static int ptnet_suspend(device_t); +static int ptnet_resume(device_t); +static int ptnet_shutdown(device_t); + +static void ptnet_init(void *opaque); +static int ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data); +static int ptnet_init_locked(struct ptnet_softc *sc); +static int ptnet_stop(struct ptnet_softc *sc); +static int ptnet_transmit(if_t ifp, struct mbuf *m); +static int ptnet_drain_transmit_queue(struct ptnet_queue *pq, + unsigned int budget, + bool may_resched); +static void ptnet_qflush(if_t ifp); +static void ptnet_tx_task(void *context, int pending); + +static int ptnet_media_change(if_t ifp); +static void ptnet_media_status(if_t ifp, struct ifmediareq *ifmr); +#ifdef PTNETMAP_STATS +static void ptnet_tick(void *opaque); +#endif + +static int ptnet_irqs_init(struct ptnet_softc *sc); +static void ptnet_irqs_fini(struct ptnet_softc *sc); + +static uint32_t ptnet_nm_ptctl(if_t ifp, uint32_t cmd); +static int ptnet_nm_config(struct netmap_adapter *na, unsigned *txr, + unsigned *txd, unsigned *rxr, unsigned *rxd); +static void ptnet_update_vnet_hdr(struct ptnet_softc *sc); +static int ptnet_nm_register(struct netmap_adapter *na, int onoff); +static int ptnet_nm_txsync(struct netmap_kring *kring, int flags); +static int ptnet_nm_rxsync(struct netmap_kring *kring, int flags); + +static void ptnet_tx_intr(void *opaque); +static void ptnet_rx_intr(void *opaque); + +static unsigned ptnet_rx_discard(struct netmap_kring *kring, + unsigned int head); +static int ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, + bool may_resched); +static void ptnet_rx_task(void *context, int pending); + +#ifdef DEVICE_POLLING +static poll_handler_t ptnet_poll; +#endif + +static device_method_t ptnet_methods[] = { + DEVMETHOD(device_probe, ptnet_probe), + DEVMETHOD(device_attach, ptnet_attach), + DEVMETHOD(device_detach, ptnet_detach), + DEVMETHOD(device_suspend, ptnet_suspend), + DEVMETHOD(device_resume, ptnet_resume), + DEVMETHOD(device_shutdown, ptnet_shutdown), + DEVMETHOD_END +}; + +static driver_t ptnet_driver = { + "ptnet", + ptnet_methods, + sizeof(struct ptnet_softc) +}; + +/* We use (SI_ORDER_MIDDLE+2) here, see DEV_MODULE_ORDERED() invocation. 
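+ * Presumably the intent is to make this driver attach only after the
+ * netmap core module has been initialized, since ptnet_attach() relies
+ * on netmap_pt_guest_attach() below.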
+ */
+static devclass_t ptnet_devclass;
+DRIVER_MODULE_ORDERED(ptnet, pci, ptnet_driver, ptnet_devclass,
+                      NULL, NULL, SI_ORDER_MIDDLE + 2);
+
+static int
+ptnet_probe(device_t dev)
+{
+        if (pci_get_vendor(dev) != PTNETMAP_PCI_VENDOR_ID ||
+                pci_get_device(dev) != PTNETMAP_PCI_NETIF_ID) {
+                return (ENXIO);
+        }
+
+        device_set_desc(dev, "ptnet network adapter");
+
+        return (BUS_PROBE_DEFAULT);
+}
+
+static inline void ptnet_kick(struct ptnet_queue *pq)
+{
+#ifdef PTNETMAP_STATS
+        pq->stats.kicks ++;
+#endif /* PTNETMAP_STATS */
+        bus_write_4(pq->sc->iomem, pq->kick, 0);
+}
+
+#define PTNET_BUF_RING_SIZE  4096
+#define PTNET_RX_BUDGET      512
+#define PTNET_RX_BATCH       1
+#define PTNET_TX_BUDGET      512
+#define PTNET_TX_BATCH       64
+#define PTNET_HDR_SIZE       sizeof(struct virtio_net_hdr_mrg_rxbuf)
+#define PTNET_MAX_PKT_SIZE   65536
+
+#define PTNET_CSUM_OFFLOAD   (CSUM_TCP | CSUM_UDP | CSUM_SCTP)
+#define PTNET_CSUM_OFFLOAD_IPV6  (CSUM_TCP_IPV6 | CSUM_UDP_IPV6 |\
+                                  CSUM_SCTP_IPV6)
+#define PTNET_ALL_OFFLOAD    (CSUM_TSO | PTNET_CSUM_OFFLOAD |\
+                              PTNET_CSUM_OFFLOAD_IPV6)
+
+static int
+ptnet_attach(device_t dev)
+{
+        uint32_t ptfeatures = PTNETMAP_F_BASE;
+        unsigned int num_rx_rings, num_tx_rings;
+        struct netmap_adapter na_arg;
+        unsigned int nifp_offset;
+        struct ptnet_softc *sc;
+        if_t ifp;
+        uint32_t macreg;
+        int err, rid;
+        int i;
+
+        sc = device_get_softc(dev);
+        sc->dev = dev;
+
+        /* Setup PCI resources. */
+        pci_enable_busmaster(dev);
+
+        rid = PCIR_BAR(PTNETMAP_IO_PCI_BAR);
+        sc->iomem = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
+                                           RF_ACTIVE);
+        if (sc->iomem == NULL) {
+                device_printf(dev, "Failed to map I/O BAR\n");
+                return (ENXIO);
+        }
+
+        /* Check if we are supported by the hypervisor. If not,
+         * bail out immediately. */
+        if (ptnet_vnet_hdr) {
+                ptfeatures |= PTNETMAP_F_VNET_HDR;
+        }
+        bus_write_4(sc->iomem, PTNET_IO_PTFEAT, ptfeatures); /* wanted */
+        ptfeatures = bus_read_4(sc->iomem, PTNET_IO_PTFEAT); /* acked */
+        if (!(ptfeatures & PTNETMAP_F_BASE)) {
+                device_printf(dev, "Hypervisor does not support netmap "
+                                   "passthrough\n");
+                err = ENXIO;
+                goto err_path;
+        }
+        sc->ptfeatures = ptfeatures;
+
+        /* Allocate CSB and carry out CSB allocation protocol (CSBBAH first,
+         * then CSBBAL). */
+        sc->csb = malloc(sizeof(struct ptnet_csb), M_DEVBUF,
+                         M_NOWAIT | M_ZERO);
+        if (sc->csb == NULL) {
+                device_printf(dev, "Failed to allocate CSB\n");
+                err = ENOMEM;
+                goto err_path;
+        }
+
+        {
+                vm_paddr_t paddr = vtophys(sc->csb);
+
+                bus_write_4(sc->iomem, PTNET_IO_CSBBAH,
+                            (paddr >> 32) & 0xffffffff);
+                bus_write_4(sc->iomem, PTNET_IO_CSBBAL, paddr & 0xffffffff);
+        }
+
+        num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS);
+        num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS);
+        sc->num_rings = num_tx_rings + num_rx_rings;
+        sc->num_tx_rings = num_tx_rings;
+
+        /* Allocate and initialize per-queue data structures. */
+        sc->queues = malloc(sizeof(struct ptnet_queue) * sc->num_rings,
+                            M_DEVBUF, M_NOWAIT | M_ZERO);
+        if (sc->queues == NULL) {
+                err = ENOMEM;
+                goto err_path;
+        }
+        sc->rxqueues = sc->queues + num_tx_rings;
+
+        for (i = 0; i < sc->num_rings; i++) {
+                struct ptnet_queue *pq = sc->queues + i;
+
+                pq->sc = sc;
+                pq->kring_id = i;
+                pq->kick = PTNET_IO_KICK_BASE + 4 * i;
+                pq->ptring = sc->csb->rings + i;
+                snprintf(pq->lock_name, sizeof(pq->lock_name), "%s-%d",
+                         device_get_nameunit(dev), i);
+                mtx_init(&pq->lock, pq->lock_name, NULL, MTX_DEF);
+                if (i >= num_tx_rings) {
+                        /* RX queue: fix kring_id.
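+                         * The queues array holds the num_tx_rings TX
+                         * queues first, then the RX queues, while
+                         * kring_id must index a ring within its own
+                         * direction: RX queues therefore rebase it by
+                         * num_tx_rings.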
*/ + pq->kring_id -= num_tx_rings; + } else { + /* TX queue: allocate buf_ring. */ + pq->bufring = buf_ring_alloc(PTNET_BUF_RING_SIZE, + M_DEVBUF, M_NOWAIT, &pq->lock); + if (pq->bufring == NULL) { + err = ENOMEM; + goto err_path; + } + } + } + + sc->min_tx_space = 64; /* Safe initial value. */ + + err = ptnet_irqs_init(sc); + if (err) { + goto err_path; + } + + /* Setup Ethernet interface. */ + sc->ifp = ifp = if_alloc(IFT_ETHER); + if (ifp == NULL) { + device_printf(dev, "Failed to allocate ifnet\n"); + err = ENOMEM; + goto err_path; + } + + if_initname(ifp, device_get_name(dev), device_get_unit(dev)); + ifp->if_baudrate = IF_Gbps(10); + ifp->if_softc = sc; + ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX; + ifp->if_init = ptnet_init; + ifp->if_ioctl = ptnet_ioctl; +#if __FreeBSD_version >= 1100000 + ifp->if_get_counter = ptnet_get_counter; +#endif + ifp->if_transmit = ptnet_transmit; + ifp->if_qflush = ptnet_qflush; + + ifmedia_init(&sc->media, IFM_IMASK, ptnet_media_change, + ptnet_media_status); + ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX, 0, NULL); + ifmedia_set(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX); + + macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_HI); + sc->hwaddr[0] = (macreg >> 8) & 0xff; + sc->hwaddr[1] = macreg & 0xff; + macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_LO); + sc->hwaddr[2] = (macreg >> 24) & 0xff; + sc->hwaddr[3] = (macreg >> 16) & 0xff; + sc->hwaddr[4] = (macreg >> 8) & 0xff; + sc->hwaddr[5] = macreg & 0xff; + + ether_ifattach(ifp, sc->hwaddr); + + ifp->if_hdrlen = sizeof(struct ether_vlan_header); + ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU; + + if (sc->ptfeatures & PTNETMAP_F_VNET_HDR) { + /* Similarly to what the vtnet driver does, we can emulate + * VLAN offloadings by inserting and removing the 802.1Q + * header during transmit and receive. We are then able + * to do checksum offloading of VLAN frames. */ + ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 + | IFCAP_VLAN_HWCSUM + | IFCAP_TSO | IFCAP_LRO + | IFCAP_VLAN_HWTSO + | IFCAP_VLAN_HWTAGGING; + } + + ifp->if_capenable = ifp->if_capabilities; +#ifdef DEVICE_POLLING + /* Don't enable polling by default. */ + ifp->if_capabilities |= IFCAP_POLLING; +#endif + snprintf(sc->lock_name, sizeof(sc->lock_name), + "%s", device_get_nameunit(dev)); + mtx_init(&sc->lock, sc->lock_name, "ptnet core lock", MTX_DEF); + callout_init_mtx(&sc->tick, &sc->lock, 0); + + /* Prepare a netmap_adapter struct instance to do netmap_attach(). */ + nifp_offset = bus_read_4(sc->iomem, PTNET_IO_NIFP_OFS); + memset(&na_arg, 0, sizeof(na_arg)); + na_arg.ifp = ifp; + na_arg.num_tx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS); + na_arg.num_rx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS); + na_arg.num_tx_rings = num_tx_rings; + na_arg.num_rx_rings = num_rx_rings; + na_arg.nm_config = ptnet_nm_config; + na_arg.nm_krings_create = ptnet_nm_krings_create; + na_arg.nm_krings_delete = ptnet_nm_krings_delete; + na_arg.nm_dtor = ptnet_nm_dtor; + na_arg.nm_register = ptnet_nm_register; + na_arg.nm_txsync = ptnet_nm_txsync; + na_arg.nm_rxsync = ptnet_nm_rxsync; + + netmap_pt_guest_attach(&na_arg, sc->csb, nifp_offset, ptnet_nm_ptctl); + + /* Now a netmap adapter for this ifp has been allocated, and it + * can be accessed through NA(ifp). We also have to initialize the CSB + * pointer. 
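+ * Each queue's ptring points at its slot in sc->csb->rings[], so from
+ * here on guest and host exchange head/cur/hwcur/hwtail through this
+ * shared memory rather than through register accesses.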
*/ + sc->ptna = (struct netmap_pt_guest_adapter *)NA(ifp); + + /* If virtio-net header was negotiated, set the virt_hdr_len field in + * the netmap adapter, to inform users that this netmap adapter requires + * the application to deal with the headers. */ + ptnet_update_vnet_hdr(sc); + + device_printf(dev, "%s() completed\n", __func__); + + return (0); + +err_path: + ptnet_detach(dev); + return err; +} + +static int +ptnet_detach(device_t dev) +{ + struct ptnet_softc *sc = device_get_softc(dev); + int i; + +#ifdef DEVICE_POLLING + if (sc->ifp->if_capenable & IFCAP_POLLING) { + ether_poll_deregister(sc->ifp); + } +#endif + callout_drain(&sc->tick); + + if (sc->queues) { + /* Drain taskqueues before calling if_detach. */ + for (i = 0; i < sc->num_rings; i++) { + struct ptnet_queue *pq = sc->queues + i; + + if (pq->taskq) { + taskqueue_drain(pq->taskq, &pq->task); + } + } + } + + if (sc->ifp) { + ether_ifdetach(sc->ifp); + + /* Uninitialize netmap adapters for this device. */ + netmap_detach(sc->ifp); + + ifmedia_removeall(&sc->media); + if_free(sc->ifp); + sc->ifp = NULL; + } + + ptnet_irqs_fini(sc); + + if (sc->csb) { + bus_write_4(sc->iomem, PTNET_IO_CSBBAH, 0); + bus_write_4(sc->iomem, PTNET_IO_CSBBAL, 0); + free(sc->csb, M_DEVBUF); + sc->csb = NULL; + } + + if (sc->queues) { + for (i = 0; i < sc->num_rings; i++) { + struct ptnet_queue *pq = sc->queues + i; + + if (mtx_initialized(&pq->lock)) { + mtx_destroy(&pq->lock); + } + if (pq->bufring != NULL) { + buf_ring_free(pq->bufring, M_DEVBUF); + } + } + free(sc->queues, M_DEVBUF); + sc->queues = NULL; + } + + if (sc->iomem) { + bus_release_resource(dev, SYS_RES_IOPORT, + PCIR_BAR(PTNETMAP_IO_PCI_BAR), sc->iomem); + sc->iomem = NULL; + } + + mtx_destroy(&sc->lock); + + device_printf(dev, "%s() completed\n", __func__); + + return (0); +} + +static int +ptnet_suspend(device_t dev) +{ + struct ptnet_softc *sc; + + sc = device_get_softc(dev); + (void)sc; + + return (0); +} + +static int +ptnet_resume(device_t dev) +{ + struct ptnet_softc *sc; + + sc = device_get_softc(dev); + (void)sc; + + return (0); +} + +static int +ptnet_shutdown(device_t dev) +{ + /* + * Suspend already does all of what we need to + * do here; we just never expect to be resumed. 
+ */ + return (ptnet_suspend(dev)); +} + +static int +ptnet_irqs_init(struct ptnet_softc *sc) +{ + int rid = PCIR_BAR(PTNETMAP_MSIX_PCI_BAR); + int nvecs = sc->num_rings; + device_t dev = sc->dev; + int err = ENOSPC; + int cpu_cur; + int i; + + if (pci_find_cap(dev, PCIY_MSIX, NULL) != 0) { + device_printf(dev, "Could not find MSI-X capability\n"); + return (ENXIO); + } + + sc->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, + &rid, RF_ACTIVE); + if (sc->msix_mem == NULL) { + device_printf(dev, "Failed to allocate MSIX PCI BAR\n"); + return (ENXIO); + } + + if (pci_msix_count(dev) < nvecs) { + device_printf(dev, "Not enough MSI-X vectors\n"); + goto err_path; + } + + err = pci_alloc_msix(dev, &nvecs); + if (err) { + device_printf(dev, "Failed to allocate MSI-X vectors\n"); + goto err_path; + } + + for (i = 0; i < nvecs; i++) { + struct ptnet_queue *pq = sc->queues + i; + + rid = i + 1; + pq->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, + RF_ACTIVE); + if (pq->irq == NULL) { + device_printf(dev, "Failed to allocate interrupt " + "for queue #%d\n", i); + err = ENOSPC; + goto err_path; + } + } + + cpu_cur = CPU_FIRST(); + for (i = 0; i < nvecs; i++) { + struct ptnet_queue *pq = sc->queues + i; + void (*handler)(void *) = ptnet_tx_intr; + + if (i >= sc->num_tx_rings) { + handler = ptnet_rx_intr; + } + err = bus_setup_intr(dev, pq->irq, INTR_TYPE_NET | INTR_MPSAFE, + NULL /* intr_filter */, handler, + pq, &pq->cookie); + if (err) { + device_printf(dev, "Failed to register intr handler " + "for queue #%d\n", i); + goto err_path; + } + + bus_describe_intr(dev, pq->irq, pq->cookie, "q%d", i); +#if 0 + bus_bind_intr(sc->dev, pq->irq, cpu_cur); +#endif + cpu_cur = CPU_NEXT(cpu_cur); + } + + device_printf(dev, "Allocated %d MSI-X vectors\n", nvecs); + + cpu_cur = CPU_FIRST(); + for (i = 0; i < nvecs; i++) { + struct ptnet_queue *pq = sc->queues + i; + static void (*handler)(void *context, int pending); + + handler = (i < sc->num_tx_rings) ? ptnet_tx_task : ptnet_rx_task; + + TASK_INIT(&pq->task, 0, handler, pq); + pq->taskq = taskqueue_create_fast("ptnet_queue", M_NOWAIT, + taskqueue_thread_enqueue, &pq->taskq); + taskqueue_start_threads(&pq->taskq, 1, PI_NET, "%s-pq-%d", + device_get_nameunit(sc->dev), cpu_cur); + cpu_cur = CPU_NEXT(cpu_cur); + } + + return 0; +err_path: + ptnet_irqs_fini(sc); + return err; +} + +static void +ptnet_irqs_fini(struct ptnet_softc *sc) +{ + device_t dev = sc->dev; + int i; + + for (i = 0; i < sc->num_rings; i++) { + struct ptnet_queue *pq = sc->queues + i; + + if (pq->taskq) { + taskqueue_free(pq->taskq); + pq->taskq = NULL; + } + + if (pq->cookie) { + bus_teardown_intr(dev, pq->irq, pq->cookie); + pq->cookie = NULL; + } + + if (pq->irq) { + bus_release_resource(dev, SYS_RES_IRQ, i + 1, pq->irq); + pq->irq = NULL; + } + } + + if (sc->msix_mem) { + pci_release_msi(dev); + + bus_release_resource(dev, SYS_RES_MEMORY, + PCIR_BAR(PTNETMAP_MSIX_PCI_BAR), + sc->msix_mem); + sc->msix_mem = NULL; + } +} + +static void +ptnet_init(void *opaque) +{ + struct ptnet_softc *sc = opaque; + + PTNET_CORE_LOCK(sc); + ptnet_init_locked(sc); + PTNET_CORE_UNLOCK(sc); +} + +static int +ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data) +{ + struct ptnet_softc *sc = if_getsoftc(ifp); + device_t dev = sc->dev; + struct ifreq *ifr = (struct ifreq *)data; + int mask, err = 0; + + switch (cmd) { + case SIOCSIFFLAGS: + device_printf(dev, "SIOCSIFFLAGS %x\n", ifp->if_flags); + PTNET_CORE_LOCK(sc); + if (ifp->if_flags & IFF_UP) { + /* Network stack wants the iff to be up. 
+ */
+                        err = ptnet_init_locked(sc);
+                } else {
+                        /* Network stack wants the iff to be down. */
+                        err = ptnet_stop(sc);
+                }
+                /* We don't need to do anything to support IFF_PROMISC,
+                 * since that is managed by the backend port. */
+                PTNET_CORE_UNLOCK(sc);
+                break;
+
+        case SIOCSIFCAP:
+                device_printf(dev, "SIOCSIFCAP %x %x\n",
+                              ifr->ifr_reqcap, ifp->if_capenable);
+                mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+#ifdef DEVICE_POLLING
+                if (mask & IFCAP_POLLING) {
+                        struct ptnet_queue *pq;
+                        int i;
+
+                        if (ifr->ifr_reqcap & IFCAP_POLLING) {
+                                err = ether_poll_register(ptnet_poll, ifp);
+                                if (err) {
+                                        break;
+                                }
+                                /* Stop queues and sync with taskqueues. */
+                                ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+                                for (i = 0; i < sc->num_rings; i++) {
+                                        pq = sc->queues + i;
+                                        /* Make sure the worker sees the
+                                         * IFF_DRV_RUNNING down. */
+                                        PTNET_Q_LOCK(pq);
+                                        pq->ptring->guest_need_kick = 0;
+                                        PTNET_Q_UNLOCK(pq);
+                                        /* Wait for rescheduling to finish. */
+                                        if (pq->taskq) {
+                                                taskqueue_drain(pq->taskq,
+                                                                &pq->task);
+                                        }
+                                }
+                                ifp->if_drv_flags |= IFF_DRV_RUNNING;
+                        } else {
+                                err = ether_poll_deregister(ifp);
+                                for (i = 0; i < sc->num_rings; i++) {
+                                        pq = sc->queues + i;
+                                        PTNET_Q_LOCK(pq);
+                                        pq->ptring->guest_need_kick = 1;
+                                        PTNET_Q_UNLOCK(pq);
+                                }
+                        }
+                }
+#endif  /* DEVICE_POLLING */
+                ifp->if_capenable = ifr->ifr_reqcap;
+                break;
+
+        case SIOCSIFMTU:
+                /* We support any reasonable MTU. */
+                if (ifr->ifr_mtu < ETHERMIN ||
+                                ifr->ifr_mtu > PTNET_MAX_PKT_SIZE) {
+                        err = EINVAL;
+                } else {
+                        PTNET_CORE_LOCK(sc);
+                        ifp->if_mtu = ifr->ifr_mtu;
+                        PTNET_CORE_UNLOCK(sc);
+                }
+                break;
+
+        case SIOCSIFMEDIA:
+        case SIOCGIFMEDIA:
+                err = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
+                break;
+
+        default:
+                err = ether_ioctl(ifp, cmd, data);
+                break;
+        }
+
+        return err;
+}
+
+static int
+ptnet_init_locked(struct ptnet_softc *sc)
+{
+        if_t ifp = sc->ifp;
+        struct netmap_adapter *na_dr = &sc->ptna->dr.up;
+        struct netmap_adapter *na_nm = &sc->ptna->hwup.up;
+        unsigned int nm_buf_size;
+        int ret;
+
+        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+                return 0; /* nothing to do */
+        }
+
+        device_printf(sc->dev, "%s\n", __func__);
+
+        /* Translate offload capabilities according to if_capenable. */
+        ifp->if_hwassist = 0;
+        if (ifp->if_capenable & IFCAP_TXCSUM)
+                ifp->if_hwassist |= PTNET_CSUM_OFFLOAD;
+        if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
+                ifp->if_hwassist |= PTNET_CSUM_OFFLOAD_IPV6;
+        if (ifp->if_capenable & IFCAP_TSO4)
+                ifp->if_hwassist |= CSUM_IP_TSO;
+        if (ifp->if_capenable & IFCAP_TSO6)
+                ifp->if_hwassist |= CSUM_IP6_TSO;
+
+        /*
+         * Prepare the interface for netmap mode access.
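+         * The sequence below mirrors a regular netmap register
+         * operation: update the configuration, finalize the memory
+         * allocator, create krings and netmap rings and fetch the
+         * buffer lookup table (first user only), then switch the
+         * adapter on.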
+ */ + netmap_update_config(na_dr); + + ret = netmap_mem_finalize(na_dr->nm_mem, na_dr); + if (ret) { + device_printf(sc->dev, "netmap_mem_finalize() failed\n"); + return ret; + } + + if (sc->ptna->backend_regifs == 0) { + ret = ptnet_nm_krings_create(na_nm); + if (ret) { + device_printf(sc->dev, "ptnet_nm_krings_create() " + "failed\n"); + goto err_mem_finalize; + } + + ret = netmap_mem_rings_create(na_dr); + if (ret) { + device_printf(sc->dev, "netmap_mem_rings_create() " + "failed\n"); + goto err_rings_create; + } + + ret = netmap_mem_get_lut(na_dr->nm_mem, &na_dr->na_lut); + if (ret) { + device_printf(sc->dev, "netmap_mem_get_lut() " + "failed\n"); + goto err_get_lut; + } + } + + ret = ptnet_nm_register(na_dr, 1 /* on */); + if (ret) { + goto err_register; + } + + nm_buf_size = NETMAP_BUF_SIZE(na_dr); + + KASSERT(nm_buf_size > 0, ("Invalid netmap buffer size")); + sc->min_tx_space = PTNET_MAX_PKT_SIZE / nm_buf_size + 2; + device_printf(sc->dev, "%s: min_tx_space = %u\n", __func__, + sc->min_tx_space); +#ifdef PTNETMAP_STATS + callout_reset(&sc->tick, hz, ptnet_tick, sc); +#endif + + ifp->if_drv_flags |= IFF_DRV_RUNNING; + + return 0; + +err_register: + memset(&na_dr->na_lut, 0, sizeof(na_dr->na_lut)); +err_get_lut: + netmap_mem_rings_delete(na_dr); +err_rings_create: + ptnet_nm_krings_delete(na_nm); +err_mem_finalize: + netmap_mem_deref(na_dr->nm_mem, na_dr); + + return ret; +} + +/* To be called under core lock. */ +static int +ptnet_stop(struct ptnet_softc *sc) +{ + if_t ifp = sc->ifp; + struct netmap_adapter *na_dr = &sc->ptna->dr.up; + struct netmap_adapter *na_nm = &sc->ptna->hwup.up; + int i; + + device_printf(sc->dev, "%s\n", __func__); + + if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { + return 0; /* nothing to do */ + } + + /* Clear the driver-ready flag, and synchronize with all the queues, + * so that after this loop we are sure nobody is working anymore with + * the device. This scheme is taken from the vtnet driver. */ + ifp->if_drv_flags &= ~IFF_DRV_RUNNING; + callout_stop(&sc->tick); + for (i = 0; i < sc->num_rings; i++) { + PTNET_Q_LOCK(sc->queues + i); + PTNET_Q_UNLOCK(sc->queues + i); + } + + ptnet_nm_register(na_dr, 0 /* off */); + + if (sc->ptna->backend_regifs == 0) { + netmap_mem_rings_delete(na_dr); + ptnet_nm_krings_delete(na_nm); + } + netmap_mem_deref(na_dr->nm_mem, na_dr); + + return 0; +} + +static void +ptnet_qflush(if_t ifp) +{ + struct ptnet_softc *sc = if_getsoftc(ifp); + int i; + + /* Flush all the bufrings and do the interface flush. */ + for (i = 0; i < sc->num_rings; i++) { + struct ptnet_queue *pq = sc->queues + i; + struct mbuf *m; + + PTNET_Q_LOCK(pq); + if (pq->bufring) { + while ((m = buf_ring_dequeue_sc(pq->bufring))) { + m_freem(m); + } + } + PTNET_Q_UNLOCK(pq); + } + + if_qflush(ifp); +} + +static int +ptnet_media_change(if_t ifp) +{ + struct ptnet_softc *sc = if_getsoftc(ifp); + struct ifmedia *ifm = &sc->media; + + if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) { + return EINVAL; + } + + return 0; +} + +#if __FreeBSD_version >= 1100000 +static uint64_t +ptnet_get_counter(if_t ifp, ift_counter cnt) +{ + struct ptnet_softc *sc = if_getsoftc(ifp); + struct ptnet_queue_stats stats[2]; + int i; + + /* Accumulate statistics over the queues. */ + memset(stats, 0, sizeof(stats)); + for (i = 0; i < sc->num_rings; i++) { + struct ptnet_queue *pq = sc->queues + i; + int idx = (i < sc->num_tx_rings) ? 
0 : 1; + + stats[idx].packets += pq->stats.packets; + stats[idx].bytes += pq->stats.bytes; + stats[idx].errors += pq->stats.errors; + stats[idx].iqdrops += pq->stats.iqdrops; + stats[idx].mcasts += pq->stats.mcasts; + } + + switch (cnt) { + case IFCOUNTER_IPACKETS: + return (stats[1].packets); + case IFCOUNTER_IQDROPS: + return (stats[1].iqdrops); + case IFCOUNTER_IERRORS: + return (stats[1].errors); + case IFCOUNTER_OPACKETS: + return (stats[0].packets); + case IFCOUNTER_OBYTES: + return (stats[0].bytes); + case IFCOUNTER_OMCASTS: + return (stats[0].mcasts); + default: + return (if_get_counter_default(ifp, cnt)); + } +} +#endif + + +#ifdef PTNETMAP_STATS +/* Called under core lock. */ +static void +ptnet_tick(void *opaque) +{ + struct ptnet_softc *sc = opaque; + int i; + + for (i = 0; i < sc->num_rings; i++) { + struct ptnet_queue *pq = sc->queues + i; + struct ptnet_queue_stats cur = pq->stats; + struct timeval now; + unsigned int delta; + + microtime(&now); + delta = now.tv_usec - sc->last_ts.tv_usec + + (now.tv_sec - sc->last_ts.tv_sec) * 1000000; + delta /= 1000; /* in milliseconds */ + + if (delta == 0) + continue; + + device_printf(sc->dev, "#%d[%u ms]:pkts %lu, kicks %lu, " + "intr %lu\n", i, delta, + (cur.packets - pq->last_stats.packets), + (cur.kicks - pq->last_stats.kicks), + (cur.intrs - pq->last_stats.intrs)); + pq->last_stats = cur; + } + microtime(&sc->last_ts); + callout_schedule(&sc->tick, hz); +} +#endif /* PTNETMAP_STATS */ + +static void +ptnet_media_status(if_t ifp, struct ifmediareq *ifmr) +{ + /* We are always active, as the backend netmap port is + * always open in netmap mode. */ + ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; + ifmr->ifm_active = IFM_ETHER | IFM_10G_T | IFM_FDX; +} + +static uint32_t +ptnet_nm_ptctl(if_t ifp, uint32_t cmd) +{ + struct ptnet_softc *sc = if_getsoftc(ifp); + int ret; + + bus_write_4(sc->iomem, PTNET_IO_PTCTL, cmd); + ret = bus_read_4(sc->iomem, PTNET_IO_PTSTS); + device_printf(sc->dev, "PTCTL %u, ret %u\n", cmd, ret); + + return ret; +} + +static int +ptnet_nm_config(struct netmap_adapter *na, unsigned *txr, unsigned *txd, + unsigned *rxr, unsigned *rxd) +{ + struct ptnet_softc *sc = if_getsoftc(na->ifp); + + *txr = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS); + *rxr = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS); + *txd = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS); + *rxd = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS); + + device_printf(sc->dev, "txr %u, rxr %u, txd %u, rxd %u\n", + *txr, *rxr, *txd, *rxd); + + return 0; +} + +static void +ptnet_sync_from_csb(struct ptnet_softc *sc, struct netmap_adapter *na) +{ + int i; + + /* Sync krings from the host, reading from + * CSB. 
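+         * After a REGIF the host publishes its view of each ring (head,
+         * cur, hwcur, hwtail) in the CSB; copy it into the krings and
+         * rings so that guest and host resume from the same state.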
*/ + for (i = 0; i < sc->num_rings; i++) { + struct ptnet_ring *ptring = sc->queues[i].ptring; + struct netmap_kring *kring; + + if (i < na->num_tx_rings) { + kring = na->tx_rings + i; + } else { + kring = na->rx_rings + i - na->num_tx_rings; + } + kring->rhead = kring->ring->head = ptring->head; + kring->rcur = kring->ring->cur = ptring->cur; + kring->nr_hwcur = ptring->hwcur; + kring->nr_hwtail = kring->rtail = + kring->ring->tail = ptring->hwtail; + + ND("%d,%d: csb {hc %u h %u c %u ht %u}", t, i, + ptring->hwcur, ptring->head, ptring->cur, + ptring->hwtail); + ND("%d,%d: kring {hc %u rh %u rc %u h %u c %u ht %u rt %u t %u}", + t, i, kring->nr_hwcur, kring->rhead, kring->rcur, + kring->ring->head, kring->ring->cur, kring->nr_hwtail, + kring->rtail, kring->ring->tail); + } +} + +static void +ptnet_update_vnet_hdr(struct ptnet_softc *sc) +{ + sc->vnet_hdr_len = ptnet_vnet_hdr ? PTNET_HDR_SIZE : 0; + sc->ptna->hwup.up.virt_hdr_len = sc->vnet_hdr_len; + bus_write_4(sc->iomem, PTNET_IO_VNET_HDR_LEN, sc->vnet_hdr_len); +} + +static int +ptnet_nm_register(struct netmap_adapter *na, int onoff) +{ + /* device-specific */ + if_t ifp = na->ifp; + struct ptnet_softc *sc = if_getsoftc(ifp); + int native = (na == &sc->ptna->hwup.up); + struct ptnet_queue *pq; + enum txrx t; + int ret = 0; + int i; + + if (!onoff) { + sc->ptna->backend_regifs--; + } + + /* If this is the last netmap client, guest interrupt enable flags may + * be in arbitrary state. Since these flags are going to be used also + * by the netdevice driver, we have to make sure to start with + * notifications enabled. Also, schedule NAPI to flush pending packets + * in the RX rings, since we will not receive further interrupts + * until these will be processed. */ + if (native && !onoff && na->active_fds == 0) { + D("Exit netmap mode, re-enable interrupts"); + for (i = 0; i < sc->num_rings; i++) { + pq = sc->queues + i; + pq->ptring->guest_need_kick = 1; + } + } + + if (onoff) { + if (sc->ptna->backend_regifs == 0) { + /* Initialize notification enable fields in the CSB. */ + for (i = 0; i < sc->num_rings; i++) { + pq = sc->queues + i; + pq->ptring->host_need_kick = 1; + pq->ptring->guest_need_kick = + (!(ifp->if_capenable & IFCAP_POLLING) + && i >= sc->num_tx_rings); + } + + /* Set the virtio-net header length. */ + ptnet_update_vnet_hdr(sc); + + /* Make sure the host adapter passed through is ready + * for txsync/rxsync. */ + ret = ptnet_nm_ptctl(ifp, PTNETMAP_PTCTL_REGIF); + if (ret) { + return ret; + } + } + + /* Sync from CSB must be done after REGIF PTCTL. Skip this + * step only if this is a netmap client and it is not the + * first one. */ + if ((!native && sc->ptna->backend_regifs == 0) || + (native && na->active_fds == 0)) { + ptnet_sync_from_csb(sc, na); + } + + /* If not native, don't call nm_set_native_flags, since we don't want + * to replace if_transmit method, nor set NAF_NETMAP_ON */ + if (native) { + for_rx_tx(t) { + for (i = 0; i <= nma_get_nrings(na, t); i++) { + struct netmap_kring *kring = &NMR(na, t)[i]; + + if (nm_kring_pending_on(kring)) { + kring->nr_mode = NKR_NETMAP_ON; + } + } + } + nm_set_native_flags(na); + } + + } else { + if (native) { + nm_clear_native_flags(na); + for_rx_tx(t) { + for (i = 0; i <= nma_get_nrings(na, t); i++) { + struct netmap_kring *kring = &NMR(na, t)[i]; + + if (nm_kring_pending_off(kring)) { + kring->nr_mode = NKR_NETMAP_OFF; + } + } + } + } + + /* Sync from CSB must be done before UNREGIF PTCTL, on the last + * netmap client. 
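+                 * This is presumably because after the UNREGIF the host
+                 * side stops updating the CSB, so the final ring state
+                 * must be read back while it is still valid.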
+ */
+                if (native && na->active_fds == 0) {
+                        ptnet_sync_from_csb(sc, na);
+                }
+
+                if (sc->ptna->backend_regifs == 0) {
+                        ret = ptnet_nm_ptctl(ifp, PTNETMAP_PTCTL_UNREGIF);
+                }
+        }
+
+        if (onoff) {
+                sc->ptna->backend_regifs++;
+        }
+
+        return ret;
+}
+
+static int
+ptnet_nm_txsync(struct netmap_kring *kring, int flags)
+{
+        struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
+        struct ptnet_queue *pq = sc->queues + kring->ring_id;
+        bool notify;
+
+        notify = netmap_pt_guest_txsync(pq->ptring, kring, flags);
+        if (notify) {
+                ptnet_kick(pq);
+        }
+
+        return 0;
+}
+
+static int
+ptnet_nm_rxsync(struct netmap_kring *kring, int flags)
+{
+        struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
+        struct ptnet_queue *pq = sc->rxqueues + kring->ring_id;
+        bool notify;
+
+        notify = netmap_pt_guest_rxsync(pq->ptring, kring, flags);
+        if (notify) {
+                ptnet_kick(pq);
+        }
+
+        return 0;
+}
+
+static void
+ptnet_tx_intr(void *opaque)
+{
+        struct ptnet_queue *pq = opaque;
+        struct ptnet_softc *sc = pq->sc;
+
+        DBG(device_printf(sc->dev, "Tx interrupt #%d\n", pq->kring_id));
+#ifdef PTNETMAP_STATS
+        pq->stats.intrs ++;
+#endif /* PTNETMAP_STATS */
+
+        if (netmap_tx_irq(sc->ifp, pq->kring_id) != NM_IRQ_PASS) {
+                return;
+        }
+
+        /* Schedule the taskqueue to process the pending transmission
+         * requests. However, vtnet, if_em and if_igb just call
+         * ptnet_transmit() here, at least when using MSI-X interrupts.
+         * The if_em driver, instead, schedules the taskqueue when using
+         * legacy interrupts. */
+        taskqueue_enqueue(pq->taskq, &pq->task);
+}
+
+static void
+ptnet_rx_intr(void *opaque)
+{
+        struct ptnet_queue *pq = opaque;
+        struct ptnet_softc *sc = pq->sc;
+        unsigned int unused;
+
+        DBG(device_printf(sc->dev, "Rx interrupt #%d\n", pq->kring_id));
+#ifdef PTNETMAP_STATS
+        pq->stats.intrs ++;
+#endif /* PTNETMAP_STATS */
+
+        if (netmap_rx_irq(sc->ifp, pq->kring_id, &unused) != NM_IRQ_PASS) {
+                return;
+        }
+
+        /* Like the vtnet, if_igb and if_em drivers when using MSI-X
+         * interrupts, receive-side processing is executed directly in
+         * the interrupt service routine. Alternatively, we may schedule
+         * the taskqueue. */
+        ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
+}
+
+/* The following offloading-related functions are taken from the vtnet
+ * driver, but the same functionality is required for the ptnet driver.
+ * As a temporary solution, I copied this code from vtnet and I started
+ * to generalize it (taking away driver-specific statistic accounting),
+ * making as few modifications as possible.
+ * In the future we need to share these functions between vtnet and ptnet.
+ */
+static int
+ptnet_tx_offload_ctx(struct mbuf *m, int *etype, int *proto, int *start)
+{
+        struct ether_vlan_header *evh;
+        int offset;
+
+        evh = mtod(m, struct ether_vlan_header *);
+        if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+                /* BMV: We should handle nested VLAN tags too.
*/ + *etype = ntohs(evh->evl_proto); + offset = sizeof(struct ether_vlan_header); + } else { + *etype = ntohs(evh->evl_encap_proto); + offset = sizeof(struct ether_header); + } + + switch (*etype) { +#if defined(INET) + case ETHERTYPE_IP: { + struct ip *ip, iphdr; + if (__predict_false(m->m_len < offset + sizeof(struct ip))) { + m_copydata(m, offset, sizeof(struct ip), + (caddr_t) &iphdr); + ip = &iphdr; + } else + ip = (struct ip *)(m->m_data + offset); + *proto = ip->ip_p; + *start = offset + (ip->ip_hl << 2); + break; + } +#endif +#if defined(INET6) + case ETHERTYPE_IPV6: + *proto = -1; + *start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto); + /* Assert the network stack sent us a valid packet. */ + KASSERT(*start > offset, + ("%s: mbuf %p start %d offset %d proto %d", __func__, m, + *start, offset, *proto)); + break; +#endif + default: + /* Here we should increment the tx_csum_bad_ethtype counter. */ + return (EINVAL); + } + + return (0); +} + +static int +ptnet_tx_offload_tso(if_t ifp, struct mbuf *m, int eth_type, + int offset, bool allow_ecn, struct virtio_net_hdr *hdr) +{ + static struct timeval lastecn; + static int curecn; + struct tcphdr *tcp, tcphdr; + + if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) { + m_copydata(m, offset, sizeof(struct tcphdr), (caddr_t) &tcphdr); + tcp = &tcphdr; + } else + tcp = (struct tcphdr *)(m->m_data + offset); + + hdr->hdr_len = offset + (tcp->th_off << 2); + hdr->gso_size = m->m_pkthdr.tso_segsz; + hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 : + VIRTIO_NET_HDR_GSO_TCPV6; + + if (tcp->th_flags & TH_CWR) { + /* + * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In FreeBSD, + * ECN support is not on a per-interface basis, but globally via + * the net.inet.tcp.ecn.enable sysctl knob. The default is off. + */ + if (!allow_ecn) { + if (ppsratecheck(&lastecn, &curecn, 1)) + if_printf(ifp, + "TSO with ECN not negotiated with host\n"); + return (ENOTSUP); + } + hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN; + } + + /* Here we should increment tx_tso counter. */ + + return (0); +} + +static struct mbuf * +ptnet_tx_offload(if_t ifp, struct mbuf *m, bool allow_ecn, + struct virtio_net_hdr *hdr) +{ + int flags, etype, csum_start, proto, error; + + flags = m->m_pkthdr.csum_flags; + + error = ptnet_tx_offload_ctx(m, &etype, &proto, &csum_start); + if (error) + goto drop; + + if ((etype == ETHERTYPE_IP && flags & PTNET_CSUM_OFFLOAD) || + (etype == ETHERTYPE_IPV6 && flags & PTNET_CSUM_OFFLOAD_IPV6)) { + /* + * We could compare the IP protocol vs the CSUM_ flag too, + * but that really should not be necessary. + */ + hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM; + hdr->csum_start = csum_start; + hdr->csum_offset = m->m_pkthdr.csum_data; + /* Here we should increment the tx_csum counter. */ + } + + if (flags & CSUM_TSO) { + if (__predict_false(proto != IPPROTO_TCP)) { + /* Likely failed to correctly parse the mbuf. + * Here we should increment the tx_tso_not_tcp + * counter. */ + goto drop; + } + + KASSERT(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM, + ("%s: mbuf %p TSO without checksum offload %#x", + __func__, m, flags)); + + error = ptnet_tx_offload_tso(ifp, m, etype, csum_start, + allow_ecn, hdr); + if (error) + goto drop; + } + + return (m); + +drop: + m_freem(m); + return (NULL); +} + +static void +ptnet_vlan_tag_remove(struct mbuf *m) +{ + struct ether_vlan_header *evh; + + evh = mtod(m, struct ether_vlan_header *); + m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag); + m->m_flags |= M_VLANTAG; + + /* Strip the 802.1Q header. 
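+         * i.e. move the destination and source MAC addresses
+         * (ETHER_HDR_LEN - ETHER_TYPE_LEN bytes) forward by
+         * ETHER_VLAN_ENCAP_LEN, overwriting the TPID/TCI pair, and then
+         * let m_adj() trim the now-unused four leading bytes.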
+ */
+        bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN,
+            ETHER_HDR_LEN - ETHER_TYPE_LEN);
+        m_adj(m, ETHER_VLAN_ENCAP_LEN);
+}
+
+/*
+ * Use the checksum offset in the VirtIO header to set the
+ * correct CSUM_* flags.
+ */
+static int
+ptnet_rx_csum_by_offset(struct mbuf *m, uint16_t eth_type, int ip_start,
+                        struct virtio_net_hdr *hdr)
+{
+#if defined(INET) || defined(INET6)
+        int offset = hdr->csum_start + hdr->csum_offset;
+#endif
+
+        /* Only do a basic sanity check on the offset. */
+        switch (eth_type) {
+#if defined(INET)
+        case ETHERTYPE_IP:
+                if (__predict_false(offset < ip_start + sizeof(struct ip)))
+                        return (1);
+                break;
+#endif
+#if defined(INET6)
+        case ETHERTYPE_IPV6:
+                if (__predict_false(offset < ip_start + sizeof(struct ip6_hdr)))
+                        return (1);
+                break;
+#endif
+        default:
+                /* Here we should increment the rx_csum_bad_ethtype counter. */
+                return (1);
+        }
+
+        /*
+         * Use the offset to determine the appropriate CSUM_* flags. This is
+         * a bit dirty, but we can get by with it since the checksum offsets
+         * happen to be different. We assume the host does not do IPv4
+         * header checksum offloading.
+         */
+        switch (hdr->csum_offset) {
+        case offsetof(struct udphdr, uh_sum):
+        case offsetof(struct tcphdr, th_sum):
+                m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+                m->m_pkthdr.csum_data = 0xFFFF;
+                break;
+        case offsetof(struct sctphdr, checksum):
+                m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
+                break;
+        default:
+                /* Here we should increment the rx_csum_bad_offset counter. */
+                return (1);
+        }
+
+        return (0);
+}
+
+static int
+ptnet_rx_csum_by_parse(struct mbuf *m, uint16_t eth_type, int ip_start,
+                       struct virtio_net_hdr *hdr)
+{
+        int offset, proto;
+
+        switch (eth_type) {
+#if defined(INET)
+        case ETHERTYPE_IP: {
+                struct ip *ip;
+                if (__predict_false(m->m_len < ip_start + sizeof(struct ip)))
+                        return (1);
+                ip = (struct ip *)(m->m_data + ip_start);
+                proto = ip->ip_p;
+                offset = ip_start + (ip->ip_hl << 2);
+                break;
+        }
+#endif
+#if defined(INET6)
+        case ETHERTYPE_IPV6:
+                if (__predict_false(m->m_len < ip_start +
+                    sizeof(struct ip6_hdr)))
+                        return (1);
+                offset = ip6_lasthdr(m, ip_start, IPPROTO_IPV6, &proto);
+                if (__predict_false(offset < 0))
+                        return (1);
+                break;
+#endif
+        default:
+                /* Here we should increment the rx_csum_bad_ethtype counter. */
+                return (1);
+        }
+
+        switch (proto) {
+        case IPPROTO_TCP:
+                if (__predict_false(m->m_len < offset + sizeof(struct tcphdr)))
+                        return (1);
+                m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+                m->m_pkthdr.csum_data = 0xFFFF;
+                break;
+        case IPPROTO_UDP:
+                if (__predict_false(m->m_len < offset + sizeof(struct udphdr)))
+                        return (1);
+                m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+                m->m_pkthdr.csum_data = 0xFFFF;
+                break;
+        case IPPROTO_SCTP:
+                if (__predict_false(m->m_len < offset + sizeof(struct sctphdr)))
+                        return (1);
+                m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
+                break;
+        default:
+                /*
+                 * For the remaining protocols, FreeBSD does not support
+                 * checksum offloading, so the checksum will be recomputed.
+                 */
+#if 0
+                if_printf(ifp, "cksum offload of unsupported "
+                    "protocol eth_type=%#x proto=%d csum_start=%d "
+                    "csum_offset=%d\n", __func__, eth_type, proto,
+                    hdr->csum_start, hdr->csum_offset);
+#endif
+                break;
+        }
+
+        return (0);
+}
+
+/*
+ * Set the appropriate CSUM_* flags. Unfortunately, the information
+ * provided is not directly useful to us. The VirtIO header gives the
+ * offset of the checksum, which is all Linux needs, but this is not
+ * how FreeBSD does things.
We are forced to peek inside the packet + * a bit. + * + * It would be nice if VirtIO gave us the L4 protocol or if FreeBSD + * could accept the offsets and let the stack figure it out. + */ +static int +ptnet_rx_csum(struct mbuf *m, struct virtio_net_hdr *hdr) +{ + struct ether_header *eh; + struct ether_vlan_header *evh; + uint16_t eth_type; + int offset, error; + + eh = mtod(m, struct ether_header *); + eth_type = ntohs(eh->ether_type); + if (eth_type == ETHERTYPE_VLAN) { + /* BMV: We should handle nested VLAN tags too. */ + evh = mtod(m, struct ether_vlan_header *); + eth_type = ntohs(evh->evl_proto); + offset = sizeof(struct ether_vlan_header); + } else + offset = sizeof(struct ether_header); + + if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) + error = ptnet_rx_csum_by_offset(m, eth_type, offset, hdr); + else + error = ptnet_rx_csum_by_parse(m, eth_type, offset, hdr); + + return (error); +} +/* End of offloading-related functions to be shared with vtnet. */ + +static inline void +ptnet_sync_tail(struct ptnet_ring *ptring, struct netmap_kring *kring) +{ + struct netmap_ring *ring = kring->ring; + + /* Update hwcur and hwtail as known by the host. */ + ptnetmap_guest_read_kring_csb(ptring, kring); + + /* nm_sync_finalize */ + ring->tail = kring->rtail = kring->nr_hwtail; +} + +static void +ptnet_ring_update(struct ptnet_queue *pq, struct netmap_kring *kring, + unsigned int head, unsigned int sync_flags) +{ + struct netmap_ring *ring = kring->ring; + struct ptnet_ring *ptring = pq->ptring; + + /* Some packets have been pushed to the netmap ring. We have + * to tell the host to process the new packets, updating cur + * and head in the CSB. */ + ring->head = ring->cur = head; + + /* Mimic nm_txsync_prologue/nm_rxsync_prologue. */ + kring->rcur = kring->rhead = head; + + ptnetmap_guest_write_kring_csb(ptring, kring->rcur, kring->rhead); + + /* Kick the host if needed. */ + if (NM_ACCESS_ONCE(ptring->host_need_kick)) { + ptring->sync_flags = sync_flags; + ptnet_kick(pq); + } +} + +#define PTNET_TX_NOSPACE(_h, _k, _min) \ + ((((_h) < (_k)->rtail) ? 0 : (_k)->nkr_num_slots) + \ + (_k)->rtail - (_h)) < (_min) + +/* This function may be called by the network stack, or by + * by the taskqueue thread. */ +static int +ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget, + bool may_resched) +{ + struct ptnet_softc *sc = pq->sc; + bool have_vnet_hdr = sc->vnet_hdr_len; + struct netmap_adapter *na = &sc->ptna->dr.up; + if_t ifp = sc->ifp; + unsigned int batch_count = 0; + struct ptnet_ring *ptring; + struct netmap_kring *kring; + struct netmap_ring *ring; + struct netmap_slot *slot; + unsigned int count = 0; + unsigned int minspace; + unsigned int head; + unsigned int lim; + struct mbuf *mhead; + struct mbuf *mf; + int nmbuf_bytes; + uint8_t *nmbuf; + + if (!PTNET_Q_TRYLOCK(pq)) { + /* We failed to acquire the lock, schedule the taskqueue. */ + RD(1, "Deferring TX work"); + if (may_resched) { + taskqueue_enqueue(pq->taskq, &pq->task); + } + + return 0; + } + + if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) { + PTNET_Q_UNLOCK(pq); + RD(1, "Interface is down"); + return ENETDOWN; + } + + ptring = pq->ptring; + kring = na->tx_rings + pq->kring_id; + ring = kring->ring; + lim = kring->nkr_num_slots - 1; + head = ring->head; + minspace = sc->min_tx_space; + + while (count < budget) { + if (PTNET_TX_NOSPACE(head, kring, minspace)) { + /* We ran out of slot, let's see if the host has + * freed up some, by reading hwcur and hwtail from + * the CSB. 
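+                         * Free TX space is computed by PTNET_TX_NOSPACE
+                         * as (rtail - head), wrapping by nkr_num_slots,
+                         * and the queue stalls when it drops below
+                         * min_tx_space. The enable-and-double-check dance
+                         * below avoids missing the host notification that
+                         * frees slots between the first check and the
+                         * write to guest_need_kick.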
*/ + ptnet_sync_tail(ptring, kring); + + if (PTNET_TX_NOSPACE(head, kring, minspace)) { + /* Still no slots available. Reactivate the + * interrupts so that we can be notified + * when some free slots are made available by + * the host. */ + ptring->guest_need_kick = 1; + + /* Double-check. */ + ptnet_sync_tail(ptring, kring); + if (likely(PTNET_TX_NOSPACE(head, kring, + minspace))) { + break; + } + + RD(1, "Found more slots by doublecheck"); + /* More slots were freed before reactivating + * the interrupts. */ + ptring->guest_need_kick = 0; + } + } + + mhead = drbr_peek(ifp, pq->bufring); + if (!mhead) { + break; + } + + /* Initialize transmission state variables. */ + slot = ring->slot + head; + nmbuf = NMB(na, slot); + nmbuf_bytes = 0; + + /* If needed, prepare the virtio-net header at the beginning + * of the first slot. */ + if (have_vnet_hdr) { + struct virtio_net_hdr *vh = + (struct virtio_net_hdr *)nmbuf; + + /* For performance, we could replace this memset() with + * two 8-bytes-wide writes. */ + memset(nmbuf, 0, PTNET_HDR_SIZE); + if (mhead->m_pkthdr.csum_flags & PTNET_ALL_OFFLOAD) { + mhead = ptnet_tx_offload(ifp, mhead, false, + vh); + if (unlikely(!mhead)) { + /* Packet dropped because errors + * occurred while preparing the vnet + * header. Let's go ahead with the next + * packet. */ + pq->stats.errors ++; + drbr_advance(ifp, pq->bufring); + continue; + } + } + ND(1, "%s: [csum_flags %lX] vnet hdr: flags %x " + "csum_start %u csum_ofs %u hdr_len = %u " + "gso_size %u gso_type %x", __func__, + mhead->m_pkthdr.csum_flags, vh->flags, + vh->csum_start, vh->csum_offset, vh->hdr_len, + vh->gso_size, vh->gso_type); + + nmbuf += PTNET_HDR_SIZE; + nmbuf_bytes += PTNET_HDR_SIZE; + } + + for (mf = mhead; mf; mf = mf->m_next) { + uint8_t *mdata = mf->m_data; + int mlen = mf->m_len; + + for (;;) { + int copy = NETMAP_BUF_SIZE(na) - nmbuf_bytes; + + if (mlen < copy) { + copy = mlen; + } + memcpy(nmbuf, mdata, copy); + + mdata += copy; + mlen -= copy; + nmbuf += copy; + nmbuf_bytes += copy; + + if (!mlen) { + break; + } + + slot->len = nmbuf_bytes; + slot->flags = NS_MOREFRAG; + + head = nm_next(head, lim); + KASSERT(head != ring->tail, + ("Unexpectedly run out of TX space")); + slot = ring->slot + head; + nmbuf = NMB(na, slot); + nmbuf_bytes = 0; + } + } + + /* Complete last slot and update head. */ + slot->len = nmbuf_bytes; + slot->flags = 0; + head = nm_next(head, lim); + + /* Consume the packet just processed. */ + drbr_advance(ifp, pq->bufring); + + /* Copy the packet to listeners. */ + ETHER_BPF_MTAP(ifp, mhead); + + pq->stats.packets ++; + pq->stats.bytes += mhead->m_pkthdr.len; + if (mhead->m_flags & M_MCAST) { + pq->stats.mcasts ++; + } + + m_freem(mhead); + + count ++; + if (++batch_count == PTNET_TX_BATCH) { + ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM); + batch_count = 0; + } + } + + if (batch_count) { + ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM); + } + + if (count >= budget && may_resched) { + DBG(RD(1, "out of budget: resched, %d mbufs pending\n", + drbr_inuse(ifp, pq->bufring))); + taskqueue_enqueue(pq->taskq, &pq->task); + } + + PTNET_Q_UNLOCK(pq); + + return count; +} + +static int +ptnet_transmit(if_t ifp, struct mbuf *m) +{ + struct ptnet_softc *sc = if_getsoftc(ifp); + struct ptnet_queue *pq; + unsigned int queue_idx; + int err; + + DBG(device_printf(sc->dev, "transmit %p\n", m)); + + /* Insert 802.1Q header if needed. 
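+         * The tag travels out-of-band in m_pkthdr.ether_vtag, but a
+         * netmap slot only carries the raw frame, so the tag has to be
+         * re-encapsulated in the packet body by ether_vlanencap().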
*/ + if (m->m_flags & M_VLANTAG) { + m = ether_vlanencap(m, m->m_pkthdr.ether_vtag); + if (m == NULL) { + return ENOBUFS; + } + m->m_flags &= ~M_VLANTAG; + } + + /* Get the flow-id if available. */ + queue_idx = (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) ? + m->m_pkthdr.flowid : curcpu; + + if (unlikely(queue_idx >= sc->num_tx_rings)) { + queue_idx %= sc->num_tx_rings; + } + + pq = sc->queues + queue_idx; + + err = drbr_enqueue(ifp, pq->bufring, m); + if (err) { + /* ENOBUFS when the bufring is full */ + RD(1, "%s: drbr_enqueue() failed %d\n", + __func__, err); + pq->stats.errors ++; + return err; + } + + if (ifp->if_capenable & IFCAP_POLLING) { + /* If polling is on, the transmit queues will be + * drained by the poller. */ + return 0; + } + + err = ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true); + + return (err < 0) ? err : 0; +} + +static unsigned int +ptnet_rx_discard(struct netmap_kring *kring, unsigned int head) +{ + struct netmap_ring *ring = kring->ring; + struct netmap_slot *slot = ring->slot + head; + + for (;;) { + head = nm_next(head, kring->nkr_num_slots - 1); + if (!(slot->flags & NS_MOREFRAG) || head == ring->tail) { + break; + } + slot = ring->slot + head; + } + + return head; +} + +static inline struct mbuf * +ptnet_rx_slot(struct mbuf *mtail, uint8_t *nmbuf, unsigned int nmbuf_len) +{ + uint8_t *mdata = mtod(mtail, uint8_t *) + mtail->m_len; + + do { + unsigned int copy; + + if (mtail->m_len == MCLBYTES) { + struct mbuf *mf; + + mf = m_getcl(M_NOWAIT, MT_DATA, 0); + if (unlikely(!mf)) { + return NULL; + } + + mtail->m_next = mf; + mtail = mf; + mdata = mtod(mtail, uint8_t *); + mtail->m_len = 0; + } + + copy = MCLBYTES - mtail->m_len; + if (nmbuf_len < copy) { + copy = nmbuf_len; + } + + memcpy(mdata, nmbuf, copy); + + nmbuf += copy; + nmbuf_len -= copy; + mdata += copy; + mtail->m_len += copy; + } while (nmbuf_len); + + return mtail; +} + +static int +ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched) +{ + struct ptnet_softc *sc = pq->sc; + bool have_vnet_hdr = sc->vnet_hdr_len; + struct ptnet_ring *ptring = pq->ptring; + struct netmap_adapter *na = &sc->ptna->dr.up; + struct netmap_kring *kring = na->rx_rings + pq->kring_id; + struct netmap_ring *ring = kring->ring; + unsigned int const lim = kring->nkr_num_slots - 1; + unsigned int head = ring->head; + unsigned int batch_count = 0; + if_t ifp = sc->ifp; + unsigned int count = 0; + + PTNET_Q_LOCK(pq); + + if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) { + goto unlock; + } + + kring->nr_kflags &= ~NKR_PENDINTR; + + while (count < budget) { + unsigned int prev_head = head; + struct mbuf *mhead, *mtail; + struct virtio_net_hdr *vh; + struct netmap_slot *slot; + unsigned int nmbuf_len; + uint8_t *nmbuf; +host_sync: + if (head == ring->tail) { + /* We ran out of slot, let's see if the host has + * added some, by reading hwcur and hwtail from + * the CSB. */ + ptnet_sync_tail(ptring, kring); + + if (head == ring->tail) { + /* Still no slots available. Reactivate + * interrupts as they were disabled by the + * host thread right before issuing the + * last interrupt. */ + ptring->guest_need_kick = 1; + + /* Double-check. */ + ptnet_sync_tail(ptring, kring); + if (likely(head == ring->tail)) { + break; + } + ptring->guest_need_kick = 0; + } + } + + /* Initialize ring state variables, possibly grabbing the + * virtio-net header. 
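+                 * When negotiated, the virtio-net header sits at the
+                 * beginning of the first slot of each packet; strip it
+                 * from the data copied into the mbuf chain and keep the
+                 * pointer around for the checksum fixups further below.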
*/ + slot = ring->slot + head; + nmbuf = NMB(na, slot); + nmbuf_len = slot->len; + + vh = (struct virtio_net_hdr *)nmbuf; + if (have_vnet_hdr) { + if (unlikely(nmbuf_len < PTNET_HDR_SIZE)) { + /* There is no good reason why host should + * put the header in multiple netmap slots. + * If this is the case, discard. */ + RD(1, "Fragmented vnet-hdr: dropping"); + head = ptnet_rx_discard(kring, head); + pq->stats.iqdrops ++; + goto skip; + } + ND(1, "%s: vnet hdr: flags %x csum_start %u " + "csum_ofs %u hdr_len = %u gso_size %u " + "gso_type %x", __func__, vh->flags, + vh->csum_start, vh->csum_offset, vh->hdr_len, + vh->gso_size, vh->gso_type); + nmbuf += PTNET_HDR_SIZE; + nmbuf_len -= PTNET_HDR_SIZE; + } + + /* Allocate the head of a new mbuf chain. + * We use m_getcl() to allocate an mbuf with standard cluster + * size (MCLBYTES). In the future we could use m_getjcl() + * to choose different sizes. */ + mhead = mtail = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); + if (unlikely(mhead == NULL)) { + device_printf(sc->dev, "%s: failed to allocate mbuf " + "head\n", __func__); + pq->stats.errors ++; + break; + } + + /* Initialize the mbuf state variables. */ + mhead->m_pkthdr.len = nmbuf_len; + mtail->m_len = 0; + + /* Scan all the netmap slots containing the current packet. */ + for (;;) { + DBG(device_printf(sc->dev, "%s: h %u t %u rcv frag " + "len %u, flags %u\n", __func__, + head, ring->tail, slot->len, + slot->flags)); + + mtail = ptnet_rx_slot(mtail, nmbuf, nmbuf_len); + if (unlikely(!mtail)) { + /* Ouch. We ran out of memory while processing + * a packet. We have to restore the previous + * head position, free the mbuf chain, and + * schedule the taskqueue to give the packet + * another chance. */ + device_printf(sc->dev, "%s: failed to allocate" + " mbuf frag, reset head %u --> %u\n", + __func__, head, prev_head); + head = prev_head; + m_freem(mhead); + pq->stats.errors ++; + if (may_resched) { + taskqueue_enqueue(pq->taskq, + &pq->task); + } + goto escape; + } + + /* We have to increment head irrespective of the + * NS_MOREFRAG being set or not. */ + head = nm_next(head, lim); + + if (!(slot->flags & NS_MOREFRAG)) { + break; + } + + if (unlikely(head == ring->tail)) { + /* The very last slot prepared by the host has + * the NS_MOREFRAG set. Drop it and continue + * the outer cycle (to do the double-check). */ + RD(1, "Incomplete packet: dropping"); + m_freem(mhead); + pq->stats.iqdrops ++; + goto host_sync; + } + + slot = ring->slot + head; + nmbuf = NMB(na, slot); + nmbuf_len = slot->len; + mhead->m_pkthdr.len += nmbuf_len; + } + + mhead->m_pkthdr.rcvif = ifp; + mhead->m_pkthdr.csum_flags = 0; + + /* Store the queue idx in the packet header. */ + mhead->m_pkthdr.flowid = pq->kring_id; + M_HASHTYPE_SET(mhead, M_HASHTYPE_OPAQUE); + + if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { + struct ether_header *eh; + + eh = mtod(mhead, struct ether_header *); + if (eh->ether_type == htons(ETHERTYPE_VLAN)) { + ptnet_vlan_tag_remove(mhead); + /* + * With the 802.1Q header removed, update the + * checksum starting location accordingly. 
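+                                 * (csum_start is an offset from the
+                                 * start of the frame, so it shifts down
+                                 * by ETHER_VLAN_ENCAP_LEN.)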
+ */ + if (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) + vh->csum_start -= ETHER_VLAN_ENCAP_LEN; + } + } + + if (have_vnet_hdr && (vh->flags & (VIRTIO_NET_HDR_F_NEEDS_CSUM + | VIRTIO_NET_HDR_F_DATA_VALID))) { + if (unlikely(ptnet_rx_csum(mhead, vh))) { + m_freem(mhead); + RD(1, "Csum offload error: dropping"); + pq->stats.iqdrops ++; + goto skip; + } + } + + pq->stats.packets ++; + pq->stats.bytes += mhead->m_pkthdr.len; + + PTNET_Q_UNLOCK(pq); + (*ifp->if_input)(ifp, mhead); + PTNET_Q_LOCK(pq); + + if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) { + /* The interface has gone down while we didn't + * have the lock. Stop any processing and exit. */ + goto unlock; + } +skip: + count ++; + if (++batch_count == PTNET_RX_BATCH) { + /* Some packets have been pushed to the network stack. + * We need to update the CSB to tell the host about the new + * ring->cur and ring->head (RX buffer refill). */ + ptnet_ring_update(pq, kring, head, NAF_FORCE_READ); + batch_count = 0; + } + } +escape: + if (batch_count) { + ptnet_ring_update(pq, kring, head, NAF_FORCE_READ); + + } + + if (count >= budget && may_resched) { + /* If we ran out of budget or the double-check found new + * slots to process, schedule the taskqueue. */ + DBG(RD(1, "out of budget: resched h %u t %u\n", + head, ring->tail)); + taskqueue_enqueue(pq->taskq, &pq->task); + } +unlock: + PTNET_Q_UNLOCK(pq); + + return count; +} + +static void +ptnet_rx_task(void *context, int pending) +{ + struct ptnet_queue *pq = context; + + DBG(RD(1, "%s: pq #%u\n", __func__, pq->kring_id)); + ptnet_rx_eof(pq, PTNET_RX_BUDGET, true); +} + +static void +ptnet_tx_task(void *context, int pending) +{ + struct ptnet_queue *pq = context; + + DBG(RD(1, "%s: pq #%u\n", __func__, pq->kring_id)); + ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true); +} + +#ifdef DEVICE_POLLING +/* We don't need to handle differently POLL_AND_CHECK_STATUS and + * POLL_ONLY, since we don't have an Interrupt Status Register. */ +static int +ptnet_poll(if_t ifp, enum poll_cmd cmd, int budget) +{ + struct ptnet_softc *sc = if_getsoftc(ifp); + unsigned int queue_budget; + unsigned int count = 0; + bool borrow = false; + int i; + + KASSERT(sc->num_rings > 0, ("Found no queues in while polling ptnet")); + queue_budget = MAX(budget / sc->num_rings, 1); + RD(1, "Per-queue budget is %d", queue_budget); + + while (budget) { + unsigned int rcnt = 0; + + for (i = 0; i < sc->num_rings; i++) { + struct ptnet_queue *pq = sc->queues + i; + + if (borrow) { + queue_budget = MIN(queue_budget, budget); + if (queue_budget == 0) { + break; + } + } + + if (i < sc->num_tx_rings) { + rcnt += ptnet_drain_transmit_queue(pq, + queue_budget, false); + } else { + rcnt += ptnet_rx_eof(pq, queue_budget, + false); + } + } + + if (!rcnt) { + /* A scan of the queues gave no result, we can + * stop here. */ + break; + } + + if (rcnt > budget) { + /* This may happen when initial budget < sc->num_rings, + * since one packet budget is given to each queue + * anyway. Just pretend we didn't eat "so much". */ + rcnt = budget; + } + count += rcnt; + budget -= rcnt; + borrow = true; + } + + + return count; +} +#endif /* DEVICE_POLLING */ diff --git a/sys/dev/netmap/netmap_pt.c b/sys/dev/netmap/netmap_pt.c new file mode 100644 index 000000000000..53348495c4eb --- /dev/null +++ b/sys/dev/netmap/netmap_pt.c @@ -0,0 +1,1462 @@ +/* + * Copyright (C) 2015 Stefano Garzarella + * Copyright (C) 2016 Vincenzo Maffione + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +/* + * common headers + */ +#if defined(__FreeBSD__) +#include +#include +#include +#include +#include +#include +#include +#include +#include + +//#define usleep_range(_1, _2) +#define usleep_range(_1, _2) \ + pause_sbt("ptnetmap-sleep", SBT_1US * _1, SBT_1US * 1, C_ABSOLUTE) + +#elif defined(linux) +#include +#endif + +#include +#include +#include +#include + +#ifdef WITH_PTNETMAP_HOST + +/* RX cycle without receive any packets */ +#define PTN_RX_DRY_CYCLES_MAX 10 + +/* Limit Batch TX to half ring. + * Currently disabled, since it does not manage NS_MOREFRAG, which + * results in random drops in the VALE txsync. */ +//#define PTN_TX_BATCH_LIM(_n) ((_n >> 1)) + +/* XXX: avoid nm_*sync_prologue(). XXX-vin: this should go away, + * we should never trust the guest. */ +#define PTN_AVOID_NM_PROLOGUE +//#define BUSY_WAIT + +#define DEBUG /* Enables communication debugging. */ +#ifdef DEBUG +#define DBG(x) x +#else +#define DBG(x) +#endif + + +#undef RATE +//#define RATE /* Enables communication statistics. */ +#ifdef RATE +#define IFRATE(x) x +struct rate_batch_stats { + unsigned long sync; + unsigned long sync_dry; + unsigned long pkt; +}; + +struct rate_stats { + unsigned long gtxk; /* Guest --> Host Tx kicks. */ + unsigned long grxk; /* Guest --> Host Rx kicks. */ + unsigned long htxk; /* Host --> Guest Tx kicks. */ + unsigned long hrxk; /* Host --> Guest Rx Kicks. */ + unsigned long btxwu; /* Backend Tx wake-up. */ + unsigned long brxwu; /* Backend Rx wake-up. 
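
The usleep_range() shim above maps microseconds onto pause_sbt() via SBT_1US. FreeBSD's sbintime_t is, to the best of our understanding, a 32.32 fixed-point count of seconds, so SBT_1US works out to (1 << 32) / 1000000. A quick standalone check of that conversion, with local names in place of the kernel macros:

#include <stdint.h>
#include <stdio.h>

typedef int64_t sbt_t;                  // stands in for sbintime_t

#define SK_SBT_1S       ((sbt_t)1 << 32)
#define SK_SBT_1US      (SK_SBT_1S / 1000000)

int
main(void)
{
        sbt_t five_us = 5 * SK_SBT_1US;

        // Truncation in the 1US constant makes the round trip
        // slightly short of 5us, which is harmless at this scale.
        printf("5us = %lld ticks (%.3f us round-tripped)\n",
               (long long)five_us,
               (double)five_us * 1e6 / (double)SK_SBT_1S);
        return 0;
}
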
*/ + struct rate_batch_stats txbs; + struct rate_batch_stats rxbs; +}; + +struct rate_context { + struct timer_list timer; + struct rate_stats new; + struct rate_stats old; +}; + +#define RATE_PERIOD 2 +static void +rate_callback(unsigned long arg) +{ + struct rate_context * ctx = (struct rate_context *)arg; + struct rate_stats cur = ctx->new; + struct rate_batch_stats *txbs = &cur.txbs; + struct rate_batch_stats *rxbs = &cur.rxbs; + struct rate_batch_stats *txbs_old = &ctx->old.txbs; + struct rate_batch_stats *rxbs_old = &ctx->old.rxbs; + uint64_t tx_batch, rx_batch; + unsigned long txpkts, rxpkts; + unsigned long gtxk, grxk; + int r; + + txpkts = txbs->pkt - txbs_old->pkt; + rxpkts = rxbs->pkt - rxbs_old->pkt; + + tx_batch = ((txbs->sync - txbs_old->sync) > 0) ? + txpkts / (txbs->sync - txbs_old->sync): 0; + rx_batch = ((rxbs->sync - rxbs_old->sync) > 0) ? + rxpkts / (rxbs->sync - rxbs_old->sync): 0; + + /* Fix-up gtxk and grxk estimates. */ + gtxk = (cur.gtxk - ctx->old.gtxk) - (cur.btxwu - ctx->old.btxwu); + grxk = (cur.grxk - ctx->old.grxk) - (cur.brxwu - ctx->old.brxwu); + + printk("txpkts = %lu Hz\n", txpkts/RATE_PERIOD); + printk("gtxk = %lu Hz\n", gtxk/RATE_PERIOD); + printk("htxk = %lu Hz\n", (cur.htxk - ctx->old.htxk)/RATE_PERIOD); + printk("btxw = %lu Hz\n", (cur.btxwu - ctx->old.btxwu)/RATE_PERIOD); + printk("rxpkts = %lu Hz\n", rxpkts/RATE_PERIOD); + printk("grxk = %lu Hz\n", grxk/RATE_PERIOD); + printk("hrxk = %lu Hz\n", (cur.hrxk - ctx->old.hrxk)/RATE_PERIOD); + printk("brxw = %lu Hz\n", (cur.brxwu - ctx->old.brxwu)/RATE_PERIOD); + printk("txbatch = %llu avg\n", tx_batch); + printk("rxbatch = %llu avg\n", rx_batch); + printk("\n"); + + ctx->old = cur; + r = mod_timer(&ctx->timer, jiffies + + msecs_to_jiffies(RATE_PERIOD * 1000)); + if (unlikely(r)) + D("[ptnetmap] Error: mod_timer()\n"); +} + +static void +rate_batch_stats_update(struct rate_batch_stats *bf, uint32_t pre_tail, + uint32_t act_tail, uint32_t num_slots) +{ + int n = (int)act_tail - pre_tail; + + if (n) { + if (n < 0) + n += num_slots; + + bf->sync++; + bf->pkt += n; + } else { + bf->sync_dry++; + } +} + +#else /* !RATE */ +#define IFRATE(x) +#endif /* RATE */ + +struct ptnetmap_state { + /* Kthreads. */ + struct nm_kthread **kthreads; + + /* Shared memory with the guest (TX/RX) */ + struct ptnet_ring __user *ptrings; + + bool stopped; + + /* Netmap adapter wrapping the backend. */ + struct netmap_pt_host_adapter *pth_na; + + IFRATE(struct rate_context rate_ctx;) +}; + +static inline void +ptnetmap_kring_dump(const char *title, const struct netmap_kring *kring) +{ + RD(1, "%s - name: %s hwcur: %d hwtail: %d rhead: %d rcur: %d \ + rtail: %d head: %d cur: %d tail: %d", + title, kring->name, kring->nr_hwcur, + kring->nr_hwtail, kring->rhead, kring->rcur, kring->rtail, + kring->ring->head, kring->ring->cur, kring->ring->tail); +} + +#if 0 +static inline void +ptnetmap_ring_reinit(struct netmap_kring *kring, uint32_t g_head, uint32_t g_cur) +{ + struct netmap_ring *ring = kring->ring; + + //XXX: trust guest? + ring->head = g_head; + ring->cur = g_cur; + ring->tail = NM_ACCESS_ONCE(kring->nr_hwtail); + + netmap_ring_reinit(kring); + ptnetmap_kring_dump("kring reinit", kring); +} +#endif + +/* + * TX functions to set/get and to handle host/guest kick. + */ + + +/* Enable or disable guest --> host kicks. */ +static inline void +ptring_kick_enable(struct ptnet_ring __user *ptring, uint32_t val) +{ + CSB_WRITE(ptring, host_need_kick, val); +} + +/* Are guest interrupt enabled or disabled? 
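
rate_batch_stats_update() above measures batch sizes as the modular distance the tail moved between two syncs; a negative raw difference just means the tail wrapped past zero. The arithmetic in isolation:

#include <stdint.h>
#include <stdio.h>

// Slots completed between two tail snapshots on a ring of num_slots
// entries; the negative case is the tail having wrapped past zero.
static int
tail_delta(uint32_t pre_tail, uint32_t act_tail, uint32_t num_slots)
{
        int n = (int)act_tail - (int)pre_tail;

        if (n < 0)
                n += num_slots;
        return n;
}

int
main(void)
{
        printf("%d\n", tail_delta(1020, 4, 1024));      // 8: wrapped
        printf("%d\n", tail_delta(4, 4, 1024));         // 0: dry sync
        return 0;
}
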
*/ +static inline uint32_t +ptring_intr_enabled(struct ptnet_ring __user *ptring) +{ + uint32_t v; + + CSB_READ(ptring, guest_need_kick, v); + + return v; +} + +/* Enable or disable guest interrupts. */ +static inline void +ptring_intr_enable(struct ptnet_ring __user *ptring, uint32_t val) +{ + CSB_WRITE(ptring, guest_need_kick, val); +} + +/* Handle TX events: from the guest or from the backend */ +static void +ptnetmap_tx_handler(void *data) +{ + struct netmap_kring *kring = data; + struct netmap_pt_host_adapter *pth_na = + (struct netmap_pt_host_adapter *)kring->na->na_private; + struct ptnetmap_state *ptns = pth_na->ptns; + struct ptnet_ring __user *ptring; + struct netmap_ring g_ring; /* guest ring pointer, copied from CSB */ + bool more_txspace = false; + struct nm_kthread *kth; + uint32_t num_slots; + int batch; + IFRATE(uint32_t pre_tail); + + if (unlikely(!ptns)) { + D("ERROR ptnetmap state is NULL"); + return; + } + + if (unlikely(ptns->stopped)) { + RD(1, "backend netmap is being stopped"); + return; + } + + if (unlikely(nm_kr_tryget(kring, 1, NULL))) { + D("ERROR nm_kr_tryget()"); + return; + } + + /* This is a guess, to be fixed in the rate callback. */ + IFRATE(ptns->rate_ctx.new.gtxk++); + + /* Get TX ptring pointer from the CSB. */ + ptring = ptns->ptrings + kring->ring_id; + kth = ptns->kthreads[kring->ring_id]; + + num_slots = kring->nkr_num_slots; + g_ring.head = kring->rhead; + g_ring.cur = kring->rcur; + + /* Disable guest --> host notifications. */ + ptring_kick_enable(ptring, 0); + /* Copy the guest kring pointers from the CSB */ + ptnetmap_host_read_kring_csb(ptring, &g_ring, num_slots); + + for (;;) { + /* If guest moves ahead too fast, let's cut the move so + * that we don't exceed our batch limit. */ + batch = g_ring.head - kring->nr_hwcur; + if (batch < 0) + batch += num_slots; + +#ifdef PTN_TX_BATCH_LIM + if (batch > PTN_TX_BATCH_LIM(num_slots)) { + uint32_t head_lim = kring->nr_hwcur + PTN_TX_BATCH_LIM(num_slots); + + if (head_lim >= num_slots) + head_lim -= num_slots; + ND(1, "batch: %d head: %d head_lim: %d", batch, g_ring.head, + head_lim); + g_ring.head = head_lim; + batch = PTN_TX_BATCH_LIM(num_slots); + } +#endif /* PTN_TX_BATCH_LIM */ + + if (nm_kr_txspace(kring) <= (num_slots >> 1)) { + g_ring.flags |= NAF_FORCE_RECLAIM; + } +#ifndef PTN_AVOID_NM_PROLOGUE + /* Netmap prologue */ + if (unlikely(nm_txsync_prologue(kring, &g_ring) >= num_slots)) { + ptnetmap_ring_reinit(kring, g_ring.head, g_ring.cur); + /* Reenable notifications. */ + ptring_kick_enable(ptring, 1); + break; + } +#else /* PTN_AVOID_NM_PROLOGUE */ + kring->rhead = g_ring.head; + kring->rcur = g_ring.cur; +#endif /* !PTN_AVOID_NM_PROLOGUE */ + if (unlikely(netmap_verbose & NM_VERB_TXSYNC)) { + ptnetmap_kring_dump("pre txsync", kring); + } + + IFRATE(pre_tail = kring->rtail); + if (unlikely(kring->nm_sync(kring, g_ring.flags))) { + /* Reenable notifications. */ + ptring_kick_enable(ptring, 1); + D("ERROR txsync"); + break; + } + + /* + * Finalize + * Copy host hwcur and hwtail into the CSB for the guest sync(), and + * do the nm_sync_finalize. + */ + ptnetmap_host_write_kring_csb(ptring, kring->nr_hwcur, + kring->nr_hwtail); + if (kring->rtail != kring->nr_hwtail) { + /* Some more room available in the parent adapter. 
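
The TX handler above computes how far the guest's head runs ahead of nr_hwcur modulo the ring size, and the (currently disabled) PTN_TX_BATCH_LIM clamp would cut one pass down to half a ring. A self-contained sketch of both computations; the function names are invented:

#include <stdint.h>
#include <stdio.h>

// How far the guest's head runs ahead of hwcur, modulo ring size.
static uint32_t
tx_batch(uint32_t g_head, uint32_t hwcur, uint32_t num_slots)
{
        int batch = (int)g_head - (int)hwcur;

        if (batch < 0)
                batch += num_slots;
        return (uint32_t)batch;
}

// Clamp one pass to half the ring by pulling g_head back, mirroring
// the disabled PTN_TX_BATCH_LIM logic.
static uint32_t
tx_clamp_head(uint32_t g_head, uint32_t hwcur, uint32_t num_slots)
{
        uint32_t lim = num_slots >> 1;

        if (tx_batch(g_head, hwcur, num_slots) > lim) {
                g_head = hwcur + lim;
                if (g_head >= num_slots)
                        g_head -= num_slots;
        }
        return g_head;
}

int
main(void)
{
        // hwcur = 1000, guest head wrapped around to 200 on a
        // 1024-slot ring: the batch spans the wrap, 224 slots.
        printf("batch %u, clamped head %u\n",
               (unsigned)tx_batch(200, 1000, 1024),
               (unsigned)tx_clamp_head(200, 1000, 1024));
        return 0;
}
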
 */
+			kring->rtail = kring->nr_hwtail;
+			more_txspace = true;
+		}
+
+		IFRATE(rate_batch_stats_update(&ptns->rate_ctx.new.txbs, pre_tail,
+					       kring->rtail, num_slots));
+
+		if (unlikely(netmap_verbose & NM_VERB_TXSYNC)) {
+			ptnetmap_kring_dump("post txsync", kring);
+		}
+
+#ifndef BUSY_WAIT
+		/* Interrupt the guest if needed. */
+		if (more_txspace && ptring_intr_enabled(ptring)) {
+			/* Disable guest kick to avoid sending unnecessary kicks */
+			ptring_intr_enable(ptring, 0);
+			nm_os_kthread_send_irq(kth);
+			IFRATE(ptns->rate_ctx.new.htxk++);
+			more_txspace = false;
+		}
+#endif
+		/* Read CSB to see if there is more work to do. */
+		ptnetmap_host_read_kring_csb(ptring, &g_ring, num_slots);
+#ifndef BUSY_WAIT
+		if (g_ring.head == kring->rhead) {
+			/*
+			 * No more packets to transmit. We enable notifications and
+			 * go to sleep, waiting for a kick from the guest when new
+			 * slots are ready for transmission.
+			 */
+			usleep_range(1,1);
+			/* Reenable notifications. */
+			ptring_kick_enable(ptring, 1);
+			/* Double-check. */
+			ptnetmap_host_read_kring_csb(ptring, &g_ring, num_slots);
+			if (g_ring.head != kring->rhead) {
+				/* We won the race condition, there are more packets
+				 * to transmit. Disable notifications and do another
+				 * cycle. */
+				ptring_kick_enable(ptring, 0);
+				continue;
+			}
+			break;
+		}
+
+		if (nm_kr_txempty(kring)) {
+			/* No more available TX slots. We stop and wait for a
+			 * notification from the backend (netmap_tx_irq). */
+			ND(1, "TX ring");
+			break;
+		}
+#endif
+		if (unlikely(ptns->stopped)) {
+			D("backend netmap is being stopped");
+			break;
+		}
+	}
+
+	nm_kr_put(kring);
+
+	if (more_txspace && ptring_intr_enabled(ptring)) {
+		ptring_intr_enable(ptring, 0);
+		nm_os_kthread_send_irq(kth);
+		IFRATE(ptns->rate_ctx.new.htxk++);
+	}
+}
+
+/*
+ * We need RX kicks from the guest when (tail == head-1), where we wait
+ * for the guest to refill.
+ */
+#ifndef BUSY_WAIT
+static inline int
+ptnetmap_norxslots(struct netmap_kring *kring, uint32_t g_head)
+{
+	return (NM_ACCESS_ONCE(kring->nr_hwtail) == nm_prev(g_head,
+				kring->nkr_num_slots - 1));
+}
+#endif /* !BUSY_WAIT */
+
+/* Handle RX events: from the guest or from the backend */
+static void
+ptnetmap_rx_handler(void *data)
+{
+	struct netmap_kring *kring = data;
+	struct netmap_pt_host_adapter *pth_na =
+		(struct netmap_pt_host_adapter *)kring->na->na_private;
+	struct ptnetmap_state *ptns = pth_na->ptns;
+	struct ptnet_ring __user *ptring;
+	struct netmap_ring g_ring;	/* guest ring pointer, copied from CSB */
+	struct nm_kthread *kth;
+	uint32_t num_slots;
+	int dry_cycles = 0;
+	bool some_recvd = false;
+	IFRATE(uint32_t pre_tail);
+
+	if (unlikely(!ptns || !ptns->pth_na)) {
+		D("ERROR ptnetmap state %p, ptnetmap host adapter %p", ptns,
+		  ptns ? ptns->pth_na : NULL);
+		return;
+	}
+
+	if (unlikely(ptns->stopped)) {
+		RD(1, "backend netmap is being stopped");
+		return;
+	}
+
+	if (unlikely(nm_kr_tryget(kring, 1, NULL))) {
+		D("ERROR nm_kr_tryget()");
+		return;
+	}
+
+	/* This is a guess, to be fixed in the rate callback. */
+	IFRATE(ptns->rate_ctx.new.grxk++);
+
+	/* Get RX ptring pointer from the CSB. */
+	ptring = ptns->ptrings + (pth_na->up.num_tx_rings + kring->ring_id);
+	kth = ptns->kthreads[pth_na->up.num_tx_rings + kring->ring_id];
+
+	num_slots = kring->nkr_num_slots;
+	g_ring.head = kring->rhead;
+	g_ring.cur = kring->rcur;
+
+	/* Disable notifications.
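
Both handlers rely on the enable-kick/double-check idiom above to avoid lost wakeups: after asking to be kicked, they re-read the shared head, since the peer may have published new work in the window before the flag became visible. A minimal model, with C11 atomics standing in for the CSB accesses:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint shared_head; // published by the "guest"
static atomic_uint need_kick;   // read by the "guest" before kicking

// Returns 1 if it is safe to sleep: the kick request was published
// and the shared head is still where we left it.
static int
worker_should_sleep(unsigned seen_head)
{
        atomic_store(&need_kick, 1);                    // ask for a kick
        if (atomic_load(&shared_head) != seen_head) {   // double-check
                atomic_store(&need_kick, 0);            // won the race
                return 0;                               // keep processing
        }
        return 1;
}

int
main(void)
{
        atomic_store(&shared_head, 5);
        printf("%d\n", worker_should_sleep(5)); // 1: really idle
        atomic_store(&shared_head, 6);
        printf("%d\n", worker_should_sleep(5)); // 0: work raced in
        return 0;
}
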
*/ + ptring_kick_enable(ptring, 0); + /* Copy the guest kring pointers from the CSB */ + ptnetmap_host_read_kring_csb(ptring, &g_ring, num_slots); + + for (;;) { + uint32_t hwtail; + +#ifndef PTN_AVOID_NM_PROLOGUE + /* Netmap prologue */ + if (unlikely(nm_rxsync_prologue(kring, &g_ring) >= num_slots)) { + ptnetmap_ring_reinit(kring, g_ring.head, g_ring.cur); + /* Reenable notifications. */ + ptring_kick_enable(ptring, 1); + break; + } +#else /* PTN_AVOID_NM_PROLOGUE */ + kring->rhead = g_ring.head; + kring->rcur = g_ring.cur; +#endif /* !PTN_AVOID_NM_PROLOGUE */ + + if (unlikely(netmap_verbose & NM_VERB_RXSYNC)) + ptnetmap_kring_dump("pre rxsync", kring); + + IFRATE(pre_tail = kring->rtail); + + if (unlikely(kring->nm_sync(kring, g_ring.flags))) { + /* Reenable notifications. */ + ptring_kick_enable(ptring, 1); + D("ERROR rxsync()"); + break; + } + /* + * Finalize + * Copy host hwcur and hwtail into the CSB for the guest sync() + */ + hwtail = NM_ACCESS_ONCE(kring->nr_hwtail); + ptnetmap_host_write_kring_csb(ptring, kring->nr_hwcur, hwtail); + if (kring->rtail != hwtail) { + kring->rtail = hwtail; + some_recvd = true; + dry_cycles = 0; + } else { + dry_cycles++; + } + + IFRATE(rate_batch_stats_update(&ptns->rate_ctx.new.rxbs, pre_tail, + kring->rtail, num_slots)); + + if (unlikely(netmap_verbose & NM_VERB_RXSYNC)) + ptnetmap_kring_dump("post rxsync", kring); + +#ifndef BUSY_WAIT + /* Interrupt the guest if needed. */ + if (some_recvd && ptring_intr_enabled(ptring)) { + /* Disable guest kick to avoid sending unnecessary kicks */ + ptring_intr_enable(ptring, 0); + nm_os_kthread_send_irq(kth); + IFRATE(ptns->rate_ctx.new.hrxk++); + some_recvd = false; + } +#endif + /* Read CSB to see if there is more work to do. */ + ptnetmap_host_read_kring_csb(ptring, &g_ring, num_slots); +#ifndef BUSY_WAIT + if (ptnetmap_norxslots(kring, g_ring.head)) { + /* + * No more slots available for reception. We enable notification and + * go to sleep, waiting for a kick from the guest when new receive + * slots are available. + */ + usleep_range(1,1); + /* Reenable notifications. */ + ptring_kick_enable(ptring, 1); + /* Doublecheck. */ + ptnetmap_host_read_kring_csb(ptring, &g_ring, num_slots); + if (!ptnetmap_norxslots(kring, g_ring.head)) { + /* We won the race condition, more slots are available. Disable + * notifications and do another cycle. */ + ptring_kick_enable(ptring, 0); + continue; + } + break; + } + + hwtail = NM_ACCESS_ONCE(kring->nr_hwtail); + if (unlikely(hwtail == kring->rhead || + dry_cycles >= PTN_RX_DRY_CYCLES_MAX)) { + /* No more packets to be read from the backend. We stop and + * wait for a notification from the backend (netmap_rx_irq). */ + ND(1, "nr_hwtail: %d rhead: %d dry_cycles: %d", + hwtail, kring->rhead, dry_cycles); + break; + } +#endif + if (unlikely(ptns->stopped)) { + D("backend netmap is being stopped"); + break; + } + } + + nm_kr_put(kring); + + /* Interrupt the guest if needed. 
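
ptnetmap_norxslots(), used in the loop above, declares the RX ring out of space for the host when hwtail sits on the slot just before the guest's head. The same test in standalone form, with an nm_prev()-style decrement reimplemented locally:

#include <stdint.h>
#include <stdio.h>

// Circular decrement, same shape as netmap's nm_prev(): lim is
// num_slots - 1.
static inline uint32_t
sketch_prev(uint32_t i, uint32_t lim)
{
        return (i == 0) ? lim : i - 1;
}

// The ring has no room for the host when hwtail sits on the slot
// just before the guest's head (tail == head - 1, modulo the ring).
static int
sketch_norxslots(uint32_t hwtail, uint32_t g_head, uint32_t num_slots)
{
        return hwtail == sketch_prev(g_head, num_slots - 1);
}

int
main(void)
{
        printf("%d\n", sketch_norxslots(255, 0, 256));  // 1: full
        printf("%d\n", sketch_norxslots(10, 0, 256));   // 0: room left
        return 0;
}
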
*/ + if (some_recvd && ptring_intr_enabled(ptring)) { + ptring_intr_enable(ptring, 0); + nm_os_kthread_send_irq(kth); + IFRATE(ptns->rate_ctx.new.hrxk++); + } +} + +#ifdef DEBUG +static void +ptnetmap_print_configuration(struct ptnetmap_cfg *cfg) +{ + int k; + + D("[PTN] configuration:"); + D(" CSB ptrings @%p, num_rings=%u, features %08x", cfg->ptrings, + cfg->num_rings, cfg->features); + for (k = 0; k < cfg->num_rings; k++) { + D(" ring #%d: iofd=%llu, irqfd=%llu", k, + (unsigned long long)cfg->entries[k].ioeventfd, + (unsigned long long)cfg->entries[k].irqfd); + } + +} +#endif + +/* Copy actual state of the host ring into the CSB for the guest init */ +static int +ptnetmap_kring_snapshot(struct netmap_kring *kring, struct ptnet_ring __user *ptring) +{ + if(CSB_WRITE(ptring, head, kring->rhead)) + goto err; + if(CSB_WRITE(ptring, cur, kring->rcur)) + goto err; + + if(CSB_WRITE(ptring, hwcur, kring->nr_hwcur)) + goto err; + if(CSB_WRITE(ptring, hwtail, NM_ACCESS_ONCE(kring->nr_hwtail))) + goto err; + + DBG(ptnetmap_kring_dump("ptnetmap_kring_snapshot", kring);) + + return 0; +err: + return EFAULT; +} + +static struct netmap_kring * +ptnetmap_kring(struct netmap_pt_host_adapter *pth_na, int k) +{ + if (k < pth_na->up.num_tx_rings) { + return pth_na->up.tx_rings + k; + } + return pth_na->up.rx_rings + k - pth_na->up.num_tx_rings; +} + +static int +ptnetmap_krings_snapshot(struct netmap_pt_host_adapter *pth_na) +{ + struct ptnetmap_state *ptns = pth_na->ptns; + struct netmap_kring *kring; + unsigned int num_rings; + int err = 0, k; + + num_rings = pth_na->up.num_tx_rings + + pth_na->up.num_rx_rings; + + for (k = 0; k < num_rings; k++) { + kring = ptnetmap_kring(pth_na, k); + err |= ptnetmap_kring_snapshot(kring, ptns->ptrings + k); + } + + return err; +} + +/* + * Functions to create, start and stop the kthreads + */ + +static int +ptnetmap_create_kthreads(struct netmap_pt_host_adapter *pth_na, + struct ptnetmap_cfg *cfg) +{ + struct ptnetmap_state *ptns = pth_na->ptns; + struct nm_kthread_cfg nmk_cfg; + unsigned int num_rings; + int k; + + num_rings = pth_na->up.num_tx_rings + + pth_na->up.num_rx_rings; + + for (k = 0; k < num_rings; k++) { + nmk_cfg.attach_user = 1; /* attach kthread to user process */ + nmk_cfg.worker_private = ptnetmap_kring(pth_na, k); + nmk_cfg.event = *(cfg->entries + k); + nmk_cfg.type = k; + if (k < pth_na->up.num_tx_rings) { + nmk_cfg.worker_fn = ptnetmap_tx_handler; + } else { + nmk_cfg.worker_fn = ptnetmap_rx_handler; + } + + ptns->kthreads[k] = nm_os_kthread_create(&nmk_cfg); + if (ptns->kthreads[k] == NULL) { + goto err; + } + } + + return 0; +err: + for (k = 0; k < num_rings; k++) { + if (ptns->kthreads[k]) { + nm_os_kthread_delete(ptns->kthreads[k]); + ptns->kthreads[k] = NULL; + } + } + return EFAULT; +} + +static int +ptnetmap_start_kthreads(struct netmap_pt_host_adapter *pth_na) +{ + struct ptnetmap_state *ptns = pth_na->ptns; + int num_rings; + int error; + int k; + + if (!ptns) { + D("BUG ptns is NULL"); + return EFAULT; + } + + ptns->stopped = false; + + num_rings = ptns->pth_na->up.num_tx_rings + + ptns->pth_na->up.num_rx_rings; + for (k = 0; k < num_rings; k++) { + //nm_os_kthread_set_affinity(ptns->kthreads[k], xxx); + error = nm_os_kthread_start(ptns->kthreads[k]); + if (error) { + return error; + } + } + + return 0; +} + +static void +ptnetmap_stop_kthreads(struct netmap_pt_host_adapter *pth_na) +{ + struct ptnetmap_state *ptns = pth_na->ptns; + int num_rings; + int k; + + if (!ptns) { + /* Nothing to do. 
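
ptnetmap_kring() above addresses TX and RX rings through one flat index: 0..num_tx_rings-1 select TX, the remainder select RX, and the same index picks the matching kthread and CSB ring. A toy version of the lookup:

#include <stdio.h>

struct sketch_ring { const char *name; };

static struct sketch_ring tx[2] = { { "tx0" }, { "tx1" } };
static struct sketch_ring rx[2] = { { "rx0" }, { "rx1" } };

// Flat index: 0..num_tx-1 select TX rings, the rest select RX.
static struct sketch_ring *
flat_kring(int k, int num_tx)
{
        return (k < num_tx) ? &tx[k] : &rx[k - num_tx];
}

int
main(void)
{
        int k;

        for (k = 0; k < 4; k++)
                printf("%d -> %s\n", k, flat_kring(k, 2)->name);
        return 0;
}
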
*/ + return; + } + + ptns->stopped = true; + + num_rings = ptns->pth_na->up.num_tx_rings + + ptns->pth_na->up.num_rx_rings; + for (k = 0; k < num_rings; k++) { + nm_os_kthread_stop(ptns->kthreads[k]); + } +} + +static struct ptnetmap_cfg * +ptnetmap_read_cfg(struct nmreq *nmr) +{ + uintptr_t *nmr_ptncfg = (uintptr_t *)&nmr->nr_arg1; + struct ptnetmap_cfg *cfg; + struct ptnetmap_cfg tmp; + size_t cfglen; + + if (copyin((const void *)*nmr_ptncfg, &tmp, sizeof(tmp))) { + D("Partial copyin() failed"); + return NULL; + } + + cfglen = sizeof(tmp) + tmp.num_rings * sizeof(struct ptnet_ring_cfg); + cfg = malloc(cfglen, M_DEVBUF, M_NOWAIT | M_ZERO); + if (!cfg) { + return NULL; + } + + if (copyin((const void *)*nmr_ptncfg, cfg, cfglen)) { + D("Full copyin() failed"); + free(cfg, M_DEVBUF); + return NULL; + } + + return cfg; +} + +static int nm_unused_notify(struct netmap_kring *, int); +static int nm_pt_host_notify(struct netmap_kring *, int); + +/* Create ptnetmap state and switch parent adapter to ptnetmap mode. */ +static int +ptnetmap_create(struct netmap_pt_host_adapter *pth_na, + struct ptnetmap_cfg *cfg) +{ + unsigned ft_mask = (PTNETMAP_CFG_FEAT_CSB | PTNETMAP_CFG_FEAT_EVENTFD); + struct ptnetmap_state *ptns; + unsigned int num_rings; + int ret, i; + + /* Check if ptnetmap state is already there. */ + if (pth_na->ptns) { + D("ERROR adapter %p already in ptnetmap mode", pth_na->parent); + return EINVAL; + } + + if ((cfg->features & ft_mask) != ft_mask) { + D("ERROR ptnetmap_cfg(%x) does not contain CSB and EVENTFD", + cfg->features); + return EINVAL; + } + + num_rings = pth_na->up.num_tx_rings + pth_na->up.num_rx_rings; + + if (num_rings != cfg->num_rings) { + D("ERROR configuration mismatch, expected %u rings, found %u", + num_rings, cfg->num_rings); + return EINVAL; + } + + ptns = malloc(sizeof(*ptns) + num_rings * sizeof(*ptns->kthreads), + M_DEVBUF, M_NOWAIT | M_ZERO); + if (!ptns) { + return ENOMEM; + } + + ptns->kthreads = (struct nm_kthread **)(ptns + 1); + ptns->stopped = true; + + /* Cross-link data structures. */ + pth_na->ptns = ptns; + ptns->pth_na = pth_na; + + /* Store the CSB address provided by the hypervisor. */ + ptns->ptrings = cfg->ptrings; + + DBG(ptnetmap_print_configuration(cfg)); + + /* Create kthreads */ + if ((ret = ptnetmap_create_kthreads(pth_na, cfg))) { + D("ERROR ptnetmap_create_kthreads()"); + goto err; + } + /* Copy krings state into the CSB for the guest initialization */ + if ((ret = ptnetmap_krings_snapshot(pth_na))) { + D("ERROR ptnetmap_krings_snapshot()"); + goto err; + } + + /* Overwrite parent nm_notify krings callback. 
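
ptnetmap_read_cfg() above uses the classic two-phase copy for a variable-length structure: copy the fixed header first to learn num_rings, size the full buffer from it, then copy header plus entries in one shot. A userspace analogue, with memcpy() standing in for copyin() and invented config types:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Invented config types mirroring the header-plus-entries layout.
struct cfg_hdr   { uint32_t features; uint32_t num_rings; };
struct cfg_entry { uint64_t ioeventfd, irqfd; };

// Phase 1: read the fixed header to learn the entry count.
// Phase 2: allocate and read header + entries in a single copy.
static struct cfg_hdr *
read_cfg(const void *user_ptr)
{
        struct cfg_hdr tmp;
        struct cfg_hdr *cfg;
        size_t len;

        memcpy(&tmp, user_ptr, sizeof(tmp));            // "partial copyin"
        len = sizeof(tmp) + tmp.num_rings * sizeof(struct cfg_entry);
        cfg = calloc(1, len);
        if (cfg != NULL)
                memcpy(cfg, user_ptr, len);             // "full copyin"
        return cfg;
}

int
main(void)
{
        unsigned char buf[sizeof(struct cfg_hdr) +
                          2 * sizeof(struct cfg_entry)] = { 0 };
        struct cfg_hdr hdr = { .features = 0x3, .num_rings = 2 };
        struct cfg_hdr *cfg;

        memcpy(buf, &hdr, sizeof(hdr));
        cfg = read_cfg(buf);
        printf("num_rings %u\n", cfg ? cfg->num_rings : 0);
        free(cfg);
        return 0;
}
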
*/ + pth_na->parent->na_private = pth_na; + pth_na->parent_nm_notify = pth_na->parent->nm_notify; + pth_na->parent->nm_notify = nm_unused_notify; + + for (i = 0; i < pth_na->parent->num_rx_rings; i++) { + pth_na->up.rx_rings[i].save_notify = + pth_na->up.rx_rings[i].nm_notify; + pth_na->up.rx_rings[i].nm_notify = nm_pt_host_notify; + } + for (i = 0; i < pth_na->parent->num_tx_rings; i++) { + pth_na->up.tx_rings[i].save_notify = + pth_na->up.tx_rings[i].nm_notify; + pth_na->up.tx_rings[i].nm_notify = nm_pt_host_notify; + } + +#ifdef RATE + memset(&ptns->rate_ctx, 0, sizeof(ptns->rate_ctx)); + setup_timer(&ptns->rate_ctx.timer, &rate_callback, + (unsigned long)&ptns->rate_ctx); + if (mod_timer(&ptns->rate_ctx.timer, jiffies + msecs_to_jiffies(1500))) + D("[ptn] Error: mod_timer()\n"); +#endif + + DBG(D("[%s] ptnetmap configuration DONE", pth_na->up.name)); + + return 0; + +err: + pth_na->ptns = NULL; + free(ptns, M_DEVBUF); + return ret; +} + +/* Switch parent adapter back to normal mode and destroy + * ptnetmap state. */ +static void +ptnetmap_delete(struct netmap_pt_host_adapter *pth_na) +{ + struct ptnetmap_state *ptns = pth_na->ptns; + int num_rings; + int i; + + if (!ptns) { + /* Nothing to do. */ + return; + } + + /* Restore parent adapter callbacks. */ + pth_na->parent->nm_notify = pth_na->parent_nm_notify; + pth_na->parent->na_private = NULL; + + for (i = 0; i < pth_na->parent->num_rx_rings; i++) { + pth_na->up.rx_rings[i].nm_notify = + pth_na->up.rx_rings[i].save_notify; + pth_na->up.rx_rings[i].save_notify = NULL; + } + for (i = 0; i < pth_na->parent->num_tx_rings; i++) { + pth_na->up.tx_rings[i].nm_notify = + pth_na->up.tx_rings[i].save_notify; + pth_na->up.tx_rings[i].save_notify = NULL; + } + + /* Delete kthreads. */ + num_rings = ptns->pth_na->up.num_tx_rings + + ptns->pth_na->up.num_rx_rings; + for (i = 0; i < num_rings; i++) { + nm_os_kthread_delete(ptns->kthreads[i]); + ptns->kthreads[i] = NULL; + } + + IFRATE(del_timer(&ptns->rate_ctx.timer)); + + free(ptns, M_DEVBUF); + + pth_na->ptns = NULL; + + DBG(D("[%s] ptnetmap deleted", pth_na->up.name)); +} + +/* + * Called by netmap_ioctl(). + * Operation is indicated in nmr->nr_cmd. + * + * Called without NMG_LOCK. + */ +int +ptnetmap_ctl(struct nmreq *nmr, struct netmap_adapter *na) +{ + struct netmap_pt_host_adapter *pth_na; + struct ptnetmap_cfg *cfg; + char *name; + int cmd, error = 0; + + name = nmr->nr_name; + cmd = nmr->nr_cmd; + + DBG(D("name: %s", name)); + + if (!nm_ptnetmap_host_on(na)) { + D("ERROR Netmap adapter %p is not a ptnetmap host adapter", na); + error = ENXIO; + goto done; + } + pth_na = (struct netmap_pt_host_adapter *)na; + + NMG_LOCK(); + switch (cmd) { + case NETMAP_PT_HOST_CREATE: + /* Read hypervisor configuration from userspace. */ + cfg = ptnetmap_read_cfg(nmr); + if (!cfg) + break; + /* Create ptnetmap state (kthreads, ...) and switch parent + * adapter to ptnetmap mode. */ + error = ptnetmap_create(pth_na, cfg); + free(cfg, M_DEVBUF); + if (error) + break; + /* Start kthreads. */ + error = ptnetmap_start_kthreads(pth_na); + if (error) + ptnetmap_delete(pth_na); + break; + + case NETMAP_PT_HOST_DELETE: + /* Stop kthreads. */ + ptnetmap_stop_kthreads(pth_na); + /* Switch parent adapter back to normal mode and destroy + * ptnetmap state (kthreads, ...). 
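
ptnetmap_create() and ptnetmap_delete() above interpose on the per-ring notify callbacks and restore them at teardown through the save_notify field. The save/override/restore idiom in isolation, with invented types:

#include <stdio.h>

struct sketch_kring {
        int (*nm_notify)(struct sketch_kring *);
        int (*save_notify)(struct sketch_kring *);
};

static int
orig_notify(struct sketch_kring *k)
{
        (void)k;
        return printf("original\n");
}

static int
interposed_notify(struct sketch_kring *k)
{
        (void)k;
        return printf("interposed\n");
}

static void
hook(struct sketch_kring *k)
{
        k->save_notify = k->nm_notify;  // remember the original
        k->nm_notify = interposed_notify;
}

static void
unhook(struct sketch_kring *k)
{
        k->nm_notify = k->save_notify;  // put it back
        k->save_notify = NULL;
}

int
main(void)
{
        struct sketch_kring k = { .nm_notify = orig_notify };

        hook(&k);
        k.nm_notify(&k);        // "interposed"
        unhook(&k);
        k.nm_notify(&k);        // "original"
        return 0;
}
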
*/ + ptnetmap_delete(pth_na); + break; + + default: + D("ERROR invalid cmd (nmr->nr_cmd) (0x%x)", cmd); + error = EINVAL; + break; + } + NMG_UNLOCK(); + +done: + return error; +} + +/* nm_notify callbacks for ptnetmap */ +static int +nm_pt_host_notify(struct netmap_kring *kring, int flags) +{ + struct netmap_adapter *na = kring->na; + struct netmap_pt_host_adapter *pth_na = + (struct netmap_pt_host_adapter *)na->na_private; + struct ptnetmap_state *ptns; + int k; + + /* First check that the passthrough port is not being destroyed. */ + if (unlikely(!pth_na)) { + return NM_IRQ_COMPLETED; + } + + ptns = pth_na->ptns; + if (unlikely(!ptns || ptns->stopped)) { + return NM_IRQ_COMPLETED; + } + + k = kring->ring_id; + + /* Notify kthreads (wake up if needed) */ + if (kring->tx == NR_TX) { + ND(1, "TX backend irq"); + IFRATE(ptns->rate_ctx.new.btxwu++); + } else { + k += pth_na->up.num_tx_rings; + ND(1, "RX backend irq"); + IFRATE(ptns->rate_ctx.new.brxwu++); + } + nm_os_kthread_wakeup_worker(ptns->kthreads[k]); + + return NM_IRQ_COMPLETED; +} + +static int +nm_unused_notify(struct netmap_kring *kring, int flags) +{ + D("BUG this should never be called"); + return ENXIO; +} + +/* nm_config callback for bwrap */ +static int +nm_pt_host_config(struct netmap_adapter *na, u_int *txr, u_int *txd, + u_int *rxr, u_int *rxd) +{ + struct netmap_pt_host_adapter *pth_na = + (struct netmap_pt_host_adapter *)na; + struct netmap_adapter *parent = pth_na->parent; + int error; + + //XXX: maybe calling parent->nm_config is better + + /* forward the request */ + error = netmap_update_config(parent); + + *rxr = na->num_rx_rings = parent->num_rx_rings; + *txr = na->num_tx_rings = parent->num_tx_rings; + *txd = na->num_tx_desc = parent->num_tx_desc; + *rxd = na->num_rx_desc = parent->num_rx_desc; + + DBG(D("rxr: %d txr: %d txd: %d rxd: %d", *rxr, *txr, *txd, *rxd)); + + return error; +} + +/* nm_krings_create callback for ptnetmap */ +static int +nm_pt_host_krings_create(struct netmap_adapter *na) +{ + struct netmap_pt_host_adapter *pth_na = + (struct netmap_pt_host_adapter *)na; + struct netmap_adapter *parent = pth_na->parent; + enum txrx t; + int error; + + DBG(D("%s", pth_na->up.name)); + + /* create the parent krings */ + error = parent->nm_krings_create(parent); + if (error) { + return error; + } + + /* A ptnetmap host adapter points the very same krings + * as its parent adapter. These pointer are used in the + * TX/RX worker functions. */ + na->tx_rings = parent->tx_rings; + na->rx_rings = parent->rx_rings; + na->tailroom = parent->tailroom; + + for_rx_tx(t) { + struct netmap_kring *kring; + + /* Parent's kring_create function will initialize + * its own na->si. We have to init our na->si here. */ + nm_os_selinfo_init(&na->si[t]); + + /* Force the mem_rings_create() method to create the + * host rings independently on what the regif asked for: + * these rings are needed by the guest ptnetmap adapter + * anyway. 
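
The NETMAP_PT_HOST_CREATE arm of ptnetmap_ctl() above composes creation and kthread start with rollback: a failed start tears down the state that create just built. The bare error-handling shape, with stub functions simulating a start failure:

#include <stdio.h>

static int
create_state(void)
{
        puts("create");
        return 0;
}

static int
start_threads(void)
{
        puts("start");
        return -1;      // simulate a start failure
}

static void
delete_state(void)
{
        puts("rollback");
}

int
main(void)
{
        int error = create_state();

        if (!error) {
                error = start_threads();
                if (error)
                        delete_state(); // undo create on partial failure
        }
        return error ? 1 : 0;
}
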
*/ + kring = &NMR(na, t)[nma_get_nrings(na, t)]; + kring->nr_kflags |= NKR_NEEDRING; + } + + return 0; +} + +/* nm_krings_delete callback for ptnetmap */ +static void +nm_pt_host_krings_delete(struct netmap_adapter *na) +{ + struct netmap_pt_host_adapter *pth_na = + (struct netmap_pt_host_adapter *)na; + struct netmap_adapter *parent = pth_na->parent; + + DBG(D("%s", pth_na->up.name)); + + parent->nm_krings_delete(parent); + + na->tx_rings = na->rx_rings = na->tailroom = NULL; +} + +/* nm_register callback */ +static int +nm_pt_host_register(struct netmap_adapter *na, int onoff) +{ + struct netmap_pt_host_adapter *pth_na = + (struct netmap_pt_host_adapter *)na; + struct netmap_adapter *parent = pth_na->parent; + int error; + DBG(D("%s onoff %d", pth_na->up.name, onoff)); + + if (onoff) { + /* netmap_do_regif has been called on the ptnetmap na. + * We need to pass the information about the + * memory allocator to the parent before + * putting it in netmap mode + */ + parent->na_lut = na->na_lut; + } + + /* forward the request to the parent */ + error = parent->nm_register(parent, onoff); + if (error) + return error; + + + if (onoff) { + na->na_flags |= NAF_NETMAP_ON | NAF_PTNETMAP_HOST; + } else { + ptnetmap_delete(pth_na); + na->na_flags &= ~(NAF_NETMAP_ON | NAF_PTNETMAP_HOST); + } + + return 0; +} + +/* nm_dtor callback */ +static void +nm_pt_host_dtor(struct netmap_adapter *na) +{ + struct netmap_pt_host_adapter *pth_na = + (struct netmap_pt_host_adapter *)na; + struct netmap_adapter *parent = pth_na->parent; + + DBG(D("%s", pth_na->up.name)); + + /* The equivalent of NETMAP_PT_HOST_DELETE if the hypervisor + * didn't do it. */ + ptnetmap_stop_kthreads(pth_na); + ptnetmap_delete(pth_na); + + parent->na_flags &= ~NAF_BUSY; + + netmap_adapter_put(pth_na->parent); + pth_na->parent = NULL; +} + +/* check if nmr is a request for a ptnetmap adapter that we can satisfy */ +int +netmap_get_pt_host_na(struct nmreq *nmr, struct netmap_adapter **na, int create) +{ + struct nmreq parent_nmr; + struct netmap_adapter *parent; /* target adapter */ + struct netmap_pt_host_adapter *pth_na; + struct ifnet *ifp = NULL; + int error; + + /* Check if it is a request for a ptnetmap adapter */ + if ((nmr->nr_flags & (NR_PTNETMAP_HOST)) == 0) { + return 0; + } + + D("Requesting a ptnetmap host adapter"); + + pth_na = malloc(sizeof(*pth_na), M_DEVBUF, M_NOWAIT | M_ZERO); + if (pth_na == NULL) { + D("ERROR malloc"); + return ENOMEM; + } + + /* first, try to find the adapter that we want to passthrough + * We use the same nmr, after we have turned off the ptnetmap flag. + * In this way we can potentially passthrough everything netmap understands. + */ + memcpy(&parent_nmr, nmr, sizeof(parent_nmr)); + parent_nmr.nr_flags &= ~(NR_PTNETMAP_HOST); + error = netmap_get_na(&parent_nmr, &parent, &ifp, create); + if (error) { + D("parent lookup failed: %d", error); + goto put_out_noputparent; + } + DBG(D("found parent: %s", parent->name)); + + /* make sure the interface is not already in use */ + if (NETMAP_OWNED_BY_ANY(parent)) { + D("NIC %s busy, cannot ptnetmap", parent->name); + error = EBUSY; + goto put_out; + } + + pth_na->parent = parent; + + /* Follow netmap_attach()-like operations for the host + * ptnetmap adapter. 
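
nm_pt_host_register() above is mostly a forwarder, but the ordering matters: the memory-allocator lookup table (na_lut) must reach the parent before the parent is switched into netmap mode. A reduced model of that on/off pattern; the types here are invented stand-ins, not the netmap structures:

#include <stdio.h>

#define SK_NETMAP_ON    0x1
#define SK_PT_HOST      0x2

struct sk_adapter {
        unsigned flags;
        const void *lut;                // stands in for na_lut
        int (*reg)(struct sk_adapter *, int);
};

static int
pt_register(struct sk_adapter *pt, struct sk_adapter *parent, int onoff)
{
        int error;

        if (onoff)
                parent->lut = pt->lut;  // allocator info flows down first

        error = parent->reg(parent, onoff);     // forward the request
        if (error)
                return error;

        if (onoff)
                pt->flags |= SK_NETMAP_ON | SK_PT_HOST;
        else
                pt->flags &= ~(SK_NETMAP_ON | SK_PT_HOST);
        return 0;
}

static int
stub_reg(struct sk_adapter *a, int onoff)
{
        (void)a;
        (void)onoff;
        return 0;
}

int
main(void)
{
        struct sk_adapter parent = { .reg = stub_reg };
        struct sk_adapter pt = { .lut = "lut", .reg = stub_reg };

        pt_register(&pt, &parent, 1);
        printf("flags %#x, parent lut %s\n", pt.flags,
               (const char *)parent.lut);
        return 0;
}
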
 */
+
+	//XXX pth_na->up.na_flags = parent->na_flags;
+	pth_na->up.num_rx_rings = parent->num_rx_rings;
+	pth_na->up.num_tx_rings = parent->num_tx_rings;
+	pth_na->up.num_tx_desc = parent->num_tx_desc;
+	pth_na->up.num_rx_desc = parent->num_rx_desc;
+
+	pth_na->up.nm_dtor = nm_pt_host_dtor;
+	pth_na->up.nm_register = nm_pt_host_register;
+
+	/* Reuse parent's adapter txsync and rxsync methods. */
+	pth_na->up.nm_txsync = parent->nm_txsync;
+	pth_na->up.nm_rxsync = parent->nm_rxsync;
+
+	pth_na->up.nm_krings_create = nm_pt_host_krings_create;
+	pth_na->up.nm_krings_delete = nm_pt_host_krings_delete;
+	pth_na->up.nm_config = nm_pt_host_config;
+
+	/* Set the notify method only for convenience: it will never
+	 * be used, since - differently from the default krings_create -
+	 * our ptnetmap krings_create callback inits kring->nm_notify
+	 * directly. */
+	pth_na->up.nm_notify = nm_unused_notify;
+
+	pth_na->up.nm_mem = parent->nm_mem;
+
+	pth_na->up.na_flags |= NAF_HOST_RINGS;
+
+	error = netmap_attach_common(&pth_na->up);
+	if (error) {
+		D("ERROR netmap_attach_common()");
+		goto put_out;
+	}
+
+	*na = &pth_na->up;
+	netmap_adapter_get(*na);
+
+	/* Set the parent busy, because it is attached for ptnetmap. */
+	parent->na_flags |= NAF_BUSY;
+
+	strncpy(pth_na->up.name, parent->name, sizeof(pth_na->up.name));
+	strcat(pth_na->up.name, "-PTN");
+
+	DBG(D("%s ptnetmap request DONE", pth_na->up.name));
+
+	/* drop the reference to the ifp, if any */
+	if (ifp)
+		if_rele(ifp);
+
+	return 0;
+
+put_out:
+	netmap_adapter_put(parent);
+	if (ifp)
+		if_rele(ifp);
+put_out_noputparent:
+	free(pth_na, M_DEVBUF);
+	return error;
+}
+#endif /* WITH_PTNETMAP_HOST */
+
+#ifdef WITH_PTNETMAP_GUEST
+/*
+ * GUEST ptnetmap generic txsync()/rxsync(), used by the e1000/virtio-net
+ * device drivers. The returned boolean tells the driver whether it must
+ * send a notification to the host (driver-specific).
+ */
+
+/*
+ * Reconcile host and guest views of the transmit ring.
+ *
+ * The guest user wants to transmit packets up to the one before ring->head,
+ * and the guest kernel knows that tx_ring->hwcur is the first packet unsent
+ * by the host kernel.
+ *
+ * We push out as many packets as possible, and possibly
+ * reclaim buffers from previously completed transmissions.
+ *
+ * Notifications from the host are enabled only if the guest user would
+ * block (no space in the ring).
+ */
+bool
+netmap_pt_guest_txsync(struct ptnet_ring *ptring, struct netmap_kring *kring,
+		       int flags)
+{
+	bool notify = false;
+
+	/* Disable notifications */
+	ptring->guest_need_kick = 0;
+
+	/*
+	 * First part: tell the host (updating the CSB) to process the new
+	 * packets.
+	 */
+	kring->nr_hwcur = ptring->hwcur;
+	ptnetmap_guest_write_kring_csb(ptring, kring->rcur, kring->rhead);
+
+	/* Ask for a kick from the guest to the host if needed. */
+	if ((kring->rhead != kring->nr_hwcur &&
+	     NM_ACCESS_ONCE(ptring->host_need_kick)) ||
+	    (flags & NAF_FORCE_RECLAIM)) {
+		ptring->sync_flags = flags;
+		notify = true;
+	}
+
+	/*
+	 * Second part: reclaim buffers for completed transmissions.
+	 */
+	if (nm_kr_txempty(kring) || (flags & NAF_FORCE_RECLAIM)) {
+		ptnetmap_guest_read_kring_csb(ptring, kring);
+	}
+
+	/*
+	 * No more room in the ring for new transmissions. The user thread will
+	 * go to sleep and we need to be notified by the host when more free
+	 * space is available.
+	 */
+	if (nm_kr_txempty(kring)) {
+		/* Reenable notifications.
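
One caveat in the name construction above: strncpy() plus strcat() assumes the parent name always leaves room for the "-PTN" suffix, since strncpy() does not NUL-terminate a full buffer. A bounded alternative, assuming the same fixed-size name field, would be:

#include <stdio.h>

// Bounded construction of "<parent>-PTN": snprintf() truncates
// instead of overflowing when the parent name nearly fills the
// buffer.
static void
pt_name(char *dst, size_t len, const char *parent)
{
        snprintf(dst, len, "%s-PTN", parent);
}

int
main(void)
{
        char name[16];  // assumed size of the adapter name field

        pt_name(name, sizeof(name), "vale0:1");
        puts(name);     // "vale0:1-PTN"
        return 0;
}
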
*/ + ptring->guest_need_kick = 1; + /* Double check */ + ptnetmap_guest_read_kring_csb(ptring, kring); + /* If there is new free space, disable notifications */ + if (unlikely(!nm_kr_txempty(kring))) { + ptring->guest_need_kick = 0; + } + } + + ND(1, "TX - CSB: head:%u cur:%u hwtail:%u - KRING: head:%u cur:%u tail: %u", + ptring->head, ptring->cur, ptring->hwtail, + kring->rhead, kring->rcur, kring->nr_hwtail); + + return notify; +} + +/* + * Reconcile host and guest view of the receive ring. + * + * Update hwcur/hwtail from host (reading from CSB). + * + * If guest user has released buffers up to the one before ring->head, we + * also give them to the host. + * + * Notifications from the host are enabled only if the user guest would + * block (no more completed slots in the ring). + */ +bool +netmap_pt_guest_rxsync(struct ptnet_ring *ptring, struct netmap_kring *kring, + int flags) +{ + bool notify = false; + + /* Disable notifications */ + ptring->guest_need_kick = 0; + + /* + * First part: import newly received packets, by updating the kring + * hwtail to the hwtail known from the host (read from the CSB). + * This also updates the kring hwcur. + */ + ptnetmap_guest_read_kring_csb(ptring, kring); + kring->nr_kflags &= ~NKR_PENDINTR; + + /* + * Second part: tell the host about the slots that guest user has + * released, by updating cur and head in the CSB. + */ + if (kring->rhead != kring->nr_hwcur) { + ptnetmap_guest_write_kring_csb(ptring, kring->rcur, + kring->rhead); + /* Ask for a kick from the guest to the host if needed. */ + if (NM_ACCESS_ONCE(ptring->host_need_kick)) { + ptring->sync_flags = flags; + notify = true; + } + } + + /* + * No more completed RX slots. The user thread will go to sleep and + * we need to be notified by the host when more RX slots have been + * completed. + */ + if (nm_kr_rxempty(kring)) { + /* Reenable notifications. */ + ptring->guest_need_kick = 1; + /* Double check */ + ptnetmap_guest_read_kring_csb(ptring, kring); + /* If there are new slots, disable notifications. */ + if (!nm_kr_rxempty(kring)) { + ptring->guest_need_kick = 0; + } + } + + ND(1, "RX - CSB: head:%u cur:%u hwtail:%u - KRING: head:%u cur:%u", + ptring->head, ptring->cur, ptring->hwtail, + kring->rhead, kring->rcur); + + return notify; +} + +/* + * Callbacks for ptnet drivers: nm_krings_create, nm_krings_delete, nm_dtor. + */ +int +ptnet_nm_krings_create(struct netmap_adapter *na) +{ + struct netmap_pt_guest_adapter *ptna = + (struct netmap_pt_guest_adapter *)na; /* Upcast. */ + struct netmap_adapter *na_nm = &ptna->hwup.up; + struct netmap_adapter *na_dr = &ptna->dr.up; + int ret; + + if (ptna->backend_regifs) { + return 0; + } + + /* Create krings on the public netmap adapter. */ + ret = netmap_hw_krings_create(na_nm); + if (ret) { + return ret; + } + + /* Copy krings into the netmap adapter private to the driver. */ + na_dr->tx_rings = na_nm->tx_rings; + na_dr->rx_rings = na_nm->rx_rings; + + return 0; +} + +void +ptnet_nm_krings_delete(struct netmap_adapter *na) +{ + struct netmap_pt_guest_adapter *ptna = + (struct netmap_pt_guest_adapter *)na; /* Upcast. 
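
ptnet_nm_krings_create() above aliases, rather than copies, the kring arrays into the driver-private adapter, so both adapters observe a single ring state. The aliasing in miniature, with invented types:

#include <stdio.h>

struct sk_kring { unsigned head, tail; };

struct sk_na {
        struct sk_kring *tx_rings;
        struct sk_kring *rx_rings;
};

// Alias, not copy: both adapters end up reading and writing the
// very same kring objects.
static void
share_krings(struct sk_na *na_dr, struct sk_na *na_nm)
{
        na_dr->tx_rings = na_nm->tx_rings;
        na_dr->rx_rings = na_nm->rx_rings;
}

int
main(void)
{
        struct sk_kring rings[2] = { { 0, 0 }, { 0, 0 } };
        struct sk_na na_nm = { &rings[0], &rings[1] };
        struct sk_na na_dr = { NULL, NULL };

        share_krings(&na_dr, &na_nm);
        na_nm.tx_rings[0].head = 7;
        printf("%u\n", na_dr.tx_rings[0].head); // 7: shared state
        return 0;
}
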
*/ + struct netmap_adapter *na_nm = &ptna->hwup.up; + struct netmap_adapter *na_dr = &ptna->dr.up; + + if (ptna->backend_regifs) { + return; + } + + na_dr->tx_rings = NULL; + na_dr->rx_rings = NULL; + + netmap_hw_krings_delete(na_nm); +} + +void +ptnet_nm_dtor(struct netmap_adapter *na) +{ + struct netmap_pt_guest_adapter *ptna = + (struct netmap_pt_guest_adapter *)na; + + netmap_mem_put(ptna->dr.up.nm_mem); + memset(&ptna->dr, 0, sizeof(ptna->dr)); + netmap_mem_pt_guest_ifp_del(na->nm_mem, na->ifp); +} + +#endif /* WITH_PTNETMAP_GUEST */