netmap: align if_ptnet guest driver to the upstream code (commit 0e15788)

The change upgrades the driver to use the split Communication Status
Block (CSB) format. This way, the variables written by the guest
and read by the host are allocated on a different cacheline from
the variables written by the host and read by the guest, which is
needed to avoid cache thrashing (false sharing).
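For context, a condensed before/after sketch of the CSB layout, with field sets taken from the header diff in the last file section below (the old combined structure was struct ptnet_ring):

/* Old: guest-written and host-written fields shared a cacheline. */
struct ptnet_ring {
	uint32_t head, cur, guest_need_kick, sync_flags; /* guest writes, host reads */
	uint32_t hwcur, hwtail, host_need_kick;          /* host writes, guest reads */
	char pad[4];
};

/* New: each direction fills its own 64-byte cacheline. */
struct ptnet_csb_gh {                            /* guest --> host */
	uint32_t head, cur, guest_need_kick, sync_flags;
	char pad[48];
};
struct ptnet_csb_hg {                            /* host --> guest */
	uint32_t hwcur, hwtail, host_need_kick;
	char pad[4 + 48];
};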

Approved by:	hrs (mentor)
Author:	Vincenzo Maffione	2018-04-04 21:31:12 +00:00
parent cdfebb9cf5
commit 46023447b6
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=332047
4 changed files with 225 additions and 197 deletions

File 1 of 4

@ -88,10 +88,6 @@
#include <dev/netmap/netmap_mem2.h>
#include <dev/virtio/network/virtio_net.h>
#ifndef PTNET_CSB_ALLOC
#error "No support for on-device CSB"
#endif
#ifndef INET
#error "INET not defined, cannot support offloadings"
#endif
@ -132,7 +128,8 @@ struct ptnet_queue {
struct resource *irq;
void *cookie;
int kring_id;
struct ptnet_ring *ptring;
struct ptnet_csb_gh *ptgh;
struct ptnet_csb_hg *pthg;
unsigned int kick;
struct mtx lock;
struct buf_ring *bufring; /* for TX queues */
@ -169,7 +166,8 @@ struct ptnet_softc {
unsigned int num_tx_rings;
struct ptnet_queue *queues;
struct ptnet_queue *rxqueues;
struct ptnet_csb *csb;
struct ptnet_csb_gh *csb_gh;
struct ptnet_csb_hg *csb_hg;
unsigned int min_tx_space;
@ -323,33 +321,48 @@ ptnet_attach(device_t dev)
ptfeatures = bus_read_4(sc->iomem, PTNET_IO_PTFEAT); /* acked */
sc->ptfeatures = ptfeatures;
/* Allocate CSB and carry out CSB allocation protocol (CSBBAH first,
* then CSBBAL). */
sc->csb = malloc(sizeof(struct ptnet_csb), M_DEVBUF,
M_NOWAIT | M_ZERO);
if (sc->csb == NULL) {
num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS);
num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS);
sc->num_rings = num_tx_rings + num_rx_rings;
sc->num_tx_rings = num_tx_rings;
if (sc->num_rings * sizeof(struct ptnet_csb_gh) > PAGE_SIZE) {
device_printf(dev, "CSB cannot handle that many rings (%u)\n",
sc->num_rings);
err = ENOMEM;
goto err_path;
}
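/* Note (clarifying comment, not in the commit): with 4 KiB pages and
 * 64-byte CSB entries, this check caps the device at
 * PAGE_SIZE / sizeof(struct ptnet_csb_gh) = 64 rings in total. */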
/* Allocate CSB and carry out CSB allocation protocol. */
sc->csb_gh = contigmalloc(2*PAGE_SIZE, M_DEVBUF, M_NOWAIT | M_ZERO,
(size_t)0, -1UL, PAGE_SIZE, 0);
if (sc->csb_gh == NULL) {
device_printf(dev, "Failed to allocate CSB\n");
err = ENOMEM;
goto err_path;
}
sc->csb_hg = (struct ptnet_csb_hg *)(((char *)sc->csb_gh) + PAGE_SIZE);
{
/*
* We use uint64_t rather than vm_paddr_t since we
* need 64 bit addresses even on 32 bit platforms.
*/
uint64_t paddr = vtophys(sc->csb);
uint64_t paddr = vtophys(sc->csb_gh);
bus_write_4(sc->iomem, PTNET_IO_CSBBAH,
(paddr >> 32) & 0xffffffff);
bus_write_4(sc->iomem, PTNET_IO_CSBBAL, paddr & 0xffffffff);
/* CSB allocation protocol: write to BAH first, then
* to BAL (for both GH and HG sections). */
bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH,
(paddr >> 32) & 0xffffffff);
bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL,
paddr & 0xffffffff);
paddr = vtophys(sc->csb_hg);
bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH,
(paddr >> 32) & 0xffffffff);
bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL,
paddr & 0xffffffff);
}
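The BAH-before-BAL ordering matters because the hypervisor typically latches the complete 64-bit address only when the low word arrives; a minimal sketch of the host side under that assumption (handler and variable names hypothetical):

/* Hypothetical host-side register handler: the BAH write merely
 * stages the high 32 bits; the mapping is committed on the BAL
 * write, which is why the guest must write BAH first. */
static uint64_t csb_gh_hi;    /* staged high word */
static uint64_t csb_gh_paddr; /* committed guest-physical address */

static void
ptnet_reg_write(uint32_t reg, uint32_t val)
{
	switch (reg) {
	case PTNET_IO_CSB_GH_BAH:
		csb_gh_hi = (uint64_t)val << 32;
		break;
	case PTNET_IO_CSB_GH_BAL:
		csb_gh_paddr = csb_gh_hi | val; /* commit the mapping here */
		break;
	}
}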
num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS);
num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS);
sc->num_rings = num_tx_rings + num_rx_rings;
sc->num_tx_rings = num_tx_rings;
/* Allocate and initialize per-queue data structures. */
sc->queues = malloc(sizeof(struct ptnet_queue) * sc->num_rings,
M_DEVBUF, M_NOWAIT | M_ZERO);
@ -365,7 +378,8 @@ ptnet_attach(device_t dev)
pq->sc = sc;
pq->kring_id = i;
pq->kick = PTNET_IO_KICK_BASE + 4 * i;
pq->ptring = sc->csb->rings + i;
pq->ptgh = sc->csb_gh + i;
pq->pthg = sc->csb_hg + i;
snprintf(pq->lock_name, sizeof(pq->lock_name), "%s-%d",
device_get_nameunit(dev), i);
mtx_init(&pq->lock, pq->lock_name, NULL, MTX_DEF);
@ -467,7 +481,7 @@ ptnet_attach(device_t dev)
na_arg.nm_txsync = ptnet_nm_txsync;
na_arg.nm_rxsync = ptnet_nm_rxsync;
netmap_pt_guest_attach(&na_arg, sc->csb, nifp_offset,
netmap_pt_guest_attach(&na_arg, nifp_offset,
bus_read_4(sc->iomem, PTNET_IO_HOSTMEMID));
/* Now a netmap adapter for this ifp has been allocated, and it
@ -526,11 +540,14 @@ ptnet_detach(device_t dev)
ptnet_irqs_fini(sc);
if (sc->csb) {
bus_write_4(sc->iomem, PTNET_IO_CSBBAH, 0);
bus_write_4(sc->iomem, PTNET_IO_CSBBAL, 0);
free(sc->csb, M_DEVBUF);
sc->csb = NULL;
if (sc->csb_gh) {
bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH, 0);
bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL, 0);
bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH, 0);
bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL, 0);
contigfree(sc->csb_gh, 2*PAGE_SIZE, M_DEVBUF);
sc->csb_gh = NULL;
sc->csb_hg = NULL;
}
if (sc->queues) {
@ -777,7 +794,7 @@ ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data)
/* Make sure the worker sees the
* IFF_DRV_RUNNING down. */
PTNET_Q_LOCK(pq);
pq->ptring->guest_need_kick = 0;
pq->ptgh->guest_need_kick = 0;
PTNET_Q_UNLOCK(pq);
/* Wait for rescheduling to finish. */
if (pq->taskq) {
@ -791,7 +808,7 @@ ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data)
for (i = 0; i < sc->num_rings; i++) {
pq = sc->queues + i;
PTNET_Q_LOCK(pq);
pq->ptring->guest_need_kick = 1;
pq->ptgh->guest_need_kick = 1;
PTNET_Q_UNLOCK(pq);
}
}
@ -1109,7 +1126,8 @@ ptnet_sync_from_csb(struct ptnet_softc *sc, struct netmap_adapter *na)
/* Sync krings from the host, reading from
* CSB. */
for (i = 0; i < sc->num_rings; i++) {
struct ptnet_ring *ptring = sc->queues[i].ptring;
struct ptnet_csb_gh *ptgh = sc->queues[i].ptgh;
struct ptnet_csb_hg *pthg = sc->queues[i].pthg;
struct netmap_kring *kring;
if (i < na->num_tx_rings) {
@ -1117,15 +1135,15 @@ ptnet_sync_from_csb(struct ptnet_softc *sc, struct netmap_adapter *na)
} else {
kring = na->rx_rings + i - na->num_tx_rings;
}
kring->rhead = kring->ring->head = ptring->head;
kring->rcur = kring->ring->cur = ptring->cur;
kring->nr_hwcur = ptring->hwcur;
kring->rhead = kring->ring->head = ptgh->head;
kring->rcur = kring->ring->cur = ptgh->cur;
kring->nr_hwcur = pthg->hwcur;
kring->nr_hwtail = kring->rtail =
kring->ring->tail = ptring->hwtail;
kring->ring->tail = pthg->hwtail;
ND("%d,%d: csb {hc %u h %u c %u ht %u}", t, i,
ptring->hwcur, ptring->head, ptring->cur,
ptring->hwtail);
pthg->hwcur, ptgh->head, ptgh->cur,
pthg->hwtail);
ND("%d,%d: kring {hc %u rh %u rc %u h %u c %u ht %u rt %u t %u}",
t, i, kring->nr_hwcur, kring->rhead, kring->rcur,
kring->ring->head, kring->ring->cur, kring->nr_hwtail,
@ -1169,7 +1187,7 @@ ptnet_nm_register(struct netmap_adapter *na, int onoff)
D("Exit netmap mode, re-enable interrupts");
for (i = 0; i < sc->num_rings; i++) {
pq = sc->queues + i;
pq->ptring->guest_need_kick = 1;
pq->ptgh->guest_need_kick = 1;
}
}
@ -1178,8 +1196,8 @@ ptnet_nm_register(struct netmap_adapter *na, int onoff)
/* Initialize notification enable fields in the CSB. */
for (i = 0; i < sc->num_rings; i++) {
pq = sc->queues + i;
pq->ptring->host_need_kick = 1;
pq->ptring->guest_need_kick =
pq->pthg->host_need_kick = 1;
pq->ptgh->guest_need_kick =
(!(ifp->if_capenable & IFCAP_POLLING)
&& i >= sc->num_tx_rings);
}
@ -1257,7 +1275,7 @@ ptnet_nm_txsync(struct netmap_kring *kring, int flags)
struct ptnet_queue *pq = sc->queues + kring->ring_id;
bool notify;
notify = netmap_pt_guest_txsync(pq->ptring, kring, flags);
notify = netmap_pt_guest_txsync(pq->ptgh, pq->pthg, kring, flags);
if (notify) {
ptnet_kick(pq);
}
@ -1272,7 +1290,7 @@ ptnet_nm_rxsync(struct netmap_kring *kring, int flags)
struct ptnet_queue *pq = sc->rxqueues + kring->ring_id;
bool notify;
notify = netmap_pt_guest_rxsync(pq->ptring, kring, flags);
notify = netmap_pt_guest_rxsync(pq->ptgh, pq->pthg, kring, flags);
if (notify) {
ptnet_kick(pq);
}
@ -1643,12 +1661,12 @@ ptnet_rx_csum(struct mbuf *m, struct virtio_net_hdr *hdr)
/* End of offloading-related functions to be shared with vtnet. */
static inline void
ptnet_sync_tail(struct ptnet_ring *ptring, struct netmap_kring *kring)
ptnet_sync_tail(struct ptnet_csb_hg *pthg, struct netmap_kring *kring)
{
struct netmap_ring *ring = kring->ring;
/* Update hwcur and hwtail as known by the host. */
ptnetmap_guest_read_kring_csb(ptring, kring);
ptnetmap_guest_read_kring_csb(pthg, kring);
/* nm_sync_finalize */
ring->tail = kring->rtail = kring->nr_hwtail;
@ -1659,7 +1677,8 @@ ptnet_ring_update(struct ptnet_queue *pq, struct netmap_kring *kring,
unsigned int head, unsigned int sync_flags)
{
struct netmap_ring *ring = kring->ring;
struct ptnet_ring *ptring = pq->ptring;
struct ptnet_csb_gh *ptgh = pq->ptgh;
struct ptnet_csb_hg *pthg = pq->pthg;
/* Some packets have been pushed to the netmap ring. We have
* to tell the host to process the new packets, updating cur
@ -1669,11 +1688,11 @@ ptnet_ring_update(struct ptnet_queue *pq, struct netmap_kring *kring,
/* Mimic nm_txsync_prologue/nm_rxsync_prologue. */
kring->rcur = kring->rhead = head;
ptnetmap_guest_write_kring_csb(ptring, kring->rcur, kring->rhead);
ptnetmap_guest_write_kring_csb(ptgh, kring->rcur, kring->rhead);
/* Kick the host if needed. */
if (NM_ACCESS_ONCE(ptring->host_need_kick)) {
ptring->sync_flags = sync_flags;
if (NM_ACCESS_ONCE(pthg->host_need_kick)) {
ptgh->sync_flags = sync_flags;
ptnet_kick(pq);
}
}
@ -1693,7 +1712,8 @@ ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget,
struct netmap_adapter *na = &sc->ptna->dr.up;
if_t ifp = sc->ifp;
unsigned int batch_count = 0;
struct ptnet_ring *ptring;
struct ptnet_csb_gh *ptgh;
struct ptnet_csb_hg *pthg;
struct netmap_kring *kring;
struct netmap_ring *ring;
struct netmap_slot *slot;
@ -1722,7 +1742,8 @@ ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget,
return ENETDOWN;
}
ptring = pq->ptring;
ptgh = pq->ptgh;
pthg = pq->pthg;
kring = na->tx_rings + pq->kring_id;
ring = kring->ring;
lim = kring->nkr_num_slots - 1;
@ -1734,17 +1755,17 @@ ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget,
/* We ran out of slots; let's see if the host has
* freed up some, by reading hwcur and hwtail from
* the CSB. */
ptnet_sync_tail(ptring, kring);
ptnet_sync_tail(pthg, kring);
if (PTNET_TX_NOSPACE(head, kring, minspace)) {
/* Still no slots available. Reactivate the
* interrupts so that we can be notified
* when some free slots are made available by
* the host. */
ptring->guest_need_kick = 1;
ptgh->guest_need_kick = 1;
/* Double-check. */
ptnet_sync_tail(ptring, kring);
ptnet_sync_tail(pthg, kring);
if (likely(PTNET_TX_NOSPACE(head, kring,
minspace))) {
break;
@ -1753,7 +1774,7 @@ ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget,
RD(1, "Found more slots by doublecheck");
/* More slots were freed before reactivating
* the interrupts. */
ptring->guest_need_kick = 0;
ptgh->guest_need_kick = 0;
}
}
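The enable/double-check/disable sequence above is the driver's standard defence against a lost wakeup; distilled into a minimal sketch (have_tx_space() is a hypothetical stand-in for ptnet_sync_tail() plus the PTNET_TX_NOSPACE check):

/* Sketch of the notification race-avoidance pattern. */
static bool
wait_for_tx_space(struct ptnet_queue *pq)
{
	while (!have_tx_space(pq)) {
		pq->ptgh->guest_need_kick = 1; /* 1. re-enable interrupts */
		if (!have_tx_space(pq))        /* 2. re-check the race window */
			return (false);        /*    truly full: wait for IRQ */
		pq->ptgh->guest_need_kick = 0; /* 3. host progressed meanwhile */
	}
	return (true);                         /* space available, IRQs off */
}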
@ -1983,15 +2004,16 @@ ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched)
{
struct ptnet_softc *sc = pq->sc;
bool have_vnet_hdr = sc->vnet_hdr_len;
struct ptnet_ring *ptring = pq->ptring;
struct ptnet_csb_gh *ptgh = pq->ptgh;
struct ptnet_csb_hg *pthg = pq->pthg;
struct netmap_adapter *na = &sc->ptna->dr.up;
struct netmap_kring *kring = na->rx_rings + pq->kring_id;
struct netmap_ring *ring = kring->ring;
unsigned int const lim = kring->nkr_num_slots - 1;
unsigned int head = ring->head;
unsigned int batch_count = 0;
if_t ifp = sc->ifp;
unsigned int count = 0;
uint32_t head;
PTNET_Q_LOCK(pq);
@ -2001,33 +2023,35 @@ ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched)
kring->nr_kflags &= ~NKR_PENDINTR;
head = ring->head;
while (count < budget) {
unsigned int prev_head = head;
uint32_t prev_head = head;
struct mbuf *mhead, *mtail;
struct virtio_net_hdr *vh;
struct netmap_slot *slot;
unsigned int nmbuf_len;
uint8_t *nmbuf;
int deliver = 1; /* whether to deliver the mbuf to the network stack */
host_sync:
if (head == ring->tail) {
/* We ran out of slots; let's see if the host has
* added some, by reading hwcur and hwtail from
* the CSB. */
ptnet_sync_tail(ptring, kring);
ptnet_sync_tail(pthg, kring);
if (head == ring->tail) {
/* Still no slots available. Reactivate
* interrupts as they were disabled by the
* host thread right before issuing the
* last interrupt. */
ptring->guest_need_kick = 1;
ptgh->guest_need_kick = 1;
/* Double-check. */
ptnet_sync_tail(ptring, kring);
ptnet_sync_tail(pthg, kring);
if (likely(head == ring->tail)) {
break;
}
ptring->guest_need_kick = 0;
ptgh->guest_need_kick = 0;
}
}
@ -2046,6 +2070,7 @@ ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched)
RD(1, "Fragmented vnet-hdr: dropping");
head = ptnet_rx_discard(kring, head);
pq->stats.iqdrops ++;
deliver = 0;
goto skip;
}
ND(1, "%s: vnet hdr: flags %x csum_start %u "
@ -2152,31 +2177,40 @@ ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched)
m_freem(mhead);
RD(1, "Csum offload error: dropping");
pq->stats.iqdrops ++;
goto skip;
deliver = 0;
}
}
pq->stats.packets ++;
pq->stats.bytes += mhead->m_pkthdr.len;
PTNET_Q_UNLOCK(pq);
(*ifp->if_input)(ifp, mhead);
PTNET_Q_LOCK(pq);
if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
/* The interface has gone down while we didn't
* have the lock. Stop any processing and exit. */
goto unlock;
}
skip:
count ++;
if (++batch_count == PTNET_RX_BATCH) {
/* Some packets have been pushed to the network stack.
* We need to update the CSB to tell the host about the new
* ring->cur and ring->head (RX buffer refill). */
if (++batch_count >= PTNET_RX_BATCH) {
/* Some packets have been (or will be) pushed to the network
* stack. We need to update the CSB to tell the host about
* the new ring->cur and ring->head (RX buffer refill). */
ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
batch_count = 0;
}
if (likely(deliver)) {
pq->stats.packets ++;
pq->stats.bytes += mhead->m_pkthdr.len;
PTNET_Q_UNLOCK(pq);
(*ifp->if_input)(ifp, mhead);
PTNET_Q_LOCK(pq);
/* The ring->head index (and related indices) are
* updated under pq lock by ptnet_ring_update().
* Since we dropped the lock to call if_input(), we
* must reload ring->head and restart processing the
* ring from there. */
head = ring->head;
if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
/* The interface has gone down while we didn't
* have the lock. Stop any processing and exit. */
goto unlock;
}
}
}
escape:
if (batch_count) {

File 2 of 4

@ -2126,8 +2126,6 @@ struct netmap_pt_guest_adapter {
/* The netmap adapter to be used by the driver. */
struct netmap_hw_adapter dr;
void *csb;
/* Reference counter to track users of backend netmap port: the
* network stack and netmap clients.
* Used to decide when we need (de)allocate krings/rings and
@ -2136,13 +2134,18 @@ struct netmap_pt_guest_adapter {
};
int netmap_pt_guest_attach(struct netmap_adapter *na, void *csb,
unsigned int nifp_offset, unsigned int memid);
struct ptnet_ring;
bool netmap_pt_guest_txsync(struct ptnet_ring *ptring, struct netmap_kring *kring,
int flags);
bool netmap_pt_guest_rxsync(struct ptnet_ring *ptring, struct netmap_kring *kring,
int flags);
int netmap_pt_guest_attach(struct netmap_adapter *na,
unsigned int nifp_offset,
unsigned int memid);
struct ptnet_csb_gh;
struct ptnet_csb_hg;
bool netmap_pt_guest_txsync(struct ptnet_csb_gh *ptgh,
struct ptnet_csb_hg *pthg,
struct netmap_kring *kring,
int flags);
bool netmap_pt_guest_rxsync(struct ptnet_csb_gh *ptgh,
struct ptnet_csb_hg *pthg,
struct netmap_kring *kring, int flags);
int ptnet_nm_krings_create(struct netmap_adapter *na);
void ptnet_nm_krings_delete(struct netmap_adapter *na);
void ptnet_nm_dtor(struct netmap_adapter *na);

File 3 of 4

@ -172,8 +172,9 @@ struct ptnetmap_state {
/* Kthreads. */
struct nm_kctx **kctxs;
/* Shared memory with the guest (TX/RX) */
struct ptnet_ring __user *ptrings;
/* Shared memory with the guest (TX/RX) */
struct ptnet_csb_gh __user *csb_gh;
struct ptnet_csb_hg __user *csb_hg;
bool stopped;
@ -200,29 +201,22 @@ ptnetmap_kring_dump(const char *title, const struct netmap_kring *kring)
/* Enable or disable guest --> host kicks. */
static inline void
ptring_kick_enable(struct ptnet_ring __user *ptring, uint32_t val)
pthg_kick_enable(struct ptnet_csb_hg __user *pthg, uint32_t val)
{
CSB_WRITE(ptring, host_need_kick, val);
CSB_WRITE(pthg, host_need_kick, val);
}
/* Are guest interrupts enabled or disabled? */
static inline uint32_t
ptring_intr_enabled(struct ptnet_ring __user *ptring)
ptgh_intr_enabled(struct ptnet_csb_gh __user *ptgh)
{
uint32_t v;
CSB_READ(ptring, guest_need_kick, v);
CSB_READ(ptgh, guest_need_kick, v);
return v;
}
/* Enable or disable guest interrupts. */
static inline void
ptring_intr_enable(struct ptnet_ring __user *ptring, uint32_t val)
{
CSB_WRITE(ptring, guest_need_kick, val);
}
/* Handle TX events: from the guest or from the backend */
static void
ptnetmap_tx_handler(void *data, int is_kthread)
@ -231,7 +225,8 @@ ptnetmap_tx_handler(void *data, int is_kthread)
struct netmap_pt_host_adapter *pth_na =
(struct netmap_pt_host_adapter *)kring->na->na_private;
struct ptnetmap_state *ptns = pth_na->ptns;
struct ptnet_ring __user *ptring;
struct ptnet_csb_gh __user *ptgh;
struct ptnet_csb_hg __user *pthg;
struct netmap_ring shadow_ring; /* shadow copy of the netmap_ring */
bool more_txspace = false;
struct nm_kctx *kth;
@ -257,18 +252,17 @@ ptnetmap_tx_handler(void *data, int is_kthread)
/* This is a guess, to be fixed in the rate callback. */
IFRATE(ptns->rate_ctx.new.gtxk++);
/* Get TX ptring pointer from the CSB. */
ptring = ptns->ptrings + kring->ring_id;
/* Get TX ptgh/pthg pointer from the CSB. */
ptgh = ptns->csb_gh + kring->ring_id;
pthg = ptns->csb_hg + kring->ring_id;
kth = ptns->kctxs[kring->ring_id];
num_slots = kring->nkr_num_slots;
shadow_ring.head = kring->rhead;
shadow_ring.cur = kring->rcur;
/* Disable guest --> host notifications. */
ptring_kick_enable(ptring, 0);
pthg_kick_enable(pthg, 0);
/* Copy the guest kring pointers from the CSB */
ptnetmap_host_read_kring_csb(ptring, &shadow_ring, num_slots);
ptnetmap_host_read_kring_csb(ptgh, &shadow_ring, num_slots);
for (;;) {
/* If guest moves ahead too fast, let's cut the move so
@ -299,7 +293,7 @@ ptnetmap_tx_handler(void *data, int is_kthread)
if (unlikely(nm_txsync_prologue(kring, &shadow_ring) >= num_slots)) {
/* Reinit ring and enable notifications. */
netmap_ring_reinit(kring);
ptring_kick_enable(ptring, 1);
pthg_kick_enable(pthg, 1);
break;
}
@ -310,7 +304,7 @@ ptnetmap_tx_handler(void *data, int is_kthread)
IFRATE(pre_tail = kring->rtail);
if (unlikely(kring->nm_sync(kring, shadow_ring.flags))) {
/* Reenable notifications. */
ptring_kick_enable(ptring, 1);
pthg_kick_enable(pthg, 1);
D("ERROR txsync()");
break;
}
@ -320,7 +314,7 @@ ptnetmap_tx_handler(void *data, int is_kthread)
* Copy host hwcur and hwtail into the CSB for the guest sync(), and
* do the nm_sync_finalize.
*/
ptnetmap_host_write_kring_csb(ptring, kring->nr_hwcur,
ptnetmap_host_write_kring_csb(pthg, kring->nr_hwcur,
kring->nr_hwtail);
if (kring->rtail != kring->nr_hwtail) {
/* Some more room available in the parent adapter. */
@ -337,16 +331,15 @@ ptnetmap_tx_handler(void *data, int is_kthread)
#ifndef BUSY_WAIT
/* Interrupt the guest if needed. */
if (more_txspace && ptring_intr_enabled(ptring) && is_kthread) {
if (more_txspace && ptgh_intr_enabled(ptgh) && is_kthread) {
/* Disable guest kick to avoid sending unnecessary kicks */
ptring_intr_enable(ptring, 0);
nm_os_kctx_send_irq(kth);
IFRATE(ptns->rate_ctx.new.htxk++);
more_txspace = false;
}
#endif
/* Read CSB to see if there is more work to do. */
ptnetmap_host_read_kring_csb(ptring, &shadow_ring, num_slots);
ptnetmap_host_read_kring_csb(ptgh, &shadow_ring, num_slots);
#ifndef BUSY_WAIT
if (shadow_ring.head == kring->rhead) {
/*
@ -358,13 +351,13 @@ ptnetmap_tx_handler(void *data, int is_kthread)
usleep_range(1,1);
}
/* Reenable notifications. */
ptring_kick_enable(ptring, 1);
pthg_kick_enable(pthg, 1);
/* Doublecheck. */
ptnetmap_host_read_kring_csb(ptring, &shadow_ring, num_slots);
ptnetmap_host_read_kring_csb(ptgh, &shadow_ring, num_slots);
if (shadow_ring.head != kring->rhead) {
/* We won the race condition, there are more packets to
* transmit. Disable notifications and do another cycle */
ptring_kick_enable(ptring, 0);
pthg_kick_enable(pthg, 0);
continue;
}
break;
@ -385,8 +378,7 @@ ptnetmap_tx_handler(void *data, int is_kthread)
nm_kr_put(kring);
if (more_txspace && ptring_intr_enabled(ptring) && is_kthread) {
ptring_intr_enable(ptring, 0);
if (more_txspace && ptgh_intr_enabled(ptgh) && is_kthread) {
nm_os_kctx_send_irq(kth);
IFRATE(ptns->rate_ctx.new.htxk++);
}
@ -411,12 +403,12 @@ ptnetmap_tx_nothread_notify(void *data)
return;
}
/* We cannot access the CSB here (to check ptring->guest_need_kick),
/* We cannot access the CSB here (to check ptgh->guest_need_kick),
* unless we switch address space to the one of the guest. For now
* we unconditionally inject an interrupt. */
nm_os_kctx_send_irq(ptns->kctxs[kring->ring_id]);
IFRATE(ptns->rate_ctx.new.htxk++);
ND(1, "%s interrupt", kring->name);
}
/*
@ -440,7 +432,8 @@ ptnetmap_rx_handler(void *data, int is_kthread)
struct netmap_pt_host_adapter *pth_na =
(struct netmap_pt_host_adapter *)kring->na->na_private;
struct ptnetmap_state *ptns = pth_na->ptns;
struct ptnet_ring __user *ptring;
struct ptnet_csb_gh __user *ptgh;
struct ptnet_csb_hg __user *pthg;
struct netmap_ring shadow_ring; /* shadow copy of the netmap_ring */
struct nm_kctx *kth;
uint32_t num_slots;
@ -467,18 +460,17 @@ ptnetmap_rx_handler(void *data, int is_kthread)
/* This is a guess, to be fixed in the rate callback. */
IFRATE(ptns->rate_ctx.new.grxk++);
/* Get RX ptring pointer from the CSB. */
ptring = ptns->ptrings + (pth_na->up.num_tx_rings + kring->ring_id);
/* Get RX ptgh and pthg pointers from the CSB. */
ptgh = ptns->csb_gh + (pth_na->up.num_tx_rings + kring->ring_id);
pthg = ptns->csb_hg + (pth_na->up.num_tx_rings + kring->ring_id);
kth = ptns->kctxs[pth_na->up.num_tx_rings + kring->ring_id];
num_slots = kring->nkr_num_slots;
shadow_ring.head = kring->rhead;
shadow_ring.cur = kring->rcur;
/* Disable notifications. */
ptring_kick_enable(ptring, 0);
pthg_kick_enable(pthg, 0);
/* Copy the guest kring pointers from the CSB */
ptnetmap_host_read_kring_csb(ptring, &shadow_ring, num_slots);
ptnetmap_host_read_kring_csb(ptgh, &shadow_ring, num_slots);
for (;;) {
uint32_t hwtail;
@ -488,7 +480,7 @@ ptnetmap_rx_handler(void *data, int is_kthread)
if (unlikely(nm_rxsync_prologue(kring, &shadow_ring) >= num_slots)) {
/* Reinit ring and enable notifications. */
netmap_ring_reinit(kring);
ptring_kick_enable(ptring, 1);
pthg_kick_enable(pthg, 1);
break;
}
@ -499,7 +491,7 @@ ptnetmap_rx_handler(void *data, int is_kthread)
IFRATE(pre_tail = kring->rtail);
if (unlikely(kring->nm_sync(kring, shadow_ring.flags))) {
/* Reenable notifications. */
ptring_kick_enable(ptring, 1);
pthg_kick_enable(pthg, 1);
D("ERROR rxsync()");
break;
}
@ -508,7 +500,7 @@ ptnetmap_rx_handler(void *data, int is_kthread)
* Copy host hwcur and hwtail into the CSB for the guest sync()
*/
hwtail = NM_ACCESS_ONCE(kring->nr_hwtail);
ptnetmap_host_write_kring_csb(ptring, kring->nr_hwcur, hwtail);
ptnetmap_host_write_kring_csb(pthg, kring->nr_hwcur, hwtail);
if (kring->rtail != hwtail) {
kring->rtail = hwtail;
some_recvd = true;
@ -526,16 +518,15 @@ ptnetmap_rx_handler(void *data, int is_kthread)
#ifndef BUSY_WAIT
/* Interrupt the guest if needed. */
if (some_recvd && ptring_intr_enabled(ptring)) {
if (some_recvd && ptgh_intr_enabled(ptgh)) {
/* Disable guest kick to avoid sending unnecessary kicks */
ptring_intr_enable(ptring, 0);
nm_os_kctx_send_irq(kth);
IFRATE(ptns->rate_ctx.new.hrxk++);
some_recvd = false;
}
#endif
/* Read CSB to see if there is more work to do. */
ptnetmap_host_read_kring_csb(ptring, &shadow_ring, num_slots);
ptnetmap_host_read_kring_csb(ptgh, &shadow_ring, num_slots);
#ifndef BUSY_WAIT
if (ptnetmap_norxslots(kring, shadow_ring.head)) {
/*
@ -545,13 +536,13 @@ ptnetmap_rx_handler(void *data, int is_kthread)
*/
usleep_range(1,1);
/* Reenable notifications. */
ptring_kick_enable(ptring, 1);
pthg_kick_enable(pthg, 1);
/* Doublecheck. */
ptnetmap_host_read_kring_csb(ptring, &shadow_ring, num_slots);
ptnetmap_host_read_kring_csb(ptgh, &shadow_ring, num_slots);
if (!ptnetmap_norxslots(kring, shadow_ring.head)) {
/* We won the race condition, more slots are available. Disable
* notifications and do another cycle. */
ptring_kick_enable(ptring, 0);
pthg_kick_enable(pthg, 0);
continue;
}
break;
@ -576,8 +567,7 @@ ptnetmap_rx_handler(void *data, int is_kthread)
nm_kr_put(kring);
/* Interrupt the guest if needed. */
if (some_recvd && ptring_intr_enabled(ptring)) {
ptring_intr_enable(ptring, 0);
if (some_recvd && ptgh_intr_enabled(ptgh)) {
nm_os_kctx_send_irq(kth);
IFRATE(ptns->rate_ctx.new.hrxk++);
}
@ -590,8 +580,8 @@ ptnetmap_print_configuration(struct ptnetmap_cfg *cfg)
int k;
D("ptnetmap configuration:");
D(" CSB ptrings @%p, num_rings=%u, cfgtype %08x", cfg->ptrings,
cfg->num_rings, cfg->cfgtype);
D(" CSB @%p@:%p, num_rings=%u, cfgtype %08x", cfg->csb_gh,
cfg->csb_hg, cfg->num_rings, cfg->cfgtype);
for (k = 0; k < cfg->num_rings; k++) {
switch (cfg->cfgtype) {
case PTNETMAP_CFGTYPE_QEMU: {
@ -624,16 +614,18 @@ ptnetmap_print_configuration(struct ptnetmap_cfg *cfg)
/* Copy actual state of the host ring into the CSB for the guest init */
static int
ptnetmap_kring_snapshot(struct netmap_kring *kring, struct ptnet_ring __user *ptring)
ptnetmap_kring_snapshot(struct netmap_kring *kring,
struct ptnet_csb_gh __user *ptgh,
struct ptnet_csb_hg __user *pthg)
{
if (CSB_WRITE(ptring, head, kring->rhead))
if (CSB_WRITE(ptgh, head, kring->rhead))
goto err;
if (CSB_WRITE(ptring, cur, kring->rcur))
if (CSB_WRITE(ptgh, cur, kring->rcur))
goto err;
if (CSB_WRITE(ptring, hwcur, kring->nr_hwcur))
if (CSB_WRITE(pthg, hwcur, kring->nr_hwcur))
goto err;
if (CSB_WRITE(ptring, hwtail, NM_ACCESS_ONCE(kring->nr_hwtail)))
if (CSB_WRITE(pthg, hwtail, NM_ACCESS_ONCE(kring->nr_hwtail)))
goto err;
DBG(ptnetmap_kring_dump("ptnetmap_kring_snapshot", kring);)
@ -665,7 +657,8 @@ ptnetmap_krings_snapshot(struct netmap_pt_host_adapter *pth_na)
for (k = 0; k < num_rings; k++) {
kring = ptnetmap_kring(pth_na, k);
err |= ptnetmap_kring_snapshot(kring, ptns->ptrings + k);
err |= ptnetmap_kring_snapshot(kring, ptns->csb_gh + k,
ptns->csb_hg + k);
}
return err;
@ -842,7 +835,8 @@ ptnetmap_create(struct netmap_pt_host_adapter *pth_na,
ptns->pth_na = pth_na;
/* Store the CSB address provided by the hypervisor. */
ptns->ptrings = cfg->ptrings;
ptns->csb_gh = cfg->csb_gh;
ptns->csb_hg = cfg->csb_hg;
DBG(ptnetmap_print_configuration(cfg));
@ -1321,26 +1315,26 @@ netmap_get_pt_host_na(struct nmreq *nmr, struct netmap_adapter **na,
* block (no space in the ring).
*/
bool
netmap_pt_guest_txsync(struct ptnet_ring *ptring, struct netmap_kring *kring,
int flags)
netmap_pt_guest_txsync(struct ptnet_csb_gh *ptgh, struct ptnet_csb_hg *pthg,
struct netmap_kring *kring, int flags)
{
bool notify = false;
/* Disable notifications */
ptring->guest_need_kick = 0;
ptgh->guest_need_kick = 0;
/*
* First part: tell the host (updating the CSB) to process the new
* packets.
*/
kring->nr_hwcur = ptring->hwcur;
ptnetmap_guest_write_kring_csb(ptring, kring->rcur, kring->rhead);
kring->nr_hwcur = pthg->hwcur;
ptnetmap_guest_write_kring_csb(ptgh, kring->rcur, kring->rhead);
/* Ask for a kick from a guest to the host if needed. */
if (((kring->rhead != kring->nr_hwcur || nm_kr_txempty(kring))
&& NM_ACCESS_ONCE(ptring->host_need_kick)) ||
&& NM_ACCESS_ONCE(pthg->host_need_kick)) ||
(flags & NAF_FORCE_RECLAIM)) {
ptring->sync_flags = flags;
ptgh->sync_flags = flags;
notify = true;
}
@ -1348,7 +1342,7 @@ netmap_pt_guest_txsync(struct ptnet_ring *ptring, struct netmap_kring *kring,
* Second part: reclaim buffers for completed transmissions.
*/
if (nm_kr_txempty(kring) || (flags & NAF_FORCE_RECLAIM)) {
ptnetmap_guest_read_kring_csb(ptring, kring);
ptnetmap_guest_read_kring_csb(pthg, kring);
}
/*
@ -1358,17 +1352,17 @@ netmap_pt_guest_txsync(struct ptnet_ring *ptring, struct netmap_kring *kring,
*/
if (nm_kr_txempty(kring)) {
/* Reenable notifications. */
ptring->guest_need_kick = 1;
ptgh->guest_need_kick = 1;
/* Double check */
ptnetmap_guest_read_kring_csb(ptring, kring);
ptnetmap_guest_read_kring_csb(pthg, kring);
/* If there is new free space, disable notifications */
if (unlikely(!nm_kr_txempty(kring))) {
ptring->guest_need_kick = 0;
ptgh->guest_need_kick = 0;
}
}
ND(1, "%s CSB(head:%u cur:%u hwtail:%u) KRING(head:%u cur:%u tail:%u)",
kring->name, ptring->head, ptring->cur, ptring->hwtail,
kring->name, ptgh->head, ptgh->cur, pthg->hwtail,
kring->rhead, kring->rcur, kring->nr_hwtail);
return notify;
@ -1386,20 +1380,20 @@ netmap_pt_guest_txsync(struct ptnet_ring *ptring, struct netmap_kring *kring,
* block (no more completed slots in the ring).
*/
bool
netmap_pt_guest_rxsync(struct ptnet_ring *ptring, struct netmap_kring *kring,
int flags)
netmap_pt_guest_rxsync(struct ptnet_csb_gh *ptgh, struct ptnet_csb_hg *pthg,
struct netmap_kring *kring, int flags)
{
bool notify = false;
/* Disable notifications */
ptring->guest_need_kick = 0;
ptgh->guest_need_kick = 0;
/*
* First part: import newly received packets, by updating the kring
* hwtail to the hwtail known from the host (read from the CSB).
* This also updates the kring hwcur.
*/
ptnetmap_guest_read_kring_csb(ptring, kring);
ptnetmap_guest_read_kring_csb(pthg, kring);
kring->nr_kflags &= ~NKR_PENDINTR;
/*
@ -1407,11 +1401,11 @@ netmap_pt_guest_rxsync(struct ptnet_ring *ptring, struct netmap_kring *kring,
* released, by updating cur and head in the CSB.
*/
if (kring->rhead != kring->nr_hwcur) {
ptnetmap_guest_write_kring_csb(ptring, kring->rcur,
ptnetmap_guest_write_kring_csb(ptgh, kring->rcur,
kring->rhead);
/* Ask for a kick from the guest to the host if needed. */
if (NM_ACCESS_ONCE(ptring->host_need_kick)) {
ptring->sync_flags = flags;
if (NM_ACCESS_ONCE(pthg->host_need_kick)) {
ptgh->sync_flags = flags;
notify = true;
}
}
@ -1423,17 +1417,17 @@ netmap_pt_guest_rxsync(struct ptnet_ring *ptring, struct netmap_kring *kring,
*/
if (nm_kr_rxempty(kring)) {
/* Reenable notifications. */
ptring->guest_need_kick = 1;
ptgh->guest_need_kick = 1;
/* Double check */
ptnetmap_guest_read_kring_csb(ptring, kring);
ptnetmap_guest_read_kring_csb(pthg, kring);
/* If there are new slots, disable notifications. */
if (!nm_kr_rxempty(kring)) {
ptring->guest_need_kick = 0;
ptgh->guest_need_kick = 0;
}
}
ND(1, "%s CSB(head:%u cur:%u hwtail:%u) KRING(head:%u cur:%u tail:%u)",
kring->name, ptring->head, ptring->cur, ptring->hwtail,
kring->name, ptgh->head, ptgh->cur, pthg->hwtail,
kring->rhead, kring->rcur, kring->nr_hwtail);
return notify;
@ -1492,13 +1486,13 @@ ptnet_nm_dtor(struct netmap_adapter *na)
struct netmap_pt_guest_adapter *ptna =
(struct netmap_pt_guest_adapter *)na;
netmap_mem_put(ptna->dr.up.nm_mem); // XXX is this needed?
netmap_mem_put(ptna->dr.up.nm_mem);
memset(&ptna->dr, 0, sizeof(ptna->dr));
netmap_mem_pt_guest_ifp_del(na->nm_mem, na->ifp);
}
int
netmap_pt_guest_attach(struct netmap_adapter *arg, void *csb,
netmap_pt_guest_attach(struct netmap_adapter *arg,
unsigned int nifp_offset, unsigned int memid)
{
struct netmap_pt_guest_adapter *ptna;
@ -1516,7 +1510,6 @@ netmap_pt_guest_attach(struct netmap_adapter *arg, void *csb,
/* get the netmap_pt_guest_adapter */
ptna = (struct netmap_pt_guest_adapter *) NA(ifp);
ptna->csb = csb;
/* Initialize a separate pass-through netmap adapter that is going to
* be used by the ptnet driver only, and so never exposed to netmap

File 4 of 4

@ -85,7 +85,8 @@ struct ptnetmap_cfg {
uint16_t cfgtype; /* how to interpret the cfg entries */
uint16_t entry_size; /* size of a config entry */
uint32_t num_rings; /* number of config entries */
void *ptrings; /* ptrings inside CSB */
void *csb_gh; /* CSB for guest --> host communication */
void *csb_hg; /* CSB for host --> guest communication */
/* Configuration entries are allocated right after the struct. */
};
@ -146,8 +147,8 @@ nmreq_pointer_put(struct nmreq *nmr, void *userptr)
#define PTNET_IO_PTCTL 4
#define PTNET_IO_MAC_LO 8
#define PTNET_IO_MAC_HI 12
#define PTNET_IO_CSBBAH 16
#define PTNET_IO_CSBBAL 20
#define PTNET_IO_CSBBAH 16 /* deprecated */
#define PTNET_IO_CSBBAL 20 /* deprecated */
#define PTNET_IO_NIFP_OFS 24
#define PTNET_IO_NUM_TX_RINGS 28
#define PTNET_IO_NUM_RX_RINGS 32
@ -155,7 +156,11 @@ nmreq_pointer_put(struct nmreq *nmr, void *userptr)
#define PTNET_IO_NUM_RX_SLOTS 40
#define PTNET_IO_VNET_HDR_LEN 44
#define PTNET_IO_HOSTMEMID 48
#define PTNET_IO_END 52
#define PTNET_IO_CSB_GH_BAH 52
#define PTNET_IO_CSB_GH_BAL 56
#define PTNET_IO_CSB_HG_BAH 60
#define PTNET_IO_CSB_HG_BAL 64
#define PTNET_IO_END 68
#define PTNET_IO_KICK_BASE 128
#define PTNET_IO_MASK 0xff
@ -163,26 +168,19 @@ nmreq_pointer_put(struct nmreq *nmr, void *userptr)
#define PTNETMAP_PTCTL_CREATE 1
#define PTNETMAP_PTCTL_DELETE 2
/* If defined, CSB is allocated by the guest, not by the host. */
#define PTNET_CSB_ALLOC
/* ptnetmap ring fields shared between guest and host */
struct ptnet_ring {
/* XXX revise the layout to minimize cache bounces. */
/* ptnetmap synchronization variables shared between guest and host */
struct ptnet_csb_gh {
uint32_t head; /* GW+ HR+ the head of the guest netmap_ring */
uint32_t cur; /* GW+ HR+ the cur of the guest netmap_ring */
uint32_t guest_need_kick; /* GW+ HR+ host-->guest notification enable */
uint32_t sync_flags; /* GW+ HR+ the flags of the guest [tx|rx]sync() */
char pad[48]; /* pad to a 64 bytes cacheline */
};
struct ptnet_csb_hg {
uint32_t hwcur; /* GR+ HW+ the hwcur of the host netmap_kring */
uint32_t hwtail; /* GR+ HW+ the hwtail of the host netmap_kring */
uint32_t host_need_kick; /* GR+ HW+ guest-->host notification enable */
char pad[4];
};
/* CSB for the ptnet device. */
struct ptnet_csb {
#define NETMAP_VIRT_CSB_SIZE 4096
struct ptnet_ring rings[NETMAP_VIRT_CSB_SIZE/sizeof(struct ptnet_ring)];
char pad[4+48];
};
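Both halves now pad out to exactly one 64-byte cacheline (4*4 + 48 and 3*4 + 4 + 48 bytes, respectively), which is also what makes the PAGE_SIZE ring-count check in ptnet_attach() come out to 64 rings; a compile-time check one could add (a sketch, not part of the commit, assuming C11 _Static_assert is available):

_Static_assert(sizeof(struct ptnet_csb_gh) == 64,
    "guest-to-host CSB entry must fill one 64-byte cacheline");
_Static_assert(sizeof(struct ptnet_csb_hg) == 64,
    "host-to-guest CSB entry must fill one 64-byte cacheline");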
#ifdef WITH_PTNETMAP_GUEST
@ -197,7 +195,7 @@ uint32_t nm_os_pt_memdev_ioread(struct ptnetmap_memdev *, unsigned int);
/* Guest driver: Write kring pointers (cur, head) to the CSB.
* This routine is coupled with ptnetmap_host_read_kring_csb(). */
static inline void
ptnetmap_guest_write_kring_csb(struct ptnet_ring *ptr, uint32_t cur,
ptnetmap_guest_write_kring_csb(struct ptnet_csb_gh *ptr, uint32_t cur,
uint32_t head)
{
/*
@ -228,16 +226,16 @@ ptnetmap_guest_write_kring_csb(struct ptnet_ring *ptr, uint32_t cur,
/* Guest driver: Read kring pointers (hwcur, hwtail) from the CSB.
* This routine is coupled with ptnetmap_host_write_kring_csb(). */
static inline void
ptnetmap_guest_read_kring_csb(struct ptnet_ring *ptr, struct netmap_kring *kring)
ptnetmap_guest_read_kring_csb(struct ptnet_csb_hg *pthg, struct netmap_kring *kring)
{
/*
* We place a memory barrier to make sure that the update of hwtail never
* overtakes the update of hwcur.
* (see explanation in ptnetmap_host_write_kring_csb).
*/
kring->nr_hwtail = ptr->hwtail;
kring->nr_hwtail = pthg->hwtail;
mb();
kring->nr_hwcur = ptr->hwcur;
kring->nr_hwcur = pthg->hwcur;
}
#endif /* WITH_PTNETMAP_GUEST */
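This read side is one half of a publish/consume pairing; a hedged sketch of the coupled host-side writer, whose diff hunk below only shows the new signature (barrier primitive assumed to mirror the guest-side mb()):

/* Sketch: hwcur is published before hwtail, so a guest that
 * observes the new hwtail (read first above) is guaranteed to
 * also observe the new hwcur. */
static inline void
host_write_kring_sketch(struct ptnet_csb_hg __user *ptr,
	uint32_t hwcur, uint32_t hwtail)
{
	CSB_WRITE(ptr, hwcur, hwcur);
	mb();
	CSB_WRITE(ptr, hwtail, hwtail);
}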
@ -259,7 +257,7 @@ ptnetmap_guest_read_kring_csb(struct ptnet_ring *ptr, struct netmap_kring *kring
/* Host netmap: Write kring pointers (hwcur, hwtail) to the CSB.
* This routine is coupled with ptnetmap_guest_read_kring_csb(). */
static inline void
ptnetmap_host_write_kring_csb(struct ptnet_ring __user *ptr, uint32_t hwcur,
ptnetmap_host_write_kring_csb(struct ptnet_csb_hg __user *ptr, uint32_t hwcur,
uint32_t hwtail)
{
/*
@ -285,7 +283,7 @@ ptnetmap_host_write_kring_csb(struct ptnet_ring __user *ptr, uint32_t hwcur,
/* Host netmap: Read kring pointers (head, cur, sync_flags) from the CSB.
* This routine is coupled with ptnetmap_guest_write_kring_csb(). */
static inline void
ptnetmap_host_read_kring_csb(struct ptnet_ring __user *ptr,
ptnetmap_host_read_kring_csb(struct ptnet_csb_gh __user *ptr,
struct netmap_ring *shadow_ring,
uint32_t num_slots)
{