- use struct ifnet as the explicit type of the argument to the
  txsync() and rxsync() callbacks, removing some variables made
  useless by this change;

- add generic lock and irq handling routines. These can be useful
  in case there are no driver locks that we can reuse (a usage
  sketch follows the commit metadata below);

- add a few macros to reduce differences with the Linux version.
Luigi Rizzo 2012-02-13 18:56:34 +00:00
parent d513f5b690
commit 1a26580ee8
7 changed files with 212 additions and 96 deletions
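
As an illustration of the second bullet (not part of the diff below; the foo_* names and softc fields are hypothetical), a driver without reusable locks of its own can now leave nm_lock NULL and let netmap_attach() install the generic lock routines:

/*
 * Minimal sketch of a driver attach routine. Leaving na.nm_lock
 * NULL makes netmap_attach() install the generic netmap_lock_wrapper()
 * and initialize core_lock and the per-ring q_lock mutexes.
 */
static void
foo_netmap_attach(struct foo_softc *sc)	/* foo_softc is hypothetical */
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));
	na.ifp = sc->ifp;
	na.separate_locks = 0;
	na.num_tx_desc = sc->num_tx_desc;
	na.num_rx_desc = sc->num_rx_desc;
	na.nm_register = foo_netmap_reg;
	na.nm_txsync = foo_netmap_txsync;	/* (struct ifnet *, u_int, int) */
	na.nm_rxsync = foo_netmap_rxsync;
	/* na.nm_lock left NULL: the generic lock routines are used */
	netmap_attach(&na, sc->num_queues);
}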

sys/dev/netmap/if_em_netmap.h

@@ -42,9 +42,9 @@
static void em_netmap_block_tasks(struct adapter *);
static void em_netmap_unblock_tasks(struct adapter *);
static int em_netmap_reg(struct ifnet *, int onoff);
-static int em_netmap_txsync(void *, u_int, int);
-static int em_netmap_rxsync(void *, u_int, int);
-static void em_netmap_lock_wrapper(void *, int, u_int);
+static int em_netmap_txsync(struct ifnet *, u_int, int);
+static int em_netmap_rxsync(struct ifnet *, u_int, int);
+static void em_netmap_lock_wrapper(struct ifnet *, int, u_int);

static void
em_netmap_attach(struct adapter *adapter)
@@ -69,9 +69,9 @@ em_netmap_attach(struct adapter *adapter)
* wrapper to export locks to the generic code
*/
static void
-em_netmap_lock_wrapper(void *_a, int what, u_int queueid)
+em_netmap_lock_wrapper(struct ifnet *ifp, int what, u_int queueid)
{
-struct adapter *adapter = _a;
+struct adapter *adapter = ifp->if_softc;
ASSERT(queueid < adapter->num_queues);
switch (what) {
@@ -183,9 +183,9 @@ em_netmap_reg(struct ifnet *ifp, int onoff)
* Reconcile hardware and user view of the transmit ring.
*/
static int
-em_netmap_txsync(void *a, u_int ring_nr, int do_lock)
+em_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
{
-struct adapter *adapter = a;
+struct adapter *adapter = ifp->if_softc;
struct tx_ring *txr = &adapter->tx_rings[ring_nr];
struct netmap_adapter *na = NA(adapter->ifp);
struct netmap_kring *kring = &na->tx_rings[ring_nr];
@@ -289,9 +289,9 @@ em_netmap_txsync(void *a, u_int ring_nr, int do_lock)
* Reconcile kernel and user view of the receive ring.
*/
static int
-em_netmap_rxsync(void *a, u_int ring_nr, int do_lock)
+em_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
{
-struct adapter *adapter = a;
+struct adapter *adapter = ifp->if_softc;
struct rx_ring *rxr = &adapter->rx_rings[ring_nr];
struct netmap_adapter *na = NA(adapter->ifp);
struct netmap_kring *kring = &na->rx_rings[ring_nr];

sys/dev/netmap/if_igb_netmap.h

@@ -38,9 +38,9 @@
#include <dev/netmap/netmap_kern.h>
static int igb_netmap_reg(struct ifnet *, int onoff);
-static int igb_netmap_txsync(void *, u_int, int);
-static int igb_netmap_rxsync(void *, u_int, int);
-static void igb_netmap_lock_wrapper(void *, int, u_int);
+static int igb_netmap_txsync(struct ifnet *, u_int, int);
+static int igb_netmap_rxsync(struct ifnet *, u_int, int);
+static void igb_netmap_lock_wrapper(struct ifnet *, int, u_int);

static void
@@ -66,9 +66,9 @@ igb_netmap_attach(struct adapter *adapter)
* wrapper to export locks to the generic code
*/
static void
-igb_netmap_lock_wrapper(void *_a, int what, u_int queueid)
+igb_netmap_lock_wrapper(struct ifnet *ifp, int what, u_int queueid)
{
-struct adapter *adapter = _a;
+struct adapter *adapter = ifp->if_softc;
ASSERT(queueid < adapter->num_queues);
switch (what) {
@@ -140,9 +140,9 @@ igb_netmap_reg(struct ifnet *ifp, int onoff)
* Reconcile kernel and user view of the transmit ring.
*/
static int
-igb_netmap_txsync(void *a, u_int ring_nr, int do_lock)
+igb_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
{
-struct adapter *adapter = a;
+struct adapter *adapter = ifp->if_softc;
struct tx_ring *txr = &adapter->tx_rings[ring_nr];
struct netmap_adapter *na = NA(adapter->ifp);
struct netmap_kring *kring = &na->tx_rings[ring_nr];
@@ -258,9 +258,9 @@ igb_netmap_txsync(void *a, u_int ring_nr, int do_lock)
* Reconcile kernel and user view of the receive ring.
*/
static int
-igb_netmap_rxsync(void *a, u_int ring_nr, int do_lock)
+igb_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
{
-struct adapter *adapter = a;
+struct adapter *adapter = ifp->if_softc;
struct rx_ring *rxr = &adapter->rx_rings[ring_nr];
struct netmap_adapter *na = NA(adapter->ifp);
struct netmap_kring *kring = &na->rx_rings[ring_nr];

sys/dev/netmap/if_lem_netmap.h

@@ -40,9 +40,9 @@
#include <dev/netmap/netmap_kern.h>
static int lem_netmap_reg(struct ifnet *, int onoff);
-static int lem_netmap_txsync(void *, u_int, int);
-static int lem_netmap_rxsync(void *, u_int, int);
-static void lem_netmap_lock_wrapper(void *, int, u_int);
+static int lem_netmap_txsync(struct ifnet *, u_int, int);
+static int lem_netmap_rxsync(struct ifnet *, u_int, int);
+static void lem_netmap_lock_wrapper(struct ifnet *, int, u_int);

SYSCTL_NODE(_dev, OID_AUTO, lem, CTLFLAG_RW, 0, "lem card");
@@ -67,9 +67,9 @@ lem_netmap_attach(struct adapter *adapter)
static void
-lem_netmap_lock_wrapper(void *_a, int what, u_int ringid)
+lem_netmap_lock_wrapper(struct ifnet *ifp, int what, u_int ringid)
{
-struct adapter *adapter = _a;
+struct adapter *adapter = ifp->if_softc;
/* only one ring here so ignore the ringid */
switch (what) {
@@ -153,9 +153,9 @@ lem_netmap_reg(struct ifnet *ifp, int onoff)
* Reconcile kernel and user view of the transmit ring.
*/
static int
-lem_netmap_txsync(void *a, u_int ring_nr, int do_lock)
+lem_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
{
-struct adapter *adapter = a;
+struct adapter *adapter = ifp->if_softc;
struct netmap_adapter *na = NA(adapter->ifp);
struct netmap_kring *kring = &na->tx_rings[0];
struct netmap_ring *ring = kring->ring;
@@ -257,9 +257,9 @@ lem_netmap_txsync(void *a, u_int ring_nr, int do_lock)
* Reconcile kernel and user view of the receive ring.
*/
static int
-lem_netmap_rxsync(void *a, u_int ring_nr, int do_lock)
+lem_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
{
-struct adapter *adapter = a;
+struct adapter *adapter = ifp->if_softc;
struct netmap_adapter *na = NA(adapter->ifp);
struct netmap_kring *kring = &na->rx_rings[0];
struct netmap_ring *ring = kring->ring;

sys/dev/netmap/if_re_netmap.h

@@ -37,9 +37,9 @@
#include <dev/netmap/netmap_kern.h>
static int re_netmap_reg(struct ifnet *, int onoff);
-static int re_netmap_txsync(void *, u_int, int);
-static int re_netmap_rxsync(void *, u_int, int);
-static void re_netmap_lock_wrapper(void *, int, u_int);
+static int re_netmap_txsync(struct ifnet *, u_int, int);
+static int re_netmap_rxsync(struct ifnet *, u_int, int);
+static void re_netmap_lock_wrapper(struct ifnet *, int, u_int);

static void
re_netmap_attach(struct rl_softc *sc)
@@ -65,9 +65,9 @@ re_netmap_attach(struct rl_softc *sc)
* We should not use the tx/rx locks
*/
static void
-re_netmap_lock_wrapper(void *_a, int what, u_int queueid)
+re_netmap_lock_wrapper(struct ifnet *ifp, int what, u_int queueid)
{
-struct rl_softc *adapter = _a;
+struct rl_softc *adapter = ifp->if_softc;
switch (what) {
case NETMAP_CORE_LOCK:
@@ -133,9 +133,9 @@ re_netmap_reg(struct ifnet *ifp, int onoff)
* Reconcile kernel and user view of the transmit ring.
*/
static int
-re_netmap_txsync(void *a, u_int ring_nr, int do_lock)
+re_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
{
-struct rl_softc *sc = a;
+struct rl_softc *sc = ifp->if_softc;
struct rl_txdesc *txd = sc->rl_ldata.rl_tx_desc;
struct netmap_adapter *na = NA(sc->rl_ifp);
struct netmap_kring *kring = &na->tx_rings[ring_nr];
@@ -236,9 +236,9 @@ re_netmap_txsync(void *a, u_int ring_nr, int do_lock)
* Reconcile kernel and user view of the receive ring.
*/
static int
-re_netmap_rxsync(void *a, u_int ring_nr, int do_lock)
+re_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
{
-struct rl_softc *sc = a;
+struct rl_softc *sc = ifp->if_softc;
struct rl_rxdesc *rxd = sc->rl_ldata.rl_rx_desc;
struct netmap_adapter *na = NA(sc->rl_ifp);
struct netmap_kring *kring = &na->rx_rings[ring_nr];

sys/dev/netmap/ixgbe_netmap.h

@@ -55,9 +55,9 @@
* *_netmap_attach() routine.
*/
static int ixgbe_netmap_reg(struct ifnet *, int onoff);
-static int ixgbe_netmap_txsync(void *, u_int, int);
-static int ixgbe_netmap_rxsync(void *, u_int, int);
-static void ixgbe_netmap_lock_wrapper(void *, int, u_int);
+static int ixgbe_netmap_txsync(struct ifnet *, u_int, int);
+static int ixgbe_netmap_rxsync(struct ifnet *, u_int, int);
+static void ixgbe_netmap_lock_wrapper(struct ifnet *, int, u_int);

/*
@@ -90,9 +90,9 @@ ixgbe_netmap_attach(struct adapter *adapter)
* wrapper to export locks to the generic netmap code.
*/
static void
-ixgbe_netmap_lock_wrapper(void *_a, int what, u_int queueid)
+ixgbe_netmap_lock_wrapper(struct ifnet *_a, int what, u_int queueid)
{
-struct adapter *adapter = _a;
+struct adapter *adapter = _a->if_softc;
ASSERT(queueid < adapter->num_queues);
switch (what) {
@@ -190,9 +190,9 @@ ixgbe_netmap_reg(struct ifnet *ifp, int onoff)
* buffers irrespective of interrupt mitigation.
*/
static int
-ixgbe_netmap_txsync(void *a, u_int ring_nr, int do_lock)
+ixgbe_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
{
-struct adapter *adapter = a;
+struct adapter *adapter = ifp->if_softc;
struct tx_ring *txr = &adapter->tx_rings[ring_nr];
struct netmap_adapter *na = NA(adapter->ifp);
struct netmap_kring *kring = &na->tx_rings[ring_nr];
@@ -418,9 +418,9 @@ ixgbe_netmap_txsync(void *a, u_int ring_nr, int do_lock)
* do_lock has a special meaning: please refer to txsync.
*/
static int
-ixgbe_netmap_rxsync(void *a, u_int ring_nr, int do_lock)
+ixgbe_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
{
-struct adapter *adapter = a;
+struct adapter *adapter = ifp->if_softc;
struct rx_ring *rxr = &adapter->rx_rings[ring_nr];
struct netmap_adapter *na = NA(adapter->ifp);
struct netmap_kring *kring = &na->rx_rings[ring_nr];

sys/dev/netmap/netmap.c

@@ -749,7 +749,7 @@ netmap_dtor_locked(void *data)
D("deleting last netmap instance for %s", ifp->if_xname);
/*
* there is a race here with *_netmap_task() and
-* netmap_poll(), which don't run under NETMAP_CORE_LOCK.
+* netmap_poll(), which don't run under NETMAP_REG_LOCK.
* na->refcount == 0 && na->ifp->if_capenable & IFCAP_NETMAP
* (aka NETMAP_DELETING(na)) are a unique marker that the
* device is dying.
@@ -759,9 +759,9 @@ netmap_dtor_locked(void *data)
* should check the condition at entry and quit if
* they cannot run.
*/
-na->nm_lock(ifp->if_softc, NETMAP_CORE_UNLOCK, 0);
+na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0);
tsleep(na, 0, "NIOCUNREG", 4);
-na->nm_lock(ifp->if_softc, NETMAP_CORE_LOCK, 0);
+na->nm_lock(ifp, NETMAP_REG_LOCK, 0);
na->nm_register(ifp, 0); /* off, clear IFCAP_NETMAP */
/* Wake up any sleeping threads. netmap_poll will
* then return POLLERR
@@ -803,9 +803,9 @@ netmap_dtor(void *data)
struct ifnet *ifp = priv->np_ifp;
struct netmap_adapter *na = NA(ifp);
-na->nm_lock(ifp->if_softc, NETMAP_CORE_LOCK, 0);
+na->nm_lock(ifp, NETMAP_REG_LOCK, 0);
netmap_dtor_locked(data);
-na->nm_lock(ifp->if_softc, NETMAP_CORE_UNLOCK, 0);
+na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0);
if_rele(ifp);
bzero(priv, sizeof(*priv)); /* XXX for safety */
@@ -863,7 +863,7 @@ netmap_sync_to_host(struct netmap_adapter *na)
netmap_ring_reinit(kring);
return;
}
-// na->nm_lock(na->ifp->if_softc, NETMAP_CORE_LOCK, 0);
+// na->nm_lock(na->ifp, NETMAP_CORE_LOCK, 0);
/* Take packets from hwcur to cur and pass them up.
* In case of no buffers we give up. At the end of the loop,
@@ -890,16 +890,15 @@ netmap_sync_to_host(struct netmap_adapter *na)
}
kring->nr_hwcur = k;
kring->nr_hwavail = ring->avail = lim;
-// na->nm_lock(na->ifp->if_softc, NETMAP_CORE_UNLOCK, 0);
+// na->nm_lock(na->ifp, NETMAP_CORE_UNLOCK, 0);
/* send packets up, outside the lock */
while ((m = head) != NULL) {
head = head->m_nextpkt;
m->m_nextpkt = NULL;
m->m_pkthdr.rcvif = na->ifp;
if (netmap_verbose & NM_VERB_HOST)
-D("sending up pkt %p size %d", m, m->m_pkthdr.len);
-(na->ifp->if_input)(na->ifp, m);
+D("sending up pkt %p size %d", m, MBUF_LEN(m));
+NM_SEND_UP(na->ifp, m);
}
}
@@ -919,7 +918,7 @@ netmap_sync_from_host(struct netmap_adapter *na, struct thread *td)
int error = 1, delta;
u_int k = ring->cur, lim = kring->nkr_num_slots;
-na->nm_lock(na->ifp->if_softc, NETMAP_CORE_LOCK, 0);
+na->nm_lock(na->ifp, NETMAP_CORE_LOCK, 0);
if (k >= lim) /* bad value */
goto done;
delta = k - kring->nr_hwcur;
@@ -936,7 +935,7 @@ netmap_sync_from_host(struct netmap_adapter *na, struct thread *td)
if (k && (netmap_verbose & NM_VERB_HOST))
D("%d pkts from stack", k);
done:
-na->nm_lock(na->ifp->if_softc, NETMAP_CORE_UNLOCK, 0);
+na->nm_lock(na->ifp, NETMAP_CORE_UNLOCK, 0);
if (error)
netmap_ring_reinit(kring);
}
@@ -1028,7 +1027,6 @@ netmap_set_ringid(struct netmap_priv_d *priv, u_int ringid)
{
struct ifnet *ifp = priv->np_ifp;
struct netmap_adapter *na = NA(ifp);
-void *adapter = na->ifp->if_softc; /* shorthand */
u_int i = ringid & NETMAP_RING_MASK;
/* first time we don't lock */
int need_lock = (priv->np_qfirst != priv->np_qlast);
@@ -1038,7 +1036,7 @@ netmap_set_ringid(struct netmap_priv_d *priv, u_int ringid)
return (EINVAL);
}
if (need_lock)
-na->nm_lock(adapter, NETMAP_CORE_LOCK, 0);
+na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
priv->np_ringid = ringid;
if (ringid & NETMAP_SW_RING) {
priv->np_qfirst = na->num_queues;
@@ -1052,7 +1050,7 @@ netmap_set_ringid(struct netmap_priv_d *priv, u_int ringid)
}
priv->np_txpoll = (ringid & NETMAP_NO_TX_POLL) ? 0 : 1;
if (need_lock)
-na->nm_lock(adapter, NETMAP_CORE_UNLOCK, 0);
+na->nm_lock(ifp, NETMAP_CORE_UNLOCK, 0);
if (ringid & NETMAP_SW_RING)
D("ringid %s set to SW RING", ifp->if_xname);
else if (ringid & NETMAP_HW_RING)
@@ -1085,7 +1083,6 @@ netmap_ioctl(__unused struct cdev *dev, u_long cmd, caddr_t data,
struct ifnet *ifp;
struct nmreq *nmr = (struct nmreq *) data;
struct netmap_adapter *na;
-void *adapter;
int error;
u_int i;
struct netmap_if *nifp;
@@ -1127,7 +1124,6 @@ netmap_ioctl(__unused struct cdev *dev, u_long cmd, caddr_t data,
if (error)
break;
na = NA(ifp); /* retrieve netmap adapter */
-adapter = na->ifp->if_softc; /* shorthand */
/*
* Allocate the private per-thread structure.
* XXX perhaps we can use a blocking malloc ?
@@ -1141,10 +1137,10 @@ netmap_ioctl(__unused struct cdev *dev, u_long cmd, caddr_t data,
}
for (i = 10; i > 0; i--) {
-na->nm_lock(adapter, NETMAP_CORE_LOCK, 0);
+na->nm_lock(ifp, NETMAP_REG_LOCK, 0);
if (!NETMAP_DELETING(na))
break;
-na->nm_lock(adapter, NETMAP_CORE_UNLOCK, 0);
+na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0);
tsleep(na, 0, "NIOCREGIF", hz/10);
}
if (i == 0) {
@@ -1175,14 +1171,14 @@ netmap_ioctl(__unused struct cdev *dev, u_long cmd, caddr_t data,
if (error) { /* reg. failed, release priv and ref */
error:
-na->nm_lock(adapter, NETMAP_CORE_UNLOCK, 0);
+na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0);
if_rele(ifp); /* return the refcount */
bzero(priv, sizeof(*priv));
free(priv, M_DEVBUF);
break;
}
-na->nm_lock(adapter, NETMAP_CORE_UNLOCK, 0);
+na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0);
error = devfs_set_cdevpriv(priv, netmap_dtor);
if (error != 0) {
@@ -1219,7 +1215,6 @@ netmap_ioctl(__unused struct cdev *dev, u_long cmd, caddr_t data,
}
ifp = priv->np_ifp; /* we have a reference */
na = NA(ifp); /* retrieve netmap adapter */
-adapter = ifp->if_softc; /* shorthand */
if (priv->np_qfirst == na->num_queues) {
/* queues to/from host */
@@ -1237,13 +1232,13 @@ netmap_ioctl(__unused struct cdev *dev, u_long cmd, caddr_t data,
D("sync tx ring %d cur %d hwcur %d",
i, kring->ring->cur,
kring->nr_hwcur);
-na->nm_txsync(adapter, i, 1 /* do lock */);
+na->nm_txsync(ifp, i, 1 /* do lock */);
if (netmap_verbose & NM_VERB_TXSYNC)
D("after sync tx ring %d cur %d hwcur %d",
i, kring->ring->cur,
kring->nr_hwcur);
} else {
-na->nm_rxsync(adapter, i, 1 /* do lock */);
+na->nm_rxsync(ifp, i, 1 /* do lock */);
microtime(&na->rx_rings[i].ring->ts);
}
}
@@ -1297,7 +1292,6 @@ netmap_poll(__unused struct cdev *dev, int events, struct thread *td)
struct ifnet *ifp;
struct netmap_kring *kring;
u_int core_lock, i, check_all, want_tx, want_rx, revents = 0;
-void *adapter;
enum {NO_CL, NEED_CL, LOCKED_CL }; /* see below */
if (devfs_get_cdevpriv((void **)&priv) != 0 || priv == NULL)
@@ -1313,7 +1307,6 @@ netmap_poll(__unused struct cdev *dev, int events, struct thread *td)
want_tx = events & (POLLOUT | POLLWRNORM);
want_rx = events & (POLLIN | POLLRDNORM);
-adapter = ifp->if_softc;
na = NA(ifp); /* retrieve netmap adapter */
/* how many queues we are scanning */
@@ -1411,16 +1404,16 @@ netmap_poll(__unused struct cdev *dev, int events, struct thread *td)
if (!want_tx && kring->ring->cur == kring->nr_hwcur)
continue;
if (core_lock == NEED_CL) {
-na->nm_lock(adapter, NETMAP_CORE_LOCK, 0);
+na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
core_lock = LOCKED_CL;
}
if (na->separate_locks)
-na->nm_lock(adapter, NETMAP_TX_LOCK, i);
+na->nm_lock(ifp, NETMAP_TX_LOCK, i);
if (netmap_verbose & NM_VERB_TXSYNC)
D("send %d on %s %d",
kring->ring->cur,
ifp->if_xname, i);
-if (na->nm_txsync(adapter, i, 0 /* no lock */))
+if (na->nm_txsync(ifp, i, 0 /* no lock */))
revents |= POLLERR;
/* Check avail/call selrecord only if called with POLLOUT */
@@ -1435,7 +1428,7 @@ netmap_poll(__unused struct cdev *dev, int events, struct thread *td)
selrecord(td, &kring->si);
}
if (na->separate_locks)
-na->nm_lock(adapter, NETMAP_TX_UNLOCK, i);
+na->nm_lock(ifp, NETMAP_TX_UNLOCK, i);
}
}
@@ -1447,13 +1440,13 @@ netmap_poll(__unused struct cdev *dev, int events, struct thread *td)
for (i = priv->np_qfirst; i < priv->np_qlast; i++) {
kring = &na->rx_rings[i];
if (core_lock == NEED_CL) {
-na->nm_lock(adapter, NETMAP_CORE_LOCK, 0);
+na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
core_lock = LOCKED_CL;
}
if (na->separate_locks)
-na->nm_lock(adapter, NETMAP_RX_LOCK, i);
+na->nm_lock(ifp, NETMAP_RX_LOCK, i);
-if (na->nm_rxsync(adapter, i, 0 /* no lock */))
+if (na->nm_rxsync(ifp, i, 0 /* no lock */))
revents |= POLLERR;
if (netmap_no_timestamp == 0 ||
kring->ring->flags & NR_TIMESTAMP) {
@@ -1465,7 +1458,7 @@ netmap_poll(__unused struct cdev *dev, int events, struct thread *td)
else if (!check_all)
selrecord(td, &kring->si);
if (na->separate_locks)
-na->nm_lock(adapter, NETMAP_RX_UNLOCK, i);
+na->nm_lock(ifp, NETMAP_RX_UNLOCK, i);
}
}
if (check_all && revents == 0) {
@@ -1476,13 +1469,55 @@ netmap_poll(__unused struct cdev *dev, int events, struct thread *td)
selrecord(td, &na->rx_rings[i].si);
}
if (core_lock == LOCKED_CL)
-na->nm_lock(adapter, NETMAP_CORE_UNLOCK, 0);
+na->nm_lock(ifp, NETMAP_CORE_UNLOCK, 0);
return (revents);
}

/*------- driver support routines ------*/

+/*
+ * default lock wrapper. On linux we use mostly netmap-specific locks.
+ */
+static void
+netmap_lock_wrapper(struct ifnet *_a, int what, u_int queueid)
+{
+struct netmap_adapter *na = NA(_a);
+
+switch (what) {
+#ifndef __FreeBSD__ /* some system do not need lock on register */
+case NETMAP_REG_LOCK:
+case NETMAP_REG_UNLOCK:
+break;
+#endif
+case NETMAP_CORE_LOCK:
+mtx_lock(&na->core_lock);
+break;
+case NETMAP_CORE_UNLOCK:
+mtx_unlock(&na->core_lock);
+break;
+case NETMAP_TX_LOCK:
+mtx_lock(&na->tx_rings[queueid].q_lock);
+break;
+case NETMAP_TX_UNLOCK:
+mtx_unlock(&na->tx_rings[queueid].q_lock);
+break;
+case NETMAP_RX_LOCK:
+mtx_lock(&na->rx_rings[queueid].q_lock);
+break;
+case NETMAP_RX_UNLOCK:
+mtx_unlock(&na->rx_rings[queueid].q_lock);
+break;
+}
+}
/*
* Initialize a ``netmap_adapter`` object created by driver on attach.
* We allocate a block of memory with room for a struct netmap_adapter
@@ -1500,6 +1535,7 @@ netmap_attach(struct netmap_adapter *na, int num_queues)
int size = sizeof(*na) + 2 * n * sizeof(struct netmap_kring);
void *buf;
struct ifnet *ifp = na->ifp;
+int i;
if (ifp == NULL) {
D("ifp not set, giving up");
@@ -1516,6 +1552,15 @@ netmap_attach(struct netmap_adapter *na, int num_queues)
na->buff_size = NETMAP_BUF_SIZE;
bcopy(na, buf, sizeof(*na));
ifp->if_capabilities |= IFCAP_NETMAP;
+
+na = buf;
+if (na->nm_lock == NULL)
+na->nm_lock = netmap_lock_wrapper;
+mtx_init(&na->core_lock, "netmap core lock", NULL, MTX_DEF);
+for (i = 0 ; i < num_queues; i++)
+mtx_init(&na->tx_rings[i].q_lock, "netmap txq lock", NULL, MTX_DEF);
+for (i = 0 ; i < num_queues; i++)
+mtx_init(&na->rx_rings[i].q_lock, "netmap rxq lock", NULL, MTX_DEF);
}
D("%s for %s", buf ? "ok" : "failed", ifp->if_xname);
@@ -1556,14 +1601,14 @@ netmap_start(struct ifnet *ifp, struct mbuf *m)
{
struct netmap_adapter *na = NA(ifp);
struct netmap_kring *kring = &na->rx_rings[na->num_queues];
-u_int i, len = m->m_pkthdr.len;
+u_int i, len = MBUF_LEN(m);
int error = EBUSY, lim = kring->nkr_num_slots - 1;
struct netmap_slot *slot;
if (netmap_verbose & NM_VERB_HOST)
D("%s packet %d len %d from the stack", ifp->if_xname,
kring->nr_hwcur + kring->nr_hwavail, len);
-na->nm_lock(ifp->if_softc, NETMAP_CORE_LOCK, 0);
+na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
if (kring->nr_hwavail >= lim) {
D("stack ring %s full\n", ifp->if_xname);
goto done; /* no space */
@@ -1586,7 +1631,7 @@ netmap_start(struct ifnet *ifp, struct mbuf *m)
selwakeuppri(&kring->si, PI_NET);
error = 0;
done:
-na->nm_lock(ifp->if_softc, NETMAP_CORE_UNLOCK, 0);
+na->nm_lock(ifp, NETMAP_CORE_UNLOCK, 0);
/* release the mbuf in either cases of success or failure. As an
* alternative, put the mbuf in a free list and free the list
@@ -1644,6 +1689,48 @@ netmap_reset(struct netmap_adapter *na, enum txrx tx, int n,
}

+/*
+ * Default functions to handle rx/tx interrupts
+ * we have 4 cases:
+ * 1 ring, single lock:
+ * lock(core); wake(i=0); unlock(core)
+ * N rings, single lock:
+ * lock(core); wake(i); wake(N+1) unlock(core)
+ * 1 ring, separate locks: (i=0)
+ * lock(i); wake(i); unlock(i)
+ * N rings, separate locks:
+ * lock(i); wake(i); unlock(i); lock(core) wake(N+1) unlock(core)
+ */
+int netmap_rx_irq(struct ifnet *ifp, int q, int *work_done)
+{
+struct netmap_adapter *na;
+struct netmap_kring *r;
+
+if (!(ifp->if_capenable & IFCAP_NETMAP))
+return 0;
+na = NA(ifp);
+r = work_done ? na->rx_rings : na->tx_rings;
+if (na->separate_locks) {
+mtx_lock(&r[q].q_lock);
+selwakeuppri(&r[q].si, PI_NET);
+mtx_unlock(&r[q].q_lock);
+if (na->num_queues > 1) {
+mtx_lock(&na->core_lock);
+selwakeuppri(&r[na->num_queues + 1].si, PI_NET);
+mtx_unlock(&na->core_lock);
+}
+} else {
+mtx_lock(&na->core_lock);
+selwakeuppri(&r[q].si, PI_NET);
+if (na->num_queues > 1)
+selwakeuppri(&r[na->num_queues + 1].si, PI_NET);
+mtx_unlock(&na->core_lock);
+}
+if (work_done)
+*work_done = 1; /* do not fire napi again */
+return 1;
+}
/*
* Module loader.
*
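
For reference, a sketch of how a driver interrupt handler might use the new helpers (the foo_* names and the "me" ring-index field are hypothetical, not part of this commit); netmap_rx_irq() returns nonzero when the ring was handled in netmap mode, and the netmap_tx_irq() macro covers the TX side by passing a NULL work_done pointer:

/*
 * Hypothetical RX interrupt path: in netmap mode the helper wakes
 * up the threads sleeping on the ring and returns nonzero, so the
 * regular mbuf receive path is skipped entirely.
 */
static void
foo_rx_intr(struct foo_rx_queue *rxq)	/* foo_rx_queue is hypothetical */
{
	struct ifnet *ifp = rxq->sc->ifp;
	int work_done;

	if (netmap_rx_irq(ifp, rxq->me, &work_done))
		return;	/* handled by netmap */
	/* ... regular mbuf receive processing ... */
}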

sys/dev/netmap/netmap_kern.h

@@ -1,5 +1,5 @@
/*
-* Copyright (C) 2011 Matteo Landi, Luigi Rizzo. All rights reserved.
+* Copyright (C) 2011-2012 Matteo Landi, Luigi Rizzo. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -34,6 +34,20 @@
#ifndef _NET_NETMAP_KERN_H_
#define _NET_NETMAP_KERN_H_

+#if defined(__FreeBSD__)
+#define NM_LOCK_T struct mtx
+#define NM_SELINFO_T struct selinfo
+#define MBUF_LEN(m) ((m)->m_pkthdr.len)
+#define NM_SEND_UP(ifp, m) ((ifp)->if_input)(ifp, m)
+#elif defined (__linux__)
+#define NM_LOCK_T spinlock_t
+#define NM_SELINFO_T wait_queue_head_t
+#define MBUF_LEN(m) ((m)->len)
+#define NM_SEND_UP(ifp, m) netif_rx(m)
+#else
+#error unsupported platform
+#endif
#ifdef MALLOC_DECLARE
MALLOC_DECLARE(M_NETMAP);
#endif
@@ -53,11 +67,10 @@ struct netmap_adapter;
/*
* private, kernel view of a ring.
*
* XXX 20110627-todo
-* The index in the NIC and netmap ring is offset by nkr_hwofs slots.
+* The indexes in the NIC and netmap rings are offset by nkr_hwofs slots.
* This is so that, on a reset, buffers owned by userspace are not
* modified by the kernel. In particular:
-* RX rings: the next empty buffer (hwcur + hwavail + hwofs) coincides
+* RX rings: the next empty buffer (hwcur + hwavail + hwofs) coincides with
* the next empty buffer as known by the hardware (next_to_check or so).
* TX rings: hwcur + hwofs coincides with next_to_send
*/
@@ -70,12 +83,13 @@ struct netmap_kring {
u_int nkr_num_slots;
int nkr_hwofs; /* offset between NIC and netmap ring */
-struct netmap_adapter *na; // debugging
-struct selinfo si; /* poll/select wait queue */
+struct netmap_adapter *na;
+NM_SELINFO_T si; /* poll/select wait queue */
+NM_LOCK_T q_lock; /* used if no device lock available */
} __attribute__((__aligned__(64)));
/*
-* This struct is part of and extends the 'struct adapter' (or
+* This struct extends the 'struct adapter' (or
* equivalent) device descriptor. It contains all fields needed to
* support netmap operation.
*/
@@ -93,9 +107,9 @@ struct netmap_adapter {
u_int num_tx_desc; /* number of descriptor in each queue */
u_int num_rx_desc;
u_int buff_size;
-u_int flags;
+//u_int flags; // XXX unused
/* tx_rings and rx_rings are private but allocated
* as a contiguous chunk of memory. Each array has
* N+1 entries, for the adapter queues and for the host queue.
@@ -107,7 +121,7 @@ struct netmap_adapter {
* packets from the network stack when netmap is active.
* XXX probably if_qflush is not necessary.
*/
-void (*if_qflush)(struct ifnet *);
+//void (*if_qflush)(struct ifnet *); // XXX unused
int (*if_transmit)(struct ifnet *, struct mbuf *);
/* references to the ifnet and device routines, used by
@@ -115,10 +129,12 @@ struct netmap_adapter {
*/
struct ifnet *ifp; /* adapter is ifp->if_softc */
+NM_LOCK_T core_lock; /* used if no device lock available */
int (*nm_register)(struct ifnet *, int onoff);
-void (*nm_lock)(void *, int what, u_int ringid);
-int (*nm_txsync)(void *, u_int ring, int lock);
-int (*nm_rxsync)(void *, u_int ring, int lock);
+void (*nm_lock)(struct ifnet *, int what, u_int ringid);
+int (*nm_txsync)(struct ifnet *, u_int ring, int lock);
+int (*nm_rxsync)(struct ifnet *, u_int ring, int lock);
};
/*
@@ -144,6 +160,12 @@ enum {
NETMAP_CORE_LOCK, NETMAP_CORE_UNLOCK,
NETMAP_TX_LOCK, NETMAP_TX_UNLOCK,
NETMAP_RX_LOCK, NETMAP_RX_UNLOCK,
+#ifdef __FreeBSD__
+#define NETMAP_REG_LOCK NETMAP_CORE_LOCK
+#define NETMAP_REG_UNLOCK NETMAP_CORE_UNLOCK
+#else
+NETMAP_REG_LOCK, NETMAP_REG_UNLOCK
+#endif
};
/*
@@ -251,4 +273,11 @@ PNMB(struct netmap_slot *slot, uint64_t *pp)
return ret;
}

+/* default functions to handle rx/tx interrupts */
+int netmap_rx_irq(struct ifnet *, int, int *);
+#define netmap_tx_irq(_n, _q) netmap_rx_irq(_n, _q, NULL)
+
+#ifdef __linux__
+#define bus_dmamap_sync(_a, _b, _c) // wmb() or rmb() ?
+netdev_tx_t netmap_start_linux(struct sk_buff *skb, struct net_device *dev);
+#endif
#endif /* _NET_NETMAP_KERN_H_ */
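
A final hedged illustration (not part of the commit; send_up_all is a hypothetical name): with MBUF_LEN() and NM_SEND_UP() defined per platform, a host-stack delivery loop like the one in netmap_sync_to_host() compiles unchanged on FreeBSD and Linux, since the macros hide m_pkthdr.len vs skb->len and if_input() vs netif_rx():

/*
 * Platform-neutral delivery of a chain of host-ring packets,
 * mirroring the loop in netmap_sync_to_host().
 */
static void
send_up_all(struct netmap_adapter *na, struct mbuf *head)
{
	struct mbuf *m;

	while ((m = head) != NULL) {
		head = head->m_nextpkt;
		m->m_nextpkt = NULL;
		D("sending up pkt %p size %d", m, MBUF_LEN(m));
		NM_SEND_UP(na->ifp, m);	/* if_input() or netif_rx() */
	}
}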