A bit of cleanup in the names of fields of netmap-related structures.

Use the name 'ring' instead of 'queue' in all fields.
Bump NETMAP_API.
Luigi Rizzo 2012-04-13 16:03:07 +00:00
parent 82d2fe1069
commit d76bf4ff7b
7 changed files with 92 additions and 58 deletions
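Before the per-file diffs, a note on impact: the rename is visible to userspace through struct nmreq and struct netmap_if, so applications must be rebuilt against the bumped NETMAP_API. A minimal sketch of querying the renamed counters (illustrative only; "em0" is just an example NIC name and error handling is trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <net/netmap.h>

int
main(void)
{
    struct nmreq req;
    int fd = open("/dev/netmap", O_RDWR);

    memset(&req, 0, sizeof(req));
    req.nr_version = NETMAP_API;    /* 3 after this commit */
    strncpy(req.nr_name, "em0", sizeof(req.nr_name));    /* example NIC */
    if (fd >= 0 && ioctl(fd, NIOCGINFO, &req) == 0)
        printf("%s: %u tx rings, %u rx rings\n",    /* renamed fields */
            req.nr_name, req.nr_tx_rings, req.nr_rx_rings);
    return (0);
}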

View File

@@ -63,6 +63,9 @@
  * This is tricky, much better to use TDH for now.
  */
 SYSCTL_DECL(_dev_netmap);
+static int ix_write_len;
+SYSCTL_INT(_dev_netmap, OID_AUTO, ix_write_len,
+    CTLFLAG_RW, &ix_write_len, 0, "write rx len");
 static int ix_rx_miss, ix_rx_miss_bufs, ix_use_dd, ix_crcstrip;
 SYSCTL_INT(_dev_netmap, OID_AUTO, ix_crcstrip,
     CTLFLAG_RW, &ix_crcstrip, 0, "strip CRC on rx frames");
@@ -121,6 +124,9 @@ set_crcstrip(struct ixgbe_hw *hw, int onoff)
     hl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
     rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+    if (netmap_verbose)
+        D("%s read HLREG 0x%x rxc 0x%x",
+            onoff ? "enter" : "exit", hl, rxc);
     /* hw requirements ... */
     rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
     rxc |= IXGBE_RDRXCTL_RSCACKC;
@@ -133,6 +139,9 @@ set_crcstrip(struct ixgbe_hw *hw, int onoff)
         hl |= IXGBE_HLREG0_RXCRCSTRP;
         rxc |= IXGBE_RDRXCTL_CRCSTRIP;
     }
+    if (netmap_verbose)
+        D("%s write HLREG 0x%x rxc 0x%x",
+            onoff ? "enter" : "exit", hl, rxc);
     IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl);
     IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc);
 }
@@ -479,7 +488,7 @@ ixgbe_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
      * of CRCSTRIP. The data sheets say differently.
      * Very strange.
      */
-    int crclen = 0; // ix_crcstrip ? 0 : 4;
+    int crclen = ix_crcstrip ? 0 : 4;
     l = rxr->next_to_check;
     j = netmap_idx_n2k(kring, l);
@@ -490,6 +499,8 @@ ixgbe_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
         if ((staterr & IXGBE_RXD_STAT_DD) == 0)
             break;
         ring->slot[j].len = le16toh(curr->wb.upper.length) - crclen;
+        if (ix_write_len)
+            D("rx[%d] len %d", j, ring->slot[j].len);
         bus_dmamap_sync(rxr->ptag,
             rxr->rx_buffers[l].pmap, BUS_DMASYNC_POSTREAD);
         j = (j == lim) ? 0 : j + 1;
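The new debug knob added above surfaces as dev.netmap.ix_write_len (the _dev_netmap parent plus the OID_AUTO name). A hedged sketch of flipping it from a userspace helper via FreeBSD's sysctlbyname(3); the function name is made up for illustration:

#include <sys/types.h>
#include <sys/sysctl.h>

/* Turn on the per-packet rx length logging added above.
 * Returns 0 on success, -1 (with errno set) otherwise. */
static int
enable_ix_write_len(void)
{
    int on = 1;

    return (sysctlbyname("dev.netmap.ix_write_len", NULL, NULL,
        &on, sizeof(on)));
}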

View File

@@ -173,21 +173,21 @@ netmap_dtor_locked(void *data)
     /* Wake up any sleeping threads. netmap_poll will
      * then return POLLERR
      */
-    for (i = 0; i < na->num_tx_queues + 1; i++)
+    for (i = 0; i < na->num_tx_rings + 1; i++)
         selwakeuppri(&na->tx_rings[i].si, PI_NET);
-    for (i = 0; i < na->num_rx_queues + 1; i++)
+    for (i = 0; i < na->num_rx_rings + 1; i++)
         selwakeuppri(&na->rx_rings[i].si, PI_NET);
     selwakeuppri(&na->tx_si, PI_NET);
     selwakeuppri(&na->rx_si, PI_NET);
     /* release all buffers */
     NMA_LOCK();
-    for (i = 0; i < na->num_tx_queues + 1; i++) {
+    for (i = 0; i < na->num_tx_rings + 1; i++) {
         struct netmap_ring *ring = na->tx_rings[i].ring;
         lim = na->tx_rings[i].nkr_num_slots;
         for (j = 0; j < lim; j++)
             netmap_free_buf(nifp, ring->slot[j].buf_idx);
     }
-    for (i = 0; i < na->num_rx_queues + 1; i++) {
+    for (i = 0; i < na->num_rx_rings + 1; i++) {
         struct netmap_ring *ring = na->rx_rings[i].ring;
         lim = na->rx_rings[i].nkr_num_slots;
         for (j = 0; j < lim; j++)
@@ -260,7 +260,7 @@ netmap_mmap(__unused struct cdev *dev,
 static void
 netmap_sync_to_host(struct netmap_adapter *na)
 {
-    struct netmap_kring *kring = &na->tx_rings[na->num_tx_queues];
+    struct netmap_kring *kring = &na->tx_rings[na->num_tx_rings];
     struct netmap_ring *ring = kring->ring;
     struct mbuf *head = NULL, *tail = NULL, *m;
     u_int k, n, lim = kring->nkr_num_slots - 1;
@@ -320,7 +320,7 @@ netmap_sync_to_host(struct netmap_adapter *na)
 static void
 netmap_sync_from_host(struct netmap_adapter *na, struct thread *td)
 {
-    struct netmap_kring *kring = &na->rx_rings[na->num_rx_queues];
+    struct netmap_kring *kring = &na->rx_rings[na->num_rx_rings];
     struct netmap_ring *ring = kring->ring;
     u_int j, n, lim = kring->nkr_num_slots;
     u_int k = ring->cur, resvd = ring->reserved;
@@ -415,7 +415,7 @@ netmap_ring_reinit(struct netmap_kring *kring)
     }
     if (errors) {
         int pos = kring - kring->na->tx_rings;
-        int n = kring->na->num_tx_queues + 1;
+        int n = kring->na->num_tx_rings + 1;
         D("total %d errors", errors);
         errors++;
@@ -443,10 +443,10 @@ netmap_set_ringid(struct netmap_priv_d *priv, u_int ringid)
     u_int i = ringid & NETMAP_RING_MASK;
     /* initially (np_qfirst == np_qlast) we don't want to lock */
     int need_lock = (priv->np_qfirst != priv->np_qlast);
-    int lim = na->num_rx_queues;
+    int lim = na->num_rx_rings;
-    if (na->num_tx_queues > lim)
-        lim = na->num_tx_queues;
+    if (na->num_tx_rings > lim)
+        lim = na->num_tx_rings;
     if ( (ringid & NETMAP_HW_RING) && i >= lim) {
         D("invalid ring id %d", i);
         return (EINVAL);
@@ -531,8 +531,8 @@ netmap_ioctl(__unused struct cdev *dev, u_long cmd, caddr_t data,
         if (error)
             break;
         na = NA(ifp); /* retrieve netmap_adapter */
-        nmr->nr_rx_rings = na->num_rx_queues;
-        nmr->nr_tx_rings = na->num_tx_queues;
+        nmr->nr_rx_rings = na->num_rx_rings;
+        nmr->nr_tx_rings = na->num_tx_rings;
         nmr->nr_rx_slots = na->num_rx_desc;
         nmr->nr_tx_slots = na->num_tx_desc;
         if_rele(ifp); /* return the refcount */
@@ -619,8 +619,8 @@ netmap_ioctl(__unused struct cdev *dev, u_long cmd, caddr_t data,
         }
         /* return the offset of the netmap_if object */
-        nmr->nr_rx_rings = na->num_rx_queues;
-        nmr->nr_tx_rings = na->num_tx_queues;
+        nmr->nr_rx_rings = na->num_rx_rings;
+        nmr->nr_tx_rings = na->num_tx_rings;
         nmr->nr_rx_slots = na->num_rx_desc;
         nmr->nr_tx_slots = na->num_tx_desc;
         nmr->nr_memsize = nm_mem->nm_totalsize;
@@ -657,7 +657,7 @@ netmap_ioctl(__unused struct cdev *dev, u_long cmd, caddr_t data,
         lim = priv->np_qlast;
         if (lim == NETMAP_HW_RING)
             lim = (cmd == NIOCTXSYNC) ?
-                na->num_tx_queues : na->num_rx_queues;
+                na->num_tx_rings : na->num_rx_rings;
         for (i = priv->np_qfirst; i < lim; i++) {
             if (cmd == NIOCTXSYNC) {
@@ -742,8 +742,8 @@ netmap_poll(__unused struct cdev *dev, int events, struct thread *td)
     na = NA(ifp); /* retrieve netmap adapter */
-    lim_tx = na->num_tx_queues;
-    lim_rx = na->num_rx_queues;
+    lim_tx = na->num_tx_rings;
+    lim_rx = na->num_rx_rings;
     /* how many queues we are scanning */
     if (priv->np_qfirst == NETMAP_SW_RING) {
         if (priv->np_txpoll || want_tx) {
@@ -965,7 +965,7 @@ netmap_lock_wrapper(struct ifnet *dev, int what, u_int queueid)
  * kring N+1 is only used for the selinfo for all queues.
  * Return 0 on success, ENOMEM otherwise.
  *
- * na->num_tx_queues can be set for cards with different tx/rx setups
+ * na->num_tx_rings can be set for cards with different tx/rx setups
  */
 int
 netmap_attach(struct netmap_adapter *na, int num_queues)
@@ -980,22 +980,21 @@ netmap_attach(struct netmap_adapter *na, int num_queues)
     }
     /* clear other fields ? */
     na->refcount = 0;
-    if (na->num_tx_queues == 0)
-        na->num_tx_queues = num_queues;
-    na->num_rx_queues = num_queues;
+    if (na->num_tx_rings == 0)
+        na->num_tx_rings = num_queues;
+    na->num_rx_rings = num_queues;
     /* on each direction we have N+1 resources
      * 0..n-1 are the hardware rings
      * n is the ring attached to the stack.
      */
-    n = na->num_rx_queues + na->num_tx_queues + 2;
+    n = na->num_rx_rings + na->num_tx_rings + 2;
     size = sizeof(*na) + n * sizeof(struct netmap_kring);
     buf = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
     if (buf) {
         WNA(ifp) = buf;
         na->tx_rings = (void *)((char *)buf + sizeof(*na));
-        na->rx_rings = na->tx_rings + na->num_tx_queues + 1;
-        na->buff_size = NETMAP_BUF_SIZE;
+        na->rx_rings = na->tx_rings + na->num_tx_rings + 1;
         bcopy(na, buf, sizeof(*na));
         ifp->if_capabilities |= IFCAP_NETMAP;
@@ -1003,9 +1002,9 @@ netmap_attach(struct netmap_adapter *na, int num_queues)
         if (na->nm_lock == NULL)
             na->nm_lock = netmap_lock_wrapper;
         mtx_init(&na->core_lock, "netmap core lock", NULL, MTX_DEF);
-        for (i = 0 ; i < na->num_tx_queues + 1; i++)
+        for (i = 0 ; i < na->num_tx_rings + 1; i++)
             mtx_init(&na->tx_rings[i].q_lock, "netmap txq lock", NULL, MTX_DEF);
-        for (i = 0 ; i < na->num_rx_queues + 1; i++)
+        for (i = 0 ; i < na->num_rx_rings + 1; i++)
             mtx_init(&na->rx_rings[i].q_lock, "netmap rxq lock", NULL, MTX_DEF);
     }
 #ifdef linux
@@ -1033,11 +1032,11 @@ netmap_detach(struct ifnet *ifp)
     if (!na)
         return;
-    for (i = 0; i < na->num_tx_queues + 1; i++) {
+    for (i = 0; i < na->num_tx_rings + 1; i++) {
         knlist_destroy(&na->tx_rings[i].si.si_note);
         mtx_destroy(&na->tx_rings[i].q_lock);
     }
-    for (i = 0; i < na->num_rx_queues + 1; i++) {
+    for (i = 0; i < na->num_rx_rings + 1; i++) {
         knlist_destroy(&na->rx_rings[i].si.si_note);
         mtx_destroy(&na->rx_rings[i].q_lock);
     }
@@ -1058,7 +1057,7 @@ int
 netmap_start(struct ifnet *ifp, struct mbuf *m)
 {
     struct netmap_adapter *na = NA(ifp);
-    struct netmap_kring *kring = &na->rx_rings[na->num_rx_queues];
+    struct netmap_kring *kring = &na->rx_rings[na->num_rx_rings];
     u_int i, len = MBUF_LEN(m);
     int error = EBUSY, lim = kring->nkr_num_slots - 1;
     struct netmap_slot *slot;
@@ -1085,7 +1084,7 @@ netmap_start(struct ifnet *ifp, struct mbuf *m)
     slot->len = len;
     kring->nr_hwavail++;
     if (netmap_verbose & NM_VERB_HOST)
-        D("wake up host ring %s %d", na->ifp->if_xname, na->num_rx_queues);
+        D("wake up host ring %s %d", na->ifp->if_xname, na->num_rx_rings);
     selwakeuppri(&kring->si, PI_NET);
     error = 0;
 done:
@@ -1174,10 +1173,10 @@ netmap_rx_irq(struct ifnet *ifp, int q, int *work_done)
     if (work_done) { /* RX path */
         r = na->rx_rings + q;
         r->nr_kflags |= NKR_PENDINTR;
-        main_wq = (na->num_rx_queues > 1) ? &na->rx_si : NULL;
+        main_wq = (na->num_rx_rings > 1) ? &na->rx_si : NULL;
     } else { /* tx path */
         r = na->tx_rings + q;
-        main_wq = (na->num_tx_queues > 1) ? &na->tx_si : NULL;
+        main_wq = (na->num_tx_rings > 1) ? &na->tx_si : NULL;
         work_done = &q; /* dummy */
     }
     if (na->separate_locks) {
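To recap the layout that all the num_*_rings + 1 loops above rely on: netmap_attach() places the krings in one allocation right after the adapter, tx first, then rx, and each array carries one extra entry for the ring attached to the host stack. A sketch of the arithmetic (illustrative helper in kernel context, not part of the source):

/* Size of the single allocation made by netmap_attach():
 * the adapter itself, num_tx_rings + 1 tx krings and
 * num_rx_rings + 1 rx krings (the +1s are the host rings). */
static size_t
netmap_alloc_size(const struct netmap_adapter *na)
{
    u_int n = na->num_rx_rings + na->num_tx_rings + 2;

    return (sizeof(*na) + n * sizeof(struct netmap_kring));
}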

View File

@@ -34,6 +34,8 @@
 #ifndef _NET_NETMAP_KERN_H_
 #define _NET_NETMAP_KERN_H_
+
+#define NETMAP_MEM2 // use the new memory allocator
 #if defined(__FreeBSD__)
 #define NM_LOCK_T struct mtx
 #define NM_SELINFO_T struct selinfo
@@ -104,20 +106,25 @@ struct netmap_adapter {
     int refcount; /* number of user-space descriptors using this
                      interface, which is equal to the number of
                      struct netmap_if objs in the mapped region. */
+    /*
+     * The selwakeup in the interrupt thread can use per-ring
+     * and/or global wait queues. We track how many clients
+     * of each type we have so we can optimize the drivers,
+     * and especially avoid huge contention on the locks.
+     */
     int na_single;  /* threads attached to a single hw queue */
     int na_multi;   /* threads attached to multiple hw queues */
     int separate_locks; /* set if the interface supports different
                            locks for rx, tx and core. */
-    u_int num_rx_queues; /* number of tx/rx queue pairs: this is
-                            a duplicate field needed to simplify the
-                            signature of ``netmap_detach``. */
-    u_int num_tx_queues; // if nonzero, overrides num_queues XXX
+    u_int num_rx_rings; /* number of tx/rx ring pairs */
+    u_int num_tx_rings; // if nonzero, overrides num_rx_rings
     u_int num_tx_desc; /* number of descriptors in each queue */
     u_int num_rx_desc;
-    u_int buff_size; // XXX deprecate, use NETMAP_BUF_SIZE
+    //u_int buff_size; // XXX deprecate, use NETMAP_BUF_SIZE
+    //u_int flags; // XXX unused
     /* tx_rings and rx_rings are private but allocated
      * as a contiguous chunk of memory. Each array has
      * N+1 entries, for the adapter queues and for the host queue.
@@ -129,9 +136,7 @@ struct netmap_adapter {
     /* copy of if_qflush and if_transmit pointers, to intercept
      * packets from the network stack when netmap is active.
      * XXX probably if_qflush is not necessary.
      */
+    //void (*if_qflush)(struct ifnet *); // XXX unused
     int (*if_transmit)(struct ifnet *, struct mbuf *);
@@ -294,6 +299,21 @@ netmap_idx_k2n(struct netmap_kring *kr, int idx)
 }
+
+#ifdef NETMAP_MEM2
+/* Entries of the look-up table. */
+struct lut_entry {
+    void *vaddr;        /* virtual address. */
+    vm_paddr_t paddr;   /* physical address. */
+};
+
+struct netmap_obj_pool;
+extern struct lut_entry *netmap_buffer_lut;
+#define NMB_VA(i)   (netmap_buffer_lut[i].vaddr)
+#define NMB_PA(i)   (netmap_buffer_lut[i].paddr)
+#else /* NETMAP_MEM1 */
+#define NMB_VA(i)   (netmap_buffer_base + (i * NETMAP_BUF_SIZE) )
+#endif /* NETMAP_MEM2 */
 /*
  * NMB returns the virtual address of a buffer (buffer 0 on bad index);
  * PNMB also fills the physical address
@@ -302,17 +322,19 @@ static inline void *
 NMB(struct netmap_slot *slot)
 {
     uint32_t i = slot->buf_idx;
-    return (i >= netmap_total_buffers) ? netmap_buffer_base :
-        netmap_buffer_base + (i * NETMAP_BUF_SIZE);
+    return (i >= netmap_total_buffers) ? NMB_VA(0) : NMB_VA(i);
 }

 static inline void *
 PNMB(struct netmap_slot *slot, uint64_t *pp)
 {
     uint32_t i = slot->buf_idx;
-    void *ret = (i >= netmap_total_buffers) ? netmap_buffer_base :
-        netmap_buffer_base + (i * NETMAP_BUF_SIZE);
+    void *ret = (i >= netmap_total_buffers) ? NMB_VA(0) : NMB_VA(i);
+#ifdef NETMAP_MEM2
+    *pp = (i >= netmap_total_buffers) ? NMB_PA(0) : NMB_PA(i);
+#else
     *pp = vtophys(ret);
+#endif
     return ret;
 }
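With NETMAP_MEM2 a buffer index goes through netmap_buffer_lut, which keeps both the virtual and the physical mapping per buffer, so buffers no longer need to live in one contiguous arena; an out-of-range buf_idx degrades to buffer 0 instead of a wild pointer. A driver-style sketch of when each lookup is used (illustrative helper, not in the source):

/* NMB() when only CPU access is needed; PNMB() when the
 * physical address must also be programmed into a NIC
 * descriptor. A bad buf_idx safely maps to buffer 0. */
static void
slot_to_desc(struct netmap_slot *slot, uint64_t *desc_paddr)
{
    uint64_t paddr;
    void *vaddr = PNMB(slot, &paddr);

    *desc_paddr = paddr;    /* DMA address for the NIC */
    (void)vaddr;            /* CPU-visible address, if needed */
}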

View File

@@ -280,8 +280,8 @@ netmap_if_new(const char *ifname, struct netmap_adapter *na)
     struct netmap_kring *kring;
     char *buff;
     u_int i, len, ofs, numdesc;
-    u_int nrx = na->num_rx_queues + 1; /* shorthand, include stack queue */
-    u_int ntx = na->num_tx_queues + 1; /* shorthand, include stack queue */
+    u_int nrx = na->num_rx_rings + 1; /* shorthand, include stack queue */
+    u_int ntx = na->num_tx_rings + 1; /* shorthand, include stack queue */
     /*
      * the descriptor is followed inline by an array of offsets
@@ -293,8 +293,8 @@ netmap_if_new(const char *ifname, struct netmap_adapter *na)
         return (NULL);
     /* initialize base fields */
-    *(int *)(uintptr_t)&nifp->ni_rx_queues = na->num_rx_queues;
-    *(int *)(uintptr_t)&nifp->ni_tx_queues = na->num_tx_queues;
+    *(int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
+    *(int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
     strncpy(nifp->ni_name, ifname, IFNAMSIZ);
     (na->refcount)++; /* XXX atomic ? we are under lock */

View File

@@ -83,8 +83,8 @@
                                          /   | cur         |
    NETMAP_IF  (nifp, one per file desc.)/    | avail       |
     +---------------+                  /     | buf_ofs     |
-    | ni_num_queues |                 /      +=============+
-    |               |                /       | buf_idx     | slot[0]
+    | ni_tx_rings   |                 /      +=============+
+    | ni_rx_rings   |                /       | buf_idx     | slot[0]
     |               |               /        | len, flags  |
     |               |              /         +-------------+
     +===============+             /          | buf_idx     | slot[1]
@@ -221,8 +221,8 @@ struct netmap_ring {
 struct netmap_if {
     char ni_name[IFNAMSIZ]; /* name of the interface. */
     const u_int ni_version; /* API version, currently unused */
-    const u_int ni_rx_queues; /* number of rx queue pairs */
-    const u_int ni_tx_queues; /* if zero, same as ni_tx_queues */
+    const u_int ni_rx_rings; /* number of rx rings */
+    const u_int ni_tx_rings; /* if zero, same as ni_rx_rings */
     /*
      * The following array contains the offset of each netmap ring
      * from this structure. The first ni_tx_queues+1 entries refer
@@ -257,7 +257,7 @@ struct netmap_if {
 struct nmreq {
     char nr_name[IFNAMSIZ];
     uint32_t nr_version; /* API version */
-#define NETMAP_API 2 /* current version */
+#define NETMAP_API 3 /* current version */
     uint32_t nr_offset; /* nifp offset in the shared region */
     uint32_t nr_memsize; /* size of the shared region */
     uint32_t nr_tx_slots; /* slots in tx rings */
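Since ni_rx_queues/ni_tx_queues sit in the shared memory layout, renaming them is an ABI change, which is what the NETMAP_API bump to 3 guards: the kernel compares nr_version in each request against its own NETMAP_API and rejects mismatches. A sketch of the userspace side of that handshake (illustrative; assumes an already-open /dev/netmap descriptor):

#include <err.h>
#include <string.h>
#include <sys/ioctl.h>
#include <net/netmap.h>

/* Put ifname into netmap mode; a binary compiled against the
 * old API (2) fails here instead of mis-reading the renamed
 * shared structures. */
static void
netmap_register(int fd, const char *ifname)
{
    struct nmreq req;

    memset(&req, 0, sizeof(req));
    req.nr_version = NETMAP_API;    /* now 3 */
    strncpy(req.nr_name, ifname, sizeof(req.nr_name));
    if (ioctl(fd, NIOCREGIF, &req) < 0)
        err(1, "NIOCREGIF (API version mismatch?)");
}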

View File

@@ -70,7 +70,7 @@
 #define NETMAP_RXRING(nifp, index) \
     ((struct netmap_ring *)((char *)(nifp) + \
-        (nifp)->ring_ofs[index + (nifp)->ni_tx_queues+1] ) )
+        (nifp)->ring_ofs[index + (nifp)->ni_tx_rings + 1] ) )
 #define NETMAP_BUF(ring, index) \
     ((char *)(ring) + (ring)->buf_ofs + ((index)*(ring)->nr_buf_size))
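The bias in NETMAP_RXRING exists because ring_ofs[] stores the tx offsets first: entries 0..ni_tx_rings are the tx rings (hardware plus host), and the rx offsets follow, so rx index i lives at ring_ofs[i + ni_tx_rings + 1]. A sketch of walking every rx ring with the renamed fields (illustrative; nifp comes from NETMAP_IF() on the mapped region, with net/netmap.h and net/netmap_user.h included):

/* Visit all rx rings, the host ring (index ni_rx_rings) included. */
static void
walk_rx_rings(struct netmap_if *nifp)
{
    u_int i;

    for (i = 0; i <= nifp->ni_rx_rings; i++) {
        struct netmap_ring *ring = NETMAP_RXRING(nifp, i);

        (void)ring;    /* ring->avail slots start at ring->cur */
    }
}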

View File

@@ -300,7 +300,7 @@ howmany(struct my_ring *me, int tx)
     if (0 && verbose && tot && !tx)
         D("ring %s %s %s has %d avail at %d",
             me->ifname, tx ? "tx": "rx",
-            me->end > me->nifp->ni_rx_queues ?
+            me->end >= me->nifp->ni_tx_rings ? // XXX who comes first ?
                 "host":"net",
             tot, NETMAP_TXRING(me->nifp, me->begin)->cur);
     return tot;
@@ -361,6 +361,8 @@ main(int argc, char **argv)
         }
     }
+    argc -= optind;
+    argv += optind;
     if (argc > 1)
         ifa = argv[1];
@@ -440,8 +442,8 @@ main(int argc, char **argv)
     D("Wait %d secs for link to come up...", wait_link);
     sleep(wait_link);
     D("Ready to go, %s 0x%x/%d <-> %s 0x%x/%d.",
-        me[0].ifname, me[0].queueid, me[0].nifp->ni_rx_queues,
-        me[1].ifname, me[1].queueid, me[1].nifp->ni_rx_queues);
+        me[0].ifname, me[0].queueid, me[0].nifp->ni_rx_rings,
+        me[1].ifname, me[1].queueid, me[1].nifp->ni_rx_rings);
     /* main loop */
     signal(SIGINT, sigint_h);