Sync the code with the version in HEAD, with the exception of

svn 275358 (M_FLOWID deprecation, only a couple of lines),
which cannot be merged.

if_lem_netmap.h, if_re_netmap.h:
- use the same (commented out) function to update the stat counters
  as in HEAD. This is a no-op here

netmap.c
- merge 274459 (support for private knote lock)
  and minor changes to nm_config and comments
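
  The nm_config convention after this change, as a minimal sketch
  (hypothetical foo_* driver names, not part of this commit): the
  callback fills the ring/slot counts and returns 0, while a non-zero
  return (or a NULL nm_config) makes netmap_update_config() keep the
  values recorded at attach time.

        static int
        foo_netmap_config(struct netmap_adapter *na,
            u_int *txr, u_int *txd, u_int *rxr, u_int *rxd)
        {
                struct foo_softc *sc = na->ifp->if_softc; /* hypothetical softc */

                *txr = sc->num_tx_queues;       /* hardware tx rings */
                *txd = sc->num_tx_desc;         /* slots per tx ring */
                *rxr = sc->num_rx_queues;
                *rxd = sc->num_rx_desc;
                return 0;       /* non-zero would mean "use init-time values" */
        }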

netmap_freebsd.c
- merge 274459 (support for private knote lock)
- merge 274354 (initialize color if passed as argument)

netmap_generic.c
- fix a comment

netmap_kern.h
- revise the lock macros, using sx locks;
  merge 274459 (private knote lock)
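
  In short, the private knote lock wraps the selinfo in a small struct
  carrying its own mutex, condensed from the hunks below ("sel" is just
  a placeholder pointer name):

        struct nm_selinfo {
                struct selinfo si;
                struct mtx m;           /* private lock for si.si_note */
        };

        /* init: give the knlist its own mutex instead of the default shared one */
        mtx_init(&sel->m, "nm_kn_lock", NULL, MTX_DEF);
        knlist_init_mtx(&sel->si.si_note, &sel->m);

        /* teardown mirrors init (see netmap_knlist_destroy) */
        knlist_delete(&sel->si.si_note, curthread, 0 /* not locked */);
        knlist_destroy(&sel->si.si_note);
        mtx_destroy(&sel->m);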

netmap_monitor.c
- use full memory barriers
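
  The pattern, condensed from the monitor and pipe hunks below: the
  writer now issues a full mb() before publishing the new tail, and the
  reader issues the paired mb() before consuming the slots.

        /* writer side (parent txsync/rxsync) */
        /* ... copy slots into the destination ring ... */
        mb();                   /* slots visible before the new tail */
        mkring->nr_hwtail = i;

        /* reader side (rxsync) */
        mb();                   /* paired with the barrier above */
        /* ... consume slots up to nr_hwtail ... */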

netmap_pipe.c
- use full memory barriers, use length from the correct queue
  (mostly cosmetic, since the queues typically have the same size)
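
  Why the queue length matters, condensed from the txsync hunk below
  (the ring sizes are only illustrative):

        m = lim_rx;                     /* free-space bound: rx slots - 1 */
        busy = j - rxkring->nr_hwcur;   /* slots still held by the reader */
        if (busy < 0)
                busy += rxkring->nkr_num_slots; /* wrap on the rx ring */
        m -= busy;
        /*
         * With, say, 1024 tx slots and 512 rx slots, wrapping busy with
         * txkring->nkr_num_slots would overcount the busy slots by 512.
         */
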
luigi 2015-02-14 19:41:26 +00:00
parent c8630fd03e
commit d6e510de09
8 changed files with 93 additions and 46 deletions

if_lem_netmap.h

@@ -410,7 +410,7 @@ lem_netmap_rxsync(struct netmap_kring *kring, int flags)
netmap_idx_n2k(kring, adapter->next_rx_desc_to_check),
kring->nr_hwtail);
adapter->next_rx_desc_to_check = nic_i;
// ifp->if_ipackets += n;
// if_inc_counter(ifp, IFCOUNTER_IPACKETS, n);
kring->nr_hwtail = nm_i;
}
kring->nr_kflags &= ~NKR_PENDINTR;

if_re_netmap.h

@@ -222,7 +222,7 @@ re_netmap_rxsync(struct netmap_kring *kring, int flags)
/* sync was in re_newbuf() */
bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
rxd[nic_i].rx_dmamap, BUS_DMASYNC_POSTREAD);
// sc->rl_ifp->if_ipackets++;
// if_inc_counter(sc->rl_ifp, IFCOUNTER_IPACKETS, 1);
nm_i = nm_next(nm_i, lim);
nic_i = nm_next(nic_i, lim);
}

netmap.c

@@ -375,9 +375,14 @@ ports attached to the switch)
/* reduce conditional code */
// linux API, use for the knlist in FreeBSD
#define init_waitqueue_head(x) knlist_init_mtx(&(x)->si_note, NULL)
/* use a private mutex for the knlist */
#define init_waitqueue_head(x) do { \
struct mtx *m = &(x)->m; \
mtx_init(m, "nm_kn_lock", NULL, MTX_DEF); \
knlist_init_mtx(&(x)->si.si_note, m); \
} while (0)
void freebsd_selwakeup(struct selinfo *si, int pri);
#define OS_selrecord(a, b) selrecord(a, &((b)->si))
#define OS_selwakeup(a, b) freebsd_selwakeup(a, b)
#elif defined(linux)
@@ -651,9 +656,8 @@ netmap_update_config(struct netmap_adapter *na)
u_int txr, txd, rxr, rxd;
txr = txd = rxr = rxd = 0;
if (na->nm_config) {
na->nm_config(na, &txr, &txd, &rxr, &rxd);
} else {
if (na->nm_config == NULL ||
na->nm_config(na, &txr, &txd, &rxr, &rxd)) {
/* take whatever we had at init time */
txr = na->num_tx_rings;
txd = na->num_tx_desc;
@@ -806,6 +810,19 @@ netmap_krings_create(struct netmap_adapter *na, u_int tailroom)
}
#ifdef __FreeBSD__
static void
netmap_knlist_destroy(NM_SELINFO_T *si)
{
/* XXX kqueue(9) needed; these will mirror knlist_init. */
knlist_delete(&si->si.si_note, curthread, 0 /* not locked */ );
knlist_destroy(&si->si.si_note);
/* now we don't need the mutex anymore */
mtx_destroy(&si->m);
}
#endif /* __FreeBSD__ */
/* undo the actions performed by netmap_krings_create */
/* call with NMG_LOCK held */
void
@@ -816,6 +833,7 @@ netmap_krings_delete(struct netmap_adapter *na)
/* we rely on the krings layout described above */
for ( ; kring != na->tailroom; kring++) {
mtx_destroy(&kring->q_lock);
netmap_knlist_destroy(&kring->si);
}
free(na->tx_rings, M_DEVBUF);
na->tx_rings = na->rx_rings = na->tailroom = NULL;
@@ -996,9 +1014,8 @@ netmap_do_unregif(struct netmap_priv_d *priv, struct netmap_if *nifp)
* XXX The wake up now must happen during *_down(), when
* we order all activities to stop. -gl
*/
/* XXX kqueue(9) needed; these will mirror knlist_init. */
/* knlist_destroy(&na->tx_si.si_note); */
/* knlist_destroy(&na->rx_si.si_note); */
netmap_knlist_destroy(&na->tx_si);
netmap_knlist_destroy(&na->rx_si);
/* delete rings and buffers */
netmap_mem_rings_delete(na);
@@ -1310,7 +1327,7 @@ netmap_rxsync_from_host(struct netmap_adapter *na, struct thread *td, void *pwai
/* access copies of cur,tail in the kring */
if (kring->rcur == kring->rtail && td) /* no bufs available */
selrecord(td, &kring->si);
OS_selrecord(td, &kring->si);
mbq_unlock(q);
return ret;
@@ -2150,7 +2167,7 @@ netmap_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
error = ENXIO;
break;
}
rmb(); /* make sure following reads are not from cache */
mb(); /* make sure following reads are not from cache */
na = priv->np_na; /* we have a reference */
@@ -2410,7 +2427,7 @@ flush_tx:
}
}
if (want_tx && retry_tx && !is_kevent) {
selrecord(td, check_all_tx ?
OS_selrecord(td, check_all_tx ?
&na->tx_si : &na->tx_rings[priv->np_txqfirst].si);
retry_tx = 0;
goto flush_tx;
@@ -2479,7 +2496,7 @@ do_retry_rx:
}
if (retry_rx && !is_kevent)
selrecord(td, check_all_rx ?
OS_selrecord(td, check_all_rx ?
&na->rx_si : &na->rx_rings[priv->np_rxqfirst].si);
if (send_down > 0 || retry_rx) {
retry_rx = 0;
@@ -3053,8 +3070,13 @@ netmap_init(void)
error = netmap_mem_init();
if (error != 0)
goto fail;
/* XXX could use make_dev_credv() to get error number */
netmap_dev = make_dev(&netmap_cdevsw, 0, UID_ROOT, GID_WHEEL, 0660,
/*
* MAKEDEV_ETERNAL_KLD avoids an expensive check on syscalls
* when the module is compiled in.
* XXX could use make_dev_credv() to get error number
*/
netmap_dev = make_dev_credf(MAKEDEV_ETERNAL_KLD,
&netmap_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0600,
"netmap");
if (!netmap_dev)
goto fail;

netmap_freebsd.c

@@ -466,6 +466,8 @@ netmap_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
if (netmap_verbose)
D("handle %p size %jd prot %d foff %jd",
handle, (intmax_t)size, prot, (intmax_t)foff);
if (color)
*color = 0;
dev_ref(vmh->dev);
return 0;
}
@@ -654,25 +656,24 @@ netmap_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
* and do not need the selrecord().
*/
void freebsd_selwakeup(struct selinfo *si, int pri);
void
freebsd_selwakeup(struct selinfo *si, int pri)
freebsd_selwakeup(struct nm_selinfo *si, int pri)
{
if (netmap_verbose)
D("on knote %p", &si->si_note);
selwakeuppri(si, pri);
D("on knote %p", &si->si.si_note);
selwakeuppri(&si->si, pri);
/* use a non-zero hint to tell the notification from the
* call done in kqueue_scan() which uses 0
*/
KNOTE_UNLOCKED(&si->si_note, 0x100 /* notification */);
KNOTE_UNLOCKED(&si->si.si_note, 0x100 /* notification */);
}
static void
netmap_knrdetach(struct knote *kn)
{
struct netmap_priv_d *priv = (struct netmap_priv_d *)kn->kn_hook;
struct selinfo *si = priv->np_rxsi;
struct selinfo *si = &priv->np_rxsi->si;
D("remove selinfo %p", si);
knlist_remove(&si->si_note, kn, 0);
@@ -682,7 +683,7 @@ static void
netmap_knwdetach(struct knote *kn)
{
struct netmap_priv_d *priv = (struct netmap_priv_d *)kn->kn_hook;
struct selinfo *si = priv->np_txsi;
struct selinfo *si = &priv->np_txsi->si;
D("remove selinfo %p", si);
knlist_remove(&si->si_note, kn, 0);
@@ -754,7 +755,7 @@ netmap_kqfilter(struct cdev *dev, struct knote *kn)
struct netmap_priv_d *priv;
int error;
struct netmap_adapter *na;
struct selinfo *si;
struct nm_selinfo *si;
int ev = kn->kn_filter;
if (ev != EVFILT_READ && ev != EVFILT_WRITE) {
@@ -777,7 +778,7 @@ netmap_kqfilter(struct cdev *dev, struct knote *kn)
kn->kn_fop = (ev == EVFILT_WRITE) ?
&netmap_wfiltops : &netmap_rfiltops;
kn->kn_hook = priv;
knlist_add(&si->si_note, kn, 1);
knlist_add(&si->si.si_note, kn, 1);
// XXX unlock(priv)
ND("register %p %s td %p priv %p kn %p np_nifp %p kn_fp/fpop %s",
na, na->ifp->if_xname, curthread, priv, kn,

netmap_generic.c

@@ -821,7 +821,7 @@ generic_netmap_attach(struct ifnet *ifp)
num_tx_desc = num_rx_desc = netmap_generic_ringsize; /* starting point */
generic_find_num_desc(ifp, &num_tx_desc, &num_rx_desc);
generic_find_num_desc(ifp, &num_tx_desc, &num_rx_desc); /* ignore errors */
ND("Netmap ring size: TX = %d, RX = %d", num_tx_desc, num_rx_desc);
if (num_tx_desc == 0 || num_rx_desc == 0) {
D("Device has no hw slots (tx %u, rx %u)", num_tx_desc, num_rx_desc);

netmap_kern.h

@@ -37,6 +37,7 @@
#define WITH_VALE // comment out to disable VALE support
#define WITH_PIPES
#define WITH_MONITOR
#define WITH_GENERIC
#if defined(__FreeBSD__)
@@ -44,6 +45,8 @@
#define unlikely(x) __builtin_expect((long)!!(x), 0L)
#define NM_LOCK_T struct mtx
/* netmap global lock */
#define NMG_LOCK_T struct sx
#define NMG_LOCK_INIT() sx_init(&netmap_global_lock, \
"netmap global lock")
@@ -52,7 +55,7 @@
#define NMG_UNLOCK() sx_xunlock(&netmap_global_lock)
#define NMG_LOCK_ASSERT() sx_assert(&netmap_global_lock, SA_XLOCKED)
#define NM_SELINFO_T struct selinfo
#define NM_SELINFO_T struct nm_selinfo
#define MBUF_LEN(m) ((m)->m_pkthdr.len)
#define MBUF_IFP(m) ((m)->m_pkthdr.rcvif)
#define NM_SEND_UP(ifp, m) ((NA(ifp))->if_input)(ifp, m)
@@ -63,6 +66,12 @@
#define NM_ATOMIC_TEST_AND_SET(p) (!atomic_cmpset_acq_int((p), 0, 1))
#define NM_ATOMIC_CLEAR(p) atomic_store_rel_int((p), 0)
#if __FreeBSD_version >= 1100030
#define WNA(_ifp) (_ifp)->if_netmap
#else /* older FreeBSD */
#define WNA(_ifp) (_ifp)->if_pspare[0]
#endif /* older FreeBSD */
#if __FreeBSD_version >= 1100005
struct netmap_adapter *netmap_getna(if_t ifp);
#endif
@@ -79,6 +88,13 @@ struct netmap_adapter *netmap_getna(if_t ifp);
MALLOC_DECLARE(M_NETMAP);
struct nm_selinfo {
struct selinfo si;
struct mtx m;
};
void freebsd_selwakeup(struct nm_selinfo *si, int pri);
// XXX linux struct, not used in FreeBSD
struct net_device_ops {
};
@@ -101,13 +117,20 @@ struct hrtimer {
#define NM_ATOMIC_T volatile long unsigned int
// XXX a mtx would suffice here too 20130404 gl
#define NMG_LOCK_T struct semaphore
#define NMG_LOCK_INIT() sema_init(&netmap_global_lock, 1)
#define NMG_LOCK_DESTROY()
#define NMG_LOCK() down(&netmap_global_lock)
#define NMG_UNLOCK() up(&netmap_global_lock)
#define NMG_LOCK_ASSERT() // XXX to be completed
#define NM_MTX_T struct mutex
#define NM_MTX_INIT(m, s) do { (void)s; mutex_init(&(m)); } while (0)
#define NM_MTX_DESTROY(m) do { (void)m; } while (0)
#define NM_MTX_LOCK(m) mutex_lock(&(m))
#define NM_MTX_UNLOCK(m) mutex_unlock(&(m))
#define NM_MTX_LOCK_ASSERT(m) mutex_is_locked(&(m))
#define NMG_LOCK_T NM_MTX_T
#define NMG_LOCK_INIT() NM_MTX_INIT(netmap_global_lock, \
"netmap_global_lock")
#define NMG_LOCK_DESTROY() NM_MTX_DESTROY(netmap_global_lock)
#define NMG_LOCK() NM_MTX_LOCK(netmap_global_lock)
#define NMG_UNLOCK() NM_MTX_UNLOCK(netmap_global_lock)
#define NMG_LOCK_ASSERT() NM_MTX_LOCK_ASSERT(netmap_global_lock)
#ifndef DEV_NETMAP
#define DEV_NETMAP
@@ -260,7 +283,7 @@ struct netmap_kring {
struct netmap_adapter *na;
/* The folloiwing fields are for VALE switch support */
/* The following fields are for VALE switch support */
struct nm_bdg_fwd *nkr_ft;
uint32_t *nkr_leases;
#define NR_NOSLOT ((uint32_t)~0) /* used in nkr_*lease* */
@@ -635,6 +658,7 @@ struct netmap_hw_adapter { /* physical device */
int (*nm_hw_register)(struct netmap_adapter *, int onoff);
};
#ifdef WITH_GENERIC
/* Mitigation support. */
struct nm_generic_mit {
struct hrtimer mit_timer;
@@ -662,6 +686,7 @@ struct netmap_generic_adapter { /* emulated device */
netdev_tx_t (*save_start_xmit)(struct mbuf *, struct ifnet *);
#endif
};
#endif /* WITH_GENERIC */
static __inline int
netmap_real_tx_rings(struct netmap_adapter *na)
@@ -1186,9 +1211,6 @@ extern int netmap_generic_rings;
* NA returns a pointer to the struct netmap adapter from the ifp,
* WNA is used to write it.
*/
#ifndef WNA
#define WNA(_ifp) (_ifp)->if_pspare[0]
#endif
#define NA(_ifp) ((struct netmap_adapter *)WNA(_ifp))
/*
@@ -1478,6 +1500,7 @@ struct netmap_monitor_adapter {
#endif /* WITH_MONITOR */
#ifdef WITH_GENERIC
/*
* generic netmap emulation for devices that do not have
* native netmap support.
@@ -1509,6 +1532,7 @@ void netmap_mitigation_start(struct nm_generic_mit *mit);
void netmap_mitigation_restart(struct nm_generic_mit *mit);
int netmap_mitigation_active(struct nm_generic_mit *mit);
void netmap_mitigation_cleanup(struct nm_generic_mit *mit);
#endif /* WITH_GENERIC */

netmap_monitor.c

@@ -179,7 +179,7 @@ netmap_monitor_parent_sync(struct netmap_kring *kring, int flags, u_int* ringptr
i = nm_next(i, mlim);
}
wmb();
mb();
mkring->nr_hwtail = i;
mtx_unlock(&mkring->q_lock);
@@ -225,7 +225,7 @@ netmap_monitor_rxsync(struct netmap_kring *kring, int flags)
{
ND("%s %x", kring->name, flags);
kring->nr_hwcur = kring->rcur;
rmb();
mb();
nm_rxsync_finalize(kring);
return 0;
}

netmap_pipe.c

@@ -197,10 +197,10 @@ netmap_pipe_txsync(struct netmap_kring *txkring, int flags)
if (m < 0)
m += txkring->nkr_num_slots;
limit = m;
m = rxkring->nkr_num_slots - 1; /* max avail space on destination */
m = lim_rx; /* max avail space on destination */
busy = j - rxkring->nr_hwcur; /* busy slots */
if (busy < 0)
busy += txkring->nkr_num_slots;
busy += rxkring->nkr_num_slots;
m -= busy; /* subtract busy slots */
ND(2, "m %d limit %d", m, limit);
if (m < limit)
@@ -228,7 +228,7 @@ netmap_pipe_txsync(struct netmap_kring *txkring, int flags)
k = nm_next(k, lim_tx);
}
wmb(); /* make sure the slots are updated before publishing them */
mb(); /* make sure the slots are updated before publishing them */
rxkring->nr_hwtail = j;
txkring->nr_hwcur = k;
txkring->nr_hwtail = nm_prev(k, lim_tx);
@@ -237,7 +237,7 @@ netmap_pipe_txsync(struct netmap_kring *txkring, int flags)
ND(2, "after: hwcur %d hwtail %d cur %d head %d tail %d j %d", txkring->nr_hwcur, txkring->nr_hwtail,
txkring->rcur, txkring->rhead, txkring->rtail, j);
wmb(); /* make sure rxkring->nr_hwtail is updated before notifying */
mb(); /* make sure rxkring->nr_hwtail is updated before notifying */
rxkring->na->nm_notify(rxkring->na, rxkring->ring_id, NR_RX, 0);
return 0;
@@ -253,12 +253,12 @@ netmap_pipe_rxsync(struct netmap_kring *rxkring, int flags)
rxkring->nr_hwcur = rxkring->rhead; /* recover user-relased slots */
ND(5, "hwcur %d hwtail %d cur %d head %d tail %d", rxkring->nr_hwcur, rxkring->nr_hwtail,
rxkring->rcur, rxkring->rhead, rxkring->rtail);
rmb(); /* paired with the first wmb() in txsync */
mb(); /* paired with the first mb() in txsync */
nm_rxsync_finalize(rxkring);
if (oldhwcur != rxkring->nr_hwcur) {
/* we have released some slots, notify the other end */
wmb(); /* make sure nr_hwcur is updated before notifying */
mb(); /* make sure nr_hwcur is updated before notifying */
txkring->na->nm_notify(txkring->na, txkring->ring_id, NR_TX, 0);
}
return 0;