netmap: refactor logging macros and pipes
Changelist:
- Replace ND, D and RD macros with nm_prdis, nm_prinf, nm_prerr and nm_prlim, to avoid possible naming conflicts.
- Add netmap_krings_mode_commit() helper function and use that to reduce code duplication.
- Refactor pipes control code to export some functions that can be reused by the veth driver (on Linux) and epair(4).
- Add check to reject API requests with version less than 11.
- Small code refactoring for the null adapter.

MFC after: 1 week
Parent: c6fb952de9
Commit: 2cc5eac6b2
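For reference while reading the diff: ND (the disabled debug print) maps to nm_prdis; D maps to nm_prinf for informational messages and to nm_prerr for errors; RD maps to nm_prlim, whose first argument is how many messages per second may be printed. Below is a minimal userspace sketch of the nm_prlim rate-limiting idea; it is illustrative only, the real macro in netmap_kern.h is kernel code, and prlim_sketch is a made-up name.

#include <stdarg.h>
#include <stdio.h>
#include <time.h>

/* Illustrative sketch of nm_prlim()-style rate limiting: print at
 * most lps messages per second, silently dropping the rest. */
static void
prlim_sketch(int lps, const char *fmt, ...)
{
        static time_t last_second;
        static int printed;
        time_t now = time(NULL);
        va_list ap;

        if (now != last_second) {
                last_second = now;      /* new second, reset the budget */
                printed = 0;
        }
        if (printed++ >= lps)
                return;                 /* budget for this second is spent */
        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
        fputc('\n', stderr);
}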
@@ -1151,10 +1151,10 @@ ptnet_sync_from_csb(struct ptnet_softc *sc, struct netmap_adapter *na)
 kring->nr_hwtail = kring->rtail =
 kring->ring->tail = ktoa->hwtail;

-ND("%d,%d: csb {hc %u h %u c %u ht %u}", t, i,
+nm_prdis("%d,%d: csb {hc %u h %u c %u ht %u}", t, i,
 ktoa->hwcur, atok->head, atok->cur,
 ktoa->hwtail);
-ND("%d,%d: kring {hc %u rh %u rc %u h %u c %u ht %u rt %u t %u}",
+nm_prdis("%d,%d: kring {hc %u rh %u rc %u h %u c %u ht %u rt %u t %u}",
 t, i, kring->nr_hwcur, kring->rhead, kring->rcur,
 kring->ring->head, kring->ring->cur, kring->nr_hwtail,
 kring->rtail, kring->ring->tail);
@@ -1179,7 +1179,6 @@ ptnet_nm_register(struct netmap_adapter *na, int onoff)
 struct ptnet_softc *sc = if_getsoftc(ifp);
 int native = (na == &sc->ptna->hwup.up);
 struct ptnet_queue *pq;
-enum txrx t;
 int ret = 0;
 int i;

@@ -1194,7 +1193,7 @@ ptnet_nm_register(struct netmap_adapter *na, int onoff)
 * in the RX rings, since we will not receive further interrupts
 * until these will be processed. */
 if (native && !onoff && na->active_fds == 0) {
-D("Exit netmap mode, re-enable interrupts");
+nm_prinf("Exit netmap mode, re-enable interrupts");
 for (i = 0; i < sc->num_rings; i++) {
 pq = sc->queues + i;
 pq->atok->appl_need_kick = 1;
@@ -1230,30 +1229,14 @@ ptnet_nm_register(struct netmap_adapter *na, int onoff)
 /* If not native, don't call nm_set_native_flags, since we don't want
 * to replace if_transmit method, nor set NAF_NETMAP_ON */
 if (native) {
-for_rx_tx(t) {
-for (i = 0; i <= nma_get_nrings(na, t); i++) {
-struct netmap_kring *kring = NMR(na, t)[i];
-
-if (nm_kring_pending_on(kring)) {
-kring->nr_mode = NKR_NETMAP_ON;
-}
-}
-}
+netmap_krings_mode_commit(na, onoff);
 nm_set_native_flags(na);
 }

 } else {
 if (native) {
 nm_clear_native_flags(na);
-for_rx_tx(t) {
-for (i = 0; i <= nma_get_nrings(na, t); i++) {
-struct netmap_kring *kring = NMR(na, t)[i];
-
-if (nm_kring_pending_off(kring)) {
-kring->nr_mode = NKR_NETMAP_OFF;
-}
-}
-}
+netmap_krings_mode_commit(na, onoff);
 }

 if (sc->ptna->backend_users == 0) {
@@ -1728,7 +1711,7 @@ ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget,

 if (!PTNET_Q_TRYLOCK(pq)) {
 /* We failed to acquire the lock, schedule the taskqueue. */
-RD(1, "Deferring TX work");
+nm_prlim(1, "Deferring TX work");
 if (may_resched) {
 taskqueue_enqueue(pq->taskq, &pq->task);
 }
@@ -1738,7 +1721,7 @@ ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget,

 if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
 PTNET_Q_UNLOCK(pq);
-RD(1, "Interface is down");
+nm_prlim(1, "Interface is down");
 return ENETDOWN;
 }

@@ -1776,7 +1759,7 @@ ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget,
 break;
 }

-RD(1, "Found more slots by doublecheck");
+nm_prlim(1, "Found more slots by doublecheck");
 /* More slots were freed before reactivating
 * the interrupts. */
 atok->appl_need_kick = 0;
@@ -1815,7 +1798,7 @@ ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget,
 continue;
 }
 }
-ND(1, "%s: [csum_flags %lX] vnet hdr: flags %x "
+nm_prdis(1, "%s: [csum_flags %lX] vnet hdr: flags %x "
 "csum_start %u csum_ofs %u hdr_len = %u "
 "gso_size %u gso_type %x", __func__,
 mhead->m_pkthdr.csum_flags, vh->flags,
@@ -1890,7 +1873,7 @@ ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget,
 }

 if (count >= budget && may_resched) {
-DBG(RD(1, "out of budget: resched, %d mbufs pending\n",
+DBG(nm_prlim(1, "out of budget: resched, %d mbufs pending\n",
 drbr_inuse(ifp, pq->bufring)));
 taskqueue_enqueue(pq->taskq, &pq->task);
 }
@@ -1932,7 +1915,7 @@ ptnet_transmit(if_t ifp, struct mbuf *m)
 err = drbr_enqueue(ifp, pq->bufring, m);
 if (err) {
 /* ENOBUFS when the bufring is full */
-RD(1, "%s: drbr_enqueue() failed %d\n",
+nm_prlim(1, "%s: drbr_enqueue() failed %d\n",
 __func__, err);
 pq->stats.errors ++;
 return err;
@@ -2077,13 +2060,13 @@ ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched)
 /* There is no good reason why host should
 * put the header in multiple netmap slots.
 * If this is the case, discard. */
-RD(1, "Fragmented vnet-hdr: dropping");
+nm_prlim(1, "Fragmented vnet-hdr: dropping");
 head = ptnet_rx_discard(kring, head);
 pq->stats.iqdrops ++;
 deliver = 0;
 goto skip;
 }
-ND(1, "%s: vnet hdr: flags %x csum_start %u "
+nm_prdis(1, "%s: vnet hdr: flags %x csum_start %u "
 "csum_ofs %u hdr_len = %u gso_size %u "
 "gso_type %x", __func__, vh->flags,
 vh->csum_start, vh->csum_offset, vh->hdr_len,
@@ -2147,7 +2130,7 @@ ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched)
 /* The very last slot prepared by the host has
 * the NS_MOREFRAG set. Drop it and continue
 * the outer cycle (to do the double-check). */
-RD(1, "Incomplete packet: dropping");
+nm_prlim(1, "Incomplete packet: dropping");
 m_freem(mhead);
 pq->stats.iqdrops ++;
 goto host_sync;
@@ -2185,7 +2168,7 @@ ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched)
 | VIRTIO_NET_HDR_F_DATA_VALID))) {
 if (unlikely(ptnet_rx_csum(mhead, vh))) {
 m_freem(mhead);
-RD(1, "Csum offload error: dropping");
+nm_prlim(1, "Csum offload error: dropping");
 pq->stats.iqdrops ++;
 deliver = 0;
 }
@@ -2231,7 +2214,7 @@ ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched)
 if (count >= budget && may_resched) {
 /* If we ran out of budget or the double-check found new
 * slots to process, schedule the taskqueue. */
-DBG(RD(1, "out of budget: resched h %u t %u\n",
+DBG(nm_prlim(1, "out of budget: resched h %u t %u\n",
 head, ring->tail));
 taskqueue_enqueue(pq->taskq, &pq->task);
 }
@@ -2246,7 +2229,7 @@ ptnet_rx_task(void *context, int pending)
 {
 struct ptnet_queue *pq = context;

-DBG(RD(1, "%s: pq #%u\n", __func__, pq->kring_id));
+DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id));
 ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
 }

@@ -2255,7 +2238,7 @@ ptnet_tx_task(void *context, int pending)
 {
 struct ptnet_queue *pq = context;

-DBG(RD(1, "%s: pq #%u\n", __func__, pq->kring_id));
+DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id));
 ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);
 }

@@ -2273,7 +2256,7 @@ ptnet_poll(if_t ifp, enum poll_cmd cmd, int budget)

 KASSERT(sc->num_rings > 0, ("Found no queues in while polling ptnet"));
 queue_budget = MAX(budget / sc->num_rings, 1);
-RD(1, "Per-queue budget is %d", queue_budget);
+nm_prlim(1, "Per-queue budget is %d", queue_budget);

 while (budget) {
 unsigned int rcnt = 0;
@@ -90,7 +90,6 @@ vtnet_netmap_reg(struct netmap_adapter *na, int state)
 struct ifnet *ifp = na->ifp;
 struct vtnet_softc *sc = ifp->if_softc;
 int success;
-enum txrx t;
 int i;

 /* Drain the taskqueues to make sure that there are no worker threads
@@ -132,44 +131,11 @@ vtnet_netmap_reg(struct netmap_adapter *na, int state)
 success = (ifp->if_drv_flags & IFF_DRV_RUNNING) ? 0 : ENXIO;

 if (state) {
-for_rx_tx(t) {
-/* Hardware rings. */
-for (i = 0; i < nma_get_nrings(na, t); i++) {
-struct netmap_kring *kring = NMR(na, t)[i];
-
-if (nm_kring_pending_on(kring))
-kring->nr_mode = NKR_NETMAP_ON;
-}
-
-/* Host rings. */
-for (i = 0; i < nma_get_host_nrings(na, t); i++) {
-struct netmap_kring *kring =
-NMR(na, t)[nma_get_nrings(na, t) + i];
-
-if (nm_kring_pending_on(kring))
-kring->nr_mode = NKR_NETMAP_ON;
-}
-}
+netmap_krings_mode_commit(na, state);
 nm_set_native_flags(na);
 } else {
 nm_clear_native_flags(na);
-for_rx_tx(t) {
-/* Hardware rings. */
-for (i = 0; i < nma_get_nrings(na, t); i++) {
-struct netmap_kring *kring = NMR(na, t)[i];
-
-if (nm_kring_pending_off(kring))
-kring->nr_mode = NKR_NETMAP_OFF;
-}
-
-/* Host rings. */
-for (i = 0; i < nma_get_host_nrings(na, t); i++) {
-struct netmap_kring *kring =
-NMR(na, t)[nma_get_nrings(na, t) + i];
-
-if (nm_kring_pending_off(kring))
-kring->nr_mode = NKR_NETMAP_OFF;
-}
-}
+netmap_krings_mode_commit(na, state);
 }

 VTNET_CORE_UNLOCK(sc);
@@ -396,7 +362,7 @@ vtnet_netmap_rxsync(struct netmap_kring *kring, int flags)
 /* Skip the virtio-net header. */
 len -= sc->vtnet_hdr_size;
 if (unlikely(len < 0)) {
-RD(1, "Truncated virtio-net-header, "
+nm_prlim(1, "Truncated virtio-net-header, "
 "missing %d bytes", -len);
 len = 0;
 }
@@ -408,7 +374,7 @@ vtnet_netmap_rxsync(struct netmap_kring *kring, int flags)
 kring->nr_hwtail = nm_i;
 kring->nr_kflags &= ~NKR_PENDINTR;
 }
-ND("[B] h %d c %d hwcur %d hwtail %d", ring->head, ring->cur,
+nm_prdis("[B] h %d c %d hwcur %d hwtail %d", ring->head, ring->cur,
 kring->nr_hwcur, kring->nr_hwtail);

 /*
@@ -423,7 +389,7 @@ vtnet_netmap_rxsync(struct netmap_kring *kring, int flags)
 virtqueue_notify(vq);
 }

-ND("[C] h %d c %d t %d hwcur %d hwtail %d", ring->head, ring->cur,
+nm_prdis("[C] h %d c %d t %d hwcur %d hwtail %d", ring->head, ring->cur,
 ring->tail, kring->nr_hwcur, kring->nr_hwtail);

 return 0;
@@ -893,7 +893,7 @@ netmap_krings_create(struct netmap_adapter *na, u_int tailroom)
 kring->rtail = kring->nr_hwtail = (t == NR_TX ? ndesc - 1 : 0);
 snprintf(kring->name, sizeof(kring->name) - 1, "%s %s%d", na->name,
 nm_txrx2str(t), i);
-ND("ktx %s h %d c %d t %d",
+nm_prdis("ktx %s h %d c %d t %d",
 kring->name, kring->rhead, kring->rcur, kring->rtail);
 err = nm_os_selinfo_init(&kring->si, kring->name);
 if (err) {
@@ -955,7 +955,7 @@ netmap_hw_krings_delete(struct netmap_adapter *na)

 for (i = nma_get_nrings(na, NR_RX); i < lim; i++) {
 struct mbq *q = &NMR(na, NR_RX)[i]->rx_queue;
-ND("destroy sw mbq with len %d", mbq_len(q));
+nm_prdis("destroy sw mbq with len %d", mbq_len(q));
 mbq_purge(q);
 mbq_safe_fini(q);
 }
@@ -1176,7 +1176,7 @@ netmap_grab_packets(struct netmap_kring *kring, struct mbq *q, int force)
 if ((slot->flags & NS_FORWARD) == 0 && !force)
 continue;
 if (slot->len < 14 || slot->len > NETMAP_BUF_SIZE(na)) {
-RD(5, "bad pkt at %d len %d", n, slot->len);
+nm_prlim(5, "bad pkt at %d len %d", n, slot->len);
 continue;
 }
 slot->flags &= ~NS_FORWARD; // XXX needed ?
@@ -1290,7 +1290,7 @@ netmap_txsync_to_host(struct netmap_kring *kring, int flags)
 */
 mbq_init(&q);
 netmap_grab_packets(kring, &q, 1 /* force */);
-ND("have %d pkts in queue", mbq_len(&q));
+nm_prdis("have %d pkts in queue", mbq_len(&q));
 kring->nr_hwcur = head;
 kring->nr_hwtail = head + lim;
 if (kring->nr_hwtail > lim)
@@ -1338,7 +1338,7 @@ netmap_rxsync_from_host(struct netmap_kring *kring, int flags)
 struct netmap_slot *slot = &ring->slot[nm_i];

 m_copydata(m, 0, len, NMB(na, slot));
-ND("nm %d len %d", nm_i, len);
+nm_prdis("nm %d len %d", nm_i, len);
 if (netmap_debug & NM_DEBUG_HOST)
 nm_prinf("%s", nm_dump_buf(NMB(na, slot),len, 128, NULL));

@@ -1603,7 +1603,7 @@ netmap_unget_na(struct netmap_adapter *na, struct ifnet *ifp)

 #define NM_FAIL_ON(t) do { \
 if (unlikely(t)) { \
-RD(5, "%s: fail '" #t "' " \
+nm_prlim(5, "%s: fail '" #t "' " \
 "h %d c %d t %d " \
 "rh %d rc %d rt %d " \
 "hc %d ht %d", \
@@ -1635,7 +1635,7 @@ nm_txsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring)
 u_int cur = ring->cur; /* read only once */
 u_int n = kring->nkr_num_slots;

-ND(5, "%s kcur %d ktail %d head %d cur %d tail %d",
+nm_prdis(5, "%s kcur %d ktail %d head %d cur %d tail %d",
 kring->name,
 kring->nr_hwcur, kring->nr_hwtail,
 ring->head, ring->cur, ring->tail);
@@ -1671,7 +1671,7 @@ nm_txsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring)
 }
 }
 if (ring->tail != kring->rtail) {
-RD(5, "%s tail overwritten was %d need %d", kring->name,
+nm_prlim(5, "%s tail overwritten was %d need %d", kring->name,
 ring->tail, kring->rtail);
 ring->tail = kring->rtail;
 }
@@ -1698,7 +1698,7 @@ nm_rxsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring)
 uint32_t const n = kring->nkr_num_slots;
 uint32_t head, cur;

-ND(5,"%s kc %d kt %d h %d c %d t %d",
+nm_prdis(5,"%s kc %d kt %d h %d c %d t %d",
 kring->name,
 kring->nr_hwcur, kring->nr_hwtail,
 ring->head, ring->cur, ring->tail);
@@ -1733,7 +1733,7 @@ nm_rxsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring)
 }
 }
 if (ring->tail != kring->rtail) {
-RD(5, "%s tail overwritten was %d need %d",
+nm_prlim(5, "%s tail overwritten was %d need %d",
 kring->name,
 ring->tail, kring->rtail);
 ring->tail = kring->rtail;
@@ -1762,7 +1762,7 @@ netmap_ring_reinit(struct netmap_kring *kring)
 int errors = 0;

 // XXX KASSERT nm_kr_tryget
-RD(10, "called for %s", kring->name);
+nm_prlim(10, "called for %s", kring->name);
 // XXX probably wrong to trust userspace
 kring->rhead = ring->head;
 kring->rcur = ring->cur;
@@ -1778,17 +1778,17 @@ netmap_ring_reinit(struct netmap_kring *kring)
 u_int idx = ring->slot[i].buf_idx;
 u_int len = ring->slot[i].len;
 if (idx < 2 || idx >= kring->na->na_lut.objtotal) {
-RD(5, "bad index at slot %d idx %d len %d ", i, idx, len);
+nm_prlim(5, "bad index at slot %d idx %d len %d ", i, idx, len);
 ring->slot[i].buf_idx = 0;
 ring->slot[i].len = 0;
 } else if (len > NETMAP_BUF_SIZE(kring->na)) {
 ring->slot[i].len = 0;
-RD(5, "bad len at slot %d idx %d len %d", i, idx, len);
+nm_prlim(5, "bad len at slot %d idx %d len %d", i, idx, len);
 }
 }
 if (errors) {
-RD(10, "total %d errors", errors);
-RD(10, "%s reinit, cur %d -> %d tail %d -> %d",
+nm_prlim(10, "total %d errors", errors);
+nm_prlim(10, "%s reinit, cur %d -> %d tail %d -> %d",
 kring->name,
 ring->cur, kring->nr_hwcur,
 ring->tail, kring->nr_hwtail);
@@ -1825,7 +1825,7 @@ netmap_interp_ringid(struct netmap_priv_d *priv, uint32_t nr_mode,
 case NR_REG_NULL:
 priv->np_qfirst[t] = 0;
 priv->np_qlast[t] = nma_get_nrings(na, t);
-ND("ALL/PIPE: %s %d %d", nm_txrx2str(t),
+nm_prdis("ALL/PIPE: %s %d %d", nm_txrx2str(t),
 priv->np_qfirst[t], priv->np_qlast[t]);
 break;
 case NR_REG_SW:
@@ -1837,7 +1837,7 @@ netmap_interp_ringid(struct netmap_priv_d *priv, uint32_t nr_mode,
 priv->np_qfirst[t] = (nr_mode == NR_REG_SW ?
 nma_get_nrings(na, t) : 0);
 priv->np_qlast[t] = netmap_all_rings(na, t);
-ND("%s: %s %d %d", nr_mode == NR_REG_SW ? "SW" : "NIC+SW",
+nm_prdis("%s: %s %d %d", nr_mode == NR_REG_SW ? "SW" : "NIC+SW",
 nm_txrx2str(t),
 priv->np_qfirst[t], priv->np_qlast[t]);
 break;
@@ -1853,7 +1853,7 @@ netmap_interp_ringid(struct netmap_priv_d *priv, uint32_t nr_mode,
 j = 0;
 priv->np_qfirst[t] = j;
 priv->np_qlast[t] = j + 1;
-ND("ONE_NIC: %s %d %d", nm_txrx2str(t),
+nm_prdis("ONE_NIC: %s %d %d", nm_txrx2str(t),
 priv->np_qfirst[t], priv->np_qlast[t]);
 break;
 default:
@@ -1962,7 +1962,7 @@ netmap_krings_get(struct netmap_priv_d *priv)
 if ((kring->nr_kflags & NKR_EXCLUSIVE) ||
 (kring->users && excl))
 {
-ND("ring %s busy", kring->name);
+nm_prdis("ring %s busy", kring->name);
 return EBUSY;
 }
 }
@@ -1997,7 +1997,7 @@ netmap_krings_put(struct netmap_priv_d *priv)
 int excl = (priv->np_flags & NR_EXCLUSIVE);
 enum txrx t;

-ND("%s: releasing tx [%d, %d) rx [%d, %d)",
+nm_prdis("%s: releasing tx [%d, %d) rx [%d, %d)",
 na->name,
 priv->np_qfirst[NR_TX],
 priv->np_qlast[NR_TX],
@@ -2262,7 +2262,7 @@ netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
 error = netmap_mem_get_lut(na->nm_mem, &na->na_lut);
 if (error)
 goto err_drop_mem;
-ND("lut %p bufs %u size %u", na->na_lut.lut, na->na_lut.objtotal,
+nm_prdis("lut %p bufs %u size %u", na->na_lut.lut, na->na_lut.objtotal,
 na->na_lut.objsize);

 /* ring configuration may have changed, fetch from the card */
@@ -2284,7 +2284,7 @@ netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
 /* This netmap adapter is attached to an ifnet. */
 unsigned mtu = nm_os_ifnet_mtu(na->ifp);

-ND("%s: mtu %d rx_buf_maxsize %d netmap_buf_size %d",
+nm_prdis("%s: mtu %d rx_buf_maxsize %d netmap_buf_size %d",
 na->name, mtu, na->rx_buf_maxsize, NETMAP_BUF_SIZE(na));

 if (na->rx_buf_maxsize == 0) {
@@ -2381,7 +2381,7 @@ nm_sync_finalize(struct netmap_kring *kring)
 */
 kring->ring->tail = kring->rtail = kring->nr_hwtail;

-ND(5, "%s now hwcur %d hwtail %d head %d cur %d tail %d",
+nm_prdis(5, "%s now hwcur %d hwtail %d head %d cur %d tail %d",
 kring->name, kring->nr_hwcur, kring->nr_hwtail,
 kring->rhead, kring->rcur, kring->rtail);
 }
@@ -3711,7 +3711,8 @@ netmap_attach_ext(struct netmap_adapter *arg, size_t size, int override_reg)
 hwna->up.nm_dtor = netmap_hw_dtor;
 }

-if_printf(ifp, "netmap queues/slots: TX %d/%d, RX %d/%d\n",
+nm_prinf("%s: netmap queues/slots: TX %d/%d, RX %d/%d\n",
+hwna->up.name,
 hwna->up.num_tx_rings, hwna->up.num_tx_desc,
 hwna->up.num_rx_rings, hwna->up.num_rx_desc);
 return 0;
@@ -3779,7 +3780,7 @@ netmap_hw_krings_create(struct netmap_adapter *na)
 for (i = na->num_rx_rings; i < lim; i++) {
 mbq_safe_init(&NMR(na, NR_RX)[i]->rx_queue);
 }
-ND("initialized sw rx queue %d", na->num_rx_rings);
+nm_prdis("initialized sw rx queue %d", na->num_rx_rings);
 }
 return ret;
 }
@@ -3880,13 +3881,13 @@ netmap_transmit(struct ifnet *ifp, struct mbuf *m)

 if (!netmap_generic_hwcsum) {
 if (nm_os_mbuf_has_csum_offld(m)) {
-RD(1, "%s drop mbuf that needs checksum offload", na->name);
+nm_prlim(1, "%s drop mbuf that needs checksum offload", na->name);
 goto done;
 }
 }

 if (nm_os_mbuf_has_seg_offld(m)) {
-RD(1, "%s drop mbuf that needs generic segmentation offload", na->name);
+nm_prlim(1, "%s drop mbuf that needs generic segmentation offload", na->name);
 goto done;
 }

@@ -3906,11 +3907,11 @@ netmap_transmit(struct ifnet *ifp, struct mbuf *m)
 if (busy < 0)
 busy += kring->nkr_num_slots;
 if (busy + mbq_len(q) >= kring->nkr_num_slots - 1) {
-RD(2, "%s full hwcur %d hwtail %d qlen %d", na->name,
+nm_prlim(2, "%s full hwcur %d hwtail %d qlen %d", na->name,
 kring->nr_hwcur, kring->nr_hwtail, mbq_len(q));
 } else {
 mbq_enqueue(q, m);
-ND(2, "%s %d bufs in queue", na->name, mbq_len(q));
+nm_prdis(2, "%s %d bufs in queue", na->name, mbq_len(q));
 /* notify outside the lock */
 m = NULL;
 error = 0;
@@ -3946,7 +3947,7 @@ netmap_reset(struct netmap_adapter *na, enum txrx tx, u_int n,
 int new_hwofs, lim;

 if (!nm_native_on(na)) {
-ND("interface not in native netmap mode");
+nm_prdis("interface not in native netmap mode");
 return NULL; /* nothing to reinitialize */
 }

@@ -4088,7 +4089,7 @@ netmap_rx_irq(struct ifnet *ifp, u_int q, u_int *work_done)
 return NM_IRQ_PASS;

 if (na->na_flags & NAF_SKIP_INTR) {
-ND("use regular interrupt");
+nm_prdis("use regular interrupt");
 return NM_IRQ_PASS;
 }

@@ -4129,6 +4130,25 @@ nm_clear_native_flags(struct netmap_adapter *na)
 na->na_flags &= ~NAF_NETMAP_ON;
 }

+void
+netmap_krings_mode_commit(struct netmap_adapter *na, int onoff)
+{
+enum txrx t;
+
+for_rx_tx(t) {
+int i;
+
+for (i = 0; i < netmap_real_rings(na, t); i++) {
+struct netmap_kring *kring = NMR(na, t)[i];
+
+if (onoff && nm_kring_pending_on(kring))
+kring->nr_mode = NKR_NETMAP_ON;
+else if (!onoff && nm_kring_pending_off(kring))
+kring->nr_mode = NKR_NETMAP_OFF;
+}
+}
+}
+
 /*
 * Module loader and unloader
 *
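The netmap_krings_mode_commit() helper added above centralizes the ring on/off loops that each driver used to duplicate. A driver nm_register callback then reduces to roughly the shape below; this is an illustrative sketch modeled on the ptnet and vtnet hunks in this commit, foo_netmap_reg is a hypothetical name, and locking and error handling are omitted.

static int
foo_netmap_reg(struct netmap_adapter *na, int onoff)
{
        if (onoff) {
                /* Flip all pending krings to NKR_NETMAP_ON, then
                 * publish netmap mode on the adapter. */
                netmap_krings_mode_commit(na, 1);
                nm_set_native_flags(na);
        } else {
                nm_clear_native_flags(na);
                netmap_krings_mode_commit(na, 0);
        }
        return 0;
}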
@@ -203,14 +203,14 @@ nm_find_bridge(const char *name, int create, struct netmap_bdg_ops *ops)
 } else if (x->bdg_namelen != namelen) {
 continue;
 } else if (strncmp(name, x->bdg_basename, namelen) == 0) {
-ND("found '%.*s' at %d", namelen, name, i);
+nm_prdis("found '%.*s' at %d", namelen, name, i);
 b = x;
 break;
 }
 }
 if (i == num_bridges && b) { /* name not found, can create entry */
 /* initialize the bridge */
-ND("create new bridge %s with ports %d", b->bdg_basename,
+nm_prdis("create new bridge %s with ports %d", b->bdg_basename,
 b->bdg_active_ports);
 b->ht = nm_os_malloc(sizeof(struct nm_hash_ent) * NM_BDG_HASH);
 if (b->ht == NULL) {
@@ -239,7 +239,7 @@ netmap_bdg_free(struct nm_bridge *b)
 return EBUSY;
 }

-ND("marking bridge %s as free", b->bdg_basename);
+nm_prdis("marking bridge %s as free", b->bdg_basename);
 nm_os_free(b->ht);
 memset(&b->bdg_ops, 0, sizeof(b->bdg_ops));
 memset(&b->bdg_saved_ops, 0, sizeof(b->bdg_saved_ops));
@@ -312,13 +312,13 @@ netmap_bdg_detach_common(struct nm_bridge *b, int hw, int sw)
 memcpy(b->tmp_bdg_port_index, b->bdg_port_index, sizeof(b->tmp_bdg_port_index));
 for (i = 0; (hw >= 0 || sw >= 0) && i < lim; ) {
 if (hw >= 0 && tmp[i] == hw) {
-ND("detach hw %d at %d", hw, i);
+nm_prdis("detach hw %d at %d", hw, i);
 lim--; /* point to last active port */
 tmp[i] = tmp[lim]; /* swap with i */
 tmp[lim] = hw; /* now this is inactive */
 hw = -1;
 } else if (sw >= 0 && tmp[i] == sw) {
-ND("detach sw %d at %d", sw, i);
+nm_prdis("detach sw %d at %d", sw, i);
 lim--;
 tmp[i] = tmp[lim];
 tmp[lim] = sw;
@@ -342,7 +342,7 @@ netmap_bdg_detach_common(struct nm_bridge *b, int hw, int sw)
 b->bdg_active_ports = lim;
 BDG_WUNLOCK(b);

-ND("now %d active ports", lim);
+nm_prdis("now %d active ports", lim);
 netmap_bdg_free(b);
 }

@@ -408,7 +408,7 @@ netmap_get_bdg_na(struct nmreq_header *hdr, struct netmap_adapter **na,

 b = nm_find_bridge(nr_name, create, ops);
 if (b == NULL) {
-ND("no bridges available for '%s'", nr_name);
+nm_prdis("no bridges available for '%s'", nr_name);
 return (create ? ENOMEM : ENXIO);
 }
 if (strlen(nr_name) < b->bdg_namelen) /* impossible */
@@ -425,10 +425,10 @@ netmap_get_bdg_na(struct nmreq_header *hdr, struct netmap_adapter **na,
 for (j = 0; j < b->bdg_active_ports; j++) {
 i = b->bdg_port_index[j];
 vpna = b->bdg_ports[i];
-ND("checking %s", vpna->up.name);
+nm_prdis("checking %s", vpna->up.name);
 if (!strcmp(vpna->up.name, nr_name)) {
 netmap_adapter_get(&vpna->up);
-ND("found existing if %s refs %d", nr_name)
+nm_prdis("found existing if %s refs %d", nr_name)
 *na = &vpna->up;
 return 0;
 }
@@ -445,7 +445,7 @@ netmap_get_bdg_na(struct nmreq_header *hdr, struct netmap_adapter **na,
 /* record the next two ports available, but do not allocate yet */
 cand = b->bdg_port_index[b->bdg_active_ports];
 cand2 = b->bdg_port_index[b->bdg_active_ports + 1];
-ND("+++ bridge %s port %s used %d avail %d %d",
+nm_prdis("+++ bridge %s port %s used %d avail %d %d",
 b->bdg_basename, ifname, b->bdg_active_ports, cand, cand2);

 /*
@@ -515,7 +515,7 @@ netmap_get_bdg_na(struct nmreq_header *hdr, struct netmap_adapter **na,

 BDG_WLOCK(b);
 vpna->bdg_port = cand;
-ND("NIC %p to bridge port %d", vpna, cand);
+nm_prdis("NIC %p to bridge port %d", vpna, cand);
 /* bind the port to the bridge (virtual ports are not active) */
 b->bdg_ports[cand] = vpna;
 vpna->na_bdg = b;
@@ -526,9 +526,9 @@ netmap_get_bdg_na(struct nmreq_header *hdr, struct netmap_adapter **na,
 hostna->bdg_port = cand2;
 hostna->na_bdg = b;
 b->bdg_active_ports++;
-ND("host %p to bridge port %d", hostna, cand2);
+nm_prdis("host %p to bridge port %d", hostna, cand2);
 }
-ND("if %s refs %d", ifname, vpna->up.na_refcount);
+nm_prdis("if %s refs %d", ifname, vpna->up.na_refcount);
 BDG_WUNLOCK(b);
 *na = &vpna->up;
 netmap_adapter_get(*na);
@@ -920,8 +920,6 @@ netmap_vp_reg(struct netmap_adapter *na, int onoff)
 {
 struct netmap_vp_adapter *vpna =
 (struct netmap_vp_adapter*)na;
-enum txrx t;
-int i;

 /* persistent ports may be put in netmap mode
 * before being attached to a bridge
@@ -929,14 +927,7 @@ netmap_vp_reg(struct netmap_adapter *na, int onoff)
 if (vpna->na_bdg)
 BDG_WLOCK(vpna->na_bdg);
 if (onoff) {
-for_rx_tx(t) {
-for (i = 0; i < netmap_real_rings(na, t); i++) {
-struct netmap_kring *kring = NMR(na, t)[i];
-
-if (nm_kring_pending_on(kring))
-kring->nr_mode = NKR_NETMAP_ON;
-}
-}
+netmap_krings_mode_commit(na, onoff);
 if (na->active_fds == 0)
 na->na_flags |= NAF_NETMAP_ON;
 /* XXX on FreeBSD, persistent VALE ports should also
@@ -945,14 +936,7 @@ netmap_vp_reg(struct netmap_adapter *na, int onoff)
 } else {
 if (na->active_fds == 0)
 na->na_flags &= ~NAF_NETMAP_ON;
-for_rx_tx(t) {
-for (i = 0; i < netmap_real_rings(na, t); i++) {
-struct netmap_kring *kring = NMR(na, t)[i];
-
-if (nm_kring_pending_off(kring))
-kring->nr_mode = NKR_NETMAP_OFF;
-}
-}
+netmap_krings_mode_commit(na, onoff);
 }
 if (vpna->na_bdg)
 BDG_WUNLOCK(vpna->na_bdg);
@@ -1077,7 +1061,7 @@ netmap_bwrap_dtor(struct netmap_adapter *na)
 (bh ? bna->host.bdg_port : -1));
 }

-ND("na %p", na);
+nm_prdis("na %p", na);
 na->ifp = NULL;
 bna->host.up.ifp = NULL;
 hwna->na_vp = bna->saved_na_vp;
@@ -1182,7 +1166,7 @@ netmap_bwrap_reg(struct netmap_adapter *na, int onoff)
 int error, i;
 enum txrx t;

-ND("%s %s", na->name, onoff ? "on" : "off");
+nm_prdis("%s %s", na->name, onoff ? "on" : "off");

 if (onoff) {
 /* netmap_do_regif has been called on the bwrap na.
@@ -1387,7 +1371,7 @@ netmap_bwrap_krings_delete_common(struct netmap_adapter *na)
 enum txrx t;
 int i;

-ND("%s", na->name);
+nm_prdis("%s", na->name);

 /* decrement the usage counter for all the hwna krings */
 for_rx_tx(t) {
@@ -1414,7 +1398,7 @@ netmap_bwrap_notify(struct netmap_kring *kring, int flags)
 struct netmap_kring *hw_kring;
 int error;

-ND("%s: na %s hwna %s",
+nm_prdis("%s: na %s hwna %s",
 (kring ? kring->name : "NULL!"),
 (na ? na->name : "NULL!"),
 (hwna ? hwna->name : "NULL!"));
@@ -1426,7 +1410,7 @@ netmap_bwrap_notify(struct netmap_kring *kring, int flags)

 /* first step: simulate a user wakeup on the rx ring */
 netmap_vp_rxsync(kring, flags);
-ND("%s[%d] PRE rx(c%3d t%3d l%3d) ring(h%3d c%3d t%3d) tx(c%3d ht%3d t%3d)",
+nm_prdis("%s[%d] PRE rx(c%3d t%3d l%3d) ring(h%3d c%3d t%3d) tx(c%3d ht%3d t%3d)",
 na->name, ring_n,
 kring->nr_hwcur, kring->nr_hwtail, kring->nkr_hwlease,
 kring->rhead, kring->rcur, kring->rtail,
@@ -1445,7 +1429,7 @@ netmap_bwrap_notify(struct netmap_kring *kring, int flags)

 /* fourth step: the user goes to sleep again, causing another rxsync */
 netmap_vp_rxsync(kring, flags);
-ND("%s[%d] PST rx(c%3d t%3d l%3d) ring(h%3d c%3d t%3d) tx(c%3d ht%3d t%3d)",
+nm_prdis("%s[%d] PST rx(c%3d t%3d l%3d) ring(h%3d c%3d t%3d) tx(c%3d ht%3d t%3d)",
 na->name, ring_n,
 kring->nr_hwcur, kring->nr_hwtail, kring->nkr_hwlease,
 kring->rhead, kring->rcur, kring->rtail,
@@ -1595,7 +1579,7 @@ netmap_bwrap_attach_common(struct netmap_adapter *na,
 if (hwna->na_flags & NAF_MOREFRAG)
 na->na_flags |= NAF_MOREFRAG;

-ND("%s<->%s txr %d txd %d rxr %d rxd %d",
+nm_prdis("%s<->%s txr %d txd %d rxr %d rxd %d",
 na->name, ifp->if_xname,
 na->num_tx_rings, na->num_tx_desc,
 na->num_rx_rings, na->num_rx_desc);
@@ -1350,8 +1350,6 @@ nm_os_kctx_destroy(struct nm_kctx *nmk)
 void
 nm_os_selwakeup(struct nm_selinfo *si)
 {
-if (netmap_verbose)
-nm_prinf("on knote %p", &si->si.si_note);
 selwakeuppri(&si->si, PI_NET);
 taskqueue_enqueue(si->ntfytq, &si->ntfytask);
 }
@@ -237,18 +237,7 @@ generic_netmap_unregister(struct netmap_adapter *na)
 nm_os_catch_tx(gna, 0);
 }

-for_each_rx_kring_h(r, kring, na) {
-if (nm_kring_pending_off(kring)) {
-nm_prinf("Emulated adapter: ring '%s' deactivated", kring->name);
-kring->nr_mode = NKR_NETMAP_OFF;
-}
-}
-for_each_tx_kring_h(r, kring, na) {
-if (nm_kring_pending_off(kring)) {
-kring->nr_mode = NKR_NETMAP_OFF;
-nm_prinf("Emulated adapter: ring '%s' deactivated", kring->name);
-}
-}
+netmap_krings_mode_commit(na, /*onoff=*/0);

 for_each_rx_kring(r, kring, na) {
 /* Free the mbufs still pending in the RX queues,
@@ -371,19 +360,7 @@ generic_netmap_register(struct netmap_adapter *na, int enable)
 }
 }

-for_each_rx_kring_h(r, kring, na) {
-if (nm_kring_pending_on(kring)) {
-nm_prinf("Emulated adapter: ring '%s' activated", kring->name);
-kring->nr_mode = NKR_NETMAP_ON;
-}
-
-}
-for_each_tx_kring_h(r, kring, na) {
-if (nm_kring_pending_on(kring)) {
-nm_prinf("Emulated adapter: ring '%s' activated", kring->name);
-kring->nr_mode = NKR_NETMAP_ON;
-}
-}
+netmap_krings_mode_commit(na, /*onoff=*/1);

 for_each_tx_kring(r, kring, na) {
 /* Initialize tx_pool and tx_event. */
@@ -271,7 +271,7 @@ typedef struct hrtimer{
 __LINE__, __FUNCTION__, ##__VA_ARGS__); \
 } while (0)

-/* Disabled printf (used to be ND). */
+/* Disabled printf (used to be nm_prdis). */
 #define nm_prdis(format, ...)

 /* Rate limited, lps indicates how many per second. */
@@ -286,11 +286,6 @@ typedef struct hrtimer{
 nm_prinf(format, ##__VA_ARGS__); \
 } while (0)

-/* Old macros. */
-#define ND nm_prdis
-#define D nm_prerr
-#define RD nm_prlim
-
 struct netmap_adapter;
 struct nm_bdg_fwd;
 struct nm_bridge;
@@ -1149,7 +1144,7 @@ nm_kr_rxspace(struct netmap_kring *k)
 int space = k->nr_hwtail - k->nr_hwcur;
 if (space < 0)
 space += k->nkr_num_slots;
-ND("preserving %d rx slots %d -> %d", space, k->nr_hwcur, k->nr_hwtail);
+nm_prdis("preserving %d rx slots %d -> %d", space, k->nr_hwcur, k->nr_hwtail);

 return space;
 }
@@ -1375,6 +1370,8 @@ nm_update_hostrings_mode(struct netmap_adapter *na)
 void nm_set_native_flags(struct netmap_adapter *);
 void nm_clear_native_flags(struct netmap_adapter *);

+void netmap_krings_mode_commit(struct netmap_adapter *na, int onoff);
+
 /*
 * nm_*sync_prologue() functions are used in ioctl/poll and ptnetmap
 * kthreads.
@@ -1402,7 +1399,7 @@ uint32_t nm_rxsync_prologue(struct netmap_kring *, struct netmap_ring *);
 #if 1 /* debug version */
 #define NM_CHECK_ADDR_LEN(_na, _a, _l) do { \
 if (_a == NETMAP_BUF_BASE(_na) || _l > NETMAP_BUF_SIZE(_na)) { \
-RD(5, "bad addr/len ring %d slot %d idx %d len %d", \
+nm_prlim(5, "bad addr/len ring %d slot %d idx %d len %d", \
 kring->ring_id, nm_i, slot->buf_idx, len); \
 if (_l > NETMAP_BUF_SIZE(_na)) \
 _l = NETMAP_BUF_SIZE(_na); \
@@ -1564,7 +1561,7 @@ void __netmap_adapter_get(struct netmap_adapter *na);
 #define netmap_adapter_get(na) \
 do { \
 struct netmap_adapter *__na = na; \
-D("getting %p:%s (%d)", __na, (__na)->name, (__na)->na_refcount); \
+nm_prinf("getting %p:%s (%d)", __na, (__na)->name, (__na)->na_refcount); \
 __netmap_adapter_get(__na); \
 } while (0)

@@ -1573,7 +1570,7 @@ int __netmap_adapter_put(struct netmap_adapter *na);
 #define netmap_adapter_put(na) \
 ({ \
 struct netmap_adapter *__na = na; \
-D("putting %p:%s (%d)", __na, (__na)->name, (__na)->na_refcount); \
+nm_prinf("putting %p:%s (%d)", __na, (__na)->name, (__na)->na_refcount); \
 __netmap_adapter_put(__na); \
 })

@@ -1735,7 +1732,7 @@ int nm_iommu_group_id(bus_dma_tag_t dev);
 addr, NETMAP_BUF_SIZE, DMA_TO_DEVICE);

 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
-D("dma mapping error");
+nm_prerr("dma mapping error");
 /* goto dma_error; See e1000_put_txbuf() */
 /* XXX reset */
 }
@@ -1994,6 +1991,12 @@ nm_si_user(struct netmap_priv_d *priv, enum txrx t)
 #ifdef WITH_PIPES
 int netmap_pipe_txsync(struct netmap_kring *txkring, int flags);
 int netmap_pipe_rxsync(struct netmap_kring *rxkring, int flags);
+int netmap_pipe_krings_create_both(struct netmap_adapter *na,
+struct netmap_adapter *ona);
+void netmap_pipe_krings_delete_both(struct netmap_adapter *na,
+struct netmap_adapter *ona);
+int netmap_pipe_reg_both(struct netmap_adapter *na,
+struct netmap_adapter *ona);
 #endif /* WITH_PIPES */

 #ifdef WITH_MONITOR
@@ -2328,7 +2331,7 @@ nm_os_get_mbuf(struct ifnet *ifp, int len)
 m->m_ext.ext_arg1 = m->m_ext.ext_buf; // XXX save
 m->m_ext.ext_free = (void *)void_mbuf_dtor;
 m->m_ext.ext_type = EXT_EXTREF;
-ND(5, "create m %p refcnt %d", m, MBUF_REFCNT(m));
+nm_prdis(5, "create m %p refcnt %d", m, MBUF_REFCNT(m));
 }
 return m;
 }
@@ -365,7 +365,14 @@ netmap_ioctl_legacy(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
 /* Request for the legacy control API. Convert it to a
 * NIOCCTRL request. */
 struct nmreq *nmr = (struct nmreq *) data;
-struct nmreq_header *hdr = nmreq_from_legacy(nmr, cmd);
+struct nmreq_header *hdr;
+
+if (nmr->nr_version < 11) {
+nm_prerr("Minimum supported API is 11 (requested %u)",
+nmr->nr_version);
+return EINVAL;
+}
+hdr = nmreq_from_legacy(nmr, cmd);
 if (hdr == NULL) { /* out of memory */
 return ENOMEM;
 }
@@ -390,14 +397,14 @@ netmap_ioctl_legacy(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
 #ifdef __FreeBSD__
 case FIONBIO:
 case FIOASYNC:
-ND("FIONBIO/FIOASYNC are no-ops");
+/* FIONBIO/FIOASYNC are no-ops. */
 break;

 case BIOCIMMEDIATE:
 case BIOCGHDRCMPLT:
 case BIOCSHDRCMPLT:
 case BIOCSSEESENT:
-D("ignore BIOCIMMEDIATE/BIOCSHDRCMPLT/BIOCSHDRCMPLT/BIOCSSEESENT");
+/* Ignore these commands. */
 break;

 default: /* allow device-specific ioctls */
@@ -979,7 +979,7 @@ netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
 continue;

 ofs = ofs + relofs;
-ND("%s: return offset %d (cluster %d) for pointer %p",
+nm_prdis("%s: return offset %d (cluster %d) for pointer %p",
 p->name, ofs, i, vaddr);
 return ofs;
 }
@@ -1043,7 +1043,7 @@ netmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_
 if (index)
 *index = i * 32 + j;
 }
-ND("%s allocator: allocated object @ [%d][%d]: vaddr %p",p->name, i, j, vaddr);
+nm_prdis("%s allocator: allocated object @ [%d][%d]: vaddr %p",p->name, i, j, vaddr);

 if (start)
 *start = i;
@@ -1143,7 +1143,7 @@ netmap_extra_alloc(struct netmap_adapter *na, uint32_t *head, uint32_t n)
 *head = cur; /* restore */
 break;
 }
-ND(5, "allocate buffer %d -> %d", *head, cur);
+nm_prdis(5, "allocate buffer %d -> %d", *head, cur);
 *p = cur; /* link to previous head */
 }

@@ -1160,7 +1160,7 @@ netmap_extra_free(struct netmap_adapter *na, uint32_t head)
 struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
 uint32_t i, cur, *buf;

-ND("freeing the extra list");
+nm_prdis("freeing the extra list");
 for (i = 0; head >=2 && head < p->objtotal; i++) {
 cur = head;
 buf = lut[head].vaddr;
@@ -1197,7 +1197,7 @@ netmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
 slot[i].ptr = 0;
 }

-ND("%s: allocated %d buffers, %d available, first at %d", p->name, n, p->objfree, pos);
+nm_prdis("%s: allocated %d buffers, %d available, first at %d", p->name, n, p->objfree, pos);
 return (0);

 cleanup:
@@ -1245,7 +1245,7 @@ netmap_free_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
 if (slot[i].buf_idx > 1)
 netmap_free_buf(nmd, slot[i].buf_idx);
 }
-ND("%s: released some buffers, available: %u",
+nm_prdis("%s: released some buffers, available: %u",
 p->name, p->objfree);
 }

@@ -1539,7 +1539,7 @@ netmap_mem_unmap(struct netmap_obj_pool *p, struct netmap_adapter *na)
 (void)lut;
 nm_prerr("unsupported on Windows");
 #else /* linux */
-ND("unmapping and freeing plut for %s", na->name);
+nm_prdis("unmapping and freeing plut for %s", na->name);
 if (lut->plut == NULL)
 return 0;
 for (i = 0; i < lim; i += p->_clustentries) {
@@ -1577,11 +1577,11 @@ netmap_mem_map(struct netmap_obj_pool *p, struct netmap_adapter *na)
 #else /* linux */

 if (lut->plut != NULL) {
-ND("plut already allocated for %s", na->name);
+nm_prdis("plut already allocated for %s", na->name);
 return 0;
 }

-ND("allocating physical lut for %s", na->name);
+nm_prdis("allocating physical lut for %s", na->name);
 lut->plut = nm_alloc_plut(lim);
 if (lut->plut == NULL) {
 nm_prerr("Failed to allocate physical lut for %s", na->name);
@@ -1775,7 +1775,7 @@ netmap_mem2_config(struct netmap_mem_d *nmd)
 if (!netmap_mem_params_changed(nmd->params))
 goto out;

-ND("reconfiguring");
+nm_prdis("reconfiguring");

 if (nmd->flags & NETMAP_MEM_FINALIZED) {
 /* reset previous allocation */
@@ -1870,10 +1870,10 @@ netmap_free_rings(struct netmap_adapter *na)
 if (netmap_debug & NM_DEBUG_MEM)
 nm_prinf("deleting ring %s", kring->name);
 if (!(kring->nr_kflags & NKR_FAKERING)) {
-ND("freeing bufs for %s", kring->name);
+nm_prdis("freeing bufs for %s", kring->name);
 netmap_free_bufs(na->nm_mem, ring->slot, kring->nkr_num_slots);
 } else {
-ND("NOT freeing bufs for %s", kring->name);
+nm_prdis("NOT freeing bufs for %s", kring->name);
 }
 netmap_ring_free(na->nm_mem, ring);
 kring->ring = NULL;
@@ -1918,7 +1918,7 @@ netmap_mem2_rings_create(struct netmap_adapter *na)
 nm_prerr("Cannot allocate %s_ring", nm_txrx2str(t));
 goto cleanup;
 }
-ND("txring at %p", ring);
+nm_prdis("txring at %p", ring);
 kring->ring = ring;
 *(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
 *(int64_t *)(uintptr_t)&ring->buf_ofs =
@@ -1932,9 +1932,9 @@ netmap_mem2_rings_create(struct netmap_adapter *na)
 ring->tail = kring->rtail;
 *(uint32_t *)(uintptr_t)&ring->nr_buf_size =
 netmap_mem_bufsize(na->nm_mem);
-ND("%s h %d c %d t %d", kring->name,
+nm_prdis("%s h %d c %d t %d", kring->name,
 ring->head, ring->cur, ring->tail);
-ND("initializing slots for %s_ring", nm_txrx2str(t));
+nm_prdis("initializing slots for %s_ring", nm_txrx2str(t));
 if (!(kring->nr_kflags & NKR_FAKERING)) {
 /* this is a real ring */
 if (netmap_debug & NM_DEBUG_MEM)
@@ -2306,19 +2306,19 @@ netmap_mem_ext_create(uint64_t usrptr, struct nmreq_pools_info *pi, int *perror)
 #if !defined(linux) && !defined(_WIN32)
 p->lut[j].paddr = vtophys(p->lut[j].vaddr);
 #endif
-ND("%s %d at %p", p->name, j, p->lut[j].vaddr);
+nm_prdis("%s %d at %p", p->name, j, p->lut[j].vaddr);
 noff = off + p->_objsize;
 if (noff < PAGE_SIZE) {
 off = noff;
 continue;
 }
-ND("too big, recomputing offset...");
+nm_prdis("too big, recomputing offset...");
 while (noff >= PAGE_SIZE) {
 char *old_clust = clust;
 noff -= PAGE_SIZE;
 clust = nm_os_extmem_nextpage(nme->os);
 nr_pages--;
-ND("noff %zu page %p nr_pages %d", noff,
+nm_prdis("noff %zu page %p nr_pages %d", noff,
 page_to_virt(*pages), nr_pages);
 if (noff > 0 && !nm_isset(p->invalid_bitmap, j) &&
 (nr_pages == 0 ||
@@ -2328,7 +2328,7 @@ netmap_mem_ext_create(uint64_t usrptr, struct nmreq_pools_info *pi, int *perror)
 * drop this object
 * */
 p->invalid_bitmap[ (j>>5) ] |= 1U << (j & 31U);
-ND("non contiguous at off %zu, drop", noff);
+nm_prdis("non contiguous at off %zu, drop", noff);
 }
 if (nr_pages == 0)
 break;
@@ -2338,7 +2338,7 @@ netmap_mem_ext_create(uint64_t usrptr, struct nmreq_pools_info *pi, int *perror)
 p->objtotal = j;
 p->numclusters = p->objtotal;
 p->memtotal = j * p->_objsize;
-ND("%d memtotal %u", j, p->memtotal);
+nm_prdis("%d memtotal %u", j, p->memtotal);
 }

 netmap_mem_ext_register(nme);
@@ -2442,7 +2442,7 @@ netmap_mem_pt_guest_ifp_del(struct netmap_mem_d *nmd, struct ifnet *ifp)
 } else {
 ptnmd->pt_ifs = curr->next;
 }
-D("removed (ifp=%p,nifp_offset=%u)",
+nm_prinf("removed (ifp=%p,nifp_offset=%u)",
 curr->ifp, curr->nifp_offset);
 nm_os_free(curr);
 ret = 0;
@@ -2498,7 +2498,7 @@ netmap_mem_pt_guest_ofstophys(struct netmap_mem_d *nmd, vm_ooffset_t off)
 vm_paddr_t paddr;
 /* if the offset is valid, just return csb->base_addr + off */
 paddr = (vm_paddr_t)(ptnmd->nm_paddr + off);
-ND("off %lx padr %lx", off, (unsigned long)paddr);
+nm_prdis("off %lx padr %lx", off, (unsigned long)paddr);
 return paddr;
 }

@@ -2528,7 +2528,7 @@ netmap_mem_pt_guest_finalize(struct netmap_mem_d *nmd)
 goto out;

 if (ptnmd->ptn_dev == NULL) {
-D("ptnetmap memdev not attached");
+nm_prerr("ptnetmap memdev not attached");
 error = ENOMEM;
 goto out;
 }
@@ -2547,10 +2547,10 @@ netmap_mem_pt_guest_finalize(struct netmap_mem_d *nmd)

 /* allocate the lut */
 if (ptnmd->buf_lut.lut == NULL) {
-D("allocating lut");
+nm_prinf("allocating lut");
 ptnmd->buf_lut.lut = nm_alloc_lut(nbuffers);
 if (ptnmd->buf_lut.lut == NULL) {
-D("lut allocation failed");
+nm_prerr("lut allocation failed");
 return ENOMEM;
 }
 }
@@ -2615,11 +2615,11 @@ netmap_mem_pt_guest_delete(struct netmap_mem_d *nmd)
 if (nmd == NULL)
 return;
 if (netmap_verbose)
-D("deleting %p", nmd);
+nm_prinf("deleting %p", nmd);
 if (nmd->active > 0)
-D("bug: deleting mem allocator with active=%d!", nmd->active);
+nm_prerr("bug: deleting mem allocator with active=%d!", nmd->active);
 if (netmap_verbose)
-D("done deleting %p", nmd);
+nm_prinf("done deleting %p", nmd);
 NMA_LOCK_DESTROY(nmd);
 nm_os_free(nmd);
 }
@@ -2633,7 +2633,7 @@ netmap_mem_pt_guest_if_new(struct netmap_adapter *na, struct netmap_priv_d *priv

 ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp);
 if (ptif == NULL) {
-D("Error: interface %p is not in passthrough", na->ifp);
+nm_prerr("interface %s is not in passthrough", na->name);
 goto out;
 }

@@ -2650,7 +2650,7 @@ netmap_mem_pt_guest_if_delete(struct netmap_adapter *na, struct netmap_if *nifp)

 ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp);
 if (ptif == NULL) {
-D("Error: interface %p is not in passthrough", na->ifp);
+nm_prerr("interface %s is not in passthrough", na->name);
 }
 }

@@ -2664,7 +2664,7 @@ netmap_mem_pt_guest_rings_create(struct netmap_adapter *na)

 ptif = netmap_mem_pt_guest_ifp_lookup(na->nm_mem, na->ifp);
 if (ptif == NULL) {
-D("Error: interface %p is not in passthrough", na->ifp);
+nm_prerr("interface %s is not in passthrough", na->name);
 goto out;
 }
@@ -139,7 +139,7 @@ nm_is_zmon(struct netmap_adapter *na)
 static int
 netmap_monitor_txsync(struct netmap_kring *kring, int flags)
 {
-RD(1, "%s %x", kring->name, flags);
+nm_prlim(1, "%s %x", kring->name, flags);
 return EIO;
 }

@@ -158,7 +158,7 @@ netmap_monitor_rxsync(struct netmap_kring *kring, int flags)
 /* parent left netmap mode */
 return EIO;
 }
-ND("%s %x", kring->name, flags);
+nm_prdis("%s %x", kring->name, flags);
 kring->nr_hwcur = kring->rhead;
 mb();
 return 0;
@@ -230,8 +230,8 @@ nm_monitor_dealloc(struct netmap_kring *kring)
 {
 if (kring->monitors) {
 if (kring->n_monitors > 0) {
-D("freeing not empty monitor array for %s (%d dangling monitors)!", kring->name,
-kring->n_monitors);
+nm_prerr("freeing not empty monitor array for %s (%d dangling monitors)!",
+kring->name, kring->n_monitors);
 }
 nm_os_free(kring->monitors);
 kring->monitors = NULL;
@@ -270,7 +270,7 @@ nm_monitor_dummycb(struct netmap_kring *kring, int flags)
 static void
 nm_monitor_intercept_callbacks(struct netmap_kring *kring)
 {
-ND("intercept callbacks on %s", kring->name);
+nm_prdis("intercept callbacks on %s", kring->name);
 kring->mon_sync = kring->nm_sync != NULL ?
 kring->nm_sync : nm_monitor_dummycb;
 kring->mon_notify = kring->nm_notify;
@@ -286,7 +286,7 @@ nm_monitor_intercept_callbacks(struct netmap_kring *kring)
 static void
 nm_monitor_restore_callbacks(struct netmap_kring *kring)
 {
-ND("restoring callbacks on %s", kring->name);
+nm_prdis("restoring callbacks on %s", kring->name);
 kring->nm_sync = kring->mon_sync;
 kring->mon_sync = NULL;
 if (kring->tx == NR_RX) {
@@ -333,7 +333,7 @@ netmap_monitor_add(struct netmap_kring *mkring, struct netmap_kring *kring, int

 if (nm_monitor_none(ikring)) {
 /* this is the first monitor, intercept the callbacks */
-ND("%s: intercept callbacks on %s", mkring->name, ikring->name);
+nm_prdis("%s: intercept callbacks on %s", mkring->name, ikring->name);
 nm_monitor_intercept_callbacks(ikring);
 }

@@ -513,11 +513,11 @@ netmap_monitor_reg_common(struct netmap_adapter *na, int onoff, int zmon)
 int i;
 enum txrx t, s;

-ND("%p: onoff %d", na, onoff);
+nm_prdis("%p: onoff %d", na, onoff);
 if (onoff) {
 if (pna == NULL) {
 /* parent left netmap mode, fatal */
-D("%s: internal error", na->name);
+nm_prerr("%s: parent left netmap mode", na->name);
 return ENXIO;
 }
 for_rx_tx(t) {
@@ -592,7 +592,7 @@ netmap_zmon_parent_sync(struct netmap_kring *kring, int flags, enum txrx tx)
 mlim; // = mkring->nkr_num_slots - 1;

 if (mkring == NULL) {
-RD(5, "NULL monitor on %s", kring->name);
+nm_prlim(5, "NULL monitor on %s", kring->name);
 return 0;
 }
 mring = mkring->ring;
@@ -653,7 +653,7 @@ netmap_zmon_parent_sync(struct netmap_kring *kring, int flags, enum txrx tx)
 tmp = ms->buf_idx;
 ms->buf_idx = s->buf_idx;
 s->buf_idx = tmp;
-ND(5, "beg %d buf_idx %d", beg, tmp);
+nm_prdis(5, "beg %d buf_idx %d", beg, tmp);

 tmp = ms->len;
 ms->len = s->len;
@@ -770,7 +770,7 @@ netmap_monitor_parent_sync(struct netmap_kring *kring, u_int first_new, int new_
 *dst = NMB(mkring->na, ms);

 if (unlikely(copy_len > max_len)) {
-RD(5, "%s->%s: truncating %d to %d", kring->name,
+nm_prlim(5, "%s->%s: truncating %d to %d", kring->name,
 mkring->name, copy_len, max_len);
 copy_len = max_len;
 }
@@ -849,7 +849,7 @@ static int
 netmap_monitor_parent_notify(struct netmap_kring *kring, int flags)
 {
 int (*notify)(struct netmap_kring*, int);
-ND(5, "%s %x", kring->name, flags);
+nm_prdis(5, "%s %x", kring->name, flags);
 /* ?xsync callbacks have tryget called by their callers
 * (NIOCREGIF and poll()), but here we have to call it
 * by ourself
@@ -909,12 +909,12 @@ netmap_get_monitor_na(struct nmreq_header *hdr, struct netmap_adapter **na,
 req->nr_flags |= (NR_MONITOR_TX | NR_MONITOR_RX);
 }
 if ((req->nr_flags & (NR_MONITOR_TX | NR_MONITOR_RX)) == 0) {
-ND("not a monitor");
+nm_prdis("not a monitor");
 return 0;
 }
 /* this is a request for a monitor adapter */

-ND("flags %lx", req->nr_flags);
+nm_prdis("flags %lx", req->nr_flags);

 /* First, try to find the adapter that we want to monitor.
 * We use the same req, after we have turned off the monitor flags.
@@ -927,24 +927,23 @@ netmap_get_monitor_na(struct nmreq_header *hdr, struct netmap_adapter **na,
 error = netmap_get_na(hdr, &pna, &ifp, nmd, create);
 hdr->nr_body = (uintptr_t)req;
 if (error) {
-D("parent lookup failed: %d", error);
+nm_prerr("parent lookup failed: %d", error);
 return error;
 }
-ND("found parent: %s", pna->name);
+nm_prdis("found parent: %s", pna->name);

 if (!nm_netmap_on(pna)) {
 /* parent not in netmap mode */
 /* XXX we can wait for the parent to enter netmap mode,
 * by intercepting its nm_register callback (2014-03-16)
 */
-D("%s not in netmap mode", pna->name);
+nm_prerr("%s not in netmap mode", pna->name);
 error = EINVAL;
 goto put_out;
 }

 mna = nm_os_malloc(sizeof(*mna));
 if (mna == NULL) {
-D("memory error");
 error = ENOMEM;
 goto put_out;
 }
@@ -954,7 +953,7 @@ netmap_get_monitor_na(struct nmreq_header *hdr, struct netmap_adapter **na,
 error = netmap_interp_ringid(&mna->priv, req->nr_mode, req->nr_ringid,
 req->nr_flags);
 if (error) {
-D("ringid error");
+nm_prerr("ringid error");
 goto free_out;
 }
 snprintf(mna->up.name, sizeof(mna->up.name), "%s/%s%s%s#%lu", pna->name,
@@ -1013,7 +1012,7 @@ netmap_get_monitor_na(struct nmreq_header *hdr, struct netmap_adapter **na,

 error = netmap_attach_common(&mna->up);
 if (error) {
-D("attach_common error");
+nm_prerr("netmap_attach_common failed");
 goto mem_put_out;
 }

@@ -1024,7 +1023,7 @@ netmap_get_monitor_na(struct nmreq_header *hdr, struct netmap_adapter **na,
 netmap_adapter_get(*na);

 /* keep the reference to the parent */
-ND("monitor ok");
+nm_prdis("monitor ok");

 /* drop the reference to the ifp, if any */
 if (ifp)
@@ -74,15 +74,7 @@
 #ifdef WITH_NMNULL

 static int
-netmap_null_txsync(struct netmap_kring *kring, int flags)
-{
-(void)kring;
-(void)flags;
-return 0;
-}
-
-static int
-netmap_null_rxsync(struct netmap_kring *kring, int flags)
+netmap_null_sync(struct netmap_kring *kring, int flags)
 {
 (void)kring;
 (void)flags;
@@ -95,12 +87,6 @@ netmap_null_krings_create(struct netmap_adapter *na)
 return netmap_krings_create(na, 0);
 }

-static void
-netmap_null_krings_delete(struct netmap_adapter *na)
-{
-netmap_krings_delete(na);
-}
-
 static int
 netmap_null_reg(struct netmap_adapter *na, int onoff)
 {
@@ -153,11 +139,11 @@ netmap_get_null_na(struct nmreq_header *hdr, struct netmap_adapter **na,
 }
 snprintf(nna->up.name, sizeof(nna->up.name), "null:%s", hdr->nr_name);

-nna->up.nm_txsync = netmap_null_txsync;
-nna->up.nm_rxsync = netmap_null_rxsync;
+nna->up.nm_txsync = netmap_null_sync;
+nna->up.nm_rxsync = netmap_null_sync;
 nna->up.nm_register = netmap_null_reg;
 nna->up.nm_krings_create = netmap_null_krings_create;
-nna->up.nm_krings_delete = netmap_null_krings_delete;
+nna->up.nm_krings_delete = netmap_krings_delete;
 nna->up.nm_bdg_attach = netmap_null_bdg_attach;
 nna->up.nm_mem = netmap_mem_get(nmd);
@@ -82,16 +82,16 @@ gso_fix_segment(uint8_t *pkt, size_t len, u_int ipv4, u_int iphlen, u_int tcp,
    if (ipv4) {
        /* Set the IPv4 "Total Length" field. */
        iph->tot_len = htobe16(len);
        ND("ip total length %u", be16toh(ip->tot_len));
        nm_prdis("ip total length %u", be16toh(ip->tot_len));

        /* Set the IPv4 "Identification" field. */
        iph->id = htobe16(be16toh(iph->id) + idx);
        ND("ip identification %u", be16toh(iph->id));
        nm_prdis("ip identification %u", be16toh(iph->id));

        /* Compute and insert the IPv4 header checksum. */
        iph->check = 0;
        iph->check = nm_os_csum_ipv4(iph);
        ND("IP csum %x", be16toh(iph->check));
        nm_prdis("IP csum %x", be16toh(iph->check));
    } else {
        /* Set the IPv6 "Payload Len" field. */
        ip6h->payload_len = htobe16(len-iphlen);
@@ -102,13 +102,13 @@ gso_fix_segment(uint8_t *pkt, size_t len, u_int ipv4, u_int iphlen, u_int tcp,

        /* Set the TCP sequence number. */
        tcph->seq = htobe32(be32toh(tcph->seq) + segmented_bytes);
        ND("tcp seq %u", be32toh(tcph->seq));
        nm_prdis("tcp seq %u", be32toh(tcph->seq));

        /* Zero the PSH and FIN TCP flags if this is not the last
           segment. */
        if (!last_segment)
            tcph->flags &= ~(0x8 | 0x1);
        ND("last_segment %u", last_segment);
        nm_prdis("last_segment %u", last_segment);

        check = &tcph->check;
        check_data = (uint8_t *)tcph;
@@ -129,7 +129,7 @@ gso_fix_segment(uint8_t *pkt, size_t len, u_int ipv4, u_int iphlen, u_int tcp,
    else
        nm_os_csum_tcpudp_ipv6(ip6h, check_data, len-iphlen, check);

    ND("TCP/UDP csum %x", be16toh(*check));
    nm_prdis("TCP/UDP csum %x", be16toh(*check));
}

static inline int
@@ -170,7 +170,7 @@ bdg_mismatch_datapath(struct netmap_vp_adapter *na,
    u_int dst_slots = 0;

    if (unlikely(ft_p == ft_end)) {
        RD(1, "No source slots to process");
        nm_prlim(1, "No source slots to process");
        return;
    }

@@ -189,11 +189,11 @@ bdg_mismatch_datapath(struct netmap_vp_adapter *na,
        /* Initial sanity check on the source virtio-net header. If
         * something seems wrong, just drop the packet. */
        if (src_len < na->up.virt_hdr_len) {
            RD(1, "Short src vnet header, dropping");
            nm_prlim(1, "Short src vnet header, dropping");
            return;
        }
        if (unlikely(vnet_hdr_is_bad(vh))) {
            RD(1, "Bad src vnet header, dropping");
            nm_prlim(1, "Bad src vnet header, dropping");
            return;
        }
    }
@@ -266,7 +266,7 @@ bdg_mismatch_datapath(struct netmap_vp_adapter *na,
            if (dst_slots >= *howmany) {
                /* We still have work to do, but we've run out of
                 * dst slots, so we have to drop the packet. */
                ND(1, "Not enough slots, dropping GSO packet");
                nm_prdis(1, "Not enough slots, dropping GSO packet");
                return;
            }

@@ -281,7 +281,7 @@ bdg_mismatch_datapath(struct netmap_vp_adapter *na,
         * encapsulation. */
        for (;;) {
            if (src_len < ethhlen) {
                RD(1, "Short GSO fragment [eth], dropping");
                nm_prlim(1, "Short GSO fragment [eth], dropping");
                return;
            }
            ethertype = be16toh(*((uint16_t *)
@@ -297,7 +297,7 @@ bdg_mismatch_datapath(struct netmap_vp_adapter *na,
                    (gso_hdr + ethhlen);

                if (src_len < ethhlen + 20) {
                    RD(1, "Short GSO fragment "
                    nm_prlim(1, "Short GSO fragment "
                        "[IPv4], dropping");
                    return;
                }
@@ -310,14 +310,14 @@ bdg_mismatch_datapath(struct netmap_vp_adapter *na,
                iphlen = 40;
                break;
            default:
                RD(1, "Unsupported ethertype, "
                nm_prlim(1, "Unsupported ethertype, "
                    "dropping GSO packet");
                return;
            }
            ND(3, "type=%04x", ethertype);
            nm_prdis(3, "type=%04x", ethertype);

            if (src_len < ethhlen + iphlen) {
                RD(1, "Short GSO fragment [IP], dropping");
                nm_prlim(1, "Short GSO fragment [IP], dropping");
                return;
            }

@@ -329,7 +329,7 @@ bdg_mismatch_datapath(struct netmap_vp_adapter *na,
                    (gso_hdr + ethhlen + iphlen);

                if (src_len < ethhlen + iphlen + 20) {
                    RD(1, "Short GSO fragment "
                    nm_prlim(1, "Short GSO fragment "
                        "[TCP], dropping");
                    return;
                }
@@ -340,11 +340,11 @@ bdg_mismatch_datapath(struct netmap_vp_adapter *na,
        }

        if (src_len < gso_hdr_len) {
            RD(1, "Short GSO fragment [TCP/UDP], dropping");
            nm_prlim(1, "Short GSO fragment [TCP/UDP], dropping");
            return;
        }

        ND(3, "gso_hdr_len %u gso_mtu %d", gso_hdr_len,
        nm_prdis(3, "gso_hdr_len %u gso_mtu %d", gso_hdr_len,
            dst_na->mfs);

        /* Advance source pointers. */
@@ -386,7 +386,7 @@ bdg_mismatch_datapath(struct netmap_vp_adapter *na,
                    gso_idx, segmented_bytes,
                    src_len == 0 && ft_p + 1 == ft_end);

                ND("frame %u completed with %d bytes", gso_idx, (int)gso_bytes);
                nm_prdis("frame %u completed with %d bytes", gso_idx, (int)gso_bytes);
                dst_slot->len = gso_bytes;
                dst_slot->flags = 0;
                dst_slots++;
@@ -410,7 +410,7 @@ bdg_mismatch_datapath(struct netmap_vp_adapter *na,
                src_len = ft_p->ft_len;
            }
        }
        ND(3, "%d bytes segmented", segmented_bytes);
        nm_prdis(3, "%d bytes segmented", segmented_bytes);

    } else {
        /* Address of a checksum field into a destination slot. */
@@ -423,7 +423,7 @@ bdg_mismatch_datapath(struct netmap_vp_adapter *na,
        /* Init 'check' if necessary. */
        if (vh && (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) {
            if (unlikely(vh->csum_offset + vh->csum_start > src_len))
                D("invalid checksum request");
                nm_prerr("invalid checksum request");
            else
                check = (uint16_t *)(dst + vh->csum_start +
                        vh->csum_offset);
@@ -468,7 +468,7 @@ bdg_mismatch_datapath(struct netmap_vp_adapter *na,
        if (check && vh && (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) {
            *check = nm_os_csum_fold(csum);
        }
        ND(3, "using %u dst_slots", dst_slots);
        nm_prdis(3, "using %u dst_slots", dst_slots);

        /* A second pass on the destination slots to set the slot flags,
         * using the right number of destination slots.
@@ -485,7 +485,7 @@ bdg_mismatch_datapath(struct netmap_vp_adapter *na,
    /* Update howmany and j. This is to commit the use of
     * those slots in the destination ring. */
    if (unlikely(dst_slots > *howmany)) {
        D("Slot allocation error: This is a bug");
        nm_prerr("bug: slot allocation error");
    }
    *j = j_cur;
    *howmany -= dst_slots;
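
Both branches of bdg_mismatch_datapath() end up feeding a 32-bit accumulator into nm_os_csum_fold() to produce the 16-bit one's-complement checksum that IPv4, TCP and UDP expect. A generic, self-contained sketch of that fold (an assumption about its contract, not the platform implementation netmap actually uses):

    #include <stdint.h>

    static uint16_t
    csum_fold(uint32_t sum)
    {
        /* fold the carries back into the low 16 bits until none
         * remain, then return the one's complement */
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
    }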
@@ -118,8 +118,8 @@ netmap_pipe_dealloc(struct netmap_adapter *na)
{
    if (na->na_pipes) {
        if (na->na_next_pipe > 0) {
            D("freeing not empty pipe array for %s (%d dangling pipes)!", na->name,
                na->na_next_pipe);
            nm_prerr("freeing not empty pipe array for %s (%d dangling pipes)!",
                na->name, na->na_next_pipe);
        }
        nm_os_free(na->na_pipes);
        na->na_pipes = NULL;
@@ -190,8 +190,8 @@ netmap_pipe_txsync(struct netmap_kring *txkring, int flags)
    int complete; /* did we see a complete packet ? */
    struct netmap_ring *txring = txkring->ring, *rxring = rxkring->ring;

    ND("%p: %s %x -> %s", txkring, txkring->name, flags, rxkring->name);
    ND(20, "TX before: hwcur %d hwtail %d cur %d head %d tail %d",
    nm_prdis("%p: %s %x -> %s", txkring, txkring->name, flags, rxkring->name);
    nm_prdis(20, "TX before: hwcur %d hwtail %d cur %d head %d tail %d",
        txkring->nr_hwcur, txkring->nr_hwtail,
        txkring->rcur, txkring->rhead, txkring->rtail);

@@ -221,7 +221,7 @@ netmap_pipe_txsync(struct netmap_kring *txkring, int flags)

    txkring->nr_hwcur = k;

    ND(20, "TX after : hwcur %d hwtail %d cur %d head %d tail %d k %d",
    nm_prdis(20, "TX after : hwcur %d hwtail %d cur %d head %d tail %d k %d",
        txkring->nr_hwcur, txkring->nr_hwtail,
        txkring->rcur, txkring->rhead, txkring->rtail, k);

@@ -242,8 +242,8 @@ netmap_pipe_rxsync(struct netmap_kring *rxkring, int flags)
    int m; /* slots to release */
    struct netmap_ring *txring = txkring->ring, *rxring = rxkring->ring;

    ND("%p: %s %x -> %s", txkring, txkring->name, flags, rxkring->name);
    ND(20, "RX before: hwcur %d hwtail %d cur %d head %d tail %d",
    nm_prdis("%p: %s %x -> %s", txkring, txkring->name, flags, rxkring->name);
    nm_prdis(20, "RX before: hwcur %d hwtail %d cur %d head %d tail %d",
        rxkring->nr_hwcur, rxkring->nr_hwtail,
        rxkring->rcur, rxkring->rhead, rxkring->rtail);

@@ -274,7 +274,7 @@ netmap_pipe_rxsync(struct netmap_kring *rxkring, int flags)
    txkring->pipe_tail = nm_prev(k, lim);
    rxkring->nr_hwcur = k;

    ND(20, "RX after : hwcur %d hwtail %d cur %d head %d tail %d k %d",
    nm_prdis(20, "RX after : hwcur %d hwtail %d cur %d head %d tail %d k %d",
        rxkring->nr_hwcur, rxkring->nr_hwtail,
        rxkring->rcur, rxkring->rhead, rxkring->rtail, k);

@@ -312,6 +312,47 @@ netmap_pipe_rxsync(struct netmap_kring *rxkring, int flags)
 */


int netmap_pipe_krings_create_both(struct netmap_adapter *na,
        struct netmap_adapter *ona)
{
    enum txrx t;
    int error;
    int i;

    /* case 1) below */
    nm_prdis("%p: case 1, create both ends", na);
    error = netmap_krings_create(na, 0);
    if (error)
        return error;

    /* create the krings of the other end */
    error = netmap_krings_create(ona, 0);
    if (error)
        goto del_krings1;

    /* cross link the krings and initialize the pipe_tails */
    for_rx_tx(t) {
        enum txrx r = nm_txrx_swap(t); /* swap NR_TX <-> NR_RX */
        for (i = 0; i < nma_get_nrings(na, t); i++) {
            struct netmap_kring *k1 = NMR(na, t)[i],
                *k2 = NMR(ona, r)[i];
            k1->pipe = k2;
            k2->pipe = k1;
            /* mark all peer-adapter rings as fake */
            k2->nr_kflags |= NKR_FAKERING;
            /* init tails */
            k1->pipe_tail = k1->nr_hwtail;
            k2->pipe_tail = k2->nr_hwtail;
        }
    }

    return 0;

del_krings1:
    netmap_krings_delete(na);
    return error;
}
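
Because netmap_pipe_krings_create_both() is now non-static and takes both adapters explicitly, a driver that models two permanently connected interfaces can call it from its own nm_krings_create callback instead of duplicating the cross-linking logic. A hypothetical sketch (pair_get_peer_na() is invented here purely for illustration):

    static int
    pair_netmap_krings_create(struct netmap_adapter *na)
    {
        /* invented helper: return the netmap adapter of the
         * other half of the interface pair */
        struct netmap_adapter *peer = pair_get_peer_na(na);

        /* create and cross-link the krings of both ends */
        return netmap_pipe_krings_create_both(na, peer);
    }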

/* netmap_pipe_krings_create.
 *
 * There are two cases:
@@ -336,46 +377,83 @@ netmap_pipe_krings_create(struct netmap_adapter *na)
    struct netmap_pipe_adapter *pna =
        (struct netmap_pipe_adapter *)na;
    struct netmap_adapter *ona = &pna->peer->up;
    int error = 0;

    if (pna->peer_ref)
        return netmap_pipe_krings_create_both(na, ona);

    return 0;
}

int
netmap_pipe_reg_both(struct netmap_adapter *na, struct netmap_adapter *ona)
{
    int i, error = 0;
    enum txrx t;

    if (pna->peer_ref) {
        int i;
    for_rx_tx(t) {
        for (i = 0; i < nma_get_nrings(na, t); i++) {
            struct netmap_kring *kring = NMR(na, t)[i];

        /* case 1) above */
        ND("%p: case 1, create both ends", na);
        error = netmap_krings_create(na, 0);
        if (error)
            goto err;

        /* create the krings of the other end */
        error = netmap_krings_create(ona, 0);
        if (error)
            goto del_krings1;

        /* cross link the krings and initialize the pipe_tails */
        for_rx_tx(t) {
            enum txrx r = nm_txrx_swap(t); /* swap NR_TX <-> NR_RX */
            for (i = 0; i < nma_get_nrings(na, t); i++) {
                struct netmap_kring *k1 = NMR(na, t)[i],
                    *k2 = NMR(ona, r)[i];
                k1->pipe = k2;
                k2->pipe = k1;
                /* mark all peer-adapter rings as fake */
                k2->nr_kflags |= NKR_FAKERING;
                /* init tails */
                k1->pipe_tail = k1->nr_hwtail;
                k2->pipe_tail = k2->nr_hwtail;
            if (nm_kring_pending_on(kring)) {
                /* mark the peer ring as needed */
                kring->pipe->nr_kflags |= NKR_NEEDRING;
            }
        }

    }
    return 0;

del_krings1:
    netmap_krings_delete(na);
err:
    return error;
    /* create all missing needed rings on the other end.
     * Either our end, or the other, has been marked as
     * fake, so the allocation will not be done twice.
     */
    error = netmap_mem_rings_create(ona);
    if (error)
        return error;

    /* In case of no error we put our rings in netmap mode */
    for_rx_tx(t) {
        for (i = 0; i < nma_get_nrings(na, t); i++) {
            struct netmap_kring *kring = NMR(na, t)[i];
            if (nm_kring_pending_on(kring)) {
                struct netmap_kring *sring, *dring;

                kring->nr_mode = NKR_NETMAP_ON;
                if ((kring->nr_kflags & NKR_FAKERING) &&
                        (kring->pipe->nr_kflags & NKR_FAKERING)) {
                    /* this is a re-open of a pipe
                     * end-point kept alive by the other end.
                     * We need to leave everything as it is
                     */
                    continue;
                }

                /* copy the buffers from the non-fake ring */
                if (kring->nr_kflags & NKR_FAKERING) {
                    sring = kring->pipe;
                    dring = kring;
                } else {
                    sring = kring;
                    dring = kring->pipe;
                }
                memcpy(dring->ring->slot,
                    sring->ring->slot,
                    sizeof(struct netmap_slot) *
                        sring->nkr_num_slots);
                /* mark both rings as fake and needed,
                 * so that buffers will not be
                 * deleted by the standard machinery
                 * (we will delete them by ourselves in
                 * netmap_pipe_krings_delete)
                 */
                sring->nr_kflags |=
                    (NKR_FAKERING | NKR_NEEDRING);
                dring->nr_kflags |=
                    (NKR_FAKERING | NKR_NEEDRING);
                kring->nr_mode = NKR_NETMAP_ON;
            }
        }
    }

    return 0;
}

/* netmap_pipe_reg.
@@ -417,110 +495,105 @@ netmap_pipe_reg(struct netmap_adapter *na, int onoff)
    struct netmap_pipe_adapter *pna =
        (struct netmap_pipe_adapter *)na;
    struct netmap_adapter *ona = &pna->peer->up;
    int i, error = 0;
    enum txrx t;
    int error = 0;

    ND("%p: onoff %d", na, onoff);
    nm_prdis("%p: onoff %d", na, onoff);
    if (onoff) {
        for_rx_tx(t) {
            for (i = 0; i < nma_get_nrings(na, t); i++) {
                struct netmap_kring *kring = NMR(na, t)[i];

                if (nm_kring_pending_on(kring)) {
                    /* mark the peer ring as needed */
                    kring->pipe->nr_kflags |= NKR_NEEDRING;
                }
            }
        }

        /* create all missing needed rings on the other end.
         * Either our end, or the other, has been marked as
         * fake, so the allocation will not be done twice.
         */
        error = netmap_mem_rings_create(ona);
        if (error)
        error = netmap_pipe_reg_both(na, ona);
        if (error) {
            return error;

        /* In case of no error we put our rings in netmap mode */
        for_rx_tx(t) {
            for (i = 0; i < nma_get_nrings(na, t); i++) {
                struct netmap_kring *kring = NMR(na, t)[i];
                if (nm_kring_pending_on(kring)) {
                    struct netmap_kring *sring, *dring;

                    kring->nr_mode = NKR_NETMAP_ON;
                    if ((kring->nr_kflags & NKR_FAKERING) &&
                            (kring->pipe->nr_kflags & NKR_FAKERING)) {
                        /* this is a re-open of a pipe
                         * end-point kept alive by the other end.
                         * We need to leave everything as it is
                         */
                        continue;
                    }

                    /* copy the buffers from the non-fake ring */
                    if (kring->nr_kflags & NKR_FAKERING) {
                        sring = kring->pipe;
                        dring = kring;
                    } else {
                        sring = kring;
                        dring = kring->pipe;
                    }
                    memcpy(dring->ring->slot,
                        sring->ring->slot,
                        sizeof(struct netmap_slot) *
                            sring->nkr_num_slots);
                    /* mark both rings as fake and needed,
                     * so that buffers will not be
                     * deleted by the standard machinery
                     * (we will delete them by ourselves in
                     * netmap_pipe_krings_delete)
                     */
                    sring->nr_kflags |=
                        (NKR_FAKERING | NKR_NEEDRING);
                    dring->nr_kflags |=
                        (NKR_FAKERING | NKR_NEEDRING);
                    kring->nr_mode = NKR_NETMAP_ON;
                }
            }
        }
        if (na->active_fds == 0)
            na->na_flags |= NAF_NETMAP_ON;
    } else {
        if (na->active_fds == 0)
            na->na_flags &= ~NAF_NETMAP_ON;
        for_rx_tx(t) {
            for (i = 0; i < nma_get_nrings(na, t); i++) {
                struct netmap_kring *kring = NMR(na, t)[i];

                if (nm_kring_pending_off(kring)) {
                    kring->nr_mode = NKR_NETMAP_OFF;
                }
            }
        }
        netmap_krings_mode_commit(na, onoff);
    }

    if (na->active_fds) {
        ND("active_fds %d", na->active_fds);
        nm_prdis("active_fds %d", na->active_fds);
        return 0;
    }

    if (pna->peer_ref) {
        ND("%p: case 1.a or 2.a, nothing to do", na);
        nm_prdis("%p: case 1.a or 2.a, nothing to do", na);
        return 0;
    }
    if (onoff) {
        ND("%p: case 1.b, drop peer", na);
        nm_prdis("%p: case 1.b, drop peer", na);
        pna->peer->peer_ref = 0;
        netmap_adapter_put(na);
    } else {
        ND("%p: case 2.b, grab peer", na);
        nm_prdis("%p: case 2.b, grab peer", na);
        netmap_adapter_get(na);
        pna->peer->peer_ref = 1;
    }
    return error;
}
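
netmap_krings_mode_commit(na, onoff) takes over the per-ring loops it replaces here and in the other nm_register callbacks. Reconstructed from those loops, a plausible shape for the helper is the following (a sketch only; the loop bound is taken from the loops above, while the authoritative version lives in netmap's core):

    void
    netmap_krings_mode_commit(struct netmap_adapter *na, int onoff)
    {
        enum txrx t;
        int i;

        for_rx_tx(t) {
            for (i = 0; i < nma_get_nrings(na, t); i++) {
                struct netmap_kring *kring = NMR(na, t)[i];

                /* commit any pending mode switch in the
                 * direction requested by 'onoff' */
                if (onoff && nm_kring_pending_on(kring))
                    kring->nr_mode = NKR_NETMAP_ON;
                else if (!onoff && nm_kring_pending_off(kring))
                    kring->nr_mode = NKR_NETMAP_OFF;
            }
        }
    }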

void
netmap_pipe_krings_delete_both(struct netmap_adapter *na,
        struct netmap_adapter *ona)
{
    struct netmap_adapter *sna;
    enum txrx t;
    int i;

    /* case 1) below */
    nm_prdis("%p: case 1, deleting everything", na);
    /* To avoid double-frees we zero-out all the buffers in the kernel part
     * of each ring. The reason is this: If the user is behaving correctly,
     * all buffers are found in exactly one slot in the userspace part of
     * some ring. If the user is not behaving correctly, we cannot release
     * buffers cleanly anyway. In the latter case, the allocator will
     * return to a clean state only when all its users will close.
     */
    sna = na;
cleanup:
    for_rx_tx(t) {
        for (i = 0; i < nma_get_nrings(sna, t); i++) {
            struct netmap_kring *kring = NMR(sna, t)[i];
            struct netmap_ring *ring = kring->ring;
            uint32_t j, lim = kring->nkr_num_slots - 1;

            nm_prdis("%s ring %p hwtail %u hwcur %u",
                kring->name, ring, kring->nr_hwtail, kring->nr_hwcur);

            if (ring == NULL)
                continue;

            if (kring->tx == NR_RX)
                ring->slot[kring->pipe_tail].buf_idx = 0;

            for (j = nm_next(kring->pipe_tail, lim);
                    j != kring->nr_hwcur;
                    j = nm_next(j, lim))
            {
                nm_prdis("%s[%d] %u", kring->name, j, ring->slot[j].buf_idx);
                ring->slot[j].buf_idx = 0;
            }
            kring->nr_kflags &= ~(NKR_FAKERING | NKR_NEEDRING);
        }

    }
    if (sna != ona && ona->tx_rings) {
        sna = ona;
        goto cleanup;
    }

    netmap_mem_rings_delete(na);
    netmap_krings_delete(na); /* also zeroes tx_rings etc. */

    if (ona->tx_rings == NULL) {
        /* already deleted, we must be on an
         * cleanup-after-error path */
        return;
    }
    netmap_mem_rings_delete(ona);
    netmap_krings_delete(ona);
}

/* netmap_pipe_krings_delete.
 *
 * There are two cases:
@@ -546,67 +619,14 @@ netmap_pipe_krings_delete(struct netmap_adapter *na)
{
    struct netmap_pipe_adapter *pna =
        (struct netmap_pipe_adapter *)na;
    struct netmap_adapter *sna, *ona; /* na of the other end */
    enum txrx t;
    int i;
    struct netmap_adapter *ona; /* na of the other end */

    if (!pna->peer_ref) {
        ND("%p: case 2, kept alive by peer", na);
        nm_prdis("%p: case 2, kept alive by peer", na);
        return;
    }
    ona = &pna->peer->up;
    /* case 1) above */
    ND("%p: case 1, deleting everything", na);
    /* To avoid double-frees we zero-out all the buffers in the kernel part
     * of each ring. The reason is this: If the user is behaving correctly,
     * all buffers are found in exactly one slot in the userspace part of
     * some ring. If the user is not behaving correctly, we cannot release
     * buffers cleanly anyway. In the latter case, the allocator will
     * return to a clean state only when all its users will close.
     */
    sna = na;
cleanup:
    for_rx_tx(t) {
        for (i = 0; i < nma_get_nrings(sna, t); i++) {
            struct netmap_kring *kring = NMR(sna, t)[i];
            struct netmap_ring *ring = kring->ring;
            uint32_t j, lim = kring->nkr_num_slots - 1;

            ND("%s ring %p hwtail %u hwcur %u",
                kring->name, ring, kring->nr_hwtail, kring->nr_hwcur);

            if (ring == NULL)
                continue;

            if (kring->tx == NR_RX)
                ring->slot[kring->pipe_tail].buf_idx = 0;

            for (j = nm_next(kring->pipe_tail, lim);
                    j != kring->nr_hwcur;
                    j = nm_next(j, lim))
            {
                ND("%s[%d] %u", kring->name, j, ring->slot[j].buf_idx);
                ring->slot[j].buf_idx = 0;
            }
            kring->nr_kflags &= ~(NKR_FAKERING | NKR_NEEDRING);
        }

    }
    if (sna != ona && ona->tx_rings) {
        sna = ona;
        goto cleanup;
    }

    netmap_mem_rings_delete(na);
    netmap_krings_delete(na); /* also zeroes tx_rings etc. */

    if (ona->tx_rings == NULL) {
        /* already deleted, we must be on an
         * cleanup-after-error path */
        return;
    }
    netmap_mem_rings_delete(ona);
    netmap_krings_delete(ona);
    netmap_pipe_krings_delete_both(na, ona);
}


@@ -615,9 +635,9 @@ netmap_pipe_dtor(struct netmap_adapter *na)
{
    struct netmap_pipe_adapter *pna =
        (struct netmap_pipe_adapter *)na;
    ND("%p %p", na, pna->parent_ifp);
    nm_prdis("%p %p", na, pna->parent_ifp);
    if (pna->peer_ref) {
        ND("%p: clean up peer", na);
        nm_prdis("%p: clean up peer", na);
        pna->peer_ref = 0;
        netmap_adapter_put(&pna->peer->up);
    }
@@ -651,7 +671,7 @@ netmap_get_pipe_na(struct nmreq_header *hdr, struct netmap_adapter **na,
        if (cbra != NULL) {
            role = NM_PIPE_ROLE_SLAVE;
        } else {
            ND("not a pipe");
            nm_prdis("not a pipe");
            return 0;
        }
    }
@@ -682,10 +702,10 @@ netmap_get_pipe_na(struct nmreq_header *hdr, struct netmap_adapter **na,
        if (!error)
            break;
        if (error != ENXIO || retries++) {
            ND("parent lookup failed: %d", error);
            nm_prdis("parent lookup failed: %d", error);
            return error;
        }
        ND("try to create a persistent vale port");
        nm_prdis("try to create a persistent vale port");
        /* create a persistent vale port and try again */
        *cbra = '\0';
        NMG_UNLOCK();
@@ -694,14 +714,15 @@ netmap_get_pipe_na(struct nmreq_header *hdr, struct netmap_adapter **na,
        strlcpy(hdr->nr_name, nr_name_orig, sizeof(hdr->nr_name));
        if (create_error && create_error != EEXIST) {
            if (create_error != EOPNOTSUPP) {
                D("failed to create a persistent vale port: %d", create_error);
                nm_prerr("failed to create a persistent vale port: %d",
                    create_error);
            }
            return error;
        }
    }

    if (NETMAP_OWNED_BY_KERN(pna)) {
        ND("parent busy");
        nm_prdis("parent busy");
        error = EBUSY;
        goto put_out;
    }
@@ -711,10 +732,10 @@ netmap_get_pipe_na(struct nmreq_header *hdr, struct netmap_adapter **na,
    mna = netmap_pipe_find(pna, pipe_id);
    if (mna) {
        if (mna->role == role) {
            ND("found %s directly at %d", pipe_id, mna->parent_slot);
            nm_prdis("found %s directly at %d", pipe_id, mna->parent_slot);
            reqna = mna;
        } else {
            ND("found %s indirectly at %d", pipe_id, mna->parent_slot);
            nm_prdis("found %s indirectly at %d", pipe_id, mna->parent_slot);
            reqna = mna->peer;
        }
        /* the pipe we have found already holds a ref to the parent,
@@ -723,7 +744,7 @@ netmap_get_pipe_na(struct nmreq_header *hdr, struct netmap_adapter **na,
        netmap_unget_na(pna, ifp);
        goto found;
    }
    ND("pipe %s not found, create %d", pipe_id, create);
    nm_prdis("pipe %s not found, create %d", pipe_id, create);
    if (!create) {
        error = ENODEV;
        goto put_out;
@@ -814,10 +835,10 @@ netmap_get_pipe_na(struct nmreq_header *hdr, struct netmap_adapter **na,
        sna->peer_ref = 1;
        netmap_adapter_get(&mna->up);
    }
    ND("created master %p and slave %p", mna, sna);
    nm_prdis("created master %p and slave %p", mna, sna);
found:

    ND("pipe %s %s at %p", pipe_id,
    nm_prdis("pipe %s %s at %p", pipe_id,
        (reqna->role == NM_PIPE_ROLE_MASTER ? "master" : "slave"), reqna);
    *na = &reqna->up;
    netmap_adapter_get(*na);
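
The name lookup above keys off the '{' or '}' separator in the request: '{' asks for the master end of a pipe, '}' for the slave end, and the string after the brace is the pipe id. From userspace, assuming the usual libnetmap name syntax (the parent port "x" and id "1" below are examples), the two ends can be opened like this:

    #define NETMAP_WITH_LIBS
    #include <net/netmap_user.h>

    /* open both ends of pipe 1 over parent port x; the first open
     * creates the pair, the second attaches to the existing one */
    static int
    open_pipe_ends(struct nm_desc **m, struct nm_desc **s)
    {
        *m = nm_open("netmap:x{1", NULL, 0, NULL);
        *s = nm_open("netmap:x}1", NULL, 0, NULL);
        return (*m != NULL && *s != NULL) ? 0 : -1;
    }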
@@ -445,7 +445,7 @@ netmap_vale_attach(struct nmreq_header *hdr, void *auth_token)
        error = na->nm_bdg_ctl(hdr, na);
        if (error)
            goto unref_exit;
        ND("registered %s to netmap-mode", na->name);
        nm_prdis("registered %s to netmap-mode", na->name);
    }
    vpna = (struct netmap_vp_adapter *)na;
    req->port_index = vpna->bdg_port;
@@ -533,7 +533,7 @@ netmap_vale_vp_dtor(struct netmap_adapter *na)
    struct netmap_vp_adapter *vpna = (struct netmap_vp_adapter*)na;
    struct nm_bridge *b = vpna->na_bdg;

    ND("%s has %d references", na->name, na->na_refcount);
    nm_prdis("%s has %d references", na->name, na->na_refcount);

    if (b) {
        netmap_bdg_detach_common(b, vpna->bdg_port, -1);
@@ -542,7 +542,7 @@ netmap_vale_vp_dtor(struct netmap_adapter *na)
    if (na->ifp != NULL && !nm_iszombie(na)) {
        NM_DETACH_NA(na->ifp);
        if (vpna->autodelete) {
            ND("releasing %s", na->ifp->if_xname);
            nm_prdis("releasing %s", na->ifp->if_xname);
            NMG_UNLOCK();
            nm_os_vi_detach(na->ifp);
            NMG_LOCK();
@@ -628,12 +628,12 @@ nm_vale_preflush(struct netmap_kring *kring, u_int end)
     * shared lock, waiting if we can sleep (if the source port is
     * attached to a user process) or with a trylock otherwise (NICs).
     */
    ND("wait rlock for %d packets", ((j > end ? lim+1 : 0) + end) - j);
    nm_prdis("wait rlock for %d packets", ((j > end ? lim+1 : 0) + end) - j);
    if (na->up.na_flags & NAF_BDG_MAYSLEEP)
        BDG_RLOCK(b);
    else if (!BDG_RTRYLOCK(b))
        return j;
    ND(5, "rlock acquired for %d packets", ((j > end ? lim+1 : 0) + end) - j);
    nm_prdis(5, "rlock acquired for %d packets", ((j > end ? lim+1 : 0) + end) - j);
    ft = kring->nkr_ft;

    for (; likely(j != end); j = nm_next(j, lim)) {
@@ -644,7 +644,7 @@ nm_vale_preflush(struct netmap_kring *kring, u_int end)
        ft[ft_i].ft_flags = slot->flags;
        ft[ft_i].ft_offset = 0;

        ND("flags is 0x%x", slot->flags);
        nm_prdis("flags is 0x%x", slot->flags);
        /* we do not use the buf changed flag, but we still need to reset it */
        slot->flags &= ~NS_BUF_CHANGED;

@@ -667,7 +667,7 @@ nm_vale_preflush(struct netmap_kring *kring, u_int end)
            continue;
        }
        if (unlikely(netmap_verbose && frags > 1))
            RD(5, "%d frags at %d", frags, ft_i - frags);
            nm_prlim(5, "%d frags at %d", frags, ft_i - frags);
        ft[ft_i - frags].ft_frags = frags;
        frags = 1;
        if (unlikely((int)ft_i >= bridge_batch))
@@ -815,8 +815,9 @@ nm_kr_space(struct netmap_kring *k, int is_rx)
            k->nr_tail >= k->nkr_num_slots ||
            busy < 0 ||
            busy >= k->nkr_num_slots) {
        D("invalid kring, cur %d tail %d lease %d lease_idx %d lim %d", k->nr_hwcur, k->nr_hwtail, k->nkr_hwlease,
            k->nkr_lease_idx, k->nkr_num_slots);
        nm_prerr("invalid kring, cur %d tail %d lease %d lease_idx %d lim %d",
            k->nr_hwcur, k->nr_hwtail, k->nkr_hwlease,
            k->nkr_lease_idx, k->nkr_num_slots);
    }
#endif
    return space;
@@ -893,7 +894,7 @@ nm_vale_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na,
        struct nm_vale_q *d;
        struct nm_bdg_fwd *start_ft = NULL;

        ND("slot %d frags %d", i, ft[i].ft_frags);
        nm_prdis("slot %d frags %d", i, ft[i].ft_frags);

        if (na->up.virt_hdr_len < ft[i].ft_len) {
            ft[i].ft_offset = na->up.virt_hdr_len;
@@ -909,7 +910,7 @@ nm_vale_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na,
        }
        dst_port = b->bdg_ops.lookup(start_ft, &dst_ring, na, b->private_data);
        if (netmap_verbose > 255)
            RD(5, "slot %d port %d -> %d", i, me, dst_port);
            nm_prlim(5, "slot %d port %d -> %d", i, me, dst_port);
        if (dst_port >= NM_BDG_NOPORT)
            continue; /* this packet is identified to be dropped */
        else if (dst_port == NM_BDG_BROADCAST)
@@ -956,7 +957,7 @@ nm_vale_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na,
        }
    }

    ND(5, "pass 1 done %d pkts %d dsts", n, num_dsts);
    nm_prdis(5, "pass 1 done %d pkts %d dsts", n, num_dsts);
    /* second pass: scan destinations */
    for (i = 0; i < num_dsts; i++) {
        struct netmap_vp_adapter *dst_na;
@@ -971,7 +972,7 @@ nm_vale_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na,
        int virt_hdr_mismatch = 0;

        d_i = dsts[i];
        ND("second pass %d port %d", i, d_i);
        nm_prdis("second pass %d port %d", i, d_i);
        d = dst_ents + d_i;
        // XXX fix the division
        dst_na = b->bdg_ports[d_i/NM_BDG_MAXRINGS];
@@ -988,7 +989,7 @@ nm_vale_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na,
         * - when na is being deactivated but is still attached.
         */
        if (unlikely(!nm_netmap_on(&dst_na->up))) {
            ND("not in netmap mode!");
            nm_prdis("not in netmap mode!");
            goto cleanup;
        }

@@ -1006,7 +1007,7 @@ nm_vale_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na,

        if (unlikely(dst_na->up.virt_hdr_len != na->up.virt_hdr_len)) {
            if (netmap_verbose) {
                RD(3, "virt_hdr_mismatch, src %d dst %d", na->up.virt_hdr_len,
                nm_prlim(3, "virt_hdr_mismatch, src %d dst %d", na->up.virt_hdr_len,
                    dst_na->up.virt_hdr_len);
            }
            /* There is a virtio-net header/offloadings mismatch between
@@ -1028,11 +1029,11 @@ nm_vale_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na,
                KASSERT(dst_na->mfs > 0, ("vpna->mfs is 0"));
                needed = (needed * na->mfs) /
                    (dst_na->mfs - WORST_CASE_GSO_HEADER) + 1;
                ND(3, "srcmtu=%u, dstmtu=%u, x=%u", na->mfs, dst_na->mfs, needed);
                nm_prdis(3, "srcmtu=%u, dstmtu=%u, x=%u", na->mfs, dst_na->mfs, needed);
            }
        }

        ND(5, "pass 2 dst %d is %x %s",
        nm_prdis(5, "pass 2 dst %d is %x %s",
            i, d_i, is_vp ? "virtual" : "nic/host");
        dst_nr = d_i & (NM_BDG_MAXRINGS-1);
        nrings = dst_na->up.num_rx_rings;
@@ -1098,7 +1099,7 @@ nm_vale_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na,
            if (unlikely(cnt > howmany))
                break; /* no more space */
            if (netmap_verbose && cnt > 1)
                RD(5, "rx %d frags to %d", cnt, j);
                nm_prlim(5, "rx %d frags to %d", cnt, j);
            ft_end = ft_p + cnt;
            if (unlikely(virt_hdr_mismatch)) {
                bdg_mismatch_datapath(na, dst_na, ft_p, ring, &j, lim, &howmany);
@@ -1111,7 +1112,7 @@ nm_vale_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na,
                slot = &ring->slot[j];
                dst = NMB(&dst_na->up, slot);

                ND("send [%d] %d(%d) bytes at %s:%d",
                nm_prdis("send [%d] %d(%d) bytes at %s:%d",
                    i, (int)copy_len, (int)dst_len,
                    NM_IFPNAME(dst_ifp), j);
                /* round to a multiple of 64 */
@@ -1119,7 +1120,7 @@ nm_vale_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na,

                if (unlikely(copy_len > NETMAP_BUF_SIZE(&dst_na->up) ||
                        copy_len > NETMAP_BUF_SIZE(&na->up))) {
                    RD(5, "invalid len %d, down to 64", (int)copy_len);
                    nm_prlim(5, "invalid len %d, down to 64", (int)copy_len);
                    copy_len = dst_len = 64; // XXX
                }
                if (ft_p->ft_flags & NS_INDIRECT) {
@@ -1155,10 +1156,10 @@ nm_vale_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na,
             * i can recover the slots, otherwise must
             * fill them with 0 to mark empty packets.
             */
            ND("leftover %d bufs", howmany);
            nm_prdis("leftover %d bufs", howmany);
            if (nm_next(lease_idx, lim) == kring->nkr_lease_idx) {
                /* yes i am the last one */
                ND("roll back nkr_hwlease to %d", j);
                nm_prdis("roll back nkr_hwlease to %d", j);
                kring->nkr_hwlease = j;
            } else {
                while (howmany-- > 0) {
@@ -1323,7 +1324,7 @@ netmap_vale_vp_create(struct nmreq_header *hdr, struct ifnet *ifp,
    na->nm_krings_create = netmap_vale_vp_krings_create;
    na->nm_krings_delete = netmap_vale_vp_krings_delete;
    na->nm_dtor = netmap_vale_vp_dtor;
    ND("nr_mem_id %d", req->nr_mem_id);
    nm_prdis("nr_mem_id %d", req->nr_mem_id);
    na->nm_mem = nmd ?
        netmap_mem_get(nmd):
        netmap_mem_private_new(
@@ -1594,11 +1595,11 @@ netmap_vi_create(struct nmreq_header *hdr, int autodelete)
    if (error) {
        goto err_2;
    }
    ND("returning nr_mem_id %d", req->nr_mem_id);
    nm_prdis("returning nr_mem_id %d", req->nr_mem_id);
    if (nmd)
        netmap_mem_put(nmd);
    NMG_UNLOCK();
    ND("created %s", ifp->if_xname);
    nm_prdis("created %s", ifp->if_xname);
    return 0;

err_2: