Various cleanups for readability (no functional changes):

- remove the KEVENT code, which was incomplete and not compiled anyway
- change some while() loops into for()
- adjust indentation
- remove extra whitespace

MFC after: 1 week
commit babc7c1258
parent f6a92b8803
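The recurring change in the driver hunks below is purely mechanical: a while () loop that advanced a hand-maintained counter becomes a for () loop that owns that counter. A minimal standalone sketch of the pattern follows; the ring limit, slot handler and start/stop indices here are hypothetical, invented only to illustrate the rewrite, and are not taken from the driver sources.

    #include <stdio.h>

    #define LIM 7   /* last valid slot index, i.e. ring size - 1 (hypothetical) */

    /* stand-in for the per-slot work done inside the driver loops */
    static void process_slot(int idx) { printf("slot %d\n", idx); }

    int main(void)
    {
        int j, k = 3, n;

        /* Before: counter n maintained by hand inside a while loop. */
        j = 5;
        n = 0;
        while (j != k) {
            process_slot(j);
            j = (j == LIM) ? 0 : j + 1;     /* wrap around the ring */
            n++;
        }
        printf("while loop processed %d slots\n", n);

        /* After: same walk, counter folded into the for statement. */
        j = 5;
        for (n = 0; j != k; n++) {
            process_slot(j);
            j = (j == LIM) ? 0 : j + 1;     /* wrap around the ring */
        }
        printf("for loop processed %d slots\n", n);
        return 0;
    }

Both loops visit the same slots and leave the same count in n, which is why the conversion is a readability-only change.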
@@ -212,7 +212,7 @@ em_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
     j = kring->nr_hwcur;
     if (j != k) { /* we have packets to send */
         l = netmap_tidx_k2n(na, ring_nr, j);
-        while (j != k) {
+        for (n = 0; j != k; n++) {
             struct netmap_slot *slot = &ring->slot[j];
             struct e1000_tx_desc *curr = &txr->tx_base[l];
             struct em_buffer *txbuf = &txr->tx_buffers[l];
@@ -245,7 +245,6 @@ em_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
                 BUS_DMASYNC_PREWRITE);
             j = (j == lim) ? 0 : j + 1;
             l = (l == lim) ? 0 : l + 1;
-            n++;
         }
         kring->nr_hwcur = k;

@@ -339,9 +338,8 @@ em_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
     /* skip past packets that userspace has already processed */
     j = kring->nr_hwcur; /* netmap ring index */
     if (j != k) { /* userspace has read some packets. */
-        n = 0;
         l = netmap_ridx_k2n(na, ring_nr, j); /* NIC ring index */
-        while (j != k) {
+        for (n = 0; j != k; n++) {
             struct netmap_slot *slot = &ring->slot[j];
             struct e1000_rx_desc *curr = &rxr->rx_base[l];
             struct em_buffer *rxbuf = &rxr->rx_buffers[l];
@@ -367,7 +365,6 @@ em_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)

             j = (j == lim) ? 0 : j + 1;
             l = (l == lim) ? 0 : l + 1;
-            n++;
         }
         kring->nr_hwavail -= n;
         kring->nr_hwcur = k;

@@ -170,7 +170,7 @@ igb_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
             (adapter->hw.mac.type == e1000_82575) ? (txr->me << 4) : 0;

         l = netmap_tidx_k2n(na, ring_nr, j);
-        while (j != k) {
+        for (n = 0; j != k; n++) {
             struct netmap_slot *slot = &ring->slot[j];
             union e1000_adv_tx_desc *curr =
                 (union e1000_adv_tx_desc *)&txr->tx_base[l];
@@ -209,7 +209,6 @@ igb_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
                 BUS_DMASYNC_PREWRITE);
             j = (j == lim) ? 0 : j + 1;
             l = (l == lim) ? 0 : l + 1;
-            n++;
         }
         kring->nr_hwcur = k;

@@ -306,9 +305,8 @@ igb_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
     /* skip past packets that userspace has already processed */
     j = kring->nr_hwcur;
     if (j != k) { /* userspace has read some packets. */
-        n = 0;
         l = netmap_ridx_k2n(na, ring_nr, j);
-        while (j != k) {
+        for (n = 0; j != k; n++) {
             struct netmap_slot *slot = ring->slot + j;
             union e1000_adv_rx_desc *curr = &rxr->rx_base[l];
             struct igb_rx_buf *rxbuf = rxr->rx_buffers + l;
@@ -333,7 +331,6 @@ igb_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)

             j = (j == lim) ? 0 : j + 1;
             l = (l == lim) ? 0 : l + 1;
-            n++;
         }
         kring->nr_hwavail -= n;
         kring->nr_hwcur = k;

@@ -175,7 +175,7 @@ lem_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
     j = kring->nr_hwcur;
     if (j != k) { /* we have packets to send */
         l = netmap_tidx_k2n(na, ring_nr, j);
-        while (j != k) {
+        for (n = 0; j != k; n++) {
             struct netmap_slot *slot = &ring->slot[j];
             struct e1000_tx_desc *curr = &adapter->tx_desc_base[l];
             struct em_buffer *txbuf = &adapter->tx_buffer_area[l];
@@ -208,7 +208,6 @@ lem_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
                 BUS_DMASYNC_PREWRITE);
             j = (j == lim) ? 0 : j + 1;
             l = (l == lim) ? 0 : l + 1;
-            n++;
         }
         kring->nr_hwcur = k;

@@ -306,9 +305,8 @@ lem_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
     /* skip past packets that userspace has already processed */
     j = kring->nr_hwcur; /* netmap ring index */
     if (j != k) { /* userspace has read some packets. */
-        n = 0;
         l = netmap_ridx_k2n(na, ring_nr, j); /* NIC ring index */
-        while (j != k) {
+        for (n = 0; j != k; n++) {
             struct netmap_slot *slot = &ring->slot[j];
             struct e1000_rx_desc *curr = &adapter->rx_desc_base[l];
             struct em_buffer *rxbuf = &adapter->rx_buffer_area[l];
@@ -334,7 +332,6 @@ lem_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)

             j = (j == lim) ? 0 : j + 1;
             l = (l == lim) ? 0 : l + 1;
-            n++;
         }
         kring->nr_hwavail -= n;
         kring->nr_hwcur = k;

@@ -175,9 +175,8 @@ re_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)

     j = kring->nr_hwcur;
     if (j != k) { /* we have new packets to send */
-        n = 0;
         l = sc->rl_ldata.rl_tx_prodidx;
-        while (j != k) {
+        for (n = 0; j != k; n++) {
             struct netmap_slot *slot = &ring->slot[j];
             struct rl_desc *desc = &sc->rl_ldata.rl_tx_list[l];
             int cmd = slot->len | RL_TDESC_CMD_EOF |
@@ -210,7 +209,6 @@ re_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
                 txd[l].tx_dmamap, BUS_DMASYNC_PREWRITE);
             j = (j == lim) ? 0 : j + 1;
             l = (l == lim) ? 0 : l + 1;
-            n++;
         }
         sc->rl_ldata.rl_tx_prodidx = l;
         kring->nr_hwcur = k;
@@ -295,9 +293,8 @@ re_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
      */
     j = kring->nr_hwcur;
     if (j != k) { /* userspace has read some packets. */
-        n = 0;
         l = netmap_ridx_k2n(na, ring_nr, j); /* the NIC index */
-        while (j != k) {
+        for (n = 0; j != k; n++) {
             struct netmap_slot *slot = ring->slot + j;
             struct rl_desc *desc = &sc->rl_ldata.rl_rx_list[l];
             int cmd = na->buff_size | RL_RDESC_CMD_OWN;
@@ -326,7 +323,6 @@ re_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
                 rxd[l].rx_dmamap, BUS_DMASYNC_PREREAD);
             j = (j == lim) ? 0 : j + 1;
             l = (l == lim) ? 0 : l + 1;
-            n++;
         }
         kring->nr_hwavail -= n;
         kring->nr_hwcur = k;
@@ -366,11 +362,10 @@ re_netmap_tx_init(struct rl_softc *sc)

     /* l points in the netmap ring, i points in the NIC ring */
     for (i = 0; i < n; i++) {
-        void *addr;
         uint64_t paddr;
         int l = netmap_tidx_n2k(na, 0, i);
+        void *addr = PNMB(slot + l, &paddr);

-        addr = PNMB(slot + l, &paddr);
         desc[i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
         desc[i].rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
         netmap_load_map(sc->rl_ldata.rl_tx_mtag,

@@ -242,8 +242,7 @@ ixgbe_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
     j = kring->nr_hwcur;
     if (j != k) { /* we have new packets to send */
         l = netmap_tidx_k2n(na, ring_nr, j); /* NIC index */

-        while (j != k) {
+        for (n = 0; j != k; n++) {
             /*
              * Collect per-slot info.
              * Note that txbuf and curr are indexed by l.
@@ -281,6 +280,11 @@ ixgbe_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
             }

             slot->flags &= ~NS_REPORT;
+            if (slot->flags & NS_BUF_CHANGED) {
+                /* buffer has changed, unload and reload map */
+                netmap_reload_map(txr->txtag, txbuf->map, addr);
+                slot->flags &= ~NS_BUF_CHANGED;
+            }
             /*
              * Fill the slot in the NIC ring.
              * In this driver we need to rewrite the buffer
@@ -295,25 +299,14 @@ ixgbe_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
                 IXGBE_ADVTXD_DCMD_DEXT |
                 IXGBE_ADVTXD_DCMD_IFCS |
                 IXGBE_TXD_CMD_EOP | flags) );
-            /* If the buffer has changed, unload and reload map
-             * (and possibly the physical address in the NIC
-             * slot, but we did it already).
-             */
-            if (slot->flags & NS_BUF_CHANGED) {
-                /* buffer has changed, unload and reload map */
-                netmap_reload_map(txr->txtag, txbuf->map, addr);
-                slot->flags &= ~NS_BUF_CHANGED;
-            }
-
             /* make sure changes to the buffer are synced */
             bus_dmamap_sync(txr->txtag, txbuf->map,
                 BUS_DMASYNC_PREWRITE);
             j = (j == lim) ? 0 : j + 1;
             l = (l == lim) ? 0 : l + 1;
-            n++;
         }
         kring->nr_hwcur = k; /* the saved ring->cur */

         /* decrease avail by number of sent packets */
         kring->nr_hwavail -= n;

@@ -356,7 +349,7 @@ ixgbe_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
         j = (j < kring->nkr_num_slots / 4 || j >= kring->nkr_num_slots*3/4) ?
             0 : report_frequency;
         kring->nr_kflags = j; /* the slot to check */
-        j = txd[j].upper.fields.status & IXGBE_TXD_STAT_DD;
+        j = txd[j].upper.fields.status & IXGBE_TXD_STAT_DD;    // XXX cpu_to_le32 ?
     }
     if (j) {
         int delta;
@@ -396,7 +389,6 @@ ixgbe_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
     if (do_lock)
         IXGBE_TX_UNLOCK(txr);
     return 0;
-
 }


@@ -460,25 +452,25 @@ ixgbe_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
     l = rxr->next_to_check;
     j = netmap_ridx_n2k(na, ring_nr, l);

-    if (netmap_no_pendintr || force_update) {
-    for (n = 0; ; n++) {
-        union ixgbe_adv_rx_desc *curr = &rxr->rx_base[l];
-        uint32_t staterr = le32toh(curr->wb.upper.status_error);
+    if (netmap_no_pendintr || force_update) {
+        for (n = 0; ; n++) {
+            union ixgbe_adv_rx_desc *curr = &rxr->rx_base[l];
+            uint32_t staterr = le32toh(curr->wb.upper.status_error);

-        if ((staterr & IXGBE_RXD_STAT_DD) == 0)
-            break;
-        ring->slot[j].len = le16toh(curr->wb.upper.length);
-        bus_dmamap_sync(rxr->ptag,
-            rxr->rx_buffers[l].pmap, BUS_DMASYNC_POSTREAD);
-        j = (j == lim) ? 0 : j + 1;
-        l = (l == lim) ? 0 : l + 1;
+            if ((staterr & IXGBE_RXD_STAT_DD) == 0)
+                break;
+            ring->slot[j].len = le16toh(curr->wb.upper.length);
+            bus_dmamap_sync(rxr->ptag,
+                rxr->rx_buffers[l].pmap, BUS_DMASYNC_POSTREAD);
+            j = (j == lim) ? 0 : j + 1;
+            l = (l == lim) ? 0 : l + 1;
         }
-    if (n) { /* update the state variables */
-        rxr->next_to_check = l;
-        kring->nr_hwavail += n;
-    }
-    kring->nr_kflags &= ~NKR_PENDINTR;
-    }
+        if (n) { /* update the state variables */
+            rxr->next_to_check = l;
+            kring->nr_hwavail += n;
+        }
+        kring->nr_kflags &= ~NKR_PENDINTR;
+    }

     /*
      * Skip past packets that userspace has already processed
@@ -489,9 +481,8 @@ ixgbe_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
      */
     j = kring->nr_hwcur;
     if (j != k) { /* userspace has read some packets. */
-        n = 0;
         l = netmap_ridx_k2n(na, ring_nr, j);
-        while (j != k) {
+        for (n = 0; j != k; n++) {
             /* collect per-slot info, with similar validations
              * and flag handling as in the txsync code.
              *
@@ -509,19 +500,16 @@ ixgbe_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
             if (addr == netmap_buffer_base) /* bad buf */
                 goto ring_reset;

+            curr->wb.upper.status_error = 0;
+            curr->read.pkt_addr = htole64(paddr);
             if (slot->flags & NS_BUF_CHANGED) {
                 netmap_reload_map(rxr->ptag, rxbuf->pmap, addr);
                 slot->flags &= ~NS_BUF_CHANGED;
             }
-
-            curr->wb.upper.status_error = 0;
-            curr->read.pkt_addr = htole64(paddr);
             bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
                 BUS_DMASYNC_PREREAD);
-
             j = (j == lim) ? 0 : j + 1;
             l = (l == lim) ? 0 : l + 1;
-            n++;
         }
         kring->nr_hwavail -= n;
         kring->nr_hwcur = k;

@@ -24,9 +24,6 @@
- */
-
-/*
  * $FreeBSD$
  * $Id: netmap.c 9795 2011-12-02 11:39:08Z luigi $
  *
  * This module supports memory mapped access to network devices,
  * see netmap(4).
  *

@@ -634,100 +631,6 @@ struct netmap_priv_d {
 };


-static struct cdev *netmap_dev; /* /dev/netmap character device. */
-
-
-static d_mmap_t netmap_mmap;
-static d_ioctl_t netmap_ioctl;
-static d_poll_t netmap_poll;
-
-#ifdef NETMAP_KEVENT
-static d_kqfilter_t netmap_kqfilter;
-#endif
-
-static struct cdevsw netmap_cdevsw = {
-    .d_version = D_VERSION,
-    .d_name = "netmap",
-    .d_mmap = netmap_mmap,
-    .d_ioctl = netmap_ioctl,
-    .d_poll = netmap_poll,
-#ifdef NETMAP_KEVENT
-    .d_kqfilter = netmap_kqfilter,
-#endif
-};
-
-#ifdef NETMAP_KEVENT
-static int netmap_kqread(struct knote *, long);
-static int netmap_kqwrite(struct knote *, long);
-static void netmap_kqdetach(struct knote *);
-
-static struct filterops netmap_read_filterops = {
-    .f_isfd = 1,
-    .f_attach = NULL,
-    .f_detach = netmap_kqdetach,
-    .f_event = netmap_kqread,
-};
-
-static struct filterops netmap_write_filterops = {
-    .f_isfd = 1,
-    .f_attach = NULL,
-    .f_detach = netmap_kqdetach,
-    .f_event = netmap_kqwrite,
-};
-
-/*
- * support for the kevent() system call.
- *
- * This is the kevent filter, and is executed each time a new event
- * is triggered on the device. This function execute some operation
- * depending on the received filter.
- *
- * The implementation should test the filters and should implement
- * filter operations we are interested on (a full list in /sys/event.h).
- *
- * On a match we should:
- * - set kn->kn_fop
- * - set kn->kn_hook
- * - call knlist_add() to deliver the event to the application.
- *
- * Return 0 if the event should be delivered to the application.
- */
-static int
-netmap_kqfilter(struct cdev *dev, struct knote *kn)
-{
-    /* declare variables needed to read/write */
-
-    switch(kn->kn_filter) {
-    case EVFILT_READ:
-        if (netmap_verbose)
-            D("%s kqfilter: EVFILT_READ" ifp->if_xname);
-
-        /* read operations */
-        kn->kn_fop = &netmap_read_filterops;
-        break;
-
-    case EVFILT_WRITE:
-        if (netmap_verbose)
-            D("%s kqfilter: EVFILT_WRITE" ifp->if_xname);
-
-        /* write operations */
-        kn->kn_fop = &netmap_write_filterops;
-        break;
-
-    default:
-        if (netmap_verbose)
-            D("%s kqfilter: invalid filter" ifp->if_xname);
-        return(EINVAL);
-    }
-
-    kn->kn_hook = 0;//
-    knlist_add(&netmap_sc->tun_rsel.si_note, kn, 0);
-
-    return (0);
-}
-#endif /* NETMAP_KEVENT */
-
-
 /*
  * File descriptor's private data destructor.
  *

@@ -822,14 +725,16 @@ netmap_dtor(void *data)
  *
  * Return 0 on success, -1 otherwise.
  */
+
 static int
+netmap_mmap(__unused struct cdev *dev,
 #if __FreeBSD_version < 900000
-netmap_mmap(__unused struct cdev *dev, vm_offset_t offset, vm_paddr_t *paddr,
-    int nprot)
+        vm_offset_t offset, vm_paddr_t *paddr, int nprot
 #else
-netmap_mmap(__unused struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
-    int nprot, __unused vm_memattr_t *memattr)
+        vm_ooffset_t offset, vm_paddr_t *paddr, int nprot,
+        __unused vm_memattr_t *memattr
 #endif
+    )
 {
     if (nprot & PROT_EXEC)
         return (-1); // XXX -1 or EINVAL ?
@@ -1252,11 +1157,8 @@ netmap_ioctl(__unused struct cdev *dev, u_long cmd, caddr_t data,
         D("ignore BIOCIMMEDIATE/BIOCSHDRCMPLT/BIOCSHDRCMPLT/BIOCSSEESENT");
         break;

-    default:
+    default: /* allow device-specific ioctls */
         {
-        /*
-         * allow device calls
-         */
         struct socket so;
         bzero(&so, sizeof(so));
         error = get_ifp(nmr->nr_name, &ifp); /* keep reference */
@@ -1266,6 +1168,7 @@ netmap_ioctl(__unused struct cdev *dev, u_long cmd, caddr_t data,
         // so->so_proto not null.
         error = ifioctl(&so, cmd, data, td);
         if_rele(ifp);
+        break;
         }
     }

@@ -1477,19 +1380,19 @@ netmap_poll(__unused struct cdev *dev, int events, struct thread *td)
 /*------- driver support routines ------*/

 /*
- * default lock wrapper. On linux we use mostly netmap-specific locks.
+ * default lock wrapper.
  */
 static void
-netmap_lock_wrapper(struct ifnet *_a, int what, u_int queueid)
+netmap_lock_wrapper(struct ifnet *dev, int what, u_int queueid)
 {
-    struct netmap_adapter *na = NA(_a);
+    struct netmap_adapter *na = NA(dev);

     switch (what) {
-#ifndef __FreeBSD__ /* some system do not need lock on register */
+#ifdef linux /* some system do not need lock on register */
     case NETMAP_REG_LOCK:
     case NETMAP_REG_UNLOCK:
         break;
-#endif
+#endif /* linux */

     case NETMAP_CORE_LOCK:
         mtx_lock(&na->core_lock);
@@ -1701,7 +1604,8 @@ netmap_reset(struct netmap_adapter *na, enum txrx tx, int n,
  * N rings, separate locks:
  *  lock(i); wake(i); unlock(i); lock(core) wake(N+1) unlock(core)
  */
-int netmap_rx_irq(struct ifnet *ifp, int q, int *work_done)
+int
+netmap_rx_irq(struct ifnet *ifp, int q, int *work_done)
 {
     struct netmap_adapter *na;
     struct netmap_kring *r;
@@ -1731,6 +1635,18 @@ int netmap_rx_irq(struct ifnet *ifp, int q, int *work_done)
     return 1;
 }

+static struct cdevsw netmap_cdevsw = {
+    .d_version = D_VERSION,
+    .d_name = "netmap",
+    .d_mmap = netmap_mmap,
+    .d_ioctl = netmap_ioctl,
+    .d_poll = netmap_poll,
+};
+
+
+static struct cdev *netmap_dev; /* /dev/netmap character device. */
+
+
 /*
  * Module loader.
  *
@@ -1744,7 +1660,6 @@ netmap_init(void)
 {
     int error;

-
     error = netmap_memory_init();
     if (error != 0) {
         printf("netmap: unable to initialize the memory allocator.");
@@ -1752,11 +1667,9 @@ netmap_init(void)
     }
     printf("netmap: loaded module with %d Mbytes\n",
         (int)(netmap_mem_d->nm_totalsize >> 20));

     netmap_dev = make_dev(&netmap_cdevsw, 0, UID_ROOT, GID_WHEEL, 0660,
         "netmap");

-    return (0);
+    return (error);
 }
-
-
@@ -1769,9 +1682,7 @@ static void
 netmap_fini(void)
 {
     destroy_dev(netmap_dev);
-
     netmap_memory_fini();
-
     printf("netmap: unloaded module.\n");
 }
