Small code cleanup in preparation for future modifications in
the memory allocator used by netmap. No functional changes apart from two small bug fixes: - in if_re.c, add a missing bus_dmamap_sync() - in netmap.c, comment out a spurious free() in an error-handling block
This commit is contained in:
parent
9a2611585e
commit
6e10c8b8c5
Notes:
svn2git
2020-12-20 02:59:44 +00:00
svn path=/head/; revision=229939
@ -3297,16 +3297,15 @@ em_setup_transmit_ring(struct tx_ring *txr)
|
||||
#ifdef DEV_NETMAP
|
||||
if (slot) {
|
||||
int si = i + na->tx_rings[txr->me].nkr_hwofs;
|
||||
uint64_t paddr;
|
||||
void *addr;
|
||||
|
||||
if (si >= na->num_tx_desc)
|
||||
si -= na->num_tx_desc;
|
||||
addr = NMB(slot + si);
|
||||
txr->tx_base[i].buffer_addr =
|
||||
htole64(vtophys(addr));
|
||||
addr = PNMB(slot + si, &paddr);
|
||||
txr->tx_base[i].buffer_addr = htole64(paddr);
|
||||
/* reload the map for netmap mode */
|
||||
netmap_load_map(txr->txtag,
|
||||
txbuf->map, addr, na->buff_size);
|
||||
netmap_load_map(txr->txtag, txbuf->map, addr);
|
||||
}
|
||||
#endif /* DEV_NETMAP */
|
||||
|
||||
@ -4104,9 +4103,6 @@ em_setup_receive_ring(struct rx_ring *rxr)
|
||||
sj += adapter->num_rx_desc;
|
||||
|
||||
for (int j = 0; j != adapter->num_rx_desc; j++, sj++) {
|
||||
void *addr;
|
||||
int sz;
|
||||
|
||||
rxbuf = &rxr->rx_buffers[j];
|
||||
/* no mbuf and regular mode -> skip this entry */
|
||||
if (rxbuf->m_head == NULL && !slot)
|
||||
@ -4115,12 +4111,20 @@ em_setup_receive_ring(struct rx_ring *rxr)
|
||||
if (sj >= adapter->num_rx_desc)
|
||||
sj -= adapter->num_rx_desc;
|
||||
/* see comment, set slot addr and map */
|
||||
addr = slot ? NMB(slot + sj) : rxbuf->m_head->m_data;
|
||||
sz = slot ? na->buff_size : adapter->rx_mbuf_sz;
|
||||
// XXX load or reload ?
|
||||
netmap_load_map(rxr->rxtag, rxbuf->map, addr, sz);
|
||||
/* Update descriptor */
|
||||
rxr->rx_base[j].buffer_addr = htole64(vtophys(addr));
|
||||
if (slot) {
|
||||
uint64_t paddr;
|
||||
void *addr = PNMB(slot + sj, &paddr);
|
||||
netmap_load_map(rxr->rxtag, rxbuf->map, addr);
|
||||
/* Update descriptor */
|
||||
rxr->rx_base[j].buffer_addr = htole64(paddr);
|
||||
} else {
|
||||
/* Get the memory mapping */
|
||||
bus_dmamap_load_mbuf_sg(rxr->rxtag,
|
||||
rxbuf->map, rxbuf->m_head, seg,
|
||||
&nsegs, BUS_DMA_NOWAIT);
|
||||
/* Update descriptor */
|
||||
rxr->rx_base[j].buffer_addr = htole64(seg[0].ds_addr);
|
||||
}
|
||||
bus_dmamap_sync(rxr->rxtag, rxbuf->map, BUS_DMASYNC_PREREAD);
|
||||
}
|
||||
}
|
||||
|
@ -3320,8 +3320,7 @@ igb_setup_transmit_ring(struct tx_ring *txr)
|
||||
|
||||
if (si < 0)
|
||||
si += na->num_tx_desc;
|
||||
netmap_load_map(txr->txtag, txbuf->map,
|
||||
NMB(slot + si), na->buff_size);
|
||||
netmap_load_map(txr->txtag, txbuf->map, NMB(slot + si));
|
||||
}
|
||||
#endif /* DEV_NETMAP */
|
||||
/* clear the watch index */
|
||||
@ -4062,15 +4061,15 @@ igb_setup_receive_ring(struct rx_ring *rxr)
|
||||
if (slot) {
|
||||
/* slot sj is mapped to the i-th NIC-ring entry */
|
||||
int sj = j + na->rx_rings[rxr->me].nkr_hwofs;
|
||||
uint64_t paddr;
|
||||
void *addr;
|
||||
|
||||
if (sj < 0)
|
||||
sj += na->num_rx_desc;
|
||||
addr = NMB(slot + sj);
|
||||
netmap_load_map(rxr->ptag,
|
||||
rxbuf->pmap, addr, na->buff_size);
|
||||
addr = PNMB(slot + sj, &paddr);
|
||||
netmap_load_map(rxr->ptag, rxbuf->pmap, addr);
|
||||
/* Update descriptor */
|
||||
rxr->rx_base[j].read.pkt_addr = htole64(vtophys(addr));
|
||||
rxr->rx_base[j].read.pkt_addr = htole64(paddr);
|
||||
continue;
|
||||
}
|
||||
#endif /* DEV_NETMAP */
|
||||
|
@ -2670,16 +2670,15 @@ lem_setup_transmit_structures(struct adapter *adapter)
|
||||
if (slot) {
|
||||
/* slot si is mapped to the i-th NIC-ring entry */
|
||||
int si = i + na->tx_rings[0].nkr_hwofs;
|
||||
uint64_t paddr;
|
||||
void *addr;
|
||||
|
||||
if (si > na->num_tx_desc)
|
||||
si -= na->num_tx_desc;
|
||||
addr = NMB(slot + si);
|
||||
adapter->tx_desc_base[si].buffer_addr =
|
||||
htole64(vtophys(addr));
|
||||
addr = PNMB(slot + si, &paddr);
|
||||
adapter->tx_desc_base[si].buffer_addr = htole64(paddr);
|
||||
/* reload the map for netmap mode */
|
||||
netmap_load_map(adapter->txtag,
|
||||
tx_buffer->map, addr, na->buff_size);
|
||||
netmap_load_map(adapter->txtag, tx_buffer->map, addr);
|
||||
}
|
||||
#endif /* DEV_NETMAP */
|
||||
tx_buffer->next_eop = -1;
|
||||
@ -3247,16 +3246,15 @@ lem_setup_receive_structures(struct adapter *adapter)
|
||||
if (slot) {
|
||||
/* slot si is mapped to the i-th NIC-ring entry */
|
||||
int si = i + na->rx_rings[0].nkr_hwofs;
|
||||
uint64_t paddr;
|
||||
void *addr;
|
||||
|
||||
if (si > na->num_rx_desc)
|
||||
si -= na->num_rx_desc;
|
||||
addr = NMB(slot + si);
|
||||
netmap_load_map(adapter->rxtag,
|
||||
rx_buffer->map, addr, na->buff_size);
|
||||
addr = PNMB(slot + si, &paddr);
|
||||
netmap_load_map(adapter->rxtag, rx_buffer->map, addr);
|
||||
/* Update descriptor */
|
||||
adapter->rx_desc_base[i].buffer_addr =
|
||||
htole64(vtophys(addr));
|
||||
adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
|
||||
continue;
|
||||
}
|
||||
#endif /* DEV_NETMAP */
|
||||
|
@ -2876,8 +2876,7 @@ ixgbe_setup_transmit_ring(struct tx_ring *txr)
|
||||
|
||||
if (si >= na->num_tx_desc)
|
||||
si -= na->num_tx_desc;
|
||||
netmap_load_map(txr->txtag, txbuf->map,
|
||||
NMB(slot + si), na->buff_size);
|
||||
netmap_load_map(txr->txtag, txbuf->map, NMB(slot + si));
|
||||
}
|
||||
#endif /* DEV_NETMAP */
|
||||
/* Clear the EOP index */
|
||||
@ -3810,16 +3809,15 @@ ixgbe_setup_receive_ring(struct rx_ring *rxr)
|
||||
*/
|
||||
if (slot) {
|
||||
int sj = j + na->rx_rings[rxr->me].nkr_hwofs;
|
||||
uint64_t paddr;
|
||||
void *addr;
|
||||
|
||||
if (sj >= na->num_rx_desc)
|
||||
sj -= na->num_rx_desc;
|
||||
addr = NMB(slot + sj);
|
||||
netmap_load_map(rxr->ptag,
|
||||
rxbuf->pmap, addr, na->buff_size);
|
||||
addr = PNMB(slot + sj, &paddr);
|
||||
netmap_load_map(rxr->ptag, rxbuf->pmap, addr);
|
||||
/* Update descriptor */
|
||||
rxr->rx_base[j].read.pkt_addr =
|
||||
htole64(vtophys(addr));
|
||||
rxr->rx_base[j].read.pkt_addr = htole64(paddr);
|
||||
continue;
|
||||
}
|
||||
#endif /* DEV_NETMAP */
|
||||
|
@ -222,7 +222,8 @@ em_netmap_txsync(void *a, u_int ring_nr, int do_lock)
|
||||
int flags = ((slot->flags & NS_REPORT) ||
|
||||
j == 0 || j == report_frequency) ?
|
||||
E1000_TXD_CMD_RS : 0;
|
||||
void *addr = NMB(slot);
|
||||
uint64_t paddr;
|
||||
void *addr = PNMB(slot, &paddr);
|
||||
int len = slot->len;
|
||||
if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) {
|
||||
if (do_lock)
|
||||
@ -236,10 +237,9 @@ em_netmap_txsync(void *a, u_int ring_nr, int do_lock)
|
||||
htole32(adapter->txd_cmd | len |
|
||||
(E1000_TXD_CMD_EOP | flags) );
|
||||
if (slot->flags & NS_BUF_CHANGED) {
|
||||
curr->buffer_addr = htole64(vtophys(addr));
|
||||
curr->buffer_addr = htole64(paddr);
|
||||
/* buffer has changed, reload map */
|
||||
netmap_reload_map(txr->txtag, txbuf->map,
|
||||
addr, na->buff_size);
|
||||
netmap_reload_map(txr->txtag, txbuf->map, addr);
|
||||
slot->flags &= ~NS_BUF_CHANGED;
|
||||
}
|
||||
|
||||
@ -355,7 +355,8 @@ em_netmap_rxsync(void *a, u_int ring_nr, int do_lock)
|
||||
struct netmap_slot *slot = &ring->slot[j];
|
||||
struct e1000_rx_desc *curr = &rxr->rx_base[l];
|
||||
struct em_buffer *rxbuf = &rxr->rx_buffers[l];
|
||||
void *addr = NMB(slot);
|
||||
uint64_t paddr;
|
||||
void *addr = PNMB(slot, &paddr);
|
||||
|
||||
if (addr == netmap_buffer_base) { /* bad buf */
|
||||
if (do_lock)
|
||||
@ -365,10 +366,9 @@ em_netmap_rxsync(void *a, u_int ring_nr, int do_lock)
|
||||
|
||||
curr->status = 0;
|
||||
if (slot->flags & NS_BUF_CHANGED) {
|
||||
curr->buffer_addr = htole64(vtophys(addr));
|
||||
curr->buffer_addr = htole64(paddr);
|
||||
/* buffer has changed, reload map */
|
||||
netmap_reload_map(rxr->rxtag, rxbuf->map,
|
||||
addr, na->buff_size);
|
||||
netmap_reload_map(rxr->rxtag, rxbuf->map, addr);
|
||||
slot->flags &= ~NS_BUF_CHANGED;
|
||||
}
|
||||
|
||||
|
@ -181,7 +181,8 @@ igb_netmap_txsync(void *a, u_int ring_nr, int do_lock)
|
||||
struct igb_tx_buffer *txbuf = &txr->tx_buffers[l];
|
||||
union e1000_adv_tx_desc *curr =
|
||||
(union e1000_adv_tx_desc *)&txr->tx_base[l];
|
||||
void *addr = NMB(slot);
|
||||
uint64_t paddr;
|
||||
void *addr = PNMB(slot, &paddr);
|
||||
int flags = ((slot->flags & NS_REPORT) ||
|
||||
j == 0 || j == report_frequency) ?
|
||||
E1000_ADVTXD_DCMD_RS : 0;
|
||||
@ -195,7 +196,7 @@ igb_netmap_txsync(void *a, u_int ring_nr, int do_lock)
|
||||
|
||||
slot->flags &= ~NS_REPORT;
|
||||
// XXX do we need to set the address ?
|
||||
curr->read.buffer_addr = htole64(vtophys(addr));
|
||||
curr->read.buffer_addr = htole64(paddr);
|
||||
curr->read.olinfo_status =
|
||||
htole32(olinfo_status |
|
||||
(len<< E1000_ADVTXD_PAYLEN_SHIFT));
|
||||
@ -206,8 +207,7 @@ igb_netmap_txsync(void *a, u_int ring_nr, int do_lock)
|
||||
E1000_ADVTXD_DCMD_EOP | flags);
|
||||
if (slot->flags & NS_BUF_CHANGED) {
|
||||
/* buffer has changed, reload map */
|
||||
netmap_reload_map(txr->txtag, txbuf->map,
|
||||
addr, na->buff_size);
|
||||
netmap_reload_map(txr->txtag, txbuf->map, addr);
|
||||
slot->flags &= ~NS_BUF_CHANGED;
|
||||
}
|
||||
|
||||
@ -317,7 +317,8 @@ igb_netmap_rxsync(void *a, u_int ring_nr, int do_lock)
|
||||
struct netmap_slot *slot = ring->slot + j;
|
||||
union e1000_adv_rx_desc *curr = &rxr->rx_base[l];
|
||||
struct igb_rx_buf *rxbuf = rxr->rx_buffers + l;
|
||||
void *addr = NMB(slot);
|
||||
uint64_t paddr;
|
||||
void *addr = PNMB(slot, &paddr);
|
||||
|
||||
if (addr == netmap_buffer_base) { /* bad buf */
|
||||
if (do_lock)
|
||||
@ -326,10 +327,9 @@ igb_netmap_rxsync(void *a, u_int ring_nr, int do_lock)
|
||||
}
|
||||
|
||||
curr->wb.upper.status_error = 0;
|
||||
curr->read.pkt_addr = htole64(vtophys(addr));
|
||||
curr->read.pkt_addr = htole64(paddr);
|
||||
if (slot->flags & NS_BUF_CHANGED) {
|
||||
netmap_reload_map(rxr->ptag, rxbuf->pmap,
|
||||
addr, na->buff_size);
|
||||
netmap_reload_map(rxr->ptag, rxbuf->pmap, addr);
|
||||
slot->flags &= ~NS_BUF_CHANGED;
|
||||
}
|
||||
|
||||
|
@ -186,7 +186,8 @@ lem_netmap_txsync(void *a, u_int ring_nr, int do_lock)
|
||||
struct netmap_slot *slot = &ring->slot[j];
|
||||
struct e1000_tx_desc *curr = &adapter->tx_desc_base[l];
|
||||
struct em_buffer *txbuf = &adapter->tx_buffer_area[l];
|
||||
void *addr = NMB(slot);
|
||||
uint64_t paddr;
|
||||
void *addr = PNMB(slot, &paddr);
|
||||
int flags = ((slot->flags & NS_REPORT) ||
|
||||
j == 0 || j == report_frequency) ?
|
||||
E1000_TXD_CMD_RS : 0;
|
||||
@ -204,10 +205,9 @@ lem_netmap_txsync(void *a, u_int ring_nr, int do_lock)
|
||||
htole32( adapter->txd_cmd | len |
|
||||
(E1000_TXD_CMD_EOP | flags) );
|
||||
if (slot->flags & NS_BUF_CHANGED) {
|
||||
curr->buffer_addr = htole64(vtophys(addr));
|
||||
curr->buffer_addr = htole64(paddr);
|
||||
/* buffer has changed, reload map */
|
||||
netmap_reload_map(adapter->txtag, txbuf->map,
|
||||
addr, na->buff_size);
|
||||
netmap_reload_map(adapter->txtag, txbuf->map, addr);
|
||||
slot->flags &= ~NS_BUF_CHANGED;
|
||||
}
|
||||
|
||||
@ -314,7 +314,8 @@ lem_netmap_rxsync(void *a, u_int ring_nr, int do_lock)
|
||||
struct netmap_slot *slot = &ring->slot[j];
|
||||
struct e1000_rx_desc *curr = &adapter->rx_desc_base[l];
|
||||
struct em_buffer *rxbuf = &adapter->rx_buffer_area[l];
|
||||
void *addr = NMB(slot);
|
||||
uint64_t paddr;
|
||||
void *addr = PNMB(slot, &paddr);
|
||||
|
||||
if (addr == netmap_buffer_base) { /* bad buf */
|
||||
if (do_lock)
|
||||
@ -323,10 +324,9 @@ lem_netmap_rxsync(void *a, u_int ring_nr, int do_lock)
|
||||
}
|
||||
curr->status = 0;
|
||||
if (slot->flags & NS_BUF_CHANGED) {
|
||||
curr->buffer_addr = htole64(vtophys(addr));
|
||||
curr->buffer_addr = htole64(paddr);
|
||||
/* buffer has changed, and reload map */
|
||||
netmap_reload_map(adapter->rxtag, rxbuf->map,
|
||||
addr, na->buff_size);
|
||||
netmap_reload_map(adapter->rxtag, rxbuf->map, addr);
|
||||
slot->flags &= ~NS_BUF_CHANGED;
|
||||
}
|
||||
|
||||
|
@ -183,7 +183,8 @@ re_netmap_txsync(void *a, u_int ring_nr, int do_lock)
|
||||
struct rl_desc *desc = &sc->rl_ldata.rl_tx_list[l];
|
||||
int cmd = slot->len | RL_TDESC_CMD_EOF |
|
||||
RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF ;
|
||||
void *addr = NMB(slot);
|
||||
uint64_t paddr;
|
||||
void *addr = PNMB(slot, &paddr);
|
||||
int len = slot->len;
|
||||
|
||||
if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) {
|
||||
@ -197,12 +198,11 @@ re_netmap_txsync(void *a, u_int ring_nr, int do_lock)
|
||||
cmd |= RL_TDESC_CMD_EOR;
|
||||
|
||||
if (slot->flags & NS_BUF_CHANGED) {
|
||||
uint64_t paddr = vtophys(addr);
|
||||
desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
|
||||
desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
|
||||
/* buffer has changed, unload and reload map */
|
||||
netmap_reload_map(sc->rl_ldata.rl_tx_mtag,
|
||||
txd[l].tx_dmamap, addr, na->buff_size);
|
||||
txd[l].tx_dmamap, addr);
|
||||
slot->flags &= ~NS_BUF_CHANGED;
|
||||
}
|
||||
slot->flags &= ~NS_REPORT;
|
||||
@ -304,7 +304,8 @@ re_netmap_rxsync(void *a, u_int ring_nr, int do_lock)
|
||||
struct netmap_slot *slot = ring->slot + j;
|
||||
struct rl_desc *desc = &sc->rl_ldata.rl_rx_list[l];
|
||||
int cmd = na->buff_size | RL_RDESC_CMD_OWN;
|
||||
void *addr = NMB(slot);
|
||||
uint64_t paddr;
|
||||
void *addr = PNMB(slot, &paddr);
|
||||
|
||||
if (addr == netmap_buffer_base) { /* bad buf */
|
||||
if (do_lock)
|
||||
@ -318,11 +319,10 @@ re_netmap_rxsync(void *a, u_int ring_nr, int do_lock)
|
||||
desc->rl_cmdstat = htole32(cmd);
|
||||
slot->flags &= ~NS_REPORT;
|
||||
if (slot->flags & NS_BUF_CHANGED) {
|
||||
uint64_t paddr = vtophys(addr);
|
||||
desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
|
||||
desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
|
||||
netmap_reload_map(sc->rl_ldata.rl_rx_mtag,
|
||||
rxd[l].rx_dmamap, addr, na->buff_size);
|
||||
rxd[l].rx_dmamap, addr);
|
||||
slot->flags &= ~NS_BUF_CHANGED;
|
||||
}
|
||||
bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
|
||||
@ -377,12 +377,11 @@ re_netmap_tx_init(struct rl_softc *sc)
|
||||
if (l >= n)
|
||||
l -= n;
|
||||
|
||||
addr = NMB(slot + l);
|
||||
paddr = vtophys(addr);
|
||||
addr = PNMB(slot + l, &paddr);
|
||||
desc[i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
|
||||
desc[i].rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
|
||||
netmap_load_map(sc->rl_ldata.rl_tx_mtag,
|
||||
txd[i].tx_dmamap, addr, na->buff_size);
|
||||
txd[i].tx_dmamap, addr);
|
||||
}
|
||||
}
|
||||
|
||||
@ -407,8 +406,12 @@ re_netmap_rx_init(struct rl_softc *sc)
|
||||
if (l >= n)
|
||||
l -= n;
|
||||
|
||||
addr = NMB(slot + l);
|
||||
paddr = vtophys(addr);
|
||||
addr = PNMB(slot + l, &paddr);
|
||||
|
||||
netmap_reload_map(sc->rl_ldata.rl_rx_mtag,
|
||||
sc->rl_ldata.rl_rx_desc[i].rx_dmamap, addr);
|
||||
bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
|
||||
sc->rl_ldata.rl_rx_desc[i].rx_dmamap, BUS_DMASYNC_PREREAD);
|
||||
desc[i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
|
||||
desc[i].rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
|
||||
cmdstat = na->buff_size;
|
||||
@ -422,9 +425,5 @@ re_netmap_rx_init(struct rl_softc *sc)
|
||||
if (i < n - 1 - kring->nr_hwavail) // XXX + 1 ?
|
||||
cmdstat |= RL_RDESC_CMD_OWN;
|
||||
desc[i].rl_cmdstat = htole32(cmdstat);
|
||||
|
||||
netmap_reload_map(sc->rl_ldata.rl_rx_mtag,
|
||||
sc->rl_ldata.rl_rx_desc[i].rx_dmamap,
|
||||
addr, na->buff_size);
|
||||
}
|
||||
}
|
||||
|
@ -253,7 +253,7 @@ ixgbe_netmap_txsync(void *a, u_int ring_nr, int do_lock)
|
||||
* Note that txbuf and curr are indexed by l.
|
||||
*
|
||||
* In this driver we collect the buffer address
|
||||
* (using the NMB() macro) because we always
|
||||
* (using the PNMB() macro) because we always
|
||||
* need to rewrite it into the NIC ring.
|
||||
* Many other drivers preserve the address, so
|
||||
* we only need to access it if NS_BUF_CHANGED
|
||||
@ -262,7 +262,8 @@ ixgbe_netmap_txsync(void *a, u_int ring_nr, int do_lock)
|
||||
struct netmap_slot *slot = &ring->slot[j];
|
||||
struct ixgbe_tx_buf *txbuf = &txr->tx_buffers[l];
|
||||
union ixgbe_adv_tx_desc *curr = &txr->tx_base[l];
|
||||
void *addr = NMB(slot);
|
||||
uint64_t paddr;
|
||||
void *addr = PNMB(slot, &paddr);
|
||||
// XXX type for flags and len ?
|
||||
int flags = ((slot->flags & NS_REPORT) ||
|
||||
j == 0 || j == report_frequency) ?
|
||||
@ -290,7 +291,7 @@ ixgbe_netmap_txsync(void *a, u_int ring_nr, int do_lock)
|
||||
* address in the NIC ring. Other drivers do not
|
||||
* need this.
|
||||
*/
|
||||
curr->read.buffer_addr = htole64(vtophys(addr));
|
||||
curr->read.buffer_addr = htole64(paddr);
|
||||
curr->read.olinfo_status = 0;
|
||||
curr->read.cmd_type_len =
|
||||
htole32(txr->txd_cmd | len |
|
||||
@ -303,8 +304,7 @@ ixgbe_netmap_txsync(void *a, u_int ring_nr, int do_lock)
|
||||
*/
|
||||
if (slot->flags & NS_BUF_CHANGED) {
|
||||
/* buffer has changed, unload and reload map */
|
||||
netmap_reload_map(txr->txtag, txbuf->map,
|
||||
addr, na->buff_size);
|
||||
netmap_reload_map(txr->txtag, txbuf->map, addr);
|
||||
slot->flags &= ~NS_BUF_CHANGED;
|
||||
}
|
||||
|
||||
@ -479,16 +479,16 @@ ixgbe_netmap_rxsync(void *a, u_int ring_nr, int do_lock)
|
||||
struct netmap_slot *slot = &ring->slot[j];
|
||||
union ixgbe_adv_rx_desc *curr = &rxr->rx_base[l];
|
||||
struct ixgbe_rx_buf *rxbuf = &rxr->rx_buffers[l];
|
||||
void *addr = NMB(slot);
|
||||
uint64_t paddr;
|
||||
void *addr = PNMB(slot, &paddr);
|
||||
|
||||
if (addr == netmap_buffer_base) /* bad buf */
|
||||
goto ring_reset;
|
||||
|
||||
curr->wb.upper.status_error = 0;
|
||||
curr->read.pkt_addr = htole64(vtophys(addr));
|
||||
curr->read.pkt_addr = htole64(paddr);
|
||||
if (slot->flags & NS_BUF_CHANGED) {
|
||||
netmap_reload_map(rxr->ptag, rxbuf->pmap,
|
||||
addr, na->buff_size);
|
||||
netmap_reload_map(rxr->ptag, rxbuf->pmap, addr);
|
||||
slot->flags &= ~NS_BUF_CHANGED;
|
||||
}
|
||||
|
||||
|
@ -104,6 +104,13 @@ MALLOC_DEFINE(M_NETMAP, "netmap", "Network memory map");
|
||||
static void * netmap_malloc(size_t size, const char *msg);
|
||||
static void netmap_free(void *addr, const char *msg);
|
||||
|
||||
#define netmap_if_malloc(len) netmap_malloc(len, "nifp")
|
||||
#define netmap_if_free(v) netmap_free((v), "nifp")
|
||||
|
||||
#define netmap_ring_malloc(len) netmap_malloc(len, "ring")
|
||||
#define netmap_free_rings(na) \
|
||||
netmap_free((na)->tx_rings[0].ring, "shadow rings");
|
||||
|
||||
/*
|
||||
* Allocator for a pool of packet buffers. For each buffer we have
|
||||
* one entry in the bitmap to signal the state. Allocation scans
|
||||
@ -123,7 +130,7 @@ struct netmap_buf_pool {
|
||||
struct netmap_buf_pool nm_buf_pool;
|
||||
/* XXX move these two vars back into netmap_buf_pool */
|
||||
u_int netmap_total_buffers;
|
||||
char *netmap_buffer_base;
|
||||
char *netmap_buffer_base; /* address of an invalid buffer */
|
||||
|
||||
/* user-controlled variables */
|
||||
int netmap_verbose;
|
||||
@ -233,6 +240,12 @@ struct netmap_priv_d {
|
||||
uint16_t np_txpoll;
|
||||
};
|
||||
|
||||
/* Shorthand to compute a netmap interface offset. */
|
||||
#define netmap_if_offset(v) \
|
||||
((char *) (v) - (char *) netmap_mem_d->nm_buffer)
|
||||
/* .. and get a physical address given a memory offset */
|
||||
#define netmap_ofstophys(o) \
|
||||
(vtophys(netmap_mem_d->nm_buffer) + (o))
|
||||
|
||||
static struct cdev *netmap_dev; /* /dev/netmap character device. */
|
||||
static struct netmap_mem_d *netmap_mem_d; /* Our memory allocator. */
|
||||
@ -397,10 +410,10 @@ netmap_dtor(void *data)
|
||||
ring->slot[j].buf_idx);
|
||||
}
|
||||
NMA_UNLOCK();
|
||||
netmap_free(na->tx_rings[0].ring, "shadow rings");
|
||||
netmap_free_rings(na);
|
||||
wakeup(na);
|
||||
}
|
||||
netmap_free(nifp, "nifp");
|
||||
netmap_if_free(nifp);
|
||||
|
||||
na->nm_lock(ifp->if_softc, NETMAP_CORE_UNLOCK, 0);
|
||||
|
||||
@ -432,7 +445,7 @@ netmap_if_new(const char *ifname, struct netmap_adapter *na)
|
||||
* to the tx and rx rings in the shared memory region.
|
||||
*/
|
||||
len = sizeof(struct netmap_if) + 2 * n * sizeof(ssize_t);
|
||||
nifp = netmap_malloc(len, "nifp");
|
||||
nifp = netmap_if_malloc(len);
|
||||
if (nifp == NULL)
|
||||
return (NULL);
|
||||
|
||||
@ -455,13 +468,13 @@ netmap_if_new(const char *ifname, struct netmap_adapter *na)
|
||||
len = n * (2 * sizeof(struct netmap_ring) +
|
||||
(na->num_tx_desc + na->num_rx_desc) *
|
||||
sizeof(struct netmap_slot) );
|
||||
buff = netmap_malloc(len, "shadow rings");
|
||||
buff = netmap_ring_malloc(len);
|
||||
if (buff == NULL) {
|
||||
D("failed to allocate %d bytes for %s shadow ring",
|
||||
len, ifname);
|
||||
error:
|
||||
(na->refcount)--;
|
||||
netmap_free(nifp, "nifp, rings failed");
|
||||
netmap_if_free(nifp);
|
||||
return (NULL);
|
||||
}
|
||||
/* do we have the buffers ? we are in need of num_tx_desc buffers for
|
||||
@ -907,11 +920,12 @@ netmap_ioctl(__unused struct cdev *dev, u_long cmd, caddr_t data,
|
||||
/*
|
||||
* do something similar to netmap_dtor().
|
||||
*/
|
||||
netmap_free(na->tx_rings[0].ring, "rings, reg.failed");
|
||||
free(na->tx_rings, M_DEVBUF);
|
||||
netmap_free_rings(na);
|
||||
// XXX tx_rings is inline, must not be freed.
|
||||
// free(na->tx_rings, M_DEVBUF); // XXX wrong ?
|
||||
na->tx_rings = na->rx_rings = NULL;
|
||||
na->refcount--;
|
||||
netmap_free(nifp, "nifp, rings failed");
|
||||
netmap_if_free(nifp);
|
||||
nifp = NULL;
|
||||
}
|
||||
}
|
||||
@ -1388,22 +1402,18 @@ ns_dmamap_cb(__unused void *arg, __unused bus_dma_segment_t * segs,
|
||||
* XXX buflen is probably not needed, buffers have constant size.
|
||||
*/
|
||||
void
|
||||
netmap_reload_map(bus_dma_tag_t tag, bus_dmamap_t map,
|
||||
void *buf, bus_size_t buflen)
|
||||
netmap_reload_map(bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
|
||||
{
|
||||
bus_addr_t paddr;
|
||||
bus_dmamap_unload(tag, map);
|
||||
bus_dmamap_load(tag, map, buf, buflen, ns_dmamap_cb, &paddr,
|
||||
BUS_DMA_NOWAIT);
|
||||
bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE, ns_dmamap_cb,
|
||||
NULL, BUS_DMA_NOWAIT);
|
||||
}
|
||||
|
||||
void
|
||||
netmap_load_map(bus_dma_tag_t tag, bus_dmamap_t map,
|
||||
void *buf, bus_size_t buflen)
|
||||
netmap_load_map(bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
|
||||
{
|
||||
bus_addr_t paddr;
|
||||
bus_dmamap_load(tag, map, buf, buflen, ns_dmamap_cb, &paddr,
|
||||
BUS_DMA_NOWAIT);
|
||||
bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE, ns_dmamap_cb,
|
||||
NULL, BUS_DMA_NOWAIT);
|
||||
}
|
||||
|
||||
/*------ netmap memory allocator -------*/
|
||||
|
@ -169,10 +169,8 @@ int netmap_start(struct ifnet *, struct mbuf *);
|
||||
enum txrx { NR_RX = 0, NR_TX = 1 };
|
||||
struct netmap_slot *netmap_reset(struct netmap_adapter *na,
|
||||
enum txrx tx, int n, u_int new_cur);
|
||||
void netmap_load_map(bus_dma_tag_t tag, bus_dmamap_t map,
|
||||
void *buf, bus_size_t buflen);
|
||||
void netmap_reload_map(bus_dma_tag_t tag, bus_dmamap_t map,
|
||||
void *buf, bus_size_t buflen);
|
||||
void netmap_load_map(bus_dma_tag_t tag, bus_dmamap_t map, void *buf);
|
||||
void netmap_reload_map(bus_dma_tag_t tag, bus_dmamap_t map, void *buf);
|
||||
int netmap_ring_reinit(struct netmap_kring *);
|
||||
|
||||
/*
|
||||
@ -206,11 +204,11 @@ enum { /* verbose flags */
|
||||
|
||||
|
||||
/*
|
||||
* return the address of a buffer.
|
||||
* NMB return the virtual address of a buffer (buffer 0 on bad index)
|
||||
* PNMB also fills the physical address
|
||||
* XXX this is a special version with hardwired 2k bufs
|
||||
* On error return netmap_buffer_base which is detected as a bad pointer.
|
||||
*/
|
||||
static inline char *
|
||||
static inline void *
|
||||
NMB(struct netmap_slot *slot)
|
||||
{
|
||||
uint32_t i = slot->buf_idx;
|
||||
@ -222,4 +220,18 @@ NMB(struct netmap_slot *slot)
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void *
|
||||
PNMB(struct netmap_slot *slot, uint64_t *pp)
|
||||
{
|
||||
uint32_t i = slot->buf_idx;
|
||||
void *ret = (i >= netmap_total_buffers) ? netmap_buffer_base :
|
||||
#if NETMAP_BUF_SIZE == 2048
|
||||
netmap_buffer_base + (i << 11);
|
||||
#else
|
||||
netmap_buffer_base + (i *NETMAP_BUF_SIZE);
|
||||
#endif
|
||||
*pp = vtophys(ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
#endif /* _NET_NETMAP_KERN_H_ */
|
||||
|
Loading…
Reference in New Issue
Block a user