o As illustrated by e.g. figure 7-14 of the Intel 82599 10 GbE
  controller datasheet revision 3.3, in the context of Ethernet MACs
  the control data describing the packet buffers are typically named
  "descriptors".  Each of these descriptors references one buffer, and
  a packet may be composed of multiple such buffers.  By contrast, in
  comments, messages and the names of structure members, iflib(4)
  refers to the DMA resources employed for RX and TX buffers (rather
  than to those for control data) as "desc(riptors)".  This odd naming
  convention made reviewing r343085, and in particular identifying
  wrong and missing bus_dmamap_sync(9) calls, harder than such reviews
  already are.  The convention may also explain why the netmap(4) part
  of iflib(4) pairs the DMA tags for control data with the DMA maps of
  buffers and vice versa in calls to bus_dma(9) functions (a sketch of
  the correct pairing follows this message).  Therefore, change
  iflib(4) to refer to buf(fers) when buffers rather than descriptors
  in the usual sense are meant.  This change does not include
  correcting the DMA resources used in the netmap(4) parts.  However,
  it does revise the error messages to state which kind of allocation
  or creation failed; in particular, the "Unable to allocate tx_buffer
  (map) memory" message copied and pasted inappropriately in several
  places is replaced with proper messages.
o Enhance some further error messages to indicate which half - RX or
  TX - they apply to instead of using identical text in both cases,
  and generally canonicalize them.
o Correct the descriptions of iflib_{r,t}xsd_alloc() to reflect
  reality; the current code doesn't use {r,t}x_buffer structures.
o In iflib_queues_alloc():
  - Remove the redundant BUS_DMA_NOWAIT from the iflib_dma_alloc()
    calls, and
  - change the M_WAITOK of the malloc(9) calls into M_NOWAIT.
  The return values are already checked, deferred DMA allocations are
  not an option at this point, BUS_DMA_NOWAIT has to be used anyway,
  and prior malloc(9) calls in this function also specify M_NOWAIT.

Reviewed by:	shurd
Differential Revision:	https://reviews.freebsd.org/D19067
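[Editorial sketch, not part of the commit: a minimal, hypothetical
illustration of the bus_dma(9) pattern the message describes - every
packet-buffer DMA map must be created from, and later synced/unloaded
with, the buffer tag, never with the tag of the descriptor-ring control
data - and of M_NOWAIT allocation with a checked return value.  The
struct my_softc, its members and both functions are invented for
illustration and do not exist in iflib(4); a real driver would use its
own malloc type instead of M_DEVBUF.]

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/bus.h>
    #include <sys/errno.h>
    #include <sys/malloc.h>
    #include <machine/bus.h>

    struct my_softc {
        bus_dma_tag_t    buf_tag;   /* constraints of packet buffers */
        bus_dmamap_t    *buf_maps;  /* one DMA map per packet buffer */
        bus_dma_tag_t    desc_tag;  /* constraints of the descriptor ring */
        bus_dmamap_t     desc_map;  /* DMA map of the descriptor ring */
    };

    static int
    my_bufs_alloc(device_t dev, struct my_softc *sc, int nbufs)
    {
        int err, i;

        /* Tag describing the packet buffers, not the descriptor ring. */
        err = bus_dma_tag_create(bus_get_dma_tag(dev),
            1, 0,               /* alignment, boundary */
            BUS_SPACE_MAXADDR,  /* lowaddr */
            BUS_SPACE_MAXADDR,  /* highaddr */
            NULL, NULL,         /* filter, filterarg */
            MCLBYTES,           /* maxsize */
            1,                  /* nsegments */
            MCLBYTES,           /* maxsegsize */
            0, NULL, NULL,      /* flags, lockfunc, lockfuncarg */
            &sc->buf_tag);
        if (err != 0) {
            device_printf(dev,
                "Unable to allocate buffer DMA tag: %d\n", err);
            return (err);
        }

        /*
         * M_NOWAIT with a checked return value; as in the
         * iflib_queues_alloc() change, sleeping buys nothing when the
         * caller handles failure anyway.
         */
        sc->buf_maps = malloc(sizeof(bus_dmamap_t) * nbufs, M_DEVBUF,
            M_NOWAIT | M_ZERO);
        if (sc->buf_maps == NULL)
            return (ENOMEM);

        for (i = 0; i < nbufs; i++) {
            /* Each buffer map is created from buf_tag ... */
            err = bus_dmamap_create(sc->buf_tag, 0, &sc->buf_maps[i]);
            if (err != 0)
                return (err);
        }
        return (0);
    }

    /*
     * ... and every later operation on buffer i must pair buf_tag with
     * buf_maps[i]; pairing desc_tag with buf_maps[i], or buf_tag with
     * desc_map, is exactly the mismatch the rename makes visible.
     */
    static void
    my_buf_rx_done(struct my_softc *sc, int i)
    {
        bus_dmamap_sync(sc->buf_tag, sc->buf_maps[i],
            BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(sc->buf_tag, sc->buf_maps[i]);
    }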
parent 8b153f7788
commit ad1f2c5455

sys/net/iflib.c (216 changed lines)
--- a/sys/net/iflib.c
+++ b/sys/net/iflib.c
@@ -353,8 +353,8 @@ struct iflib_txq {
 	uint8_t		ift_closed;
 	uint8_t		ift_update_freq;
 	struct iflib_filter_info ift_filter_info;
-	bus_dma_tag_t	ift_desc_tag;
-	bus_dma_tag_t	ift_tso_desc_tag;
+	bus_dma_tag_t	ift_buf_tag;
+	bus_dma_tag_t	ift_tso_buf_tag;
 	iflib_dma_info_t	ift_ifdi;
 #define MTX_NAME_LEN 16
 	char                    ift_mtx_name[MTX_NAME_LEN];
@@ -389,7 +389,7 @@ struct iflib_fl {
 	iflib_rxsd_array_t	ifl_sds;
 	iflib_rxq_t	ifl_rxq;
 	uint8_t		ifl_id;
-	bus_dma_tag_t	ifl_desc_tag;
+	bus_dma_tag_t	ifl_buf_tag;
 	iflib_dma_info_t	ifl_ifdi;
 	uint64_t	ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE);
 	caddr_t		ifl_vm_addrs[IFLIB_MAX_RX_REFRESH];
@@ -922,10 +922,9 @@ iflib_netmap_txsync(struct netmap_kring *kring, int flags)
 	if_ctx_t ctx = ifp->if_softc;
 	iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id];
 
-	bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
+	bus_dmamap_sync(txq->ift_buf_tag, txq->ift_ifdi->idi_map,
 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 
-
 	/*
 	 * First part: process new packets to send.
 	 * nm_i is the current index in the netmap kring,
@@ -992,7 +991,8 @@ iflib_netmap_txsync(struct netmap_kring *kring, int flags)
 
 			if (slot->flags & NS_BUF_CHANGED) {
 				/* buffer has changed, reload map */
-				netmap_reload_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[nic_i], addr);
+				netmap_reload_map(na, txq->ift_buf_tag,
+				    txq->ift_sds.ifsd_map[nic_i], addr);
 			}
 			/* make sure changes to the buffer are synced */
 			bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_sds.ifsd_map[nic_i],
@@ -1005,7 +1005,7 @@ iflib_netmap_txsync(struct netmap_kring *kring, int flags)
 		kring->nr_hwcur = nm_i;
 
 		/* synchronize the NIC ring */
-		bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
+		bus_dmamap_sync(txq->ift_buf_tag, txq->ift_ifdi->idi_map,
 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 
 		/* (re)start the tx unit up to slot nic_i (excluded) */
@@ -1072,8 +1072,9 @@ iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
 	for (i = 0, fl = rxq->ifr_fl; i < rxq->ifr_nfl; i++, fl++) {
 		if (fl->ifl_sds.ifsd_map == NULL)
 			continue;
-		bus_dmamap_sync(rxq->ifr_fl[i].ifl_desc_tag, fl->ifl_ifdi->idi_map,
-		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+		bus_dmamap_sync(rxq->ifr_fl[i].ifl_buf_tag,
+		    fl->ifl_ifdi->idi_map,
+		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 	}
 	/*
 	 * First part: import newly received packets.
@@ -1199,7 +1200,8 @@ iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq)
 			 * netmap slot index, si
 			 */
 			int si = netmap_idx_n2k(na->tx_rings[txq->ift_id], i);
-			netmap_load_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[i], NMB(na, slot + si));
+			netmap_load_map(na, txq->ift_buf_tag, txq->ift_sds.ifsd_map[i],
+			    NMB(na, slot + si));
 		}
 	}
 
@@ -1576,12 +1578,13 @@ _iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
 
 /*********************************************************************
  *
- *  Allocate memory for tx_buffer structures. The tx_buffer stores all
- *  the information needed to transmit a packet on the wire. This is
- *  called only once at attach, setup is done every reset.
+ *  Allocate DMA resources for TX buffers as well as memory for the TX
+ *  mbuf map.  TX DMA maps (non-TSO/TSO) and TX mbuf map are kept in a
+ *  iflib_sw_tx_desc_array structure, storing all the information that
+ *  is needed to transmit a packet on the wire.  This is called only
+ *  once at attach, setup is done every reset.
  *
 **********************************************************************/
 
 static int
 iflib_txsd_alloc(iflib_txq_t txq)
 {
@@ -1607,7 +1610,7 @@ iflib_txsd_alloc(iflib_txq_t txq)
 	}
 
 	/*
-	 * Setup DMA descriptor areas.
+	 * Set up DMA tags for TX buffers.
 	 */
 	if ((err = bus_dma_tag_create(bus_get_dma_tag(dev),
 			       1, 0,			/* alignment, bounds */
@@ -1620,7 +1623,7 @@ iflib_txsd_alloc(iflib_txq_t txq)
 			       0,			/* flags */
 			       NULL,			/* lockfunc */
 			       NULL,			/* lockfuncarg */
-			       &txq->ift_desc_tag))) {
+			       &txq->ift_buf_tag))) {
 		device_printf(dev,"Unable to allocate TX DMA tag: %d\n", err);
 		device_printf(dev,"maxsize: %ju nsegments: %d maxsegsize: %ju\n",
 		    (uintmax_t)sctx->isc_tx_maxsize, nsegments, (uintmax_t)sctx->isc_tx_maxsegsize);
@@ -1638,38 +1641,42 @@ iflib_txsd_alloc(iflib_txq_t txq)
 			       0,			/* flags */
 			       NULL,			/* lockfunc */
 			       NULL,			/* lockfuncarg */
-			       &txq->ift_tso_desc_tag))) {
-		device_printf(dev,"Unable to allocate TX TSO DMA tag: %d\n", err);
+			       &txq->ift_tso_buf_tag))) {
+		device_printf(dev, "Unable to allocate TSO TX DMA tag: %d\n",
+		    err);
 		goto fail;
 	}
 
+	/* Allocate memory for the TX mbuf map. */
 	if (!(txq->ift_sds.ifsd_m =
 	    (struct mbuf **) malloc(sizeof(struct mbuf *) *
 	    scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
-		device_printf(dev, "Unable to allocate tx_buffer memory\n");
+		device_printf(dev, "Unable to allocate TX mbuf map memory\n");
 		err = ENOMEM;
 		goto fail;
 	}
 
-	/* Create the descriptor buffer dma maps */
+	/*
+	 * Create the DMA maps for TX buffers.
+	 */
 	if ((txq->ift_sds.ifsd_map = (bus_dmamap_t *)malloc(
 	    sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset],
 	    M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
-		device_printf(dev, "Unable to allocate tx_buffer map memory\n");
+		device_printf(dev,
+		    "Unable to allocate TX buffer DMA map memory\n");
 		err = ENOMEM;
 		goto fail;
 	}
 
 	if (tso && (txq->ift_sds.ifsd_tso_map = (bus_dmamap_t *)malloc(
 	    sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset],
 	    M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
-		device_printf(dev, "Unable to allocate TSO tx_buffer "
-		    "map memory\n");
+		device_printf(dev,
+		    "Unable to allocate TSO TX buffer map memory\n");
 		err = ENOMEM;
 		goto fail;
 	}
 
 	for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) {
-		err = bus_dmamap_create(txq->ift_desc_tag, 0,
+		err = bus_dmamap_create(txq->ift_buf_tag, 0,
 		    &txq->ift_sds.ifsd_map[i]);
 		if (err != 0) {
 			device_printf(dev, "Unable to create TX DMA map\n");
@@ -1677,7 +1684,7 @@ iflib_txsd_alloc(iflib_txq_t txq)
 		}
 		if (!tso)
 			continue;
-		err = bus_dmamap_create(txq->ift_tso_desc_tag, 0,
+		err = bus_dmamap_create(txq->ift_tso_buf_tag, 0,
 		    &txq->ift_sds.ifsd_tso_map[i]);
 		if (err != 0) {
 			device_printf(dev, "Unable to create TSO TX DMA map\n");
@@ -1700,9 +1707,9 @@ iflib_txsd_destroy(if_ctx_t ctx, iflib_txq_t txq, int i)
 		if (txq->ift_sds.ifsd_map != NULL)
 			map = txq->ift_sds.ifsd_map[i];
 		if (map != NULL) {
-			bus_dmamap_sync(txq->ift_desc_tag, map, BUS_DMASYNC_POSTWRITE);
-			bus_dmamap_unload(txq->ift_desc_tag, map);
-			bus_dmamap_destroy(txq->ift_desc_tag, map);
+			bus_dmamap_sync(txq->ift_buf_tag, map, BUS_DMASYNC_POSTWRITE);
+			bus_dmamap_unload(txq->ift_buf_tag, map);
+			bus_dmamap_destroy(txq->ift_buf_tag, map);
 			txq->ift_sds.ifsd_map[i] = NULL;
 		}
 
@@ -1710,10 +1717,10 @@ iflib_txsd_destroy(if_ctx_t ctx, iflib_txq_t txq, int i)
 		if (txq->ift_sds.ifsd_tso_map != NULL)
 			map = txq->ift_sds.ifsd_tso_map[i];
 		if (map != NULL) {
-			bus_dmamap_sync(txq->ift_tso_desc_tag, map,
+			bus_dmamap_sync(txq->ift_tso_buf_tag, map,
 			    BUS_DMASYNC_POSTWRITE);
-			bus_dmamap_unload(txq->ift_tso_desc_tag, map);
-			bus_dmamap_destroy(txq->ift_tso_desc_tag, map);
+			bus_dmamap_unload(txq->ift_tso_buf_tag, map);
+			bus_dmamap_destroy(txq->ift_tso_buf_tag, map);
 			txq->ift_sds.ifsd_tso_map[i] = NULL;
 		}
 	}
@@ -1737,13 +1744,13 @@ iflib_txq_destroy(iflib_txq_t txq)
 		free(txq->ift_sds.ifsd_m, M_IFLIB);
 		txq->ift_sds.ifsd_m = NULL;
 	}
-	if (txq->ift_desc_tag != NULL) {
-		bus_dma_tag_destroy(txq->ift_desc_tag);
-		txq->ift_desc_tag = NULL;
+	if (txq->ift_buf_tag != NULL) {
+		bus_dma_tag_destroy(txq->ift_buf_tag);
+		txq->ift_buf_tag = NULL;
 	}
-	if (txq->ift_tso_desc_tag != NULL) {
-		bus_dma_tag_destroy(txq->ift_tso_desc_tag);
-		txq->ift_tso_desc_tag = NULL;
+	if (txq->ift_tso_buf_tag != NULL) {
+		bus_dma_tag_destroy(txq->ift_tso_buf_tag);
+		txq->ift_tso_buf_tag = NULL;
 	}
 }
 
@@ -1757,14 +1764,14 @@ iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i)
 		return;
 
 	if (txq->ift_sds.ifsd_map != NULL) {
-		bus_dmamap_sync(txq->ift_desc_tag,
+		bus_dmamap_sync(txq->ift_buf_tag,
 		    txq->ift_sds.ifsd_map[i], BUS_DMASYNC_POSTWRITE);
-		bus_dmamap_unload(txq->ift_desc_tag, txq->ift_sds.ifsd_map[i]);
+		bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[i]);
 	}
 	if (txq->ift_sds.ifsd_tso_map != NULL) {
-		bus_dmamap_sync(txq->ift_tso_desc_tag,
+		bus_dmamap_sync(txq->ift_tso_buf_tag,
 		    txq->ift_sds.ifsd_tso_map[i], BUS_DMASYNC_POSTWRITE);
-		bus_dmamap_unload(txq->ift_tso_desc_tag,
+		bus_dmamap_unload(txq->ift_tso_buf_tag,
 		    txq->ift_sds.ifsd_tso_map[i]);
 	}
 	m_free(*mp);
@@ -1803,10 +1810,13 @@ iflib_txq_setup(iflib_txq_t txq)
 
 /*********************************************************************
  *
- *  Allocate memory for rx_buffer structures. Since we use one
- *  rx_buffer per received packet, the maximum number of rx_buffer's
- *  that we'll need is equal to the number of receive descriptors
- *  that we've allocated.
+ *  Allocate DMA resources for RX buffers as well as memory for the RX
+ *  mbuf map, direct RX cluster pointer map and RX cluster bus address
+ *  map.  RX DMA map, RX mbuf map, direct RX cluster pointer map and
+ *  RX cluster map are kept in a iflib_sw_rx_desc_array structure.
+ *  Since we use use one entry in iflib_sw_rx_desc_array per received
+ *  packet, the maximum number of entries we'll need is equal to the
+ *  number of hardware receive descriptors that we've allocated.
 *
 **********************************************************************/
 static int
@@ -1825,6 +1835,7 @@ iflib_rxsd_alloc(iflib_rxq_t rxq)
 	fl = rxq->ifr_fl;
 	for (int i = 0; i <  rxq->ifr_nfl; i++, fl++) {
 		fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */
+		/* Set up DMA tag for RX buffers. */
 		err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
 					 1, 0,			/* alignment, bounds */
 					 BUS_SPACE_MAXADDR,	/* lowaddr */
@@ -1836,45 +1847,56 @@ iflib_rxsd_alloc(iflib_rxq_t rxq)
 					 0,			/* flags */
 					 NULL,			/* lockfunc */
 					 NULL,			/* lockarg */
-					 &fl->ifl_desc_tag);
+					 &fl->ifl_buf_tag);
 		if (err) {
-			device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
-				__func__, err);
+			device_printf(dev,
+			    "Unable to allocate RX DMA tag: %d\n", err);
 			goto fail;
 		}
 
+		/* Allocate memory for the RX mbuf map. */
 		if (!(fl->ifl_sds.ifsd_m =
 		    (struct mbuf **) malloc(sizeof(struct mbuf *) *
 		    scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
-			device_printf(dev, "Unable to allocate tx_buffer memory\n");
+			device_printf(dev,
+			    "Unable to allocate RX mbuf map memory\n");
 			err = ENOMEM;
 			goto fail;
 		}
 
+		/* Allocate memory for the direct RX cluster pointer map. */
 		if (!(fl->ifl_sds.ifsd_cl =
 		    (caddr_t *) malloc(sizeof(caddr_t) *
 		    scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
-			device_printf(dev, "Unable to allocate tx_buffer memory\n");
+			device_printf(dev,
			    "Unable to allocate RX cluster map memory\n");
 			err = ENOMEM;
 			goto fail;
 		}
 
+		/* Allocate memory for the RX cluster bus address map. */
 		if (!(fl->ifl_sds.ifsd_ba =
 		    (bus_addr_t *) malloc(sizeof(bus_addr_t) *
 		    scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
-			device_printf(dev, "Unable to allocate rx bus addr memory\n");
+			device_printf(dev,
+			    "Unable to allocate RX bus address map memory\n");
 			err = ENOMEM;
 			goto fail;
 		}
 
-		/* Create the descriptor buffer dma maps */
+		/*
+		 * Create the DMA maps for RX buffers.
+		 */
 		if (!(fl->ifl_sds.ifsd_map =
 		    (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
-			device_printf(dev, "Unable to allocate tx_buffer map memory\n");
+			device_printf(dev,
+			    "Unable to allocate RX buffer DMA map memory\n");
 			err = ENOMEM;
 			goto fail;
 		}
 
 		for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++) {
-			err = bus_dmamap_create(fl->ifl_desc_tag, 0, &fl->ifl_sds.ifsd_map[i]);
+			err = bus_dmamap_create(fl->ifl_buf_tag, 0,
+			    &fl->ifl_sds.ifsd_map[i]);
 			if (err != 0) {
 				device_printf(dev, "Unable to create RX buffer DMA map\n");
 				goto fail;
@@ -1974,7 +1996,7 @@ _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
 
 			cb_arg.error = 0;
 			MPASS(sd_map != NULL);
-			err = bus_dmamap_load(fl->ifl_desc_tag, sd_map[frag_idx],
+			err = bus_dmamap_load(fl->ifl_buf_tag, sd_map[frag_idx],
 			    cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg,
 			    BUS_DMA_NOWAIT);
 			if (err != 0 || cb_arg.error) {
@@ -1986,7 +2008,7 @@ _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
 				break;
 			}
 
-			bus_dmamap_sync(fl->ifl_desc_tag, sd_map[frag_idx],
+			bus_dmamap_sync(fl->ifl_buf_tag, sd_map[frag_idx],
 			    BUS_DMASYNC_PREREAD);
 			sd_ba[frag_idx] = bus_addr = cb_arg.seg.ds_addr;
 			sd_cl[frag_idx] = cl;
@@ -2087,14 +2109,14 @@ iflib_fl_bufs_free(iflib_fl_t fl)
 
 		if (*sd_cl != NULL) {
 			sd_map = fl->ifl_sds.ifsd_map[i];
-			bus_dmamap_sync(fl->ifl_desc_tag, sd_map,
+			bus_dmamap_sync(fl->ifl_buf_tag, sd_map,
 			    BUS_DMASYNC_POSTREAD);
-			bus_dmamap_unload(fl->ifl_desc_tag, sd_map);
+			bus_dmamap_unload(fl->ifl_buf_tag, sd_map);
 			if (*sd_cl != NULL)
 				uma_zfree(fl->ifl_zone, *sd_cl);
 			// XXX: Should this get moved out?
 			if (iflib_in_detach(fl->ifl_rxq->ifr_ctx))
-				bus_dmamap_destroy(fl->ifl_desc_tag, sd_map);
+				bus_dmamap_destroy(fl->ifl_buf_tag, sd_map);
 			if (*sd_m != NULL) {
 				m_init(*sd_m, M_NOWAIT, MT_DATA, 0);
 				uma_zfree(zone_mbuf, *sd_m);
@@ -2196,23 +2218,23 @@ iflib_rx_sds_free(iflib_rxq_t rxq)
 	if (rxq->ifr_fl != NULL) {
 		for (i = 0; i < rxq->ifr_nfl; i++) {
 			fl = &rxq->ifr_fl[i];
-			if (fl->ifl_desc_tag != NULL) {
+			if (fl->ifl_buf_tag != NULL) {
 				if (fl->ifl_sds.ifsd_map != NULL) {
 					for (j = 0; j < fl->ifl_size; j++) {
 						if (fl->ifl_sds.ifsd_map[j] ==
 						    NULL)
 							continue;
 						bus_dmamap_sync(
-						    fl->ifl_desc_tag,
+						    fl->ifl_buf_tag,
 						    fl->ifl_sds.ifsd_map[j],
 						    BUS_DMASYNC_POSTREAD);
 						bus_dmamap_unload(
-						    fl->ifl_desc_tag,
+						    fl->ifl_buf_tag,
 						    fl->ifl_sds.ifsd_map[j]);
 					}
 				}
-				bus_dma_tag_destroy(fl->ifl_desc_tag);
-				fl->ifl_desc_tag = NULL;
+				bus_dma_tag_destroy(fl->ifl_buf_tag);
+				fl->ifl_buf_tag = NULL;
 			}
 			free(fl->ifl_sds.ifsd_m, M_IFLIB);
 			free(fl->ifl_sds.ifsd_cl, M_IFLIB);
@@ -2497,9 +2519,9 @@ rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, int unload, if_rxsd_t sd)
 
 	/* not valid assert if bxe really does SGE from non-contiguous elements */
 	MPASS(fl->ifl_cidx == cidx);
-	bus_dmamap_sync(fl->ifl_desc_tag, map, BUS_DMASYNC_POSTREAD);
+	bus_dmamap_sync(fl->ifl_buf_tag, map, BUS_DMASYNC_POSTREAD);
 	if (unload)
-		bus_dmamap_unload(fl->ifl_desc_tag, map);
+		bus_dmamap_unload(fl->ifl_buf_tag, map);
 	fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size-1);
 	if (__predict_false(fl->ifl_cidx == 0))
 		fl->ifl_gen = 0;
@@ -2582,7 +2604,7 @@ iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri)
 		m->m_data += 2;
 #endif
 		memcpy(m->m_data, *sd.ifsd_cl, ri->iri_len);
-		bus_dmamap_sync(rxq->ifr_fl->ifl_desc_tag,
+		bus_dmamap_sync(rxq->ifr_fl->ifl_buf_tag,
 		    rxq->ifr_fl->ifl_sds.ifsd_map[ri->iri_frags[0].irf_idx],
 		    BUS_DMASYNC_PREREAD);
 		m->m_len = ri->iri_frags[0].irf_len;
@@ -3083,9 +3105,9 @@ iflib_remove_mbuf(iflib_txq_t txq)
 	ifsd_m = txq->ift_sds.ifsd_m;
 	m = ifsd_m[pidx];
 	ifsd_m[pidx] = NULL;
-	bus_dmamap_unload(txq->ift_desc_tag, txq->ift_sds.ifsd_map[pidx]);
+	bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[pidx]);
 	if (txq->ift_sds.ifsd_tso_map != NULL)
-		bus_dmamap_unload(txq->ift_tso_desc_tag,
+		bus_dmamap_unload(txq->ift_tso_buf_tag,
 		    txq->ift_sds.ifsd_tso_map[pidx]);
 #if MEMORY_LOGGING
 	txq->ift_dequeued++;
@@ -3162,6 +3184,7 @@ iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
 	if_ctx_t		ctx;
 	if_shared_ctx_t		sctx;
 	if_softc_ctx_t		scctx;
+	bus_dma_tag_t		buf_tag;
 	bus_dma_segment_t	*segs;
 	struct mbuf		*m_head, **ifsd_m;
 	void			*next_txd;
@@ -3169,7 +3192,6 @@ iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
 	struct if_pkt_info	pi;
 	int remap = 0;
 	int err, nsegs, ndesc, max_segs, pidx, cidx, next, ntxd;
-	bus_dma_tag_t desc_tag;
 
 	ctx = txq->ift_ctx;
 	sctx = ctx->ifc_sctx;
@@ -3200,13 +3222,13 @@ iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
 	ifsd_m = txq->ift_sds.ifsd_m;
 
 	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
-		desc_tag = txq->ift_tso_desc_tag;
+		buf_tag = txq->ift_tso_buf_tag;
 		max_segs = scctx->isc_tx_tso_segments_max;
 		map = txq->ift_sds.ifsd_tso_map[pidx];
-		MPASS(desc_tag != NULL);
+		MPASS(buf_tag != NULL);
 		MPASS(max_segs > 0);
 	} else {
-		desc_tag = txq->ift_desc_tag;
+		buf_tag = txq->ift_buf_tag;
 		max_segs = scctx->isc_tx_nsegments;
 		map = txq->ift_sds.ifsd_map[pidx];
 	}
@@ -3238,7 +3260,7 @@ iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
 	}
 
 retry:
-	err = bus_dmamap_load_mbuf_sg(desc_tag, map, m_head, segs, &nsegs,
+	err = bus_dmamap_load_mbuf_sg(buf_tag, map, m_head, segs, &nsegs,
 	    BUS_DMA_NOWAIT);
 defrag:
 	if (__predict_false(err)) {
@@ -3284,7 +3306,7 @@ iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
 	 */
 	if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) {
 		txq->ift_no_desc_avail++;
-		bus_dmamap_unload(desc_tag, map);
+		bus_dmamap_unload(buf_tag, map);
 		DBG_COUNTER_INC(encap_txq_avail_fail);
 		DBG_COUNTER_INC(encap_txd_encap_fail);
 		if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0)
@@ -3311,7 +3333,7 @@ iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
 #ifdef PKT_DEBUG
 	print_pkt(&pi);
 #endif
-	bus_dmamap_sync(desc_tag, map, BUS_DMASYNC_PREWRITE);
+	bus_dmamap_sync(buf_tag, map, BUS_DMASYNC_PREWRITE);
 	if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) {
 		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
@@ -3387,16 +3409,16 @@ iflib_tx_desc_free(iflib_txq_t txq, int n)
 		if ((m = ifsd_m[cidx]) != NULL) {
 			prefetch(&ifsd_m[(cidx + CACHE_PTR_INCREMENT) & mask]);
 			if (m->m_pkthdr.csum_flags & CSUM_TSO) {
-				bus_dmamap_sync(txq->ift_tso_desc_tag,
+				bus_dmamap_sync(txq->ift_tso_buf_tag,
 				    txq->ift_sds.ifsd_tso_map[cidx],
 				    BUS_DMASYNC_POSTWRITE);
-				bus_dmamap_unload(txq->ift_tso_desc_tag,
+				bus_dmamap_unload(txq->ift_tso_buf_tag,
 				    txq->ift_sds.ifsd_tso_map[cidx]);
 			} else {
-				bus_dmamap_sync(txq->ift_desc_tag,
+				bus_dmamap_sync(txq->ift_buf_tag,
 				    txq->ift_sds.ifsd_map[cidx],
 				    BUS_DMASYNC_POSTWRITE);
-				bus_dmamap_unload(txq->ift_desc_tag,
+				bus_dmamap_unload(txq->ift_buf_tag,
 				    txq->ift_sds.ifsd_map[cidx]);
 			}
 			/* XXX we don't support any drivers that batch packets yet */
@@ -5203,15 +5225,18 @@ iflib_queues_alloc(if_ctx_t ctx)
 	for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) {
 		/* Set up some basics */
 
-		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) {
-			device_printf(dev, "failed to allocate iflib_dma_info\n");
+		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs,
+		    M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
+			device_printf(dev,
+			    "Unable to allocate TX DMA info memory\n");
 			err = ENOMEM;
 			goto err_tx_desc;
 		}
 		txq->ift_ifdi = ifdip;
 		for (j = 0; j < ntxqs; j++, ifdip++) {
-			if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, BUS_DMA_NOWAIT)) {
-				device_printf(dev, "Unable to allocate Descriptor memory\n");
+			if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, 0)) {
+				device_printf(dev,
+				    "Unable to allocate TX descriptors\n");
 				err = ENOMEM;
 				goto err_tx_desc;
 			}
@@ -5255,8 +5280,10 @@ iflib_queues_alloc(if_ctx_t ctx)
 	for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) {
 		/* Set up some basics */
 
-		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) {
-			device_printf(dev, "failed to allocate iflib_dma_info\n");
+		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs,
+		    M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
+			device_printf(dev,
+			    "Unable to allocate RX DMA info memory\n");
 			err = ENOMEM;
 			goto err_tx_desc;
 		}
@@ -5266,8 +5293,9 @@ iflib_queues_alloc(if_ctx_t ctx)
 		rxq->ifr_ntxqirq = 1;
 		rxq->ifr_txqid[0] = i;
 		for (j = 0; j < nrxqs; j++, ifdip++) {
-			if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, BUS_DMA_NOWAIT)) {
-				device_printf(dev, "Unable to allocate Descriptor memory\n");
+			if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, 0)) {
+				device_printf(dev,
+				    "Unable to allocate RX descriptors\n");
 				err = ENOMEM;
 				goto err_tx_desc;
 			}
@@ -5319,7 +5347,8 @@ iflib_queues_alloc(if_ctx_t ctx)
 		}
 	}
 	if ((err = IFDI_TX_QUEUES_ALLOC(ctx, vaddrs, paddrs, ntxqs, ntxqsets)) != 0) {
-		device_printf(ctx->ifc_dev, "device queue allocation failed\n");
+		device_printf(ctx->ifc_dev,
+		    "Unable to allocate device TX queue\n");
 		iflib_tx_structures_free(ctx);
 		free(vaddrs, M_IFLIB);
 		free(paddrs, M_IFLIB);
@@ -5340,7 +5369,8 @@ iflib_queues_alloc(if_ctx_t ctx)
 		}
 	}
 	if ((err = IFDI_RX_QUEUES_ALLOC(ctx, vaddrs, paddrs, nrxqs, nrxqsets)) != 0) {
-		device_printf(ctx->ifc_dev, "device queue allocation failed\n");
+		device_printf(ctx->ifc_dev,
+		    "Unable to allocate device RX queue\n");
 		iflib_tx_structures_free(ctx);
 		free(vaddrs, M_IFLIB);
 		free(paddrs, M_IFLIB);