Rework and simplify Tx DMA mapping in ENA

When working in LLQ mode, the driver in some cases can send only the
last few segments of an mbuf using the DMA engine, while the rest of
them are sent to the device using a direct PCI transaction. To map only
the necessary data, two DMA maps were used. That solution was very
fragile and was causing a bug: if both maps were used (map_head and
map_seg), there was a race between two flows on two queues, and the
device was receiving corrupted data, which could then be received on
the other host if the Tx checksum offload was enabled.

As it is fine to map the whole mbuf and then send only the needed
segments to the device, the design was simplified to use a single DMA
map.
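
The core of the scheme, condensed from the ena_tx_map_mbuf() changes
below into a standalone sketch: load the whole mbuf chain once, then
build the device descriptors from the returned segments, skipping the
bytes already pushed to the device. map_tx_sketch(), struct sketch_buf,
and the parameters are illustrative, not the driver's actual interface,
and the sketch assumes the LLQ push header fits in the first segment,
which the real code does not require.

	/*
	 * Hypothetical sketch of the single-map Tx path; not the
	 * driver's actual interface.
	 */
	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/errno.h>
	#include <sys/mbuf.h>
	#include <machine/bus.h>

	struct sketch_buf {		/* stand-in for struct ena_com_buf */
		bus_addr_t paddr;
		uint32_t len;
	};

	static int
	map_tx_sketch(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf *mbuf,
	    uint16_t push_len, struct sketch_buf *buf, int *nbufs)
	{
		bus_dma_segment_t segs[32];	/* driver uses ENA_BUS_DMA_SEGS */
		int rc, nsegs, iseg;

		/* One load covers the whole chain, whether or not LLQ is used. */
		rc = bus_dmamap_load_mbuf_sg(tag, map, mbuf, segs, &nsegs,
		    BUS_DMA_NOWAIT);
		if (rc != 0 || nsegs == 0)
			return (rc != 0 ? rc : EIO);

		*nbufs = 0;
		/*
		 * The first push_len bytes were already copied to the device
		 * directly, so the first descriptor starts past them.
		 */
		if (segs[0].ds_len > push_len) {
			buf->paddr = segs[0].ds_addr + push_len;
			buf->len = segs[0].ds_len - push_len;
			buf++;
			(*nbufs)++;
		}

		/* The remaining segments are used as-is. */
		for (iseg = 1; iseg < nsegs; iseg++) {
			buf->paddr = segs[iseg].ds_addr;
			buf->len = segs[iseg].ds_len;
			buf++;
			(*nbufs)++;
		}
		return (0);
	}

Because the single map covers the whole packet, the error and
completion paths each need only one unload, removing the window in
which two half-unloaded maps could race.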

The driver version was updated to v2.1.1, as this is an important bug
fix.

Submitted by: Michal Krawczyk <mk@semihalf.com>
Obtained from: Semihalf
MFC after: 2 weeks
Sponsored by: Amazon, Inc.
Marcin Wojtas 2020-02-24 15:35:31 +00:00
parent 20b91f0aa5
commit 888810f0fb
3 changed files with 54 additions and 148 deletions

sys/dev/ena/ena.c

@@ -558,14 +558,9 @@ ena_release_all_tx_dmamap(struct ena_ring *tx_ring)
 			}
 		}
 #endif /* DEV_NETMAP */
-		if (tx_info->map_head != NULL) {
-			bus_dmamap_destroy(tx_tag, tx_info->map_head);
-			tx_info->map_head = NULL;
-		}
-
-		if (tx_info->map_seg != NULL) {
-			bus_dmamap_destroy(tx_tag, tx_info->map_seg);
-			tx_info->map_seg = NULL;
+		if (tx_info->dmamap != NULL) {
+			bus_dmamap_destroy(tx_tag, tx_info->dmamap);
+			tx_info->dmamap = NULL;
 		}
 	}
 }
@@ -627,24 +622,13 @@ ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
 	/* ... and create the buffer DMA maps */
 	for (i = 0; i < tx_ring->ring_size; i++) {
 		err = bus_dmamap_create(adapter->tx_buf_tag, 0,
-		    &tx_ring->tx_buffer_info[i].map_head);
+		    &tx_ring->tx_buffer_info[i].dmamap);
 		if (unlikely(err != 0)) {
 			ena_trace(ENA_ALERT,
-			    "Unable to create Tx DMA map_head for buffer %d\n",
+			    "Unable to create Tx DMA map for buffer %d\n",
 			    i);
 			goto err_map_release;
 		}
-		tx_ring->tx_buffer_info[i].seg_mapped = false;
-
-		err = bus_dmamap_create(adapter->tx_buf_tag, 0,
-		    &tx_ring->tx_buffer_info[i].map_seg);
-		if (unlikely(err != 0)) {
-			ena_trace(ENA_ALERT,
-			    "Unable to create Tx DMA map_seg for buffer %d\n",
-			    i);
-			goto err_map_release;
-		}
-		tx_ring->tx_buffer_info[i].head_mapped = false;
 
 #ifdef DEV_NETMAP
 		if (adapter->ifp->if_capenable & IFCAP_NETMAP) {
@@ -720,27 +704,12 @@ ena_free_tx_resources(struct ena_adapter *adapter, int qid)
 
 	/* Free buffer DMA maps, */
 	for (int i = 0; i < tx_ring->ring_size; i++) {
-		if (tx_ring->tx_buffer_info[i].head_mapped == true) {
-			bus_dmamap_sync(adapter->tx_buf_tag,
-			    tx_ring->tx_buffer_info[i].map_head,
-			    BUS_DMASYNC_POSTWRITE);
-			bus_dmamap_unload(adapter->tx_buf_tag,
-			    tx_ring->tx_buffer_info[i].map_head);
-			tx_ring->tx_buffer_info[i].head_mapped = false;
-		}
+		bus_dmamap_sync(adapter->tx_buf_tag,
+		    tx_ring->tx_buffer_info[i].dmamap, BUS_DMASYNC_POSTWRITE);
+		bus_dmamap_unload(adapter->tx_buf_tag,
+		    tx_ring->tx_buffer_info[i].dmamap);
 		bus_dmamap_destroy(adapter->tx_buf_tag,
-		    tx_ring->tx_buffer_info[i].map_head);
-
-		if (tx_ring->tx_buffer_info[i].seg_mapped == true) {
-			bus_dmamap_sync(adapter->tx_buf_tag,
-			    tx_ring->tx_buffer_info[i].map_seg,
-			    BUS_DMASYNC_POSTWRITE);
-			bus_dmamap_unload(adapter->tx_buf_tag,
-			    tx_ring->tx_buffer_info[i].map_seg);
-			tx_ring->tx_buffer_info[i].seg_mapped = false;
-		}
-		bus_dmamap_destroy(adapter->tx_buf_tag,
-		    tx_ring->tx_buffer_info[i].map_seg);
+		    tx_ring->tx_buffer_info[i].dmamap);
 
 #ifdef DEV_NETMAP
 		if (adapter->ifp->if_capenable & IFCAP_NETMAP) {
@@ -1209,21 +1178,9 @@ ena_free_tx_bufs(struct ena_adapter *adapter, unsigned int qid)
 			    qid, i);
 		}
 
-		if (tx_info->head_mapped == true) {
-			bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map_head,
-			    BUS_DMASYNC_POSTWRITE);
-			bus_dmamap_unload(adapter->tx_buf_tag,
-			    tx_info->map_head);
-			tx_info->head_mapped = false;
-		}
-
-		if (tx_info->seg_mapped == true) {
-			bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map_seg,
-			    BUS_DMASYNC_POSTWRITE);
-			bus_dmamap_unload(adapter->tx_buf_tag,
-			    tx_info->map_seg);
-			tx_info->seg_mapped = false;
-		}
+		bus_dmamap_sync(adapter->tx_buf_tag, tx_info->dmamap,
+		    BUS_DMASYNC_POSTWRITE);
+		bus_dmamap_unload(adapter->tx_buf_tag, tx_info->dmamap);
 
 		m_free(tx_info->mbuf);
 		tx_info->mbuf = NULL;

sys/dev/ena/ena.h

@@ -41,7 +41,7 @@
 #define	DRV_MODULE_VER_MAJOR	2
 #define	DRV_MODULE_VER_MINOR	1
-#define	DRV_MODULE_VER_SUBMINOR	0
+#define	DRV_MODULE_VER_SUBMINOR	1
 
 #define	DRV_MODULE_NAME		"ena"
@@ -241,13 +241,8 @@ struct ena_tx_buffer {
 	unsigned int tx_descs;
 	/* # of buffers used by this mbuf */
 	unsigned int num_of_bufs;
-	bus_dmamap_t map_head;
-	bus_dmamap_t map_seg;
-
-	/* Indicate if segments of the mbuf were mapped */
-	bool seg_mapped;
-	/* Indicate if bufs[0] maps the linear data of the mbuf */
-	bool head_mapped;
+	bus_dmamap_t dmamap;
 
 	/* Used to detect missing tx packets */
 	struct bintime timestamp;

sys/dev/ena/ena_datapath.c

@@ -52,7 +52,6 @@ static inline void ena_rx_checksum(struct ena_ring *, struct ena_com_rx_ctx *,
 static void	ena_tx_csum(struct ena_com_tx_ctx *, struct mbuf *);
 static int	ena_check_and_collapse_mbuf(struct ena_ring *tx_ring,
     struct mbuf **mbuf);
-static void	ena_dmamap_llq(void *, bus_dma_segment_t *, int, int);
 static int	ena_xmit_mbuf(struct ena_ring *, struct mbuf **);
 static void	ena_start_xmit(struct ena_ring *);
@@ -263,21 +262,10 @@ ena_tx_cleanup(struct ena_ring *tx_ring)
 		tx_info->mbuf = NULL;
 		bintime_clear(&tx_info->timestamp);
 
 		/* Map is no longer required */
-		if (tx_info->head_mapped == true) {
-			bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map_head,
-			    BUS_DMASYNC_POSTWRITE);
-			bus_dmamap_unload(adapter->tx_buf_tag,
-			    tx_info->map_head);
-			tx_info->head_mapped = false;
-		}
-
-		if (tx_info->seg_mapped == true) {
-			bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map_seg,
-			    BUS_DMASYNC_POSTWRITE);
-			bus_dmamap_unload(adapter->tx_buf_tag,
-			    tx_info->map_seg);
-			tx_info->seg_mapped = false;
-		}
+		bus_dmamap_sync(adapter->tx_buf_tag, tx_info->dmamap,
+		    BUS_DMASYNC_POSTWRITE);
+		bus_dmamap_unload(adapter->tx_buf_tag,
+		    tx_info->dmamap);
 
 		ena_trace(ENA_DBG | ENA_TXPTH, "tx: q %d mbuf %p completed\n",
 		    tx_ring->qid, mbuf);
@@ -814,22 +802,6 @@ ena_check_and_collapse_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf)
 	return (0);
 }
 
-static void
-ena_dmamap_llq(void *arg, bus_dma_segment_t *segs, int nseg, int error)
-{
-	struct ena_com_buf *ena_buf = arg;
-
-	if (unlikely(error != 0)) {
-		ena_buf->paddr = 0;
-		return;
-	}
-
-	KASSERT(nseg == 1, ("Invalid num of segments for LLQ dma"));
-
-	ena_buf->paddr = segs->ds_addr;
-	ena_buf->len = segs->ds_len;
-}
-
 static int
 ena_tx_map_mbuf(struct ena_ring *tx_ring, struct ena_tx_buffer *tx_info,
     struct mbuf *mbuf, void **push_hdr, u16 *header_len)
@@ -837,15 +809,29 @@ ena_tx_map_mbuf(struct ena_ring *tx_ring, struct ena_tx_buffer *tx_info,
 	struct ena_adapter *adapter = tx_ring->adapter;
 	struct ena_com_buf *ena_buf;
 	bus_dma_segment_t segs[ENA_BUS_DMA_SEGS];
+	size_t iseg = 0;
 	uint32_t mbuf_head_len, frag_len;
 	uint16_t push_len = 0;
 	uint16_t delta = 0;
-	int i, rc, nsegs;
+	int rc, nsegs;
 
 	mbuf_head_len = mbuf->m_len;
 	tx_info->mbuf = mbuf;
 	ena_buf = tx_info->bufs;
 
+	/*
+	 * For easier maintaining of the DMA map, map the whole mbuf even if
+	 * the LLQ is used. The descriptors will be filled using the segments.
+	 */
+	rc = bus_dmamap_load_mbuf_sg(adapter->tx_buf_tag, tx_info->dmamap, mbuf,
+	    segs, &nsegs, BUS_DMA_NOWAIT);
+	if (unlikely((rc != 0) || (nsegs == 0))) {
+		ena_trace(ENA_WARNING,
+		    "dmamap load failed! err: %d nsegs: %d\n", rc, nsegs);
+		goto dma_error;
+	}
+
 	if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
 		/*
 		 * When the device is LLQ mode, the driver will copy
@@ -885,19 +871,16 @@ ena_tx_map_mbuf(struct ena_ring *tx_ring, struct ena_tx_buffer *tx_info,
 		 * in the first mbuf of the mbuf chain.
 		 */
 		if (mbuf_head_len > push_len) {
-			rc = bus_dmamap_load(adapter->tx_buf_tag,
-			    tx_info->map_head,
-			    mbuf->m_data + push_len, mbuf_head_len - push_len,
-			    ena_dmamap_llq, ena_buf, BUS_DMA_NOWAIT);
-			if (unlikely((rc != 0) || (ena_buf->paddr == 0)))
-				goto single_dma_error;
-
+			ena_buf->paddr = segs[iseg].ds_addr + push_len;
+			ena_buf->len = segs[iseg].ds_len - push_len;
 			ena_buf++;
 			tx_info->num_of_bufs++;
-
-			tx_info->head_mapped = true;
 		}
 		mbuf = mbuf->m_next;
+		/*
+		 * Advance the seg index as either the 1st mbuf was mapped or is
+		 * a part of push_hdr.
+		 */
+		iseg++;
 	} else {
 		*push_hdr = NULL;
 		/*
@@ -918,7 +901,7 @@ ena_tx_map_mbuf(struct ena_ring *tx_ring, struct ena_tx_buffer *tx_info,
 		 * If LLQ is not supported, the loop will be skipped.
 		 */
 		while (delta > 0) {
-			frag_len = mbuf->m_len;
+			frag_len = segs[iseg].ds_len;
 
 			/*
 			 * If whole segment contains header just move to the
@@ -931,50 +914,32 @@ ena_tx_map_mbuf(struct ena_ring *tx_ring, struct ena_tx_buffer *tx_info,
 			 * Map rest of the packet data that was contained in
 			 * the mbuf.
 			 */
-			rc = bus_dmamap_load(adapter->tx_buf_tag,
-			    tx_info->map_head, mbuf->m_data + delta,
-			    frag_len - delta, ena_dmamap_llq, ena_buf,
-			    BUS_DMA_NOWAIT);
-			if (unlikely((rc != 0) || (ena_buf->paddr == 0)))
-				goto single_dma_error;
-
+			ena_buf->paddr = segs[iseg].ds_addr + delta;
+			ena_buf->len = frag_len - delta;
 			ena_buf++;
 			tx_info->num_of_bufs++;
-			tx_info->head_mapped = true;
 
 			delta = 0;
 		}
 		mbuf = mbuf->m_next;
+		iseg++;
 	}
 
-	if (mbuf == NULL) {
-		return (0);
-	}
-
-	/* Map rest of the mbufs */
-	rc = bus_dmamap_load_mbuf_sg(adapter->tx_buf_tag, tx_info->map_seg, mbuf,
-	    segs, &nsegs, BUS_DMA_NOWAIT);
-	if (unlikely((rc != 0) || (nsegs == 0))) {
-		ena_trace(ENA_WARNING,
-		    "dmamap load failed! err: %d nsegs: %d\n", rc, nsegs);
-		goto dma_error;
-	}
-
-	for (i = 0; i < nsegs; i++) {
-		ena_buf->len = segs[i].ds_len;
-		ena_buf->paddr = segs[i].ds_addr;
+	/* Map rest of the mbuf */
+	while (iseg < nsegs) {
+		ena_buf->paddr = segs[iseg].ds_addr;
+		ena_buf->len = segs[iseg].ds_len;
 		ena_buf++;
+		iseg++;
+		tx_info->num_of_bufs++;
 	}
-	tx_info->num_of_bufs += nsegs;
-	tx_info->seg_mapped = true;
 
 	return (0);
 
 dma_error:
-	if (tx_info->head_mapped == true)
-		bus_dmamap_unload(adapter->tx_buf_tag, tx_info->map_head);
-single_dma_error:
 	counter_u64_add(tx_ring->tx_stats.dma_mapping_err, 1);
 	tx_info->mbuf = NULL;
 	return (rc);
@@ -1100,25 +1065,14 @@ ena_xmit_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf)
 		}
 	}
 
-	if (tx_info->head_mapped == true)
-		bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map_head,
-		    BUS_DMASYNC_PREWRITE);
-	if (tx_info->seg_mapped == true)
-		bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map_seg,
-		    BUS_DMASYNC_PREWRITE);
+	bus_dmamap_sync(adapter->tx_buf_tag, tx_info->dmamap,
+	    BUS_DMASYNC_PREWRITE);
 
 	return (0);
 
 dma_error:
 	tx_info->mbuf = NULL;
-	if (tx_info->seg_mapped == true) {
-		bus_dmamap_unload(adapter->tx_buf_tag, tx_info->map_seg);
-		tx_info->seg_mapped = false;
-	}
-	if (tx_info->head_mapped == true) {
-		bus_dmamap_unload(adapter->tx_buf_tag, tx_info->map_head);
-		tx_info->head_mapped = false;
-	}
+	bus_dmamap_unload(adapter->tx_buf_tag, tx_info->dmamap);
 
 	return (rc);
 }