Cover ENA driver code with branch prediction statements

The driver was using them in only a few places, so the rest of the code
is now covered with these statements.

Submitted by: Michal Krawczyk <mk@semihalf.com>
Reviewed by: rlibby
Obtained from: Semihalf
Sponsored by: Amazon, Inc.
Differential Revision: https://reviews.freebsd.org/D12861
Author: mw
Date:   2017-11-09 11:59:21 +00:00
Commit: 39206a80d6
Parent: 570375cfe3
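For context, the annotations added throughout the diff below are the usual compiler branch-prediction hints. A minimal sketch of how they are commonly defined for this driver is shown here; this assumes the typical FreeBSD mapping onto __predict_true()/__predict_false() from sys/cdefs.h, and the exact definitions live in the ENA platform header, so details may differ:

    /*
     * Sketch only: assumed definitions following the common FreeBSD pattern.
     * __builtin_expect() tells the compiler which branch to treat as the
     * fall-through (hot) path, so error handling is kept off it.
     */
    #ifndef likely
    #define likely(x)    __predict_true(x)    /* __builtin_expect((x), 1) */
    #define unlikely(x)  __predict_false(x)   /* __builtin_expect((x), 0) */
    #endif

The pattern in this commit is mechanical: error checks and other rarely-taken conditions are wrapped in unlikely(), while checks expected to hold on the fast path (e.g. a successful bus_dma_tag_destroy()) are wrapped in likely().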


@ -227,7 +227,7 @@ ena_dma_alloc(device_t dmadev, bus_size_t size,
maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE;
dma_space_addr = ENA_DMA_BIT_MASK(adapter->dma_width);
if (dma_space_addr == 0)
if (unlikely(dma_space_addr == 0))
dma_space_addr = BUS_SPACE_MAXADDR;
error = bus_dma_tag_create(bus_get_dma_tag(dmadev), /* parent */
@ -242,7 +242,7 @@ ena_dma_alloc(device_t dmadev, bus_size_t size,
NULL, /* lockfunc */
NULL, /* lockarg */
&dma->tag);
if (error != 0) {
if (unlikely(error != 0)) {
device_printf(dmadev, "%s: bus_dma_tag_create failed: %d\n",
__func__, error);
goto fail_tag;
@ -250,7 +250,7 @@ ena_dma_alloc(device_t dmadev, bus_size_t size,
error = bus_dmamem_alloc(dma->tag, (void**) &dma->vaddr,
BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->map);
if (error != 0) {
if (unlikely(error != 0)) {
device_printf(dmadev, "%s: bus_dmamem_alloc(%ju) failed: %d\n",
__func__, (uintmax_t)size, error);
goto fail_map_create;
@ -259,7 +259,7 @@ ena_dma_alloc(device_t dmadev, bus_size_t size,
dma->paddr = 0;
error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr,
size, ena_dmamap_callback, &dma->paddr, mapflags);
if ((error != 0) || (dma->paddr == 0)) {
if (unlikely((error != 0) || (dma->paddr == 0))) {
device_printf(dmadev, "%s: bus_dmamap_load failed: %d\n",
__func__, error);
goto fail_map_load;
@ -287,7 +287,7 @@ ena_allocate_pci_resources(struct ena_adapter* adapter)
adapter->memory = NULL;
adapter->registers = bus_alloc_resource_any(pdev, SYS_RES_MEMORY,
&rid, RF_ACTIVE);
if (adapter->registers == NULL) {
if (unlikely(adapter->registers == NULL)) {
device_printf(pdev, "Unable to allocate bus resource: "
"registers\n");
return (ENXIO);
@ -350,7 +350,7 @@ ena_change_mtu(if_t ifp, int new_mtu)
int rc, old_mtu, max_frame;
rc = ena_com_get_dev_attr_feat(adapter->ena_dev, &get_feat_ctx);
if (rc != 0) {
if (unlikely(rc != 0)) {
device_printf(adapter->pdev,
"Cannot get attribute for ena device\n");
return (ENXIO);
@ -363,9 +363,9 @@ ena_change_mtu(if_t ifp, int new_mtu)
if_setmtu(ifp, new_mtu);
max_frame = ETHER_MAX_FRAME(ifp, ETHERTYPE_VLAN, 1);
if ((new_mtu < ENA_MIN_FRAME_LEN) ||
if (unlikely((new_mtu < ENA_MIN_FRAME_LEN) ||
(new_mtu > get_feat_ctx.dev_attr.max_mtu) ||
(max_frame > ENA_MAX_FRAME_LEN)) {
(max_frame > ENA_MAX_FRAME_LEN))) {
device_printf(adapter->pdev, "Invalid MTU setting. "
"new_mtu: %d\n", new_mtu);
goto error;
@ -531,7 +531,7 @@ ena_setup_tx_dma_tag(struct ena_adapter *adapter)
NULL, /* lockfuncarg */
&adapter->tx_buf_tag);
if (ret != 0)
if (unlikely(ret != 0))
device_printf(adapter->pdev, "Unable to create Tx DMA tag\n");
return (ret);
@ -544,7 +544,7 @@ ena_free_tx_dma_tag(struct ena_adapter *adapter)
ret = bus_dma_tag_destroy(adapter->tx_buf_tag);
if (ret == 0)
if (likely(ret == 0))
adapter->tx_buf_tag = NULL;
return (ret);
@ -569,7 +569,7 @@ ena_setup_rx_dma_tag(struct ena_adapter *adapter)
NULL, /* lockarg */
&adapter->rx_buf_tag);
if (ret != 0)
if (unlikely(ret != 0))
device_printf(adapter->pdev, "Unable to create Rx DMA tag\n");
return (ret);
@ -582,7 +582,7 @@ ena_free_rx_dma_tag(struct ena_adapter *adapter)
ret = bus_dma_tag_destroy(adapter->rx_buf_tag);
if (ret == 0)
if (likely(ret == 0))
adapter->rx_buf_tag = NULL;
return (ret);
@ -608,12 +608,12 @@ ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
tx_ring->tx_buffer_info = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
if (tx_ring->tx_buffer_info == NULL)
if (unlikely(tx_ring->tx_buffer_info == NULL))
return (ENOMEM);
size = sizeof(uint16_t) * tx_ring->ring_size;
tx_ring->free_tx_ids = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
if (tx_ring->free_tx_ids == NULL)
if (unlikely(tx_ring->free_tx_ids == NULL))
goto err_buf_info_free;
/* Req id stack for TX OOO completions */
@ -636,7 +636,7 @@ ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
for (i = 0; i < tx_ring->ring_size; i++) {
err = bus_dmamap_create(adapter->tx_buf_tag, 0,
&tx_ring->tx_buffer_info[i].map);
if (err != 0) {
if (unlikely(err != 0)) {
device_printf(adapter->pdev,
"Unable to create Tx DMA map for buffer %d\n", i);
goto err_buf_info_unmap;
@ -647,7 +647,7 @@ ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
TASK_INIT(&tx_ring->enqueue_task, 0, ena_deferred_mq_start, tx_ring);
tx_ring->enqueue_tq = taskqueue_create_fast("ena_tx_enque", M_NOWAIT,
taskqueue_thread_enqueue, &tx_ring->enqueue_tq);
if (tx_ring->enqueue_tq == NULL) {
if (unlikely(tx_ring->enqueue_tq == NULL)) {
device_printf(adapter->pdev,
"Unable to create taskqueue for enqueue task\n");
i = tx_ring->ring_size;
@ -971,13 +971,13 @@ ena_alloc_rx_mbuf(struct ena_adapter *adapter,
int nsegs, error;
/* if previous allocated frag is not used */
if (rx_info->mbuf != NULL)
if (unlikely(rx_info->mbuf != NULL))
return (0);
/* Get mbuf using UMA allocator */
rx_info->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM16BYTES);
if (rx_info->mbuf == NULL) {
if (unlikely(rx_info->mbuf == NULL)) {
counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
return (ENOMEM);
}
@ -990,7 +990,7 @@ ena_alloc_rx_mbuf(struct ena_adapter *adapter,
adapter->rx_buf_tag,rx_info->mbuf, rx_info->mbuf->m_len);
error = bus_dmamap_load_mbuf_sg(adapter->rx_buf_tag, rx_info->map,
rx_info->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
if ((error != 0) || (nsegs != 1)) {
if (unlikely((error != 0) || (nsegs != 1))) {
device_printf(adapter->pdev, "failed to map mbuf, error: %d, "
"nsegs: %d\n", error, nsegs);
counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1);
@ -1062,7 +1062,7 @@ ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num)
rx_info = &rx_ring->rx_buffer_info[req_id];
rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info);
if (rc != 0) {
if (unlikely(rc != 0)) {
device_printf(adapter->pdev,
"failed to alloc buffer for rx queue\n");
break;
@ -1079,14 +1079,14 @@ ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num)
rx_ring->ring_size);
}
if (i < num) {
if (unlikely(i < num)) {
counter_u64_add(rx_ring->rx_stats.refil_partial, 1);
device_printf(adapter->pdev,
"refilled rx queue %d with %d pages only\n",
rx_ring->qid, i);
}
if (i != 0) {
if (likely(i != 0)) {
wmb();
ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
}
@ -1270,7 +1270,7 @@ ena_create_io_queues(struct ena_adapter *adapter)
ctx.msix_vector = msix_vector;
ctx.qid = ena_qid;
rc = ena_com_create_io_queue(ena_dev, &ctx);
if (rc != 0) {
if (unlikely(rc != 0)) {
device_printf(adapter->pdev,
"Failed to create io RX queue[%d] rc: %d\n", i, rc);
goto err_rx;
@ -1280,7 +1280,7 @@ ena_create_io_queues(struct ena_adapter *adapter)
rc = ena_com_get_io_handlers(ena_dev, ena_qid,
&ring->ena_com_io_sq,
&ring->ena_com_io_cq);
if (rc != 0) {
if (unlikely(rc != 0)) {
device_printf(adapter->pdev,
"Failed to get RX queue handlers. RX queue num"
" %d rc: %d\n", i, rc);
@ -1337,11 +1337,11 @@ ena_tx_cleanup(struct ena_ring *tx_ring)
struct mbuf *mbuf;
rc = ena_com_tx_comp_req_id_get(io_cq, &req_id);
if (rc != 0)
if (unlikely(rc != 0))
break;
rc = validate_tx_req_id(tx_ring, req_id);
if (rc != 0)
if (unlikely(rc != 0))
break;
tx_info = &tx_ring->tx_buffer_info[req_id];
@ -1351,7 +1351,7 @@ ena_tx_cleanup(struct ena_ring *tx_ring)
tx_info->mbuf = NULL;
bintime_clear(&tx_info->timestamp);
if (tx_info->num_of_bufs != 0) {
if (likely(tx_info->num_of_bufs != 0)) {
/* Map is no longer required */
bus_dmamap_unload(adapter->tx_buf_tag, tx_info->map);
}
@ -1364,7 +1364,7 @@ ena_tx_cleanup(struct ena_ring *tx_ring)
next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
tx_ring->ring_size);
if (--commit == 0) {
if (unlikely(--commit == 0)) {
commit = TX_COMMIT;
/* update ring state every TX_COMMIT descriptor */
tx_ring->next_to_clean = next_to_clean;
@ -1374,12 +1374,12 @@ ena_tx_cleanup(struct ena_ring *tx_ring)
ena_com_update_dev_comp_head(io_cq);
total_done = 0;
}
} while (--budget);
} while (likely(--budget));
work_done = TX_BUDGET - budget;
/* If there is still something to commit update ring state */
if (commit != TX_COMMIT) {
if (likely(commit != TX_COMMIT)) {
tx_ring->next_to_clean = next_to_clean;
ena_com_comp_ack(&adapter->ena_dev->io_sq_queues[ena_qid],
total_done);
@ -1397,7 +1397,7 @@ ena_rx_hash_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx,
{
struct ena_adapter *adapter = rx_ring->adapter;
if (adapter->rss_support) {
if (likely(adapter->rss_support)) {
mbuf->m_pkthdr.flowid = ena_rx_ctx->hash;
if (ena_rx_ctx->frag &&
@ -1522,7 +1522,7 @@ ena_rx_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_buf_info *ena_bufs,
return (NULL);
}
if (m_append(mbuf, len, rx_info->mbuf->m_data) == 0) {
if (unlikely(m_append(mbuf, len, rx_info->mbuf->m_data) == 0)) {
counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
ena_trace(ENA_WARNING, "Failed to append Rx mbuf %p",
mbuf);
@ -1550,8 +1550,8 @@ ena_rx_checksum(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx,
{
/* if IP and error */
if ((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
ena_rx_ctx->l3_csum_err) {
if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
ena_rx_ctx->l3_csum_err)) {
/* ipv4 checksum error */
mbuf->m_pkthdr.csum_flags = 0;
counter_u64_add(rx_ring->rx_stats.bad_csum, 1);
@ -1583,7 +1583,7 @@ ena_deferred_rx_cleanup(void *arg, int pending)
* If deferred task was executed, perform cleanup of all awaiting
* descs (or until given budget is depleted to avoid infinite loop).
*/
while (budget--) {
while (likely(budget--)) {
if (ena_rx_cleanup(rx_ring) == 0)
break;
}
@ -1748,7 +1748,7 @@ ena_handle_msix(void *arg)
int qid, ena_qid;
int txc, rxc, i;
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0))
return;
ena_trace(ENA_DBG, "MSI-X TX/RX routine");
@ -1765,7 +1765,7 @@ ena_handle_msix(void *arg)
* being executed and rx ring is being cleaned up in
* another thread.
*/
if (ENA_RING_MTX_TRYLOCK(rx_ring) != 0) {
if (likely(ENA_RING_MTX_TRYLOCK(rx_ring) != 0)) {
rxc = ena_rx_cleanup(rx_ring);
ENA_RING_MTX_UNLOCK(rx_ring);
} else {
@ -1777,7 +1777,7 @@ ena_handle_msix(void *arg)
txc = ena_tx_cleanup(tx_ring);
ENA_RING_MTX_UNLOCK(tx_ring);
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0))
return;
if ((txc != TX_BUDGET) && (rxc != RX_BUDGET))
@ -1814,7 +1814,7 @@ ena_enable_msix(struct ena_adapter *adapter)
}
rc = pci_alloc_msix(dev, &msix_vecs);
if (rc != 0) {
if (unlikely(rc != 0)) {
device_printf(dev,
"Failed to enable MSIX, vectors %d rc %d\n", msix_vecs, rc);
@ -1877,7 +1877,7 @@ ena_setup_io_intr(struct ena_adapter *adapter)
* We still want to bind rings to the corresponding cpu
* using something similar to the RSS round-robin technique.
*/
if (last_bind_cpu < 0)
if (unlikely(last_bind_cpu < 0))
last_bind_cpu = CPU_FIRST();
adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu =
last_bind_cpu;
@ -1899,7 +1899,7 @@ ena_request_mgmnt_irq(struct ena_adapter *adapter)
irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ,
&irq->vector, flags);
if (irq->res == NULL) {
if (unlikely(irq->res == NULL)) {
device_printf(adapter->pdev, "could not allocate "
"irq vector: %d\n", irq->vector);
return (ENXIO);
@ -1907,7 +1907,7 @@ ena_request_mgmnt_irq(struct ena_adapter *adapter)
rc = bus_activate_resource(adapter->pdev, SYS_RES_IRQ,
irq->vector, irq->res);
if (rc != 0) {
if (unlikely(rc != 0)) {
device_printf(adapter->pdev, "could not activate "
"irq vector: %d\n", irq->vector);
goto err_res_free;
@ -1916,7 +1916,7 @@ ena_request_mgmnt_irq(struct ena_adapter *adapter)
rc = bus_setup_intr(adapter->pdev, irq->res,
INTR_TYPE_NET | INTR_MPSAFE, NULL, ena_intr_msix_mgmnt,
irq->data, &irq->cookie);
if (rc != 0) {
if (unlikely(rc != 0)) {
device_printf(adapter->pdev, "failed to register "
"interrupt handler for irq %ju: %d\n",
rman_get_start(irq->res), rc);
@ -1931,7 +1931,7 @@ ena_request_mgmnt_irq(struct ena_adapter *adapter)
irq->vector);
rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
irq->vector, irq->res);
if (rcc != 0)
if (unlikely(rcc != 0))
device_printf(adapter->pdev, "dev has no parent while "
"releasing res for irq: %d\n", irq->vector);
irq->res = NULL;
@ -1946,7 +1946,7 @@ ena_request_io_irq(struct ena_adapter *adapter)
unsigned long flags = 0;
int rc = 0, i, rcc;
if (adapter->msix_enabled == 0) {
if (unlikely(adapter->msix_enabled == 0)) {
device_printf(adapter->pdev, "failed to request irq\n");
return (EINVAL);
} else {
@ -1956,12 +1956,12 @@ ena_request_io_irq(struct ena_adapter *adapter)
for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
irq = &adapter->irq_tbl[i];
if (irq->requested)
if (unlikely(irq->requested))
continue;
irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ,
&irq->vector, flags);
if (irq->res == NULL) {
if (unlikely(irq->res == NULL)) {
device_printf(adapter->pdev, "could not allocate "
"irq vector: %d\n", irq->vector);
goto err;
@ -1970,7 +1970,7 @@ ena_request_io_irq(struct ena_adapter *adapter)
rc = bus_setup_intr(adapter->pdev, irq->res,
INTR_TYPE_NET | INTR_MPSAFE, NULL,
irq->handler, irq->data, &irq->cookie);
if (rc != 0) {
if (unlikely(rc != 0)) {
device_printf(adapter->pdev, "failed to register "
"interrupt handler for irq %ju: %d\n",
rman_get_start(irq->res), rc);
@ -1999,7 +1999,7 @@ ena_request_io_irq(struct ena_adapter *adapter)
free both intr and resources */
if (irq->requested)
rcc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie);
if (rcc != 0)
if (unlikely(rcc != 0))
device_printf(adapter->pdev, "could not release"
" irq: %d, error: %d\n", irq->vector, rcc);
@ -2012,7 +2012,7 @@ ena_request_io_irq(struct ena_adapter *adapter)
rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
irq->vector, irq->res);
}
if (rcc != 0)
if (unlikely(rcc != 0))
device_printf(adapter->pdev, "dev has no parent while "
"releasing res for irq: %d\n", irq->vector);
irq->requested = false;
@ -2033,7 +2033,7 @@ ena_free_mgmnt_irq(struct ena_adapter *adapter)
ena_trace(ENA_INFO | ENA_ADMQ, "tear down irq: %d\n",
irq->vector);
rc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie);
if (rc != 0)
if (unlikely(rc != 0))
device_printf(adapter->pdev, "failed to tear "
"down irq: %d\n", irq->vector);
irq->requested = 0;
@ -2045,7 +2045,7 @@ ena_free_mgmnt_irq(struct ena_adapter *adapter)
rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
irq->vector, irq->res);
irq->res = NULL;
if (rc != 0)
if (unlikely(rc != 0))
device_printf(adapter->pdev, "dev has no parent while "
"releasing res for irq: %d\n", irq->vector);
}
@ -2064,7 +2064,7 @@ ena_free_io_irq(struct ena_adapter *adapter)
irq->vector);
rc = bus_teardown_intr(adapter->pdev, irq->res,
irq->cookie);
if (rc != 0) {
if (unlikely(rc != 0)) {
device_printf(adapter->pdev, "failed to tear "
"down irq: %d\n", irq->vector);
}
@ -2077,7 +2077,7 @@ ena_free_io_irq(struct ena_adapter *adapter)
rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
irq->vector, irq->res);
irq->res = NULL;
if (rc != 0) {
if (unlikely(rc != 0)) {
device_printf(adapter->pdev, "dev has no parent"
" while releasing res for irq: %d\n",
irq->vector);
@ -2153,14 +2153,14 @@ ena_up_complete(struct ena_adapter *adapter)
{
int rc;
if (adapter->rss_support) {
if (likely(adapter->rss_support)) {
rc = ena_rss_configure(adapter);
if (rc != 0)
return (rc);
}
rc = ena_change_mtu(adapter->ifp, adapter->ifp->if_mtu);
if (rc != 0)
if (unlikely(rc != 0))
return (rc);
ena_refill_all_rx_bufs(adapter);
@ -2175,12 +2175,12 @@ ena_up(struct ena_adapter *adapter)
{
int rc = 0;
if (device_is_attached(adapter->pdev) == 0) {
if (unlikely(device_is_attached(adapter->pdev) == 0)) {
device_printf(adapter->pdev, "device is not attached!\n");
return (ENXIO);
}
if (!adapter->running == false) {
if (unlikely(!adapter->running)) {
device_printf(adapter->pdev, "device is not running!\n");
return (ENXIO);
}
@ -2191,38 +2191,38 @@ ena_up(struct ena_adapter *adapter)
/* setup interrupts for IO queues */
ena_setup_io_intr(adapter);
rc = ena_request_io_irq(adapter);
if (rc != 0) {
if (unlikely(rc != 0)) {
ena_trace(ENA_ALERT, "err_req_irq");
goto err_req_irq;
}
/* allocate transmit descriptors */
rc = ena_setup_all_tx_resources(adapter);
if (rc != 0) {
if (unlikely(rc != 0)) {
ena_trace(ENA_ALERT, "err_setup_tx");
goto err_setup_tx;
}
/* allocate receive descriptors */
rc = ena_setup_all_rx_resources(adapter);
if (rc != 0) {
if (unlikely(rc != 0)) {
ena_trace(ENA_ALERT, "err_setup_rx");
goto err_setup_rx;
}
/* create IO queues for Rx & Tx */
rc = ena_create_io_queues(adapter);
if (rc != 0) {
if (unlikely(rc != 0)) {
ena_trace(ENA_ALERT,
"create IO queues failed");
goto err_io_que;
}
if (adapter->link_status)
if (unlikely(adapter->link_status))
if_link_state_change(adapter->ifp, LINK_STATE_UP);
rc = ena_up_complete(adapter);
if (rc != 0)
if (unlikely(rc != 0))
goto err_up_complete;
counter_u64_add(adapter->dev_stats.interface_up, 1);
@ -2492,7 +2492,7 @@ ena_setup_ifnet(device_t pdev, struct ena_adapter *adapter,
ena_trace(ENA_DBG, "enter");
ifp = adapter->ifp = if_gethandle(IFT_ETHER);
if (ifp == NULL) {
if (unlikely(ifp == NULL)) {
device_printf(pdev, "can not allocate ifnet structure\n");
return (ENXIO);
}
@ -2561,7 +2561,7 @@ ena_down(struct ena_adapter *adapter)
if (adapter->trigger_reset) {
rc = ena_com_dev_reset(adapter->ena_dev,
adapter->reset_reason);
if (rc != 0)
if (unlikely(rc != 0))
device_printf(adapter->pdev,
"Device reset failed\n");
}
@ -2685,7 +2685,7 @@ ena_check_and_collapse_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf)
collapsed_mbuf = m_collapse(*mbuf, M_NOWAIT,
adapter->max_tx_sgl_size - 1);
if (collapsed_mbuf == NULL) {
if (unlikely(collapsed_mbuf == NULL)) {
counter_u64_add(tx_ring->tx_stats.collapse_err, 1);
return (ENOMEM);
}
@ -2721,7 +2721,7 @@ ena_xmit_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf)
io_sq = &adapter->ena_dev->io_sq_queues[ena_qid];
rc = ena_check_and_collapse_mbuf(tx_ring, mbuf);
if (rc != 0) {
if (unlikely(rc != 0)) {
ena_trace(ENA_WARNING,
"Failed to collapse mbuf! err: %d", rc);
return (rc);
@ -2746,7 +2746,7 @@ ena_xmit_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf)
rc = bus_dmamap_load_mbuf_sg(adapter->tx_buf_tag, tx_info->map,
*mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
if ((rc != 0) || (nsegs == 0)) {
if (unlikely((rc != 0) || (nsegs == 0))) {
ena_trace(ENA_WARNING,
"dmamap load failed! err: %d nsegs: %d", rc, nsegs);
counter_u64_add(tx_ring->tx_stats.dma_mapping_err, 1);
@ -2775,7 +2775,7 @@ ena_xmit_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf)
ena_tx_csum(&ena_tx_ctx, *mbuf);
/* Prepare the packet's descriptors and send them to device */
rc = ena_com_prepare_tx(io_sq, &ena_tx_ctx, &nb_hw_desc);
if (rc != 0) {
if (unlikely(rc != 0)) {
ena_trace(ENA_WARNING, "failed to prepare tx bufs\n");
counter_enter();
counter_u64_add_protected(tx_ring->tx_stats.queue_stop, 1);
@ -2823,10 +2823,10 @@ ena_start_xmit(struct ena_ring *tx_ring)
int acum_pkts = 0;
int ret = 0;
if ((if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING) == 0)
if (unlikely((if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING) == 0))
return;
if (!adapter->link_status)
if (unlikely(!adapter->link_status))
return;
ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id);
@ -2837,11 +2837,11 @@ ena_start_xmit(struct ena_ring *tx_ring)
" header csum flags %#jx",
mbuf, mbuf->m_flags, mbuf->m_pkthdr.csum_flags);
if (!ena_com_sq_have_enough_space(io_sq,
ENA_TX_CLEANUP_THRESHOLD))
if (unlikely(!ena_com_sq_have_enough_space(io_sq,
ENA_TX_CLEANUP_THRESHOLD)))
ena_tx_cleanup(tx_ring);
if ((ret = ena_xmit_mbuf(tx_ring, &mbuf)) != 0) {
if (unlikely((ret = ena_xmit_mbuf(tx_ring, &mbuf)) != 0)) {
if (ret == ENA_COM_NO_MEM) {
drbr_putback(adapter->ifp, tx_ring->br, mbuf);
} else if (ret == ENA_COM_NO_SPACE) {
@ -2856,14 +2856,15 @@ ena_start_xmit(struct ena_ring *tx_ring)
drbr_advance(adapter->ifp, tx_ring->br);
if ((if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING) == 0)
if (unlikely((if_getdrvflags(adapter->ifp) &
IFF_DRV_RUNNING) == 0))
return;
acum_pkts++;
BPF_MTAP(adapter->ifp, mbuf);
if (acum_pkts == DB_THRESHOLD) {
if (unlikely(acum_pkts == DB_THRESHOLD)) {
acum_pkts = 0;
wmb();
/* Trigger the dma engine */
@ -2873,7 +2874,7 @@ ena_start_xmit(struct ena_ring *tx_ring)
}
if (acum_pkts != 0) {
if (likely(acum_pkts != 0)) {
wmb();
/* Trigger the dma engine */
ena_com_write_sq_doorbell(io_sq);
@ -2906,7 +2907,7 @@ ena_mq_start(if_t ifp, struct mbuf *m)
int ret, is_drbr_empty;
uint32_t i;
if ((if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING) == 0)
if (unlikely((if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING) == 0))
return (ENODEV);
/* Which queue to use */
@ -2934,7 +2935,7 @@ ena_mq_start(if_t ifp, struct mbuf *m)
/* Check if drbr is empty before putting packet */
is_drbr_empty = drbr_empty(ifp, tx_ring->br);
ret = drbr_enqueue(ifp, tx_ring->br, m);
if (ret != 0) {
if (unlikely(ret != 0)) {
taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
return (ret);
}
@ -3084,7 +3085,7 @@ ena_rss_init_default_deferred(void *arg)
int rc;
dc = devclass_find("ena");
if (dc == NULL) {
if (unlikely(dc == NULL)) {
ena_trace(ENA_DBG, "No devclass ena\n");
return;
}
@ -3095,7 +3096,7 @@ ena_rss_init_default_deferred(void *arg)
if (adapter != NULL) {
rc = ena_rss_init_default(adapter);
adapter->rss_support = true;
if (rc != 0) {
if (unlikely(rc != 0)) {
device_printf(adapter->pdev,
"WARNING: RSS was not properly initialized,"
" it will affect bandwidth\n");
@ -3114,7 +3115,7 @@ ena_config_host_info(struct ena_com_dev *ena_dev)
/* Allocate only the host info */
rc = ena_com_allocate_host_info(ena_dev);
if (rc != 0) {
if (unlikely(rc != 0)) {
ena_trace(ENA_ALERT, "Cannot allocate host info\n");
return;
}
@ -3135,7 +3136,7 @@ ena_config_host_info(struct ena_com_dev *ena_dev)
(DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
rc = ena_com_set_host_attributes(ena_dev);
if (rc != 0) {
if (unlikely(rc != 0)) {
if (rc == EOPNOTSUPP)
ena_trace(ENA_WARNING, "Cannot set host attributes\n");
else
@ -3161,7 +3162,7 @@ ena_device_init(struct ena_adapter *adapter, device_t pdev,
int rc;
rc = ena_com_mmio_reg_read_request_init(ena_dev);
if (rc != 0) {
if (unlikely(rc != 0)) {
device_printf(pdev, "failed to init mmio read less\n");
return (rc);
}
@ -3174,19 +3175,19 @@ ena_device_init(struct ena_adapter *adapter, device_t pdev,
ena_com_set_mmio_read_mode(ena_dev, readless_supported);
rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
if (rc != 0) {
if (unlikely(rc != 0)) {
device_printf(pdev, "Can not reset device\n");
goto err_mmio_read_less;
}
rc = ena_com_validate_version(ena_dev);
if (rc != 0) {
if (unlikely(rc != 0)) {
device_printf(pdev, "device version is too low\n");
goto err_mmio_read_less;
}
dma_width = ena_com_get_dma_width(ena_dev);
if (dma_width < 0) {
if (unlikely(dma_width < 0)) {
device_printf(pdev, "Invalid dma width value %d", dma_width);
rc = dma_width;
goto err_mmio_read_less;
@ -3195,7 +3196,7 @@ ena_device_init(struct ena_adapter *adapter, device_t pdev,
/* ENA admin level init */
rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
if (rc != 0) {
if (unlikely(rc != 0)) {
device_printf(pdev,
"Can not initialize ena admin queue with device\n");
goto err_mmio_read_less;
@ -3212,7 +3213,7 @@ ena_device_init(struct ena_adapter *adapter, device_t pdev,
/* Get Device Attributes */
rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
if (rc != 0) {
if (unlikely(rc != 0)) {
device_printf(pdev,
"Cannot get attribute for ena device rc: %d\n", rc);
goto err_admin_init;
@ -3226,7 +3227,7 @@ ena_device_init(struct ena_adapter *adapter, device_t pdev,
aenq_groups &= get_feat_ctx->aenq.supported_groups;
rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
if (rc != 0) {
if (unlikely(rc != 0)) {
device_printf(pdev, "Cannot configure aenq groups rc: %d\n", rc);
goto err_admin_init;
}
@ -3251,7 +3252,7 @@ static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter,
int rc;
rc = ena_enable_msix(adapter);
if (rc != 0) {
if (unlikely(rc != 0)) {
device_printf(adapter->pdev, "Error with MSI-X enablement\n");
return (rc);
}
@ -3259,7 +3260,7 @@ static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter,
ena_setup_mgmnt_intr(adapter);
rc = ena_request_mgmnt_irq(adapter);
if (rc != 0) {
if (unlikely(rc != 0)) {
device_printf(adapter->pdev, "Cannot setup mgmnt queue intr\n");
goto err_disable_msix;
}
@ -3303,7 +3304,7 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter)
if (adapter->wd_active == 0)
return;
if (adapter->keep_alive_timeout == 0)
if (likely(adapter->keep_alive_timeout == 0))
return;
timestamp = atomic_load_acq_64(&adapter->keep_alive_timestamp);
@ -3537,7 +3538,7 @@ ena_reset_task(void *arg, int pending)
/* Finished destroy part. Restart the device */
rc = ena_device_init(adapter, adapter->pdev, &get_feat_ctx,
&adapter->wd_active);
if (rc != 0) {
if (unlikely(rc != 0)) {
device_printf(adapter->pdev,
"ENA device init failed! (err: %d)\n", rc);
goto err_dev_free;
@ -3545,7 +3546,7 @@ ena_reset_task(void *arg, int pending)
rc = ena_enable_msix_and_set_admin_interrupts(adapter,
adapter->num_queues);
if (rc != 0) {
if (unlikely(rc != 0)) {
device_printf(adapter->pdev, "Enable MSI-X failed\n");
goto err_com_free;
}
@ -3553,7 +3554,7 @@ ena_reset_task(void *arg, int pending)
/* If the interface was up before the reset bring it up */
if (dev_up) {
rc = ena_up(adapter);
if (rc != 0) {
if (unlikely(rc != 0)) {
device_printf(adapter->pdev,
"Failed to create I/O queues\n");
goto err_msix_free;
@ -3617,7 +3618,7 @@ ena_attach(device_t pdev)
device_printf(pdev, "%s\n", ena_version);
rc = ena_allocate_pci_resources(adapter);
if (rc != 0) {
if (unlikely(rc != 0)) {
device_printf(pdev, "PCI resource allocation failed!\n");
ena_free_pci_resources(adapter);
return (rc);
@ -3638,7 +3639,7 @@ ena_attach(device_t pdev)
((struct ena_bus*)(ena_dev->bus))->reg_bar_h =
rman_get_bushandle(adapter->registers);
if (((struct ena_bus*)(ena_dev->bus))->reg_bar_h == 0) {
if (unlikely(((struct ena_bus*)(ena_dev->bus))->reg_bar_h == 0)) {
device_printf(pdev, "failed to pmap registers bar\n");
rc = ENXIO;
goto err_bus_free;
@ -3648,7 +3649,7 @@ ena_attach(device_t pdev)
/* Device initialization */
rc = ena_device_init(adapter, pdev, &get_feat_ctx, &adapter->wd_active);
if (rc != 0) {
if (unlikely(rc != 0)) {
device_printf(pdev, "ENA device init failed! (err: %d)\n", rc);
rc = ENXIO;
goto err_bus_free;
@ -3677,7 +3678,7 @@ ena_attach(device_t pdev)
/* calculatre ring sizes */
queue_size = ena_calc_queue_size(adapter,&tx_sgl_size,
&rx_sgl_size, &get_feat_ctx);
if ((queue_size <= 0) || (io_queue_num <= 0)) {
if (unlikely((queue_size <= 0) || (io_queue_num <= 0))) {
rc = ENA_COM_FAULT;
goto err_com_free;
}
@ -3692,11 +3693,11 @@ ena_attach(device_t pdev)
/* set up dma tags for rx and tx buffers */
rc = ena_setup_tx_dma_tag(adapter);
if (rc != 0)
if (unlikely(rc != 0))
goto err_com_free;
rc = ena_setup_rx_dma_tag(adapter);
if (rc != 0)
if (unlikely(rc != 0))
goto err_tx_tag_free;
/* initialize rings basic information */
@ -3705,13 +3706,13 @@ ena_attach(device_t pdev)
/* setup network interface */
rc = ena_setup_ifnet(pdev, adapter, &get_feat_ctx);
if (rc != 0) {
if (unlikely(rc != 0)) {
device_printf(pdev,"Error with network interface setup\n");
goto err_io_free;
}
rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num);
if (rc != 0) {
if (unlikely(rc != 0)) {
device_printf(pdev,
"Failed to enable and set the admin interrupts\n");
goto err_ifp_free;
@ -3799,16 +3800,16 @@ ena_detach(device_t pdev)
ena_free_counters((counter_u64_t *)&adapter->dev_stats,
sizeof(struct ena_stats_dev));
if (adapter->rss_support)
if (likely(adapter->rss_support))
ena_com_rss_destroy(ena_dev);
rc = ena_free_rx_dma_tag(adapter);
if (rc != 0)
if (unlikely(rc != 0))
device_printf(adapter->pdev,
"Unmapped RX DMA tag associations\n");
rc = ena_free_tx_dma_tag(adapter);
if (rc != 0)
if (unlikely(rc != 0))
device_printf(adapter->pdev,
"Unmapped TX DMA tag associations\n");