@@ -306,7 +306,7 @@ mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
 
                 op_own = cqe->op_own;
                 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
-                        n = ntohl(cqe->byte_cnt);
+                        n = rte_be_to_cpu_32(cqe->byte_cnt);
                 else
                         n = 1;
                 cq_ci += n;
@@ -436,7 +436,8 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                 raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE;
                 /* Replace the Ethernet type by the VLAN if necessary. */
                 if (buf->ol_flags & PKT_TX_VLAN_PKT) {
-                        uint32_t vlan = htonl(0x81000000 | buf->vlan_tci);
+                        uint32_t vlan = rte_cpu_to_be_32(0x81000000 |
+                                                         buf->vlan_tci);
                         unsigned int len = 2 * ETHER_ADDR_LEN - 2;
 
                         addr += 2;
@@ -514,8 +515,10 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                 } else {
                         /* NOP WQE. */
                         wqe->ctrl = (rte_v128u32_t){
-                                htonl(txq->wqe_ci << 8),
-                                htonl(txq->qp_num_8s | 1),
+                                rte_cpu_to_be_32(
+                                        txq->wqe_ci << 8),
+                                rte_cpu_to_be_32(
+                                        txq->qp_num_8s | 1),
                                 0,
                                 0,
                         };
@@ -554,7 +557,14 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                                         break;
                                 max_wqe -= n;
                                 if (tso) {
-                                        inl = htonl(copy_b | MLX5_INLINE_SEG);
+                                        uint32_t inl =
+                                                rte_cpu_to_be_32(copy_b |
+                                                        MLX5_INLINE_SEG);
+
+                                        pkt_inline_sz =
+                                                MLX5_WQE_DS(tso_header_sz) *
+                                                MLX5_WQE_DWORD_SIZE;
+
                                         rte_memcpy((void *)raw,
                                                    (void *)&inl, sizeof(inl));
                                         raw += sizeof(inl);
@@ -603,9 +613,9 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                         ds = 3;
 use_dseg:
                         /* Add the remaining packet as a simple ds. */
-                        naddr = htonll(addr);
+                        naddr = rte_cpu_to_be_64(addr);
                         *dseg = (rte_v128u32_t){
-                                htonl(length),
+                                rte_cpu_to_be_32(length),
                                 mlx5_tx_mb2mr(txq, buf),
                                 naddr,
                                 naddr >> 32,
@@ -642,9 +652,9 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                 total_length += length;
 #endif
                 /* Store segment information. */
-                naddr = htonll(rte_pktmbuf_mtod(buf, uintptr_t));
+                naddr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t));
                 *dseg = (rte_v128u32_t){
-                        htonl(length),
+                        rte_cpu_to_be_32(length),
                         mlx5_tx_mb2mr(txq, buf),
                         naddr,
                         naddr >> 32,
@@ -667,21 +677,23 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                 /* Initialize known and common part of the WQE structure. */
                 if (tso) {
                         wqe->ctrl = (rte_v128u32_t){
-                                htonl((txq->wqe_ci << 8) | MLX5_OPCODE_TSO),
-                                htonl(txq->qp_num_8s | ds),
+                                rte_cpu_to_be_32((txq->wqe_ci << 8) |
+                                                 MLX5_OPCODE_TSO),
+                                rte_cpu_to_be_32(txq->qp_num_8s | ds),
                                 0,
                                 0,
                         };
                         wqe->eseg = (rte_v128u32_t){
                                 0,
-                                cs_flags | (htons(tso_segsz) << 16),
+                                cs_flags | (rte_cpu_to_be_16(tso_segsz) << 16),
                                 0,
-                                (ehdr << 16) | htons(tso_header_sz),
+                                (ehdr << 16) | rte_cpu_to_be_16(tso_header_sz),
                         };
                 } else {
                         wqe->ctrl = (rte_v128u32_t){
-                                htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND),
-                                htonl(txq->qp_num_8s | ds),
+                                rte_cpu_to_be_32((txq->wqe_ci << 8) |
+                                                 MLX5_OPCODE_SEND),
+                                rte_cpu_to_be_32(txq->qp_num_8s | ds),
                                 0,
                                 0,
                         };
@@ -689,7 +701,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                                 0,
                                 cs_flags,
                                 0,
-                                (ehdr << 16) | htons(pkt_inline_sz),
+                                (ehdr << 16) | rte_cpu_to_be_16(pkt_inline_sz),
                         };
                 }
 next_wqe:
@@ -709,7 +721,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
         comp = txq->elts_comp + i + j + k;
         if (comp >= MLX5_TX_COMP_THRESH) {
                 /* Request completion on last WQE. */
-                last_wqe->ctrl2 = htonl(8);
+                last_wqe->ctrl2 = rte_cpu_to_be_32(8);
                 /* Save elts_head in unused "immediate" field of WQE. */
                 last_wqe->ctrl3 = txq->elts_head;
                 txq->elts_comp = 0;
@@ -748,13 +760,14 @@ mlx5_mpw_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
         mpw->len = length;
         mpw->total_len = 0;
         mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
-        mpw->wqe->eseg.mss = htons(length);
+        mpw->wqe->eseg.mss = rte_cpu_to_be_16(length);
         mpw->wqe->eseg.inline_hdr_sz = 0;
         mpw->wqe->eseg.rsvd0 = 0;
         mpw->wqe->eseg.rsvd1 = 0;
         mpw->wqe->eseg.rsvd2 = 0;
-        mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_MPW << 24) |
-                                  (txq->wqe_ci << 8) | MLX5_OPCODE_TSO);
+        mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
+                                             (txq->wqe_ci << 8) |
+                                             MLX5_OPCODE_TSO);
         mpw->wqe->ctrl[2] = 0;
         mpw->wqe->ctrl[3] = 0;
         mpw->data.dseg[0] = (volatile struct mlx5_wqe_data_seg *)
@@ -783,7 +796,7 @@ mlx5_mpw_close(struct txq *txq, struct mlx5_mpw *mpw)
          * Store size in multiple of 16 bytes. Control and Ethernet segments
          * count as 2.
          */
-        mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | (2 + num));
+        mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s | (2 + num));
         mpw->state = MLX5_MPW_STATE_CLOSED;
         if (num < 3)
                 ++txq->wqe_ci;
@@ -892,9 +905,9 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                 dseg = mpw.data.dseg[mpw.pkts_n];
                 addr = rte_pktmbuf_mtod(buf, uintptr_t);
                 *dseg = (struct mlx5_wqe_data_seg){
-                        .byte_count = htonl(DATA_LEN(buf)),
+                        .byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
                         .lkey = mlx5_tx_mb2mr(txq, buf),
-                        .addr = htonll(addr),
+                        .addr = rte_cpu_to_be_64(addr),
                 };
 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
                 length += DATA_LEN(buf);
@@ -922,7 +935,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                 volatile struct mlx5_wqe *wqe = mpw.wqe;
 
                 /* Request completion on last WQE. */
-                wqe->ctrl[2] = htonl(8);
+                wqe->ctrl[2] = rte_cpu_to_be_32(8);
                 /* Save elts_head in unused "immediate" field of WQE. */
                 wqe->ctrl[3] = elts_head;
                 txq->elts_comp = 0;
@@ -962,12 +975,12 @@ mlx5_mpw_inline_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
         mpw->len = length;
         mpw->total_len = 0;
         mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
-        mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_MPW << 24) |
-                                  (txq->wqe_ci << 8) |
-                                  MLX5_OPCODE_TSO);
+        mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
+                                             (txq->wqe_ci << 8) |
+                                             MLX5_OPCODE_TSO);
         mpw->wqe->ctrl[2] = 0;
         mpw->wqe->ctrl[3] = 0;
-        mpw->wqe->eseg.mss = htons(length);
+        mpw->wqe->eseg.mss = rte_cpu_to_be_16(length);
         mpw->wqe->eseg.inline_hdr_sz = 0;
         mpw->wqe->eseg.cs_flags = 0;
         mpw->wqe->eseg.rsvd0 = 0;
@@ -998,9 +1011,10 @@ mlx5_mpw_inline_close(struct txq *txq, struct mlx5_mpw *mpw)
          * Store size in multiple of 16 bytes. Control and Ethernet segments
          * count as 2.
          */
-        mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | MLX5_WQE_DS(size));
+        mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s |
+                                             MLX5_WQE_DS(size));
         mpw->state = MLX5_MPW_STATE_CLOSED;
-        inl->byte_cnt = htonl(mpw->total_len | MLX5_INLINE_SEG);
+        inl->byte_cnt = rte_cpu_to_be_32(mpw->total_len | MLX5_INLINE_SEG);
         txq->wqe_ci += (size + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
 }
 
@@ -1140,9 +1154,10 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
                         dseg = mpw.data.dseg[mpw.pkts_n];
                         addr = rte_pktmbuf_mtod(buf, uintptr_t);
                         *dseg = (struct mlx5_wqe_data_seg){
-                                .byte_count = htonl(DATA_LEN(buf)),
+                                .byte_count =
+                                        rte_cpu_to_be_32(DATA_LEN(buf)),
                                 .lkey = mlx5_tx_mb2mr(txq, buf),
-                                .addr = htonll(addr),
+                                .addr = rte_cpu_to_be_64(addr),
                         };
 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
                         length += DATA_LEN(buf);
@@ -1214,7 +1229,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
                 volatile struct mlx5_wqe *wqe = mpw.wqe;
 
                 /* Request completion on last WQE. */
-                wqe->ctrl[2] = htonl(8);
+                wqe->ctrl[2] = rte_cpu_to_be_32(8);
                 /* Save elts_head in unused "immediate" field of WQE. */
                 wqe->ctrl[3] = elts_head;
                 txq->elts_comp = 0;
@@ -1254,9 +1269,10 @@ mlx5_empw_new(struct txq *txq, struct mlx5_mpw *mpw, int padding)
         mpw->pkts_n = 0;
         mpw->total_len = sizeof(struct mlx5_wqe);
         mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
-        mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_ENHANCED_MPSW << 24) |
-                                  (txq->wqe_ci << 8) |
-                                  MLX5_OPCODE_ENHANCED_MPSW);
+        mpw->wqe->ctrl[0] =
+                rte_cpu_to_be_32((MLX5_OPC_MOD_ENHANCED_MPSW << 24) |
+                                 (txq->wqe_ci << 8) |
+                                 MLX5_OPCODE_ENHANCED_MPSW);
         mpw->wqe->ctrl[2] = 0;
         mpw->wqe->ctrl[3] = 0;
         memset((void *)(uintptr_t)&mpw->wqe->eseg, 0, MLX5_WQE_DWORD_SIZE);
@@ -1264,9 +1280,9 @@ mlx5_empw_new(struct txq *txq, struct mlx5_mpw *mpw, int padding)
                 uintptr_t addr = (uintptr_t)(mpw->wqe + 1);
 
                 /* Pad the first 2 DWORDs with zero-length inline header. */
-                *(volatile uint32_t *)addr = htonl(MLX5_INLINE_SEG);
+                *(volatile uint32_t *)addr = rte_cpu_to_be_32(MLX5_INLINE_SEG);
                 *(volatile uint32_t *)(addr + MLX5_WQE_DWORD_SIZE) =
-                        htonl(MLX5_INLINE_SEG);
+                        rte_cpu_to_be_32(MLX5_INLINE_SEG);
                 mpw->total_len += 2 * MLX5_WQE_DWORD_SIZE;
                 /* Start from the next WQEBB. */
                 mpw->data.raw = (volatile void *)(tx_mlx5_wqe(txq, idx + 1));
@@ -1294,7 +1310,8 @@ mlx5_empw_close(struct txq *txq, struct mlx5_mpw *mpw)
         /* Store size in multiple of 16 bytes. Control and Ethernet segments
          * count as 2.
          */
-        mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | MLX5_WQE_DS(mpw->total_len));
+        mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s |
+                                             MLX5_WQE_DS(mpw->total_len));
         mpw->state = MLX5_MPW_STATE_CLOSED;
         ret = (mpw->total_len + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
         txq->wqe_ci += ret;
@@ -1449,9 +1466,10 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                         dseg = mpw.data.dseg[mpw.pkts_n];
                         addr = rte_pktmbuf_mtod(buf, uintptr_t);
                         *dseg = (struct mlx5_wqe_data_seg){
-                                .byte_count = htonl(DATA_LEN(buf)),
+                                .byte_count = rte_cpu_to_be_32(
+                                                DATA_LEN(buf)),
                                 .lkey = mlx5_tx_mb2mr(txq, buf),
-                                .addr = htonll(addr),
+                                .addr = rte_cpu_to_be_64(addr),
                         };
 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
                         length += DATA_LEN(buf);
@@ -1474,7 +1492,7 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 
                         assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED);
                         assert(length == DATA_LEN(buf));
-                        inl_hdr = htonl(length | MLX5_INLINE_SEG);
+                        inl_hdr = rte_cpu_to_be_32(length | MLX5_INLINE_SEG);
                         addr = rte_pktmbuf_mtod(buf, uintptr_t);
                         mpw.data.raw = (volatile void *)
                                 ((uintptr_t)mpw.data.raw + inl_pad);
@@ -1530,9 +1548,9 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                         for (n = 0; n * RTE_CACHE_LINE_SIZE < length; n++)
                                 rte_prefetch2((void *)(addr +
                                                 n * RTE_CACHE_LINE_SIZE));
-                        naddr = htonll(addr);
+                        naddr = rte_cpu_to_be_64(addr);
                         *dseg = (rte_v128u32_t) {
-                                htonl(length),
+                                rte_cpu_to_be_32(length),
                                 mlx5_tx_mb2mr(txq, buf),
                                 naddr,
                                 naddr >> 32,
@@ -1560,7 +1578,7 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                 volatile struct mlx5_wqe *wqe = mpw.wqe;
 
                 /* Request completion on last WQE. */
-                wqe->ctrl[2] = htonl(8);
+                wqe->ctrl[2] = rte_cpu_to_be_32(8);
                 /* Save elts_head in unused "immediate" field of WQE. */
                 wqe->ctrl[3] = elts_head;
                 txq->elts_comp = 0;
@@ -1644,8 +1662,8 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
                         (volatile struct mlx5_mini_cqe8 (*)[8])
                         (uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].pkt_info);
 
-                len = ntohl((*mc)[zip->ai & 7].byte_cnt);
-                *rss_hash = ntohl((*mc)[zip->ai & 7].rx_hash_result);
+                len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
+                *rss_hash = rte_be_to_cpu_32((*mc)[zip->ai & 7].rx_hash_result);
                 if ((++zip->ai & 7) == 0) {
                         /* Invalidate consumed CQEs */
                         idx = zip->ca;
@@ -1693,7 +1711,7 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
                                                           cqe_cnt].pkt_info);
 
                         /* Fix endianness. */
-                        zip->cqe_cnt = ntohl(cqe->byte_cnt);
+                        zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
                         /*
                          * Current mini array position is the one returned by
                          * check_cqe64().
@@ -1708,8 +1726,8 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
                         --rxq->cq_ci;
                         zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
                         /* Get packet size to return. */
-                        len = ntohl((*mc)[0].byte_cnt);
-                        *rss_hash = ntohl((*mc)[0].rx_hash_result);
+                        len = rte_be_to_cpu_32((*mc)[0].byte_cnt);
+                        *rss_hash = rte_be_to_cpu_32((*mc)[0].rx_hash_result);
                         zip->ai = 1;
                         /* Prefetch all the entries to be invalidated */
                         idx = zip->ca;
@@ -1719,8 +1737,8 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
                                 ++idx;
                         }
                 } else {
-                        len = ntohl(cqe->byte_cnt);
-                        *rss_hash = ntohl(cqe->rx_hash_res);
+                        len = rte_be_to_cpu_32(cqe->byte_cnt);
+                        *rss_hash = rte_be_to_cpu_32(cqe->rx_hash_res);
                 }
                 /* Error while receiving packet. */
                 if (unlikely(MLX5_CQE_OPCODE(op_own) == MLX5_CQE_RESP_ERR))
@@ -1744,7 +1762,7 @@ static inline uint32_t
 rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe)
 {
         uint32_t ol_flags = 0;
-        uint16_t flags = ntohs(cqe->hdr_type_etc);
+        uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
 
         ol_flags =
                 TRANSPOSE(flags,
@@ -1851,7 +1869,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                             MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
                                 pkt->ol_flags |= PKT_RX_FDIR;
                                 if (cqe->sop_drop_qpn !=
-                                    htonl(MLX5_FLOW_MARK_DEFAULT)) {
+                                    rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) {
                                         uint32_t mark = cqe->sop_drop_qpn;
 
                                         pkt->ol_flags |= PKT_RX_FDIR_ID;
@@ -1863,10 +1881,11 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                         pkt->ol_flags |= rxq_cq_to_ol_flags(rxq, cqe);
                         if (rxq->vlan_strip &&
                             (cqe->hdr_type_etc &
-                             htons(MLX5_CQE_VLAN_STRIPPED))) {
+                             rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) {
                                 pkt->ol_flags |= PKT_RX_VLAN_PKT |
                                         PKT_RX_VLAN_STRIPPED;
-                                pkt->vlan_tci = ntohs(cqe->vlan_info);
+                                pkt->vlan_tci =
+                                        rte_be_to_cpu_16(cqe->vlan_info);
                         }
                         if (rxq->crc_present)
                                 len -= ETHER_CRC_LEN;
@@ -1882,7 +1901,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                  * of the buffers are already known, only the buffer address
                  * changes.
                  */
-                wqe->addr = htonll(rte_pktmbuf_mtod(rep, uintptr_t));
+                wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
                 if (len > DATA_LEN(seg)) {
                         len -= DATA_LEN(seg);
                         ++NB_SEGS(pkt);
@@ -1910,9 +1929,9 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
         /* Update the consumer index. */
         rxq->rq_ci = rq_ci >> sges_n;
         rte_wmb();
-        *rxq->cq_db = htonl(rxq->cq_ci);
+        *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
         rte_wmb();
-        *rxq->rq_db = htonl(rxq->rq_ci);
+        *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
 #ifdef MLX5_PMD_SOFT_COUNTERS
         /* Increment packets counter. */
         rxq->stats.ipackets += i;
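
Aside (not part of the patch): the conversion above is mechanical. CQE and WQE fields are big-endian by hardware definition, so every htonl/htons/ntohl/ntohs/htonll call is replaced with the matching rte_cpu_to_be_*/rte_be_to_cpu_* macro from rte_byteorder.h; these swap bytes on little-endian CPUs and compile to nothing on big-endian ones, without pulling in networking headers. A minimal sketch of the same access pattern, using an illustrative descriptor layout that is not taken from the mlx5 driver:

#include <stdint.h>
#include <rte_byteorder.h>

/* Hypothetical device descriptor with big-endian fields. */
struct example_desc {
        uint32_t byte_cnt;   /* written by hardware, big-endian */
        uint16_t vlan_info;  /* read by hardware, big-endian */
};

/* Read a hardware field into CPU byte order. */
static inline uint32_t
example_desc_len(const volatile struct example_desc *d)
{
        return rte_be_to_cpu_32(d->byte_cnt);
}

/* Write a CPU-order value into a hardware field. */
static inline void
example_desc_set_vlan(volatile struct example_desc *d, uint16_t tci)
{
        d->vlan_info = rte_cpu_to_be_16(tci);
}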