net/mlx5: replace network to host macros

Replace the POSIX network byte-order macros (htons, htonl, htonll,
ntohs, ntohl) with the DPDK byte-order macros from rte_byteorder.h
(rte_cpu_to_be_16/32/64, rte_be_to_cpu_16/32) throughout the mlx5 PMD.

Signed-off-by: Shachar Beiser <shacharbe@mellanox.com>
Acked-by: Yongseok Koh <yskoh@mellanox.com>
Acked-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>

Author:    Shachar Beiser, 2017-09-17 10:42:02 +00:00
Committer: Ferruh Yigit
Commit:    6b30a6a855 (parent: 671f877687)
7 changed files with 107 additions and 79 deletions
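For reference, the replacement is mechanical: each POSIX macro maps to the rte_byteorder.h macro of the same width and direction, which compiles to a byte swap on little-endian CPUs and to a no-op on big-endian ones. A minimal sketch of the mapping (demo_roundtrip is an illustrative name, not code from this patch):

	#include <stdint.h>
	#include <rte_byteorder.h>

	/* htons(x)  -> rte_cpu_to_be_16(x)    ntohs(x) -> rte_be_to_cpu_16(x)
	 * htonl(x)  -> rte_cpu_to_be_32(x)    ntohl(x) -> rte_be_to_cpu_32(x)
	 * htonll(x) -> rte_cpu_to_be_64(x)
	 */
	static uint32_t
	demo_roundtrip(uint32_t host_val)
	{
		/* Host order to big-endian (device order)... */
		uint32_t be = rte_cpu_to_be_32(host_val);

		/* ...and back; both calls are no-ops on big-endian hosts. */
		return rte_be_to_cpu_32(be);
	}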

diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c
--- a/drivers/net/mlx5/mlx5_mac.c
+++ b/drivers/net/mlx5/mlx5_mac.c

@@ -263,11 +263,15 @@ hash_rxq_add_mac_flow(struct hash_rxq *hash_rxq, unsigned int mac_index,
 				(*mac)[0], (*mac)[1], (*mac)[2],
 				(*mac)[3], (*mac)[4], (*mac)[5]
 			},
-			.vlan_tag = (vlan_enabled ? htons(vlan_id) : 0),
+			.vlan_tag = (vlan_enabled ?
+				     rte_cpu_to_be_16(vlan_id)
+				     : 0),
 		},
 		.mask = {
 			.dst_mac = "\xff\xff\xff\xff\xff\xff",
-			.vlan_tag = (vlan_enabled ? htons(0xfff) : 0),
+			.vlan_tag = (vlan_enabled ?
+				     rte_cpu_to_be_16(0xfff) :
+				     0),
 		},
 	};
 	DEBUG("%p: adding MAC address %02x:%02x:%02x:%02x:%02x:%02x index %u"

diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c

@@ -203,7 +203,7 @@ txq_mp2mr_reg(struct txq *txq, struct rte_mempool *mp, unsigned int idx)
 	txq_ctrl->txq.mp2mr[idx].start = (uintptr_t)mr->addr;
 	txq_ctrl->txq.mp2mr[idx].end = (uintptr_t)mr->addr + mr->length;
 	txq_ctrl->txq.mp2mr[idx].mr = mr;
-	txq_ctrl->txq.mp2mr[idx].lkey = htonl(mr->lkey);
+	txq_ctrl->txq.mp2mr[idx].lkey = rte_cpu_to_be_32(mr->lkey);
 	DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32,
 	      (void *)txq_ctrl, mp->name, (void *)mp,
 	      txq_ctrl->txq.mp2mr[idx].lkey);

diff --git a/drivers/net/mlx5/mlx5_rxmode.c b/drivers/net/mlx5/mlx5_rxmode.c
--- a/drivers/net/mlx5/mlx5_rxmode.c
+++ b/drivers/net/mlx5/mlx5_rxmode.c

@@ -159,14 +159,18 @@ hash_rxq_special_flow_enable_vlan(struct hash_rxq *hash_rxq,
 				mac[0], mac[1], mac[2],
 				mac[3], mac[4], mac[5],
 			},
-			.vlan_tag = (vlan_enabled ? htons(vlan_id) : 0),
+			.vlan_tag = (vlan_enabled ?
+				     rte_cpu_to_be_16(vlan_id) :
+				     0),
 		},
 		.mask = {
 			.dst_mac = {
 				mask[0], mask[1], mask[2],
 				mask[3], mask[4], mask[5],
 			},
-			.vlan_tag = (vlan_enabled ? htons(0xfff) : 0),
+			.vlan_tag = (vlan_enabled ?
+				     rte_cpu_to_be_16(0xfff) :
+				     0),
 		},
 	};

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c

@@ -672,9 +672,10 @@ rxq_alloc_elts(struct rxq_ctrl *rxq_ctrl, unsigned int elts_n)
 		/* scat->addr must be able to store a pointer. */
 		assert(sizeof(scat->addr) >= sizeof(uintptr_t));
 		*scat = (struct mlx5_wqe_data_seg){
-			.addr = htonll(rte_pktmbuf_mtod(buf, uintptr_t)),
-			.byte_count = htonl(DATA_LEN(buf)),
-			.lkey = htonl(rxq_ctrl->mr->lkey),
+			.addr =
+			    rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t)),
+			.byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
+			.lkey = rte_cpu_to_be_32(rxq_ctrl->mr->lkey),
 		};
 		(*rxq_ctrl->rxq.elts)[i] = buf;
 	}
@@ -1077,7 +1078,7 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
 	/* Update doorbell counter. */
 	rxq_ctrl->rxq.rq_ci = desc >> rxq_ctrl->rxq.sges_n;
 	rte_wmb();
-	*rxq_ctrl->rxq.rq_db = htonl(rxq_ctrl->rxq.rq_ci);
+	*rxq_ctrl->rxq.rq_db = rte_cpu_to_be_32(rxq_ctrl->rxq.rq_ci);
 	DEBUG("%p: rxq updated with %p", (void *)rxq_ctrl, (void *)&tmpl);
 	assert(ret == 0);
 	return 0;

diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c

@@ -306,7 +306,7 @@ mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
 		op_own = cqe->op_own;
 		if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
-			n = ntohl(cqe->byte_cnt);
+			n = rte_be_to_cpu_32(cqe->byte_cnt);
 		else
 			n = 1;
 		cq_ci += n;
@@ -436,7 +436,8 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE;
 		/* Replace the Ethernet type by the VLAN if necessary. */
 		if (buf->ol_flags & PKT_TX_VLAN_PKT) {
-			uint32_t vlan = htonl(0x81000000 | buf->vlan_tci);
+			uint32_t vlan = rte_cpu_to_be_32(0x81000000 |
+							 buf->vlan_tci);
 			unsigned int len = 2 * ETHER_ADDR_LEN - 2;

 			addr += 2;
@@ -514,8 +515,10 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			} else {
 				/* NOP WQE. */
 				wqe->ctrl = (rte_v128u32_t){
-					htonl(txq->wqe_ci << 8),
-					htonl(txq->qp_num_8s | 1),
+					rte_cpu_to_be_32(
+						txq->wqe_ci << 8),
+					rte_cpu_to_be_32(
+						txq->qp_num_8s | 1),
 					0,
 					0,
 				};
@@ -554,7 +557,14 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 				break;
 			max_wqe -= n;
 			if (tso) {
-				inl = htonl(copy_b | MLX5_INLINE_SEG);
+				uint32_t inl =
+					rte_cpu_to_be_32(copy_b |
+							 MLX5_INLINE_SEG);
+
+				pkt_inline_sz =
+					MLX5_WQE_DS(tso_header_sz) *
+					MLX5_WQE_DWORD_SIZE;
 				rte_memcpy((void *)raw,
 					   (void *)&inl, sizeof(inl));
 				raw += sizeof(inl);
@@ -603,9 +613,9 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 				ds = 3;
 use_dseg:
 				/* Add the remaining packet as a simple ds. */
-				naddr = htonll(addr);
+				naddr = rte_cpu_to_be_64(addr);
 				*dseg = (rte_v128u32_t){
-					htonl(length),
+					rte_cpu_to_be_32(length),
 					mlx5_tx_mb2mr(txq, buf),
 					naddr,
 					naddr >> 32,
@@ -642,9 +652,9 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			total_length += length;
 #endif
 			/* Store segment information. */
-			naddr = htonll(rte_pktmbuf_mtod(buf, uintptr_t));
+			naddr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t));
 			*dseg = (rte_v128u32_t){
-				htonl(length),
+				rte_cpu_to_be_32(length),
 				mlx5_tx_mb2mr(txq, buf),
 				naddr,
 				naddr >> 32,
@@ -667,21 +677,23 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		/* Initialize known and common part of the WQE structure. */
 		if (tso) {
 			wqe->ctrl = (rte_v128u32_t){
-				htonl((txq->wqe_ci << 8) | MLX5_OPCODE_TSO),
-				htonl(txq->qp_num_8s | ds),
+				rte_cpu_to_be_32((txq->wqe_ci << 8) |
+						 MLX5_OPCODE_TSO),
+				rte_cpu_to_be_32(txq->qp_num_8s | ds),
 				0,
 				0,
 			};
 			wqe->eseg = (rte_v128u32_t){
 				0,
-				cs_flags | (htons(tso_segsz) << 16),
+				cs_flags | (rte_cpu_to_be_16(tso_segsz) << 16),
 				0,
-				(ehdr << 16) | htons(tso_header_sz),
+				(ehdr << 16) | rte_cpu_to_be_16(tso_header_sz),
 			};
 		} else {
 			wqe->ctrl = (rte_v128u32_t){
-				htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND),
-				htonl(txq->qp_num_8s | ds),
+				rte_cpu_to_be_32((txq->wqe_ci << 8) |
+						 MLX5_OPCODE_SEND),
+				rte_cpu_to_be_32(txq->qp_num_8s | ds),
 				0,
 				0,
 			};
@@ -689,7 +701,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 				0,
 				cs_flags,
 				0,
-				(ehdr << 16) | htons(pkt_inline_sz),
+				(ehdr << 16) | rte_cpu_to_be_16(pkt_inline_sz),
 			};
 		}
 next_wqe:
@@ -709,7 +721,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	comp = txq->elts_comp + i + j + k;
 	if (comp >= MLX5_TX_COMP_THRESH) {
 		/* Request completion on last WQE. */
-		last_wqe->ctrl2 = htonl(8);
+		last_wqe->ctrl2 = rte_cpu_to_be_32(8);
 		/* Save elts_head in unused "immediate" field of WQE. */
 		last_wqe->ctrl3 = txq->elts_head;
 		txq->elts_comp = 0;
@@ -748,13 +760,14 @@ mlx5_mpw_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
 	mpw->len = length;
 	mpw->total_len = 0;
 	mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
-	mpw->wqe->eseg.mss = htons(length);
+	mpw->wqe->eseg.mss = rte_cpu_to_be_16(length);
 	mpw->wqe->eseg.inline_hdr_sz = 0;
 	mpw->wqe->eseg.rsvd0 = 0;
 	mpw->wqe->eseg.rsvd1 = 0;
 	mpw->wqe->eseg.rsvd2 = 0;
-	mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_MPW << 24) |
-				  (txq->wqe_ci << 8) | MLX5_OPCODE_TSO);
+	mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
+					     (txq->wqe_ci << 8) |
+					     MLX5_OPCODE_TSO);
 	mpw->wqe->ctrl[2] = 0;
 	mpw->wqe->ctrl[3] = 0;
 	mpw->data.dseg[0] = (volatile struct mlx5_wqe_data_seg *)
@@ -783,7 +796,7 @@ mlx5_mpw_close(struct txq *txq, struct mlx5_mpw *mpw)
 	 * Store size in multiple of 16 bytes. Control and Ethernet segments
 	 * count as 2.
 	 */
-	mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | (2 + num));
+	mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s | (2 + num));
 	mpw->state = MLX5_MPW_STATE_CLOSED;
 	if (num < 3)
 		++txq->wqe_ci;
@@ -892,9 +905,9 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		dseg = mpw.data.dseg[mpw.pkts_n];
 		addr = rte_pktmbuf_mtod(buf, uintptr_t);
 		*dseg = (struct mlx5_wqe_data_seg){
-			.byte_count = htonl(DATA_LEN(buf)),
+			.byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
 			.lkey = mlx5_tx_mb2mr(txq, buf),
-			.addr = htonll(addr),
+			.addr = rte_cpu_to_be_64(addr),
 		};
 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
 		length += DATA_LEN(buf);
@@ -922,7 +935,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		volatile struct mlx5_wqe *wqe = mpw.wqe;

 		/* Request completion on last WQE. */
-		wqe->ctrl[2] = htonl(8);
+		wqe->ctrl[2] = rte_cpu_to_be_32(8);
 		/* Save elts_head in unused "immediate" field of WQE. */
 		wqe->ctrl[3] = elts_head;
 		txq->elts_comp = 0;
@@ -962,12 +975,12 @@ mlx5_mpw_inline_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
 	mpw->len = length;
 	mpw->total_len = 0;
 	mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
-	mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_MPW << 24) |
-				  (txq->wqe_ci << 8) |
-				  MLX5_OPCODE_TSO);
+	mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
+					     (txq->wqe_ci << 8) |
+					     MLX5_OPCODE_TSO);
 	mpw->wqe->ctrl[2] = 0;
 	mpw->wqe->ctrl[3] = 0;
-	mpw->wqe->eseg.mss = htons(length);
+	mpw->wqe->eseg.mss = rte_cpu_to_be_16(length);
 	mpw->wqe->eseg.inline_hdr_sz = 0;
 	mpw->wqe->eseg.cs_flags = 0;
 	mpw->wqe->eseg.rsvd0 = 0;
@@ -998,9 +1011,10 @@ mlx5_mpw_inline_close(struct txq *txq, struct mlx5_mpw *mpw)
 	 * Store size in multiple of 16 bytes. Control and Ethernet segments
 	 * count as 2.
 	 */
-	mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | MLX5_WQE_DS(size));
+	mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s |
+					     MLX5_WQE_DS(size));
 	mpw->state = MLX5_MPW_STATE_CLOSED;
-	inl->byte_cnt = htonl(mpw->total_len | MLX5_INLINE_SEG);
+	inl->byte_cnt = rte_cpu_to_be_32(mpw->total_len | MLX5_INLINE_SEG);
 	txq->wqe_ci += (size + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
 }
@@ -1140,9 +1154,10 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
 			dseg = mpw.data.dseg[mpw.pkts_n];
 			addr = rte_pktmbuf_mtod(buf, uintptr_t);
 			*dseg = (struct mlx5_wqe_data_seg){
-				.byte_count = htonl(DATA_LEN(buf)),
+				.byte_count =
+					rte_cpu_to_be_32(DATA_LEN(buf)),
 				.lkey = mlx5_tx_mb2mr(txq, buf),
-				.addr = htonll(addr),
+				.addr = rte_cpu_to_be_64(addr),
 			};
 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
 			length += DATA_LEN(buf);
@@ -1214,7 +1229,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
 		volatile struct mlx5_wqe *wqe = mpw.wqe;

 		/* Request completion on last WQE. */
-		wqe->ctrl[2] = htonl(8);
+		wqe->ctrl[2] = rte_cpu_to_be_32(8);
 		/* Save elts_head in unused "immediate" field of WQE. */
 		wqe->ctrl[3] = elts_head;
 		txq->elts_comp = 0;
@@ -1254,9 +1269,10 @@ mlx5_empw_new(struct txq *txq, struct mlx5_mpw *mpw, int padding)
 	mpw->pkts_n = 0;
 	mpw->total_len = sizeof(struct mlx5_wqe);
 	mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
-	mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_ENHANCED_MPSW << 24) |
-				  (txq->wqe_ci << 8) |
-				  MLX5_OPCODE_ENHANCED_MPSW);
+	mpw->wqe->ctrl[0] =
+		rte_cpu_to_be_32((MLX5_OPC_MOD_ENHANCED_MPSW << 24) |
+				 (txq->wqe_ci << 8) |
+				 MLX5_OPCODE_ENHANCED_MPSW);
 	mpw->wqe->ctrl[2] = 0;
 	mpw->wqe->ctrl[3] = 0;
 	memset((void *)(uintptr_t)&mpw->wqe->eseg, 0, MLX5_WQE_DWORD_SIZE);
@@ -1264,9 +1280,9 @@ mlx5_empw_new(struct txq *txq, struct mlx5_mpw *mpw, int padding)
 		uintptr_t addr = (uintptr_t)(mpw->wqe + 1);

 		/* Pad the first 2 DWORDs with zero-length inline header. */
-		*(volatile uint32_t *)addr = htonl(MLX5_INLINE_SEG);
+		*(volatile uint32_t *)addr = rte_cpu_to_be_32(MLX5_INLINE_SEG);
 		*(volatile uint32_t *)(addr + MLX5_WQE_DWORD_SIZE) =
-			htonl(MLX5_INLINE_SEG);
+			rte_cpu_to_be_32(MLX5_INLINE_SEG);
 		mpw->total_len += 2 * MLX5_WQE_DWORD_SIZE;
 		/* Start from the next WQEBB. */
 		mpw->data.raw = (volatile void *)(tx_mlx5_wqe(txq, idx + 1));
@@ -1294,7 +1310,8 @@ mlx5_empw_close(struct txq *txq, struct mlx5_mpw *mpw)
 	/* Store size in multiple of 16 bytes. Control and Ethernet segments
 	 * count as 2.
 	 */
-	mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | MLX5_WQE_DS(mpw->total_len));
+	mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s |
+					     MLX5_WQE_DS(mpw->total_len));
 	mpw->state = MLX5_MPW_STATE_CLOSED;
 	ret = (mpw->total_len + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
 	txq->wqe_ci += ret;
@@ -1449,9 +1466,10 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			dseg = mpw.data.dseg[mpw.pkts_n];
 			addr = rte_pktmbuf_mtod(buf, uintptr_t);
 			*dseg = (struct mlx5_wqe_data_seg){
-				.byte_count = htonl(DATA_LEN(buf)),
+				.byte_count = rte_cpu_to_be_32(
+						DATA_LEN(buf)),
 				.lkey = mlx5_tx_mb2mr(txq, buf),
-				.addr = htonll(addr),
+				.addr = rte_cpu_to_be_64(addr),
 			};
 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
 			length += DATA_LEN(buf);
@@ -1474,7 +1492,7 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED);
 			assert(length == DATA_LEN(buf));
-			inl_hdr = htonl(length | MLX5_INLINE_SEG);
+			inl_hdr = rte_cpu_to_be_32(length | MLX5_INLINE_SEG);
 			addr = rte_pktmbuf_mtod(buf, uintptr_t);
 			mpw.data.raw = (volatile void *)
 				((uintptr_t)mpw.data.raw + inl_pad);
@@ -1530,9 +1548,9 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			for (n = 0; n * RTE_CACHE_LINE_SIZE < length; n++)
 				rte_prefetch2((void *)(addr +
 						       n * RTE_CACHE_LINE_SIZE));
-			naddr = htonll(addr);
+			naddr = rte_cpu_to_be_64(addr);
 			*dseg = (rte_v128u32_t) {
-				htonl(length),
+				rte_cpu_to_be_32(length),
 				mlx5_tx_mb2mr(txq, buf),
 				naddr,
 				naddr >> 32,
@@ -1560,7 +1578,7 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		volatile struct mlx5_wqe *wqe = mpw.wqe;

 		/* Request completion on last WQE. */
-		wqe->ctrl[2] = htonl(8);
+		wqe->ctrl[2] = rte_cpu_to_be_32(8);
 		/* Save elts_head in unused "immediate" field of WQE. */
 		wqe->ctrl[3] = elts_head;
 		txq->elts_comp = 0;
@@ -1644,8 +1662,8 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
 			(volatile struct mlx5_mini_cqe8 (*)[8])
 			(uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].pkt_info);

-		len = ntohl((*mc)[zip->ai & 7].byte_cnt);
-		*rss_hash = ntohl((*mc)[zip->ai & 7].rx_hash_result);
+		len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
+		*rss_hash = rte_be_to_cpu_32((*mc)[zip->ai & 7].rx_hash_result);
 		if ((++zip->ai & 7) == 0) {
 			/* Invalidate consumed CQEs */
 			idx = zip->ca;
@@ -1693,7 +1711,7 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
 						  cqe_cnt].pkt_info);

 			/* Fix endianness. */
-			zip->cqe_cnt = ntohl(cqe->byte_cnt);
+			zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
 			/*
 			 * Current mini array position is the one returned by
 			 * check_cqe64().
@@ -1708,8 +1726,8 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
 				--rxq->cq_ci;
 			zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
 			/* Get packet size to return. */
-			len = ntohl((*mc)[0].byte_cnt);
-			*rss_hash = ntohl((*mc)[0].rx_hash_result);
+			len = rte_be_to_cpu_32((*mc)[0].byte_cnt);
+			*rss_hash = rte_be_to_cpu_32((*mc)[0].rx_hash_result);
 			zip->ai = 1;
 			/* Prefetch all the entries to be invalidated */
 			idx = zip->ca;
@@ -1719,8 +1737,8 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
 				++idx;
 			}
 		} else {
-			len = ntohl(cqe->byte_cnt);
-			*rss_hash = ntohl(cqe->rx_hash_res);
+			len = rte_be_to_cpu_32(cqe->byte_cnt);
+			*rss_hash = rte_be_to_cpu_32(cqe->rx_hash_res);
 		}
 		/* Error while receiving packet. */
 		if (unlikely(MLX5_CQE_OPCODE(op_own) == MLX5_CQE_RESP_ERR))
@@ -1744,7 +1762,7 @@ static inline uint32_t
 rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe)
 {
 	uint32_t ol_flags = 0;
-	uint16_t flags = ntohs(cqe->hdr_type_etc);
+	uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);

 	ol_flags =
 		TRANSPOSE(flags,
@@ -1851,7 +1869,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			    MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
 				pkt->ol_flags |= PKT_RX_FDIR;
 				if (cqe->sop_drop_qpn !=
-				    htonl(MLX5_FLOW_MARK_DEFAULT)) {
+				    rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) {
 					uint32_t mark = cqe->sop_drop_qpn;

 					pkt->ol_flags |= PKT_RX_FDIR_ID;
@@ -1863,10 +1881,11 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			pkt->ol_flags |= rxq_cq_to_ol_flags(rxq, cqe);
 			if (rxq->vlan_strip &&
 			    (cqe->hdr_type_etc &
-			     htons(MLX5_CQE_VLAN_STRIPPED))) {
+			     rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) {
 				pkt->ol_flags |= PKT_RX_VLAN_PKT |
 					PKT_RX_VLAN_STRIPPED;
-				pkt->vlan_tci = ntohs(cqe->vlan_info);
+				pkt->vlan_tci =
+					rte_be_to_cpu_16(cqe->vlan_info);
 			}
 			if (rxq->crc_present)
 				len -= ETHER_CRC_LEN;
@@ -1882,7 +1901,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		 * of the buffers are already known, only the buffer address
 		 * changes.
 		 */
-		wqe->addr = htonll(rte_pktmbuf_mtod(rep, uintptr_t));
+		wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
 		if (len > DATA_LEN(seg)) {
 			len -= DATA_LEN(seg);
 			++NB_SEGS(pkt);
@@ -1910,9 +1929,9 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	/* Update the consumer index. */
 	rxq->rq_ci = rq_ci >> sges_n;
 	rte_wmb();
-	*rxq->cq_db = htonl(rxq->cq_ci);
+	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
 	rte_wmb();
-	*rxq->rq_db = htonl(rxq->rq_ci);
+	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
 #ifdef MLX5_PMD_SOFT_COUNTERS
 	/* Increment packets counter. */
 	rxq->stats.ipackets += i;

diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h

@@ -269,7 +269,7 @@ struct txq {
 		uintptr_t start; /* Start address of MR */
 		uintptr_t end; /* End address of MR */
 		struct ibv_mr *mr; /* Memory Region (for mp). */
-		uint32_t lkey; /* htonl(mr->lkey) */
+		uint32_t lkey; /* rte_cpu_to_be_32(mr->lkey) */
 	} mp2mr[MLX5_PMD_TX_MP_CACHE]; /* MP to MR translation table. */
 	uint16_t mr_cache_idx; /* Index of last hit entry. */
 	struct rte_mbuf *(*elts)[]; /* TX elements. */
@@ -492,7 +492,7 @@ mlx5_tx_complete(struct txq *txq)
 	}
 #endif /* NDEBUG */
 	++cq_ci;
-	txq->wqe_pi = ntohs(cqe->wqe_counter);
+	txq->wqe_pi = rte_be_to_cpu_16(cqe->wqe_counter);
 	ctrl = (volatile struct mlx5_wqe_ctrl *)
 		tx_mlx5_wqe(txq, txq->wqe_pi);
 	elts_tail = ctrl->ctrl3;
@@ -530,7 +530,7 @@ mlx5_tx_complete(struct txq *txq)
 	txq->elts_tail = elts_tail;
 	/* Update the consumer index. */
 	rte_wmb();
-	*txq->cq_db = htonl(cq_ci);
+	*txq->cq_db = rte_cpu_to_be_32(cq_ci);
 }

 /**
@@ -581,7 +581,7 @@ mlx5_tx_mb2mr(struct txq *txq, struct rte_mbuf *mb)
 		if (txq->mp2mr[i].start <= addr &&
 		    txq->mp2mr[i].end >= addr) {
 			assert(txq->mp2mr[i].lkey != (uint32_t)-1);
-			assert(htonl(txq->mp2mr[i].mr->lkey) ==
+			assert(rte_cpu_to_be_32(txq->mp2mr[i].mr->lkey) ==
 			       txq->mp2mr[i].lkey);
 			txq->mr_cache_idx = i;
 			return txq->mp2mr[i].lkey;
@@ -606,7 +606,7 @@ mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe)
 	volatile uint64_t *src = ((volatile uint64_t *)wqe);

 	rte_io_wmb();
-	*txq->qp_db = htonl(txq->wqe_ci);
+	*txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
 	/* Ensure ordering between DB record and BF copy. */
 	rte_wmb();
 	*dst = *src;

diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.c b/drivers/net/mlx5/mlx5_rxtx_vec_sse.c
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.c

@@ -293,7 +293,7 @@ txq_scatter_v(struct txq *txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		/* Fill ESEG in the header. */
 		_mm_store_si128(t_wqe + 1,
 				_mm_set_epi16(0, 0, 0, 0,
-					      htons(len), cs_flags,
+					      rte_cpu_to_be_16(len), cs_flags,
 					      0, 0));
 		txq->wqe_ci = wqe_ci;
 	}
@@ -302,7 +302,7 @@ txq_scatter_v(struct txq *txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	txq->elts_comp += (uint16_t)(elts_head - txq->elts_head);
 	txq->elts_head = elts_head;
 	if (txq->elts_comp >= MLX5_TX_COMP_THRESH) {
-		wqe->ctrl[2] = htonl(8);
+		wqe->ctrl[2] = rte_cpu_to_be_32(8);
 		wqe->ctrl[3] = txq->elts_head;
 		txq->elts_comp = 0;
 		++txq->cq_pi;
@@ -564,11 +564,11 @@ rxq_replenish_bulk_mbuf(struct rxq *rxq, uint16_t n)
 		return;
 	}
 	for (i = 0; i < n; ++i)
-		wq[i].addr = htonll((uintptr_t)elts[i]->buf_addr +
-				    RTE_PKTMBUF_HEADROOM);
+		wq[i].addr = rte_cpu_to_be_64((uintptr_t)elts[i]->buf_addr +
+					      RTE_PKTMBUF_HEADROOM);
 	rxq->rq_ci += n;
 	rte_wmb();
-	*rxq->rq_db = htonl(rxq->rq_ci);
+	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
 }

 /**
@@ -1251,7 +1251,7 @@ rxq_burst_v(struct rxq *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		}
 	}
 	rte_wmb();
-	*rxq->cq_db = htonl(rxq->cq_ci);
+	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
 	return rcvd_pkt;
 }
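
The doorbell updates touched above all follow the same pattern: a write barrier, then a 32-bit big-endian store of the queue index. A minimal sketch of that pattern, assuming a hypothetical demo_rxq struct shaped like the rxq fields used in the diff:

	#include <stdint.h>
	#include <rte_atomic.h>
	#include <rte_byteorder.h>

	struct demo_rxq {
		volatile uint32_t *cq_db; /* CQ doorbell record (big-endian). */
		uint32_t cq_ci;           /* CQ consumer index (host order). */
	};

	static void
	demo_cq_doorbell(struct demo_rxq *rxq)
	{
		/* Make queue entry updates visible before ringing. */
		rte_wmb();
		/* The device reads the doorbell record as big-endian. */
		*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
	}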