mbuf: rename physical address to IOVA
Rename buf_physaddr to buf_iova. Keep the deprecated name in an anonymous
union to avoid breaking the API.

Signed-off-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
Reviewed-by: Anatoly Burakov <anatoly.burakov@intel.com>
Signed-off-by: Thomas Monjalon <thomas@monjalon.net>
Acked-by: Olivier Matz <olivier.matz@6wind.com>
parent c3ee68b879
commit 455da54539
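The compatibility trick named in the message appears in the rte_mbuf.h hunk
further down: an anonymous union gives the one 8-byte field two names, so
code that still says buf_physaddr keeps compiling and touches the same
storage. As a standalone illustration, a minimal C11 sketch of the same
pattern (types simplified to uint64_t; everything except the two field
names is hypothetical):

#include <assert.h>
#include <stdint.h>

struct example_mbuf {
        void *buf_addr;
        union {
                uint64_t buf_iova;
                uint64_t buf_physaddr; /* deprecated alias */
        };
};

int main(void)
{
        struct example_mbuf m;

        m.buf_physaddr = 0x1000;      /* legacy writer ...              */
        assert(m.buf_iova == 0x1000); /* ... seen through the new name  */
        return 0;
}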
@@ -50,7 +50,7 @@ fill_single_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
 	/* start of buffer is after mbuf structure and priv data */
 	m->priv_size = 0;
 	m->buf_addr = (char *)m + mbuf_hdr_size;
-	m->buf_physaddr = rte_mempool_virt2iova(obj) +
+	m->buf_iova = rte_mempool_virt2iova(obj) +
 		mbuf_offset + mbuf_hdr_size;
 	m->buf_len = segment_sz;
 	m->data_len = segment_sz;
@@ -81,7 +81,7 @@ fill_multi_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
 	/* start of buffer is after mbuf structure and priv data */
 	m->priv_size = 0;
 	m->buf_addr = (char *)m + mbuf_hdr_size;
-	m->buf_physaddr = next_seg_phys_addr;
+	m->buf_iova = next_seg_phys_addr;
 	next_seg_phys_addr += mbuf_hdr_size + segment_sz;
 	m->buf_len = segment_sz;
 	m->data_len = segment_sz;
@@ -482,7 +482,7 @@
          sodipodi:role="line"
          x="187.85715"
          y="347.7193"
-         id="tspan5240">(m->buf_physaddr is the</tspan><tspan
+         id="tspan5240">(m->buf_iova is the</tspan><tspan
          sodipodi:role="line"
          x="187.85715"
          y="360.2193"
@@ -314,7 +314,7 @@ static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr)
  * These routines are called with help of below MACRO's
  */

-#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_physaddr)
+#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_iova)
 #define DPAA2_OP_VADDR_TO_IOVA(op) (op->phys_addr)

 /**
@@ -105,8 +105,8 @@ build_proto_fd(dpaa2_sec_session *sess,
 	DPAA2_SET_FD_FLC(fd, ((uint64_t)flc));

 	/* save physical address of mbuf */
-	op->sym->aead.digest.phys_addr = mbuf->buf_physaddr;
-	mbuf->buf_physaddr = (uint64_t)op;
+	op->sym->aead.digest.phys_addr = mbuf->buf_iova;
+	mbuf->buf_iova = (uint64_t)op;

 	return 0;
 }
@@ -723,8 +723,8 @@ sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id)
 		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

-	op = (struct rte_crypto_op *)mbuf->buf_physaddr;
-	mbuf->buf_physaddr = op->sym->aead.digest.phys_addr;
+	op = (struct rte_crypto_op *)mbuf->buf_iova;
+	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
 	op->sym->aead.digest.phys_addr = 0L;

 	sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
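The two dpaa2_sec hunks above show a side effect of the rename as well: the
driver temporarily stashes the rte_crypto_op pointer in the mbuf's IOVA
field while the packet sits with the accelerator, then restores the real
address on completion. A minimal sketch of that stash/restore idea, with
hypothetical mini types standing in for the driver's structures:

#include <stdint.h>

struct mini_mbuf { uint64_t buf_iova; };
struct mini_op   { uint64_t saved_iova; };

/* On submit: remember the real IOVA inside the op, then reuse the
 * mbuf field as a cookie that survives the hardware round-trip. */
static void stash_op(struct mini_mbuf *m, struct mini_op *op)
{
        op->saved_iova = m->buf_iova;
        m->buf_iova = (uint64_t)(uintptr_t)op;
}

/* On completion: recover the op from the cookie and restore the
 * mbuf's IOVA before the mbuf is handed back to the application. */
static struct mini_op *unstash_op(struct mini_mbuf *m)
{
        struct mini_op *op = (struct mini_op *)(uintptr_t)m->buf_iova;

        m->buf_iova = op->saved_iova;
        return op;
}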
@@ -798,9 +798,9 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
 			ses->iv.offset);

-	src_start_addr = sym->m_src->buf_physaddr + sym->m_src->data_off;
+	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
 	if (sym->m_dst)
-		dst_start_addr = sym->m_dst->buf_physaddr + sym->m_dst->data_off;
+		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
 	else
 		dst_start_addr = src_start_addr;

@@ -499,22 +499,22 @@ eth_ark_rx_seed_mbufs(struct ark_rx_queue *queue)
 	case 0:
 		while (count != nb) {
 			queue->paddress_q[seed_m++] =
-				(*mbufs++)->buf_physaddr;
+				(*mbufs++)->buf_iova;
 			count++;
 			/* FALLTHROUGH */
 	case 3:
 			queue->paddress_q[seed_m++] =
-				(*mbufs++)->buf_physaddr;
+				(*mbufs++)->buf_iova;
 			count++;
 			/* FALLTHROUGH */
 	case 2:
 			queue->paddress_q[seed_m++] =
-				(*mbufs++)->buf_physaddr;
+				(*mbufs++)->buf_iova;
 			count++;
 			/* FALLTHROUGH */
 	case 1:
 			queue->paddress_q[seed_m++] =
-				(*mbufs++)->buf_physaddr;
+				(*mbufs++)->buf_iova;
 			count++;
 			/* FALLTHROUGH */
@@ -140,7 +140,7 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			return -ENOMEM;
 		}
 		rxq->sw_ring[idx] = mbuf;
-		rxq->rx_ring[idx] = mbuf->buf_physaddr;
+		rxq->rx_ring[idx] = mbuf->buf_iova;
 	}
 	rxq->pkt_first_seg = NULL;
 	rxq->pkt_last_seg = NULL;
@@ -400,7 +400,7 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)

 		rx_mb = rxq->sw_ring[bd_cons];
 		rxq->sw_ring[bd_cons] = new_mb;
-		rxq->rx_ring[bd_prod] = new_mb->buf_physaddr;
+		rxq->rx_ring[bd_prod] = new_mb->buf_iova;

 		rx_pref = NEXT_RX_BD(bd_cons) & MAX_RX_BD(rxq);
 		rte_prefetch0(rxq->sw_ring[rx_pref]);
@@ -41,7 +41,7 @@
 #define RING_NEXT(ring, idx) (((idx) + 1) & (ring)->ring_mask)

 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
-	((uint64_t)((mb)->buf_physaddr + (mb)->data_off))
+	((uint64_t)((mb)->buf_iova + (mb)->data_off))

 #define DB_IDX_MASK 0xffffff
 #define DB_IDX_VALID (0x1 << 26)
@@ -149,7 +149,7 @@ static int map_mbuf(struct rte_mbuf *mbuf, dma_addr_t *addr)
 	struct rte_mbuf *m = mbuf;

 	for (; m; m = m->next, addr++) {
-		*addr = m->buf_physaddr + rte_pktmbuf_headroom(m);
+		*addr = m->buf_iova + rte_pktmbuf_headroom(m);
 		if (*addr == 0)
 			goto out_err;
 	}
@@ -423,7 +423,7 @@ static unsigned int refill_fl_usembufs(struct adapter *adap, struct sge_fl *q,
 		mbuf->nb_segs = 1;
 		mbuf->port = rxq->rspq.port_id;

-		mapping = (dma_addr_t)RTE_ALIGN(mbuf->buf_physaddr +
+		mapping = (dma_addr_t)RTE_ALIGN(mbuf->buf_iova +
 						mbuf->data_off,
 						adap->sge.fl_align);
 		mapping |= buf_size_idx;
@@ -77,7 +77,7 @@
 		(_fd)->opaque = QM_FD_CONTIG << DPAA_FD_FORMAT_SHIFT; \
 		(_fd)->opaque |= ((_mbuf)->data_off) << DPAA_FD_OFFSET_SHIFT; \
 		(_fd)->opaque |= (_mbuf)->pkt_len; \
-		(_fd)->addr = (_mbuf)->buf_physaddr; \
+		(_fd)->addr = (_mbuf)->buf_iova; \
 		(_fd)->bpid = _bpid; \
 	} while (0)

@@ -514,7 +514,7 @@ dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,

 	sgt = temp->buf_addr + temp->data_off;
 	fd->format = QM_FD_SG;
-	fd->addr = temp->buf_physaddr;
+	fd->addr = temp->buf_iova;
 	fd->offset = temp->data_off;
 	fd->bpid = bpid;
 	fd->length20 = mbuf->pkt_len;
@@ -523,7 +523,7 @@ dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
 		sg_temp = &sgt[i++];
 		sg_temp->opaque = 0;
 		sg_temp->val = 0;
-		sg_temp->addr = cur_seg->buf_physaddr;
+		sg_temp->addr = cur_seg->buf_iova;
 		sg_temp->offset = cur_seg->data_off;
 		sg_temp->length = cur_seg->data_len;
 		if (RTE_MBUF_DIRECT(cur_seg)) {
@@ -1167,7 +1167,7 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)

 		rte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]);
 		/* prepare physical address for DMA transaction */
-		ebuf.paddr = mbuf->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+		ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
 		ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
 		/* pass resource to device */
 		rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq,
@@ -1726,7 +1726,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		 * consideration pushed header
 		 */
 		if (mbuf->data_len > ena_tx_ctx.header_len) {
-			ebuf->paddr = mbuf->buf_physaddr +
+			ebuf->paddr = mbuf->buf_iova +
 				mbuf->data_off +
 				ena_tx_ctx.header_len;
 			ebuf->len = mbuf->data_len - ena_tx_ctx.header_len;
@@ -1735,7 +1735,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		}

 		while ((mbuf = mbuf->next) != NULL) {
-			ebuf->paddr = mbuf->buf_physaddr + mbuf->data_off;
+			ebuf->paddr = mbuf->buf_iova + mbuf->data_off;
 			ebuf->len = mbuf->data_len;
 			ebuf++;
 			tx_info->num_of_bufs++;
@@ -316,7 +316,7 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
 		}

 		mb->data_off = RTE_PKTMBUF_HEADROOM;
-		dma_addr = (dma_addr_t)(mb->buf_physaddr
+		dma_addr = (dma_addr_t)(mb->buf_iova
 			   + RTE_PKTMBUF_HEADROOM);
 		rq_enet_desc_enc(rqd, dma_addr,
 				(rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
@@ -386,7 +386,7 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,

 		/* Push descriptor for newly allocated mbuf */
 		nmb->data_off = RTE_PKTMBUF_HEADROOM;
-		dma_addr = (dma_addr_t)(nmb->buf_physaddr +
+		dma_addr = (dma_addr_t)(nmb->buf_iova +
 					RTE_PKTMBUF_HEADROOM);
 		rq_enet_desc_enc(rqd_ptr, dma_addr,
 				(rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
@@ -578,7 +578,7 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		vlan_id = 0;
 		vlan_tag_insert = 0;
 		bus_addr = (dma_addr_t)
-			   (tx_pkt->buf_physaddr + tx_pkt->data_off);
+			   (tx_pkt->buf_iova + tx_pkt->data_off);

 		descs = (struct wq_enet_desc *)wq->ring.descs;
 		desc_p = descs + head_idx;
@@ -630,7 +630,7 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 				if (tx_pkt->next == NULL)
 					eop = 1;
 				desc_p = descs + head_idx;
-				bus_addr = (dma_addr_t)(tx_pkt->buf_physaddr
+				bus_addr = (dma_addr_t)(tx_pkt->buf_iova
 					   + tx_pkt->data_off);
 				wq_enet_desc_enc((struct wq_enet_desc *)
 						 &desc_tmp, bus_addr, data_len,
@@ -253,11 +253,11 @@ struct fm10k_txq_ops {
 };

 #define MBUF_DMA_ADDR(mb) \
-	((uint64_t) ((mb)->buf_physaddr + (mb)->data_off))
+	((uint64_t) ((mb)->buf_iova + (mb)->data_off))

 /* enforce 512B alignment on default Rx DMA addresses */
 #define MBUF_DMA_ADDR_DEFAULT(mb) \
-	((uint64_t) RTE_ALIGN(((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM),\
+	((uint64_t) RTE_ALIGN(((mb)->buf_iova + RTE_PKTMBUF_HEADROOM),\
 			FM10K_RX_DATABUF_ALIGN))

 static inline void fifo_reset(struct fifo *fifo, uint32_t len)
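MBUF_DMA_ADDR_DEFAULT above rounds the post-headroom IOVA up to fm10k's
512-byte Rx alignment. A rough standalone model of that computation
(ALIGN_UP is a local stand-in for RTE_ALIGN; a power-of-two alignment is
assumed, as in the real macro):

#include <stdint.h>

#define ALIGN_UP(v, a) (((v) + (a) - 1) & ~((uint64_t)(a) - 1))

/* Default Rx DMA address: skip the headroom, then round up so the
 * device always sees a 512-byte-aligned buffer start. */
static inline uint64_t default_rx_dma_addr(uint64_t buf_iova,
                                           uint64_t headroom)
{
        return ALIGN_UP(buf_iova + headroom, 512);
}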
@@ -330,8 +330,8 @@ fm10k_rxq_rearm(struct fm10k_rx_queue *rxq)
 		p1 = (uintptr_t)&mb1->rearm_data;
 		*(uint64_t *)p1 = rxq->mbuf_initializer;

-		/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
-		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
+		/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
 				offsetof(struct rte_mbuf, buf_addr) + 8);
 		vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
 		vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
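This hunk (and the matching i40e/ixgbe SSE hunks below) keeps a
compile-time guard because the vector rearm path fetches buf_addr and
buf_iova together with one unaligned 128-bit load. A rough illustration of
the layout assumption, using a hypothetical mini-mbuf and assuming a
64-bit build with SSE2 available:

#include <stddef.h>
#include <stdint.h>
#include <emmintrin.h>

/* Mirrors the first 16 bytes of rte_mbuf: buf_addr immediately
 * followed by the 8-byte buf_iova union. */
struct mini_mbuf {
        void *buf_addr;
        uint64_t buf_iova;
};

/* Same compile-time guard as the RTE_BUILD_BUG_ON in the diff. */
_Static_assert(offsetof(struct mini_mbuf, buf_iova) ==
               offsetof(struct mini_mbuf, buf_addr) + 8,
               "buf_iova must sit right after buf_addr");

/* One 128-bit load fetches both fields: low lane = buf_addr,
 * high lane = buf_iova. This is what the rearm loops rely on. */
static inline __m128i load_addr_iova(const struct mini_mbuf *m)
{
        return _mm_loadu_si128((const __m128i *)&m->buf_addr);
}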
@@ -100,7 +100,7 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
 		p1 = (uintptr_t)&mb1->rearm_data;
 		*(uint64_t *)p1 = rxq->mbuf_initializer;

-		/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+		/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
 		vaddr0 = vec_ld(0, (vector unsigned long *)&mb0->buf_addr);
 		vaddr1 = vec_ld(0, (vector unsigned long *)&mb1->buf_addr);

@@ -538,7 +538,7 @@ vtx1(volatile struct i40e_tx_desc *txdp,
 			((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));

 	vector unsigned long descriptor = (vector unsigned long){
-		pkt->buf_physaddr + pkt->data_off, high_qw};
+		pkt->buf_iova + pkt->data_off, high_qw};
 	*(vector unsigned long *)txdp = descriptor;
 }

@@ -81,13 +81,13 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
 		mb0 = rxep[0].mbuf;
 		mb1 = rxep[1].mbuf;

-		paddr = mb0->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+		paddr = mb0->buf_iova + RTE_PKTMBUF_HEADROOM;
 		dma_addr0 = vdupq_n_u64(paddr);

 		/* flush desc with pa dma_addr */
 		vst1q_u64((uint64_t *)&rxdp++->read, dma_addr0);

-		paddr = mb1->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+		paddr = mb1->buf_iova + RTE_PKTMBUF_HEADROOM;
 		dma_addr1 = vdupq_n_u64(paddr);
 		vst1q_u64((uint64_t *)&rxdp++->read, dma_addr1);
 	}
@@ -515,7 +515,7 @@ vtx1(volatile struct i40e_tx_desc *txdp,
 			((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) |
 			((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));

-	uint64x2_t descriptor = {pkt->buf_physaddr + pkt->data_off, high_qw};
+	uint64x2_t descriptor = {pkt->buf_iova + pkt->data_off, high_qw};
 	vst1q_u64((uint64_t *)txdp, descriptor);
 }

@@ -86,8 +86,8 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
 		mb0 = rxep[0].mbuf;
 		mb1 = rxep[1].mbuf;

-		/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
-		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
+		/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
 				offsetof(struct rte_mbuf, buf_addr) + 8);
 		vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
 		vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
@@ -549,7 +549,7 @@ vtx1(volatile struct i40e_tx_desc *txdp,
 			((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));

 	__m128i descriptor = _mm_set_epi64x(high_qw,
-				pkt->buf_physaddr + pkt->data_off);
+				pkt->buf_iova + pkt->data_off);
 	_mm_store_si128((__m128i *)txdp, descriptor);
 }

@@ -87,13 +87,13 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
 		 * Data to be rearmed is 6 bytes long.
 		 */
 		vst1_u8((uint8_t *)&mb0->rearm_data, p);
-		paddr = mb0->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+		paddr = mb0->buf_iova + RTE_PKTMBUF_HEADROOM;
 		dma_addr0 = vsetq_lane_u64(paddr, zero, 0);
 		/* flush desc with pa dma_addr */
 		vst1q_u64((uint64_t *)&rxdp++->read, dma_addr0);

 		vst1_u8((uint8_t *)&mb1->rearm_data, p);
-		paddr = mb1->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+		paddr = mb1->buf_iova + RTE_PKTMBUF_HEADROOM;
 		dma_addr1 = vsetq_lane_u64(paddr, zero, 0);
 		vst1q_u64((uint64_t *)&rxdp++->read, dma_addr1);
 	}
@@ -414,7 +414,7 @@ vtx1(volatile union ixgbe_adv_tx_desc *txdp,
 		struct rte_mbuf *pkt, uint64_t flags)
 {
 	uint64x2_t descriptor = {
-			pkt->buf_physaddr + pkt->data_off,
+			pkt->buf_iova + pkt->data_off,
 			(uint64_t)pkt->pkt_len << 46 | flags | pkt->data_len};

 	vst1q_u64((uint64_t *)&txdp->read, descriptor);
@@ -86,8 +86,8 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
 		mb0 = rxep[0].mbuf;
 		mb1 = rxep[1].mbuf;

-		/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
-		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
+		/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
 				offsetof(struct rte_mbuf, buf_addr) + 8);
 		vaddr0 = _mm_loadu_si128((__m128i *)&(mb0->buf_addr));
 		vaddr1 = _mm_loadu_si128((__m128i *)&(mb1->buf_addr));
@@ -667,7 +667,7 @@ vtx1(volatile union ixgbe_adv_tx_desc *txdp,
 {
 	__m128i descriptor = _mm_set_epi64x((uint64_t)pkt->pkt_len << 46 |
 			flags | pkt->data_len,
-			pkt->buf_physaddr + pkt->data_off);
+			pkt->buf_iova + pkt->data_off);
 	_mm_store_si128((__m128i *)&txdp->read, descriptor);
 }

@@ -106,7 +106,7 @@ static uint16_t nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 #define NFP_QCP_MAX_ADD 0x7f

 #define RTE_MBUF_DMA_ADDR_DEFAULT(mb) \
-	(uint64_t)((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
+	(uint64_t)((mb)->buf_iova + RTE_PKTMBUF_HEADROOM)

 /* nfp_qcp_ptr - Read or Write Pointer of a queue */
 enum nfp_qcp_ptr {
@@ -493,7 +493,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
 	 * VIRTIO_MBUF_DATA_DMA_ADDR in virtqueue.h for more information.
 	 */
 	if (!hw->virtio_user_dev)
-		vq->offset = offsetof(struct rte_mbuf, buf_physaddr);
+		vq->offset = offsetof(struct rte_mbuf, buf_iova);
 	else {
 		vq->vq_ring_mem = (uintptr_t)mz->addr;
 		vq->offset = offsetof(struct rte_mbuf, buf_addr);
@@ -79,7 +79,7 @@ struct rte_mbuf;
 #define VIRTIO_MBUF_ADDR(mb, vq) \
 	((uint64_t)(*(uintptr_t *)((uintptr_t)(mb) + (vq)->offset)))
 #else
-#define VIRTIO_MBUF_ADDR(mb, vq) ((mb)->buf_physaddr)
+#define VIRTIO_MBUF_ADDR(mb, vq) ((mb)->buf_iova)
 #endif

 /**
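Together, the two virtio hunks let one macro serve both transports: the
queue stores a byte offset into the mbuf, and VIRTIO_MBUF_ADDR reads a
pointer-sized value at that offset. A small sketch of the idea with
illustrative stand-in types, assuming a 64-bit build:

#include <stddef.h>
#include <stdint.h>

struct mini_mbuf { void *buf_addr; uint64_t buf_iova; };
struct mini_vq   { size_t offset; };

/* Same trick as VIRTIO_MBUF_ADDR: read a pointer-sized value at a
 * per-queue byte offset into the mbuf, so one code path can hand the
 * device either the IOVA or, for virtio-user, the virtual address. */
static inline uint64_t mini_mbuf_addr(const struct mini_mbuf *mb,
                                      const struct mini_vq *vq)
{
        return (uint64_t)(*(const uintptr_t *)((uintptr_t)mb + vq->offset));
}

/* vq->offset = offsetof(struct mini_mbuf, buf_iova) -> device gets IOVA;
 * vq->offset = offsetof(struct mini_mbuf, buf_addr) -> device gets VA. */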
@@ -456,7 +456,7 @@ va2pa(struct rte_mbuf *m)
 {
 	return (void *)((unsigned long)m -
 			((unsigned long)m->buf_addr -
-			 (unsigned long)m->buf_physaddr));
+			 (unsigned long)m->buf_iova));
 }

 static void
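va2pa above recovers the IOVA of the mbuf header itself from the buffer's
two addresses. A sketch of the arithmetic, assuming (as the driver does)
that the header and its buffer live in one physically contiguous mempool
object; the mini type is illustrative:

#include <stdint.h>

struct mini_mbuf { void *buf_addr; uint64_t buf_iova; };

/* buf_addr and buf_iova name the same byte in two address spaces, so
 * their difference is the VA->IOVA delta for the whole object; applying
 * it to the header pointer yields the header's IOVA. */
static inline uint64_t mini_va2pa(const struct mini_mbuf *m)
{
        uint64_t delta = (uint64_t)(uintptr_t)m->buf_addr - m->buf_iova;

        return (uint64_t)(uintptr_t)m - delta;
}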
@@ -134,7 +134,7 @@ rte_pktmbuf_init(struct rte_mempool *mp,
 	/* start of buffer is after mbuf structure and priv data */
 	m->priv_size = priv_size;
 	m->buf_addr = (char *)m + mbuf_size;
-	m->buf_physaddr = rte_mempool_virt2iova(m) + mbuf_size;
+	m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
 	m->buf_len = (uint16_t)buf_len;

 	/* keep some headroom between start of buffer and data */
@@ -211,8 +211,8 @@ rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header)
 	/* generic checks */
 	if (m->pool == NULL)
 		rte_panic("bad mbuf pool\n");
-	if (m->buf_physaddr == 0)
-		rte_panic("bad phys addr\n");
+	if (m->buf_iova == 0)
+		rte_panic("bad IO addr\n");
 	if (m->buf_addr == NULL)
 		rte_panic("bad virt addr\n");

@@ -243,8 +243,8 @@ rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len)

 	__rte_mbuf_sanity_check(m, 1);

-	fprintf(f, "dump mbuf at %p, phys=%"PRIx64", buf_len=%u\n",
-		m, (uint64_t)m->buf_physaddr, (unsigned)m->buf_len);
+	fprintf(f, "dump mbuf at %p, iova=%"PRIx64", buf_len=%u\n",
+		m, (uint64_t)m->buf_iova, (unsigned)m->buf_len);
 	fprintf(f, " pkt_len=%"PRIu32", ol_flags=%"PRIx64", nb_segs=%u, "
 		"in_port=%u\n", m->pkt_len, m->ol_flags,
 		(unsigned)m->nb_segs, (unsigned)m->port);
@@ -432,7 +432,11 @@ struct rte_mbuf {
 	 * same mbuf cacheline0 layout for 32-bit and 64-bit. This makes
 	 * working on vector drivers easier.
 	 */
-	phys_addr_t buf_physaddr __rte_aligned(sizeof(phys_addr_t));
+	RTE_STD_C11
+	union {
+		rte_iova_t buf_iova;
+		rte_iova_t buf_physaddr; /**< deprecated */
+	} __rte_aligned(sizeof(rte_iova_t));

 	/* next 8 bytes are initialised on RX descriptor rearm */
 	MARKER64 rearm_data;
@@ -631,7 +635,7 @@ static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
 static inline phys_addr_t
 rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
 {
-	return mb->buf_physaddr + mb->data_off;
+	return mb->buf_iova + mb->data_off;
 }

 /**
@@ -649,7 +653,7 @@ rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
 static inline phys_addr_t
 rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
 {
-	return mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+	return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
 }

 /**
@@ -840,7 +844,7 @@ rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);
  * For standard needs, prefer rte_pktmbuf_alloc().
  *
  * The caller can expect that the following fields of the mbuf structure
- * are initialized: buf_addr, buf_physaddr, buf_len, refcnt=1, nb_segs=1,
+ * are initialized: buf_addr, buf_iova, buf_len, refcnt=1, nb_segs=1,
  * next=NULL, pool, priv_size. The other fields must be initialized
  * by the caller.
  *
@@ -1250,7 +1254,7 @@ static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)

 	rte_mbuf_refcnt_update(md, 1);
 	mi->priv_size = m->priv_size;
-	mi->buf_physaddr = m->buf_physaddr;
+	mi->buf_iova = m->buf_iova;
 	mi->buf_addr = m->buf_addr;
 	mi->buf_len = m->buf_len;

@@ -1298,7 +1302,7 @@ static inline void rte_pktmbuf_detach(struct rte_mbuf *m)

 	m->priv_size = priv_size;
 	m->buf_addr = (char *)m + mbuf_size;
-	m->buf_physaddr = rte_mempool_virt2iova(m) + mbuf_size;
+	m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
 	m->buf_len = (uint16_t)buf_len;
 	rte_pktmbuf_reset_headroom(m);
 	m->data_len = 0;
@@ -1569,7 +1573,7 @@ static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
  *	The offset into the data to calculate address from.
  */
 #define rte_pktmbuf_mtophys_offset(m, o) \
-	(phys_addr_t)((m)->buf_physaddr + (m)->data_off + (o))
+	(rte_iova_t)((m)->buf_iova + (m)->data_off + (o))

 /**
  * A macro that returns the physical address that points to the start of the
@@ -978,7 +978,7 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			cur->data_len = cpy_len;
 			cur->data_off = 0;
 			cur->buf_addr = (void *)(uintptr_t)desc_addr;
-			cur->buf_physaddr = hpa;
+			cur->buf_iova = hpa;

 			/*
 			 * In zero copy mode, one mbuf can only reference data
@@ -906,7 +906,7 @@ test_failing_mbuf_sanity_check(struct rte_mempool *pktmbuf_pool)
 	}

 	badbuf = *buf;
-	badbuf.buf_physaddr = 0;
+	badbuf.buf_iova = 0;
 	if (verify_mbuf_check_panics(&badbuf)) {
 		printf("Error with bad-physaddr mbuf test\n");
 		return -1;