mem: remove physical address aliases

Remove the deprecated unioned field phys_addr
from the structures rte_memseg and rte_memzone.
The field iova remains in its place, at the same offset in both structures.

Signed-off-by: Thomas Monjalon <thomas@monjalon.net>
Acked-by: Andrew Rybchenko <arybchenko@solarflare.com>
Acked-by: Anatoly Burakov <anatoly.burakov@intel.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Acked-by: Ray Kinsella <mdr@ashroe.eu>
Thomas Monjalon 2020-09-14 16:36:31 +02:00
parent c23432a9c2
commit 72f82c4324
13 changed files with 24 additions and 28 deletions
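
For out-of-tree code that still reads the removed alias, the migration is a
mechanical rename: iova keeps the offset the old union had, so only the field
name changes. A minimal sketch of the new access pattern, using a hypothetical
helper that is not part of this patch (the same rename applies to users of
struct rte_memseg):

    #include <rte_memzone.h>

    /* Hypothetical out-of-tree helper: return the zone's IO address.
     * Before this patch the same value was also reachable through the
     * deprecated phys_addr alias; now only iova exists.
     */
    static rte_iova_t
    example_zone_io_addr(const struct rte_memzone *mz)
    {
            return mz->iova; /* was: mz->phys_addr */
    }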

@@ -87,6 +87,10 @@ API Changes
 * eal: The ``rte_logs`` struct and global symbol was made private
   and is no longer part of the API.
+* mem: Removed the unioned field ``phys_addr`` from
+  the structures ``rte_memseg`` and ``rte_memzone``.
+  The field ``iova`` is remaining from the old unions.
 * mbuf: Removed the unioned field ``refcnt_atomic`` from
   the structures ``rte_mbuf`` and ``rte_mbuf_ext_shared_info``.
   The field ``refcnt`` is remaining from the old unions.

@@ -368,7 +368,7 @@ static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr)
 	memseg = rte_mem_virt2memseg((void *)(uintptr_t)vaddr, NULL);
 	if (memseg)
-		return memseg->phys_addr + RTE_PTR_DIFF(vaddr, memseg->addr);
+		return memseg->iova + RTE_PTR_DIFF(vaddr, memseg->addr);
 	return (size_t)NULL;
 }

@@ -440,7 +440,7 @@ dpaax_memevent_walk_memsegs(const struct rte_memseg_list *msl __rte_unused,
 		void *arg __rte_unused)
 {
 	DPAAX_DEBUG("Walking for %p (pa=%"PRIu64") and len %zu",
-		    ms->addr, ms->phys_addr, len);
+		    ms->addr, ms->iova, len);
 	dpaax_iova_table_update(rte_mem_virt2phy(ms->addr), ms->addr, len);
 	return 0;
 }

@@ -957,7 +957,7 @@ static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
 					ICP_QAT_FW_SLICE_XLAT);
 		comp_req->u1.xlt_pars.inter_buff_ptr =
-				interm_buff_mz->phys_addr;
+				interm_buff_mz->iova;
 	}
 #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG

@@ -242,7 +242,7 @@ qat_comp_setup_inter_buffers(struct qat_comp_dev_private *comp_dev,
 	}
 	mz_start = (uint8_t *)memzone->addr;
-	mz_start_phys = memzone->phys_addr;
+	mz_start_phys = memzone->iova;
 	QAT_LOG(DEBUG, "Memzone %s: addr = %p, phys = 0x%"PRIx64
 			", size required %d, size created %zu",
 			inter_buff_mz_name, mz_start, mz_start_phys,

@@ -546,7 +546,7 @@ ccp_add_device(struct ccp_device *dev, int type)
 						  cmd_q->qsize, SOCKET_ID_ANY);
 		cmd_q->qbase_addr = (void *)q_mz->addr;
 		cmd_q->qbase_desc = (void *)q_mz->addr;
-		cmd_q->qbase_phys_addr = q_mz->phys_addr;
+		cmd_q->qbase_phys_addr = q_mz->iova;
 		cmd_q->qcontrol = 0;
 		/* init control reg to zero */

@@ -556,7 +556,7 @@ otx_cpt_get_resource(const struct rte_cryptodev *dev, uint8_t group,
 	}
 	mem = rz->addr;
-	dma_addr = rz->phys_addr;
+	dma_addr = rz->iova;
 	alloc_len = len;
 	memset(mem, 0, len);

@@ -411,7 +411,7 @@ virtio_crypto_queue_setup(struct rte_cryptodev *dev,
 	 * and only accepts 32 bit page frame number.
 	 * Check if the allocated physical memory exceeds 16TB.
 	 */
-	if ((mz->phys_addr + vq->vq_ring_size - 1)
+	if ((mz->iova + vq->vq_ring_size - 1)
 				>> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
 		VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be "
 					"above 16TB!");
@@ -420,10 +420,10 @@ virtio_crypto_queue_setup(struct rte_cryptodev *dev,
 	memset(mz->addr, 0, sizeof(mz->len));
 	vq->mz = mz;
-	vq->vq_ring_mem = mz->phys_addr;
+	vq->vq_ring_mem = mz->iova;
 	vq->vq_ring_virt_mem = mz->addr;
 	VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_mem(physical): 0x%"PRIx64,
 			(uint64_t)mz->phys_addr);
+			(uint64_t)mz->iova);
 	VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_virt_mem: 0x%"PRIx64,
 			(uint64_t)(uintptr_t)mz->addr);

@@ -258,7 +258,7 @@ ice_alloc_dma_mem(__rte_unused struct ice_hw *hw,
 	mem->size = size;
 	mem->va = mz->addr;
-	mem->pa = mz->phys_addr;
+	mem->pa = mz->iova;
 	mem->zone = (const void *)mz;
 	PMD_DRV_LOG(DEBUG, "memzone %s allocated with physical address: "
 		    "%"PRIu64, mz->name, mem->pa);

@@ -414,7 +414,7 @@ static inline phys_addr_t pfe_mem_vtop(uint64_t vaddr)
 	memseg = rte_mem_virt2memseg((void *)(uintptr_t)vaddr, NULL);
 	if (memseg)
-		return memseg->phys_addr + RTE_PTR_DIFF(vaddr, memseg->addr);
+		return memseg->iova + RTE_PTR_DIFF(vaddr, memseg->addr);
 	return (size_t)NULL;
 }

@@ -43,11 +43,7 @@ extern "C" {
 #define RTE_MEMSEG_FLAG_DO_NOT_FREE (1 << 0)
 /**< Prevent this segment from being freed back to the OS. */
 struct rte_memseg {
-	RTE_STD_C11
-	union {
-		phys_addr_t phys_addr; /**< deprecated - Start physical address. */
-		rte_iova_t iova; /**< Start IO address. */
-	};
+	rte_iova_t iova; /**< Start IO address. */
 	RTE_STD_C11
 	union {
 		void *addr; /**< Start virtual address. */

@@ -51,11 +51,7 @@ struct rte_memzone {
 #define RTE_MEMZONE_NAMESIZE 32 /**< Maximum length of memory zone name.*/
 	char name[RTE_MEMZONE_NAMESIZE]; /**< Name of the memory zone. */
-	RTE_STD_C11
-	union {
-		phys_addr_t phys_addr; /**< deprecated - Start physical address. */
-		rte_iova_t iova; /**< Start IO address. */
-	};
+	rte_iova_t iova; /**< Start IO address. */
 	RTE_STD_C11
 	union {
 		void *addr; /**< Start virtual address. */

@@ -276,37 +276,37 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
 	/* TX RING */
 	kni->tx_q = kni->m_tx_q->addr;
 	kni_fifo_init(kni->tx_q, KNI_FIFO_COUNT_MAX);
-	dev_info.tx_phys = kni->m_tx_q->phys_addr;
+	dev_info.tx_phys = kni->m_tx_q->iova;
 	/* RX RING */
 	kni->rx_q = kni->m_rx_q->addr;
 	kni_fifo_init(kni->rx_q, KNI_FIFO_COUNT_MAX);
-	dev_info.rx_phys = kni->m_rx_q->phys_addr;
+	dev_info.rx_phys = kni->m_rx_q->iova;
 	/* ALLOC RING */
 	kni->alloc_q = kni->m_alloc_q->addr;
 	kni_fifo_init(kni->alloc_q, KNI_FIFO_COUNT_MAX);
-	dev_info.alloc_phys = kni->m_alloc_q->phys_addr;
+	dev_info.alloc_phys = kni->m_alloc_q->iova;
 	/* FREE RING */
 	kni->free_q = kni->m_free_q->addr;
 	kni_fifo_init(kni->free_q, KNI_FIFO_COUNT_MAX);
-	dev_info.free_phys = kni->m_free_q->phys_addr;
+	dev_info.free_phys = kni->m_free_q->iova;
 	/* Request RING */
 	kni->req_q = kni->m_req_q->addr;
 	kni_fifo_init(kni->req_q, KNI_FIFO_COUNT_MAX);
-	dev_info.req_phys = kni->m_req_q->phys_addr;
+	dev_info.req_phys = kni->m_req_q->iova;
 	/* Response RING */
 	kni->resp_q = kni->m_resp_q->addr;
 	kni_fifo_init(kni->resp_q, KNI_FIFO_COUNT_MAX);
-	dev_info.resp_phys = kni->m_resp_q->phys_addr;
+	dev_info.resp_phys = kni->m_resp_q->iova;
 	/* Req/Resp sync mem area */
 	kni->sync_addr = kni->m_sync_addr->addr;
 	dev_info.sync_va = kni->m_sync_addr->addr;
-	dev_info.sync_phys = kni->m_sync_addr->phys_addr;
+	dev_info.sync_phys = kni->m_sync_addr->iova;
 	kni->pktmbuf_pool = pktmbuf_pool;
 	kni->group_id = conf->group_id;