mem: rename address mapping function to IOVA

The function rte_mem_virt2phy() is kept and used in functions which
work only with physical addresses.
For all other calls, this function is replaced by rte_mem_virt2iova(),
which does a direct mapping (no conversion) in the VA case.

Note: the new function rte_mem_virt2iova() matches the behaviour
implemented in rte_mem_virt2phy() by commit
680f6c1260 ("mem: honor IOVA mode in virt2phy")

Signed-off-by: Thomas Monjalon <thomas@monjalon.net>
Acked-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
Author: Thomas Monjalon <thomas@monjalon.net>
Date:   2017-11-04 17:15:04 +01:00
Commit: 62196f4e09 (parent: 7ba49d39f1)

15 changed files with 52 additions and 31 deletions
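
The practical effect for drivers doing DMA: the address programmed into
hardware must be the IOVA, which equals the virtual address when the EAL
runs in IOVA-VA mode and the physical address otherwise. A minimal usage
sketch follows; the fill_desc_iova() helper and its descriptor argument
are illustrative only, not part of this patch:

    #include <rte_memory.h>

    /* Illustrative helper (not in this patch): store the bus address of a
     * DMA buffer into a device descriptor field. rte_mem_virt2iova()
     * returns the virtual address itself in VA mode and otherwise falls
     * back to the physical address, as rte_mem_virt2phy() does.
     */
    static int
    fill_desc_iova(const void *buf, rte_iova_t *desc_addr)
    {
        rte_iova_t iova = rte_mem_virt2iova(buf);

        if (iova == RTE_BAD_IOVA)
            return -1; /* address could not be resolved */
        *desc_addr = iova;
        return 0;
    }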


@@ -1351,7 +1351,7 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
             memset(&mcc->initfq.fqd.context_a, 0,
                    sizeof(mcc->initfq.fqd.context_a));
         } else {
-            phys_fq = rte_mem_virt2phy(fq);
+            phys_fq = rte_mem_virt2iova(fq);
             qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
         }
     }


@@ -240,7 +240,7 @@ struct qm_portal {
 #define EQCR_CARRYCLEAR(p) \
     (void *)((unsigned long)(p) & (~(unsigned long)(QM_EQCR_SIZE << 6)))
 
-extern dma_addr_t rte_mem_virt2phy(const void *addr);
+extern dma_addr_t rte_mem_virt2iova(const void *addr);
 
 /* Bit-wise logic to convert a ring pointer to a ring index */
 static inline u8 EQCR_PTR2IDX(struct qm_eqcr_entry *e)


@@ -2852,8 +2852,8 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
         RTE_LOG(WARNING, PMD,
             "Memzone physical address same as virtual.\n");
         RTE_LOG(WARNING, PMD,
-            "Using rte_mem_virt2phy()\n");
-        mz_phys_addr = rte_mem_virt2phy(mz->addr);
+            "Using rte_mem_virt2iova()\n");
+        mz_phys_addr = rte_mem_virt2iova(mz->addr);
         if (mz_phys_addr == 0) {
             RTE_LOG(ERR, PMD,
                 "unable to map address to physical memory\n");
@@ -2887,8 +2887,8 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
         RTE_LOG(WARNING, PMD,
             "Memzone physical address same as virtual.\n");
         RTE_LOG(WARNING, PMD,
-            "Using rte_mem_virt2phy()\n");
-        mz_phys_addr = rte_mem_virt2phy(mz->addr);
+            "Using rte_mem_virt2iova()\n");
+        mz_phys_addr = rte_mem_virt2iova(mz->addr);
         if (mz_phys_addr == 0) {
             RTE_LOG(ERR, PMD,
                 "unable to map address to physical memory\n");


@@ -277,7 +277,7 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
         if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
             mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
         req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
-            rte_mem_virt2phy(vlan_table));
+            rte_mem_virt2iova(vlan_table));
         req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
     }
     req.mask = rte_cpu_to_le_32(mask);
@@ -318,7 +318,7 @@ int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
     req.fid = rte_cpu_to_le_16(fid);
     req.vlan_tag_mask_tbl_addr =
-        rte_cpu_to_le_64(rte_mem_virt2phy(vlan_table));
+        rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
     req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
 
     rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -644,7 +644,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
         }
         rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
         bp->hwrm_cmd_resp_dma_addr =
-            rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
+            rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
         if (bp->hwrm_cmd_resp_dma_addr == 0) {
             RTE_LOG(ERR, PMD,
                 "Unable to map response buffer to physical memory.\n");
@@ -670,7 +670,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
         }
         rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
         bp->hwrm_short_cmd_req_dma_addr =
-            rte_mem_virt2phy(bp->hwrm_short_cmd_req_addr);
+            rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
         if (bp->hwrm_short_cmd_req_dma_addr == 0) {
             rte_free(bp->hwrm_short_cmd_req_addr);
             RTE_LOG(ERR, PMD,
@@ -1753,7 +1753,7 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
     if (bp->hwrm_cmd_resp_addr == NULL)
         return -ENOMEM;
     bp->hwrm_cmd_resp_dma_addr =
-        rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
+        rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
     if (bp->hwrm_cmd_resp_dma_addr == 0) {
         RTE_LOG(ERR, PMD,
             "unable to map response address to physical memory\n");
@@ -2622,7 +2622,7 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
                 page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
     req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
     req.req_buf_page_addr[0] =
-        rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
+        rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
     if (req.req_buf_page_addr[0] == 0) {
         RTE_LOG(ERR, PMD,
             "unable to map buffer address to physical memory\n");
@@ -3044,7 +3044,7 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
     rte_mem_lock_page(buf);
     if (buf == NULL)
         return -ENOMEM;
-    dma_handle = rte_mem_virt2phy(buf);
+    dma_handle = rte_mem_virt2iova(buf);
     if (dma_handle == 0) {
         RTE_LOG(ERR, PMD,
             "unable to map response address to physical memory\n");
@@ -3080,7 +3080,7 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
     if (!buf)
         return -ENOMEM;
 
-    dma_handle = rte_mem_virt2phy(buf);
+    dma_handle = rte_mem_virt2iova(buf);
     if (dma_handle == 0) {
         RTE_LOG(ERR, PMD,
             "unable to map response address to physical memory\n");
@@ -3141,7 +3141,7 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
     if (!buf)
         return -ENOMEM;
 
-    dma_handle = rte_mem_virt2phy(buf);
+    dma_handle = rte_mem_virt2iova(buf);
     if (dma_handle == 0) {
         RTE_LOG(ERR, PMD,
             "unable to map response address to physical memory\n");
@@ -3196,7 +3196,7 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
     req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
     req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
-    req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2phy(vnic_ids));
+    req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
 
     if (req.vnic_id_tbl_addr == 0) {
         HWRM_UNLOCK();


@@ -177,10 +177,10 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
         RTE_LOG(WARNING, PMD,
             "Memzone physical address same as virtual.\n");
         RTE_LOG(WARNING, PMD,
-            "Using rte_mem_virt2phy()\n");
+            "Using rte_mem_virt2iova()\n");
         for (sz = 0; sz < total_alloc_len; sz += getpagesize())
             rte_mem_lock_page(((char *)mz->addr) + sz);
-        mz_phys_addr = rte_mem_virt2phy(mz->addr);
+        mz_phys_addr = rte_mem_virt2iova(mz->addr);
         if (mz_phys_addr == 0) {
             RTE_LOG(ERR, PMD,
                 "unable to map ring address to physical memory\n");


@@ -197,8 +197,8 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
         RTE_LOG(WARNING, PMD,
             "Memzone physical address same as virtual.\n");
         RTE_LOG(WARNING, PMD,
-            "Using rte_mem_virt2phy()\n");
-        mz_phys_addr = rte_mem_virt2phy(mz->addr);
+            "Using rte_mem_virt2iova()\n");
+        mz_phys_addr = rte_mem_virt2iova(mz->addr);
         if (mz_phys_addr == 0) {
             RTE_LOG(ERR, PMD,
                 "unable to map vnic address to physical memory\n");


@@ -1790,7 +1790,7 @@ lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
             m = m->next;
         }
 
-        phyaddr = rte_mem_virt2phy(g->sg);
+        phyaddr = rte_mem_virt2iova(g->sg);
         if (phyaddr == RTE_BAD_PHYS_ADDR) {
             PMD_TX_LOG(lio_dev, ERR, "bad phys addr\n");
             goto xmit_failed;


@@ -1284,7 +1284,7 @@ rte_crypto_op_init(struct rte_mempool *mempool,
     __rte_crypto_op_reset(op, type);
 
-    op->phys_addr = rte_mem_virt2phy(_op_data);
+    op->phys_addr = rte_mem_virt2iova(_op_data);
     op->mempool = mempool;
 }


@@ -58,6 +58,11 @@ rte_mem_virt2phy(const void *virtaddr)
     (void)virtaddr;
     return RTE_BAD_IOVA;
 }
+
+rte_iova_t
+rte_mem_virt2iova(const void *virtaddr)
+{
+    return rte_mem_virt2phy(virtaddr);
+}
 
 int
 rte_eal_hugepage_init(void)


@@ -147,6 +147,16 @@ int rte_mem_lock_page(const void *virt);
  */
 phys_addr_t rte_mem_virt2phy(const void *virt);
 
+/**
+ * Get IO virtual address of any mapped virtual address in the current process.
+ *
+ * @param virt
+ *   The virtual address.
+ * @return
+ *   The IO address or RTE_BAD_IOVA on error.
+ */
+rte_iova_t rte_mem_virt2iova(const void *virt);
+
 /**
  * Get the layout of the available physical memory.
  *


@@ -128,9 +128,6 @@ rte_mem_virt2phy(const void *virtaddr)
     int page_size;
     off_t offset;
 
-    if (rte_eal_iova_mode() == RTE_IOVA_VA)
-        return (uintptr_t)virtaddr;
-
     /* Cannot parse /proc/self/pagemap, no need to log errors everywhere */
     if (!phys_addrs_available)
         return RTE_BAD_IOVA;
@@ -180,6 +177,14 @@ rte_mem_virt2phy(const void *virtaddr)
     return physaddr;
 }
 
+rte_iova_t
+rte_mem_virt2iova(const void *virtaddr)
+{
+    if (rte_eal_iova_mode() == RTE_IOVA_VA)
+        return (uintptr_t)virtaddr;
+    return rte_mem_virt2phy(virtaddr);
+}
+
 /*
  * For each hugepage in hugepg_tbl, fill the physaddr value. We find
  * it by browsing the /proc/self/pagemap special file.


@@ -233,6 +233,7 @@ DPDK_17.11 {
     rte_eal_using_phys_addrs;
     rte_eal_vfio_intr_mode;
     rte_lcore_has_role;
+    rte_mem_virt2iova;
     rte_memcpy_ptr;
     rte_vfio_enable;
     rte_vfio_is_enabled;


@@ -503,7 +503,7 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
     for (off = 0; off + pg_sz <= len &&
              mp->populated_size < mp->size; off += phys_len) {
 
-        paddr = rte_mem_virt2phy(addr + off);
+        paddr = rte_mem_virt2iova(addr + off);
 
         if (paddr == RTE_BAD_PHYS_ADDR && rte_eal_has_hugepages()) {
             ret = -EINVAL;
@@ -514,7 +514,7 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
         for (phys_len = pg_sz; off + phys_len < len; phys_len += pg_sz) {
             phys_addr_t paddr_tmp;
 
-            paddr_tmp = rte_mem_virt2phy(addr + off + phys_len);
+            paddr_tmp = rte_mem_virt2iova(addr + off + phys_len);
 
             if (paddr_tmp != paddr + phys_len)
                 break;


@@ -526,7 +526,7 @@ add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
     uint64_t host_phys_addr;
     uint64_t size;
 
-    host_phys_addr = rte_mem_virt2phy((void *)(uintptr_t)host_user_addr);
+    host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)host_user_addr);
     size = page_size - (guest_phys_addr & (page_size - 1));
     size = RTE_MIN(size, reg_size);
 
@@ -537,7 +537,7 @@ add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
     while (reg_size > 0) {
         size = RTE_MIN(reg_size, page_size);
-        host_phys_addr = rte_mem_virt2phy((void *)(uintptr_t)
+        host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)
                           host_user_addr);
         add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size);
 


@@ -144,9 +144,9 @@ test_mempool_basic(struct rte_mempool *mp, int use_external_cache)
             MEMPOOL_HEADER_SIZE(mp, mp->cache_size))
         GOTO_ERR(ret, out);
 
-#ifndef RTE_EXEC_ENV_BSDAPP /* rte_mem_virt2phy() not supported on bsd */
+#ifndef RTE_EXEC_ENV_BSDAPP /* rte_mem_virt2iova() not supported on bsd */
     printf("get physical address of an object\n");
-    if (rte_mempool_virt2phy(mp, obj) != rte_mem_virt2phy(obj))
+    if (rte_mempool_virt2phy(mp, obj) != rte_mem_virt2iova(obj))
         GOTO_ERR(ret, out);
 #endif