net/thunderx: cleanup

Refactored features:
 - enable nicvf_qset_rbdr_precharge to handle secondary queue sets
 - rte_free already handles NULL pointer
 - check mempool flags to verify that the memory is contiguous
 - prohibit using a mempool with multiple memory chunks
 - simplify local construct of accessing nb_rx_queues
 - enable NICVF_CAP_CQE_RX2 on CN88XX PASS2.0 hardware
 - remove redundant check for RSS size in nicvf_eth_dev_init

Signed-off-by: Maciej Czekaj <maciej.czekaj@caviumnetworks.com>
Signed-off-by: Kamil Rytarowski <kamil.rytarowski@caviumnetworks.com>
Signed-off-by: Zyta Szpak <zyta.szpak@semihalf.com>
Signed-off-by: Slawomir Rosek <slawomir.rosek@semihalf.com>
Signed-off-by: Radoslaw Biernacki <rad@semihalf.com>
Signed-off-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
This commit is contained in:
Kamil Rytarowski 2016-09-30 14:05:40 +02:00 committed by Bruce Richardson
parent 9966a00a06
commit 394014bca3
3 changed files with 26 additions and 27 deletions

View File

@ -141,7 +141,7 @@ nicvf_base_init(struct nicvf *nic)
return NICVF_ERR_BASE_INIT;
if (nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF)
nic->hwcap |= NICVF_CAP_TUNNEL_PARSING;
nic->hwcap |= NICVF_CAP_TUNNEL_PARSING | NICVF_CAP_CQE_RX2;
if (nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN81XX_NICVF)
nic->hwcap |= NICVF_CAP_TUNNEL_PARSING | NICVF_CAP_CQE_RX2;
@ -497,9 +497,9 @@ nicvf_qsize_rbdr_roundup(uint32_t val)
}
int
nicvf_qset_rbdr_precharge(struct nicvf *nic, uint16_t ridx,
rbdr_pool_get_handler handler,
void *opaque, uint32_t max_buffs)
nicvf_qset_rbdr_precharge(void *dev, struct nicvf *nic,
uint16_t ridx, rbdr_pool_get_handler handler,
uint32_t max_buffs)
{
struct rbdr_entry_t *desc, *desc0;
struct nicvf_rbdr *rbdr = nic->rbdr;
@ -514,7 +514,7 @@ nicvf_qset_rbdr_precharge(struct nicvf *nic, uint16_t ridx,
if (count >= max_buffs)
break;
desc0 = desc + count;
phy = handler(opaque);
phy = handler(dev, nic);
if (phy) {
desc0->full_addr = phy;
count++;

View File

@ -85,7 +85,7 @@ enum nicvf_err_e {
NICVF_ERR_RSS_GET_SZ, /* -8171 */
};
typedef nicvf_phys_addr_t (*rbdr_pool_get_handler)(void *opaque);
typedef nicvf_phys_addr_t (*rbdr_pool_get_handler)(void *dev, void *opaque);
struct nicvf_hw_rx_qstats {
uint64_t q_rx_bytes;
@ -194,8 +194,8 @@ int nicvf_qset_reclaim(struct nicvf *nic);
int nicvf_qset_rbdr_config(struct nicvf *nic, uint16_t qidx);
int nicvf_qset_rbdr_reclaim(struct nicvf *nic, uint16_t qidx);
int nicvf_qset_rbdr_precharge(struct nicvf *nic, uint16_t ridx,
rbdr_pool_get_handler handler, void *opaque,
int nicvf_qset_rbdr_precharge(void *dev, struct nicvf *nic,
uint16_t ridx, rbdr_pool_get_handler handler,
uint32_t max_buffs);
int nicvf_qset_rbdr_active(struct nicvf *nic, uint16_t qidx);

View File

@ -691,7 +691,7 @@ nicvf_configure_cpi(struct rte_eth_dev *dev)
int ret;
/* Count started rx queues */
for (qidx = qcnt = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++)
for (qidx = qcnt = 0; qidx < dev->data->nb_rx_queues; qidx++)
if (dev->data->rx_queue_state[qidx] ==
RTE_ETH_QUEUE_STATE_STARTED)
qcnt++;
@ -1023,12 +1023,9 @@ nicvf_stop_rx_queue(struct rte_eth_dev *dev, uint16_t qidx)
static void
nicvf_dev_rx_queue_release(void *rx_queue)
{
struct nicvf_rxq *rxq = rx_queue;
PMD_INIT_FUNC_TRACE();
if (rxq)
rte_free(rxq);
rte_free(rx_queue);
}
static int
@ -1070,6 +1067,7 @@ nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
return nicvf_stop_tx_queue(dev, qidx);
}
static int
nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
uint16_t nb_desc, unsigned int socket_id,
@ -1087,9 +1085,15 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
socket_id, nic->node);
/* Mempool memory should be contiguous */
/* Mempool memory must be contiguous, so must be one memory segment*/
if (mp->nb_mem_chunks != 1) {
PMD_INIT_LOG(ERR, "Non contiguous mempool, check huge page sz");
PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages");
return -EINVAL;
}
/* Mempool memory must be physically contiguous */
if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG) {
PMD_INIT_LOG(ERR, "Mempool memory must be physically contiguous");
return -EINVAL;
}
@ -1212,15 +1216,16 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
}
static nicvf_phys_addr_t
rbdr_rte_mempool_get(void *opaque)
rbdr_rte_mempool_get(void *dev, void *opaque)
{
uint16_t qidx;
uintptr_t mbuf;
struct nicvf_rxq *rxq;
struct nicvf *nic = nicvf_pmd_priv((struct rte_eth_dev *)opaque);
struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)dev;
struct nicvf *nic __rte_unused = (struct nicvf *)opaque;
for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) {
rxq = nic->eth_dev->data->rx_queues[qidx];
for (qidx = 0; qidx < eth_dev->data->nb_rx_queues; qidx++) {
rxq = eth_dev->data->rx_queues[qidx];
/* Maintain equal buffer count across all pools */
if (rxq->precharge_cnt >= rxq->qlen_mask)
continue;
@ -1354,8 +1359,8 @@ nicvf_dev_start(struct rte_eth_dev *dev)
}
/* Fill rte_mempool buffers in RBDR pool and precharge it */
ret = nicvf_qset_rbdr_precharge(nic, 0, rbdr_rte_mempool_get,
dev, total_rxq_desc);
ret = nicvf_qset_rbdr_precharge(dev, nic, 0, rbdr_rte_mempool_get,
total_rxq_desc);
if (ret) {
PMD_INIT_LOG(ERR, "Failed to fill rbdr %d", ret);
goto qset_rbdr_reclaim;
@ -1721,12 +1726,6 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
goto malloc_fail;
}
ret = nicvf_mbox_get_rss_size(nic);
if (ret) {
PMD_INIT_LOG(ERR, "Failed to get rss table size");
goto malloc_fail;
}
PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x",
eth_dev->data->port_id, nic->vendor_id, nic->device_id,
nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2],