net/bnxt: enable RSS for thor-based controllers

Make the changes needed to support RSS for Thor-based controllers:
size the RSS redirection table per chip, allocate one RSS context per
64 table entries on Thor, and populate the table with Rx/completion
ring ID pairs instead of ring group IDs.

Signed-off-by: Lance Richardson <lance.richardson@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Author: Lance Richardson <lance.richardson@broadcom.com>
Date: 2019-06-02 13:42:47 -04:00
Committer: Ferruh Yigit
Parent: e2a8184f48
Commit: 38412304b5
5 changed files with 236 additions and 47 deletions

drivers/net/bnxt/bnxt_ethdev.c

@@ -171,6 +171,24 @@ static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
 /*
  * High level utility functions
  */
+
+static uint16_t bnxt_rss_ctxts(const struct bnxt *bp)
+{
+	if (!BNXT_CHIP_THOR(bp))
+		return 1;
+
+	return RTE_ALIGN_MUL_CEIL(bp->rx_nr_rings,
+				  BNXT_RSS_ENTRIES_PER_CTX_THOR) /
+	       BNXT_RSS_ENTRIES_PER_CTX_THOR;
+}
+
+static uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp)
+{
+	if (!BNXT_CHIP_THOR(bp))
+		return HW_HASH_INDEX_SIZE;
+
+	return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_THOR;
+}
+
 static void bnxt_free_mem(struct bnxt *bp)
 {
 	bnxt_free_filter_mem(bp);
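
Note: bnxt_rss_ctxts() is a ceiling division; Thor spreads the RSS
redirection table across several firmware contexts, so any remainder of
rings still costs a full context. A standalone sketch of the
arithmetic, assuming BNXT_RSS_ENTRIES_PER_CTX_THOR is 64 (consistent
with the 64-entry inner loop in bnxt_vnic_rss_configure_thor later in
this patch):

    #include <stdint.h>
    #include <stdio.h>

    #define ENTRIES_PER_CTX 64  /* assumed BNXT_RSS_ENTRIES_PER_CTX_THOR */

    /* Same result as RTE_ALIGN_MUL_CEIL(n, m) / m in the patch. */
    static uint16_t rss_ctxts(uint16_t rx_nr_rings)
    {
        return (rx_nr_rings + ENTRIES_PER_CTX - 1) / ENTRIES_PER_CTX;
    }

    int main(void)
    {
        /* 1..64 rings need 1 context, 65..128 need 2, and so on. */
        printf("%u %u %u\n", rss_ctxts(8), rss_ctxts(64), rss_ctxts(65));
        return 0;
    }

bnxt_rss_hash_tbl_size() then rounds the redirection table up to a
whole multiple of the per-context size, which is what gets advertised
as reta_size below.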
@@ -290,13 +308,21 @@ static int bnxt_init_chip(struct bnxt *bp)
 	/* Alloc RSS context only if RSS mode is enabled */
 	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
-		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
+		int j, nr_ctxs = bnxt_rss_ctxts(bp);
+
+		rc = 0;
+		for (j = 0; j < nr_ctxs; j++) {
+			rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, j);
+			if (rc)
+				break;
+		}
+
 		if (rc) {
 			PMD_DRV_LOG(ERR,
-				"HWRM vnic %d ctx alloc failure rc: %x\n",
-				i, rc);
+				    "HWRM vnic %d ctx %d alloc failure rc: %x\n",
+				    i, j, rc);
 			goto err_out;
 		}
+		vnic->num_lb_ctxts = nr_ctxs;
 	}
 
 	/*
@@ -470,7 +496,7 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 	/* For the sake of symmetry, max_rx_queues = max_tx_queues */
 	dev_info->max_rx_queues = max_rx_rings;
 	dev_info->max_tx_queues = max_rx_rings;
-	dev_info->reta_size = HW_HASH_INDEX_SIZE;
+	dev_info->reta_size = bnxt_rss_hash_tbl_size(bp);
 	dev_info->hash_key_size = 40;
 	max_vnics = bp->max_vnics;
@@ -1004,12 +1030,21 @@ static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid)
 /* Return rxq corresponding to a given rss table ring/group ID. */
 static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr)
 {
+	struct bnxt_rx_queue *rxq;
 	unsigned int i;
 
-	for (i = 0; i < bp->rx_nr_rings; i++) {
-		if (bp->grp_info[i].fw_grp_id == fwr)
-			return i;
+	if (!BNXT_HAS_RING_GRPS(bp)) {
+		for (i = 0; i < bp->rx_nr_rings; i++) {
+			rxq = bp->eth_dev->data->rx_queues[i];
+			if (rxq->rx_ring->rx_ring_struct->fw_ring_id == fwr)
+				return rxq->index;
+		}
+	} else {
+		for (i = 0; i < bp->rx_nr_rings; i++) {
+			if (bp->grp_info[i].fw_grp_id == fwr)
+				return i;
+		}
 	}
 
 	return INVALID_HW_RING_ID;
 }
@@ -1021,7 +1056,7 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
 	struct bnxt *bp = eth_dev->data->dev_private;
 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
-	uint16_t tbl_size = HW_HASH_INDEX_SIZE;
+	uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
 	uint16_t idx, sft;
 	int i;
@@ -1053,6 +1088,16 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
 			return -EINVAL;
 		}
 
+		if (BNXT_CHIP_THOR(bp)) {
+			vnic->rss_table[i * 2] =
+				rxq->rx_ring->rx_ring_struct->fw_ring_id;
+			vnic->rss_table[i * 2 + 1] =
+				rxq->cp_ring->cp_ring_struct->fw_ring_id;
+		} else {
+			vnic->rss_table[i] =
+				vnic->fw_grp_ids[reta_conf[idx].reta[sft]];
+		}
+
 		vnic->rss_table[i] =
 			vnic->fw_grp_ids[reta_conf[idx].reta[sft]];
 	}
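
Note: on Thor the redirection table holds (Rx ring ID, completion ring
ID) pairs instead of ring group IDs, so logical RETA entry i occupies
rss_table[i * 2] and rss_table[i * 2 + 1]. A minimal sketch of that
layout (helper names are hypothetical, not part of the patch):

    #include <stdint.h>

    /* Hypothetical accessors for the paired-entry Thor RETA layout. */
    static inline void thor_reta_set(uint16_t *rss_table, unsigned int idx,
                                     uint16_t rx_ring_id, uint16_t cp_ring_id)
    {
        rss_table[idx * 2] = rx_ring_id;      /* Rx ring FW ID */
        rss_table[idx * 2 + 1] = cp_ring_id;  /* completion ring FW ID */
    }

    static inline uint16_t thor_reta_get_rx(const uint16_t *rss_table,
                                            unsigned int idx)
    {
        return rss_table[idx * 2]; /* queries only need the Rx ring ID */
    }

The query path reads only the even slot, which is why bnxt_reta_query_op
below passes vnic->rss_table[i * 2] to bnxt_rss_to_qid() on Thor.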
@@ -1067,7 +1112,7 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
 {
 	struct bnxt *bp = eth_dev->data->dev_private;
 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
-	uint16_t tbl_size = HW_HASH_INDEX_SIZE;
+	uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
 	uint16_t idx, sft, i;
 
 	/* Retrieve from the default VNIC */
@@ -1090,6 +1135,10 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
 		if (reta_conf[idx].mask & (1ULL << sft)) {
 			uint16_t qid;
 
-			qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]);
+			if (BNXT_CHIP_THOR(bp))
+				qid = bnxt_rss_to_qid(bp,
+						      vnic->rss_table[i * 2]);
+			else
+				qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]);
 			if (qid == INVALID_HW_RING_ID) {

drivers/net/bnxt/bnxt_hwrm.c

@@ -1638,9 +1638,11 @@ int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
 	return rc;
 }
 
-int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
+			     struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
 {
 	int rc = 0;
+	uint16_t ctx_id;
 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
 						bp->hwrm_cmd_resp_addr;
@@ -1648,38 +1650,40 @@ int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
 	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 
-	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
+	ctx_id = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
+	if (!BNXT_HAS_RING_GRPS(bp))
+		vnic->fw_grp_ids[ctx_idx] = ctx_id;
+	else if (ctx_idx == 0)
+		vnic->rss_rule = ctx_id;
 
 	HWRM_UNLOCK();
-	PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
 
 	return rc;
 }
 
-int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
+			    struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
 {
 	int rc = 0;
 	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
 	struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
 						bp->hwrm_cmd_resp_addr;
 
-	if (vnic->rss_rule == (uint16_t)HWRM_NA_SIGNATURE) {
+	if (ctx_idx == (uint16_t)HWRM_NA_SIGNATURE) {
 		PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
 		return rc;
 	}
 
 	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
 
-	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
+	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(ctx_idx);
 
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
 	HWRM_UNLOCK();
-	vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
 
 	return rc;
 }
@@ -1711,6 +1715,47 @@ int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	return rc;
 }
 
+static int
+bnxt_hwrm_vnic_rss_cfg_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+	int i;
+	int rc = 0;
+	int nr_ctxs = bp->max_ring_grps;
+	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
+	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+	if (!(vnic->rss_table && vnic->hash_type))
+		return 0;
+
+	HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
+
+	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
+	req.hash_mode_flags = vnic->hash_mode;
+
+	req.hash_key_tbl_addr =
+		rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
+
+	for (i = 0; i < nr_ctxs; i++) {
+		req.ring_grp_tbl_addr =
+			rte_cpu_to_le_64(vnic->rss_table_dma_addr +
+					 i * HW_HASH_INDEX_SIZE);
+		req.ring_table_pair_index = i;
+		req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
+
+		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
+					    BNXT_USE_CHIMP_MB);
+
+		HWRM_CHECK_RESULT();
+		if (rc)
+			break;
+	}
+
+	HWRM_UNLOCK();
+
+	return rc;
+}
+
 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
 			   struct bnxt_vnic_info *vnic)
 {
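
Note: each loop iteration above programs one ring-table-pair index:
the request points the firmware at that context's slice of the shared
DMA table and names the context via rss_ctx_idx (on Thor the context
IDs were stashed in vnic->fw_grp_ids by bnxt_hwrm_vnic_ctx_alloc). A
schematic of the per-context slice addressing, using an illustrative
stand-in struct rather than the real HWRM request layout:

    #include <stdint.h>

    struct rss_cfg_msg { /* illustrative stand-in, not the HWRM struct */
        uint64_t ring_grp_tbl_addr;
        uint8_t  ring_table_pair_index;
        uint16_t rss_ctx_idx;
    };

    static void fill_ctx_msg(struct rss_cfg_msg *msg, uint64_t tbl_dma,
                             const uint16_t *ctx_ids, int i,
                             uint32_t slice_bytes)
    {
        msg->ring_grp_tbl_addr = tbl_dma + (uint64_t)i * slice_bytes;
        msg->ring_table_pair_index = i; /* which slice of the table */
        msg->rss_ctx_idx = ctx_ids[i];  /* FW context backing this slice */
    }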
@@ -1718,6 +1763,9 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
 	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
 	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 
+	if (BNXT_CHIP_THOR(bp))
+		return bnxt_hwrm_vnic_rss_cfg_thor(bp, vnic);
+
 	HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
 
 	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
@@ -2247,7 +2295,7 @@ void bnxt_free_tunnel_ports(struct bnxt *bp)
 
 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
 {
-	int i;
+	int i, j;
 
 	if (bp->vnic_info == NULL)
 		return;
@@ -2263,7 +2311,16 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp)
 
 		bnxt_clear_hwrm_vnic_filters(bp, vnic);
 
-		bnxt_hwrm_vnic_ctx_free(bp, vnic);
+		if (!BNXT_CHIP_THOR(bp)) {
+			for (j = 0; j < vnic->num_lb_ctxts; j++) {
+				bnxt_hwrm_vnic_ctx_free(bp, vnic,
+							vnic->fw_grp_ids[j]);
+				vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
+			}
+		} else {
+			bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule);
+			vnic->rss_rule = INVALID_HW_RING_ID;
+		}
 
 		bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
@@ -4037,11 +4094,88 @@ int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
 	return 0;
 }
 
+static int
+bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+	uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
+	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
+	int nr_ctxs = bp->max_ring_grps;
+	struct bnxt_rx_queue **rxqs = bp->rx_queues;
+	uint16_t *ring_tbl = vnic->rss_table;
+	int max_rings = bp->rx_nr_rings;
+	int i, j, k, cnt;
+	int rc = 0;
+
+	HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
+
+	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
+	req.hash_mode_flags = vnic->hash_mode;
+
+	req.ring_grp_tbl_addr =
+		rte_cpu_to_le_64(vnic->rss_table_dma_addr);
+	req.hash_key_tbl_addr =
+		rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
+
+	for (i = 0, k = 0; i < nr_ctxs; i++) {
+		struct bnxt_rx_ring_info *rxr;
+		struct bnxt_cp_ring_info *cpr;
+
+		req.ring_table_pair_index = i;
+		req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
+
+		for (j = 0; j < 64; j++) {
+			uint16_t ring_id;
+
+			/* Find next active ring. */
+			for (cnt = 0; cnt < max_rings; cnt++) {
+				if (rx_queue_state[k] !=
+				    RTE_ETH_QUEUE_STATE_STOPPED)
+					break;
+				if (++k == max_rings)
+					k = 0;
+			}
+
+			/* Return if no rings are active. */
+			if (cnt == max_rings)
+				return 0;
+
+			/* Add rx/cp ring pair to RSS table. */
+			rxr = rxqs[k]->rx_ring;
+			cpr = rxqs[k]->cp_ring;
+
+			ring_id = rxr->rx_ring_struct->fw_ring_id;
+			*ring_tbl++ = rte_cpu_to_le_16(ring_id);
+			ring_id = cpr->cp_ring_struct->fw_ring_id;
+			*ring_tbl++ = rte_cpu_to_le_16(ring_id);
+
+			if (++k == max_rings)
+				k = 0;
+		}
+		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
+					    BNXT_USE_CHIMP_MB);
+
+		HWRM_CHECK_RESULT();
+		if (rc)
+			break;
+	}
+
+	HWRM_UNLOCK();
+
+	return rc;
+}
+
 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 {
 	unsigned int rss_idx, fw_idx, i;
 
-	if (vnic->rss_table && vnic->hash_type) {
+	if (!(vnic->rss_table && vnic->hash_type))
+		return 0;
+
+	if (BNXT_CHIP_THOR(bp))
+		return bnxt_vnic_rss_configure_thor(bp, vnic);
+
 	/*
 	 * Fill the RSS hash & redirection table with
 	 * ring group ids for all VNICs
 	 */
@@ -4050,19 +4184,15 @@ int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
 	     rss_idx++, fw_idx++) {
 		for (i = 0; i < bp->rx_cp_nr_rings; i++) {
 			fw_idx %= bp->rx_cp_nr_rings;
-			if (vnic->fw_grp_ids[fw_idx] !=
-					INVALID_HW_RING_ID)
+			if (vnic->fw_grp_ids[fw_idx] != INVALID_HW_RING_ID)
 				break;
 			fw_idx++;
 		}
 		if (i == bp->rx_cp_nr_rings)
 			return 0;
-		vnic->rss_table[rss_idx] =
-			vnic->fw_grp_ids[fw_idx];
+		vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
 	}
 	return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
-	}
-	return 0;
 }
 
 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
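
Note: the table-fill loop in bnxt_vnic_rss_configure_thor() walks the
Rx queues round robin and skips stopped queues, so the RETA only ever
references active rings. A condensed, standalone sketch of just that
walk (types and the STOPPED constant are assumptions):

    #include <stdint.h>

    #define QUEUE_STOPPED 0 /* stand-in for RTE_ETH_QUEUE_STATE_STOPPED */

    /*
     * Advance *pos to the next non-stopped queue, wrapping around.
     * Returns the queue index, or -1 if every queue is stopped.
     */
    static int next_active_queue(const uint8_t *queue_state, int nb_queues,
                                 int *pos)
    {
        int cnt, k = *pos;

        for (cnt = 0; cnt < nb_queues; cnt++) {
            if (queue_state[k] != QUEUE_STOPPED)
                break;
            if (++k == nb_queues)
                k = 0;
        }
        if (cnt == nb_queues)
            return -1; /* no active queues */
        *pos = k;
        return k;
    }

Each of the 64 entries per context then stores the rx/cp ring pair of
the queue found by the walk, matching the layout written by
bnxt_reta_update_op above.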

drivers/net/bnxt/bnxt_hwrm.h

@@ -101,8 +101,10 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic);
 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic);
 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
 			int16_t fw_vf_id);
-int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic);
-int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+			     uint16_t ctx_idx);
+int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+			    uint16_t ctx_idx);
 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic);
 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
 			   struct bnxt_vnic_info *vnic);

drivers/net/bnxt/bnxt_vnic.c

@@ -113,14 +113,21 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
 	struct rte_pci_device *pdev = bp->pdev;
 	const struct rte_memzone *mz;
 	char mz_name[RTE_MEMZONE_NAMESIZE];
-	uint32_t entry_length = RTE_CACHE_LINE_ROUNDUP(
-				HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table) +
-				HW_HASH_KEY_SIZE +
-				BNXT_MAX_MC_ADDRS * RTE_ETHER_ADDR_LEN);
+	uint32_t entry_length;
 	uint16_t max_vnics;
 	int i;
 	rte_iova_t mz_phys_addr;
 
+	entry_length = HW_HASH_KEY_SIZE +
+		       BNXT_MAX_MC_ADDRS * RTE_ETHER_ADDR_LEN;
+
+	if (BNXT_CHIP_THOR(bp))
+		entry_length += BNXT_RSS_TBL_SIZE_THOR *
+				2 * sizeof(*vnic->rss_table);
+	else
+		entry_length += HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table);
+	entry_length = RTE_CACHE_LINE_ROUNDUP(entry_length);
+
 	max_vnics = bp->max_vnics;
 	snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
 		 "bnxt_%04x:%02x:%02x:%02x_vnicattr", pdev->addr.domain,

drivers/net/bnxt/bnxt_vnic.h

@@ -18,6 +18,7 @@ struct bnxt_vnic_info {
 	uint16_t	start_grp_id;
 	uint16_t	end_grp_id;
 	uint16_t	*fw_grp_ids;
+	uint16_t	num_lb_ctxts;
 	uint16_t	dflt_ring_grp;
 	uint16_t	mru;
 	uint16_t	hash_type;