net/octeon_ep: support CN10K SoC

This patch adds the required functionality in the Octeon endpoint
driver to support the CN10K endpoint device. It adds the CN10K SoC
specific routines to configure, enable, and disable input and output
queues to establish basic data transfers.

Signed-off-by: Sathesh Edara <sedara@marvell.com>
This commit is contained in:
Sathesh Edara 2022-08-29 08:45:22 -07:00 committed by Jerin Jacob
parent 59d0913dae
commit d826133ae8
6 changed files with 622 additions and 57 deletions

View File

@ -0,0 +1,375 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#include <rte_common.h>
#include <rte_cycles.h>
#include "cnxk_ep_vf.h"
static void
cnxk_ep_vf_setup_global_iq_reg(struct otx_ep_device *otx_ep, int q_no)
{
	volatile uint64_t ctl;

	/* Select ES, RO, NS, RDSIZE, DPTR Format#0 for IQs.
	 * IS_64B is by default enabled.
	 */
	ctl = oct_ep_read64(otx_ep->hw_addr + CNXK_EP_R_IN_CONTROL(q_no));
	ctl |= CNXK_EP_R_IN_CTL_RDSIZE |
	       CNXK_EP_R_IN_CTL_IS_64B |
	       CNXK_EP_R_IN_CTL_ESR;
	oct_ep_write64(ctl, otx_ep->hw_addr + CNXK_EP_R_IN_CONTROL(q_no));
}
static void
cnxk_ep_vf_setup_global_oq_reg(struct otx_ep_device *otx_ep, int q_no)
{
	volatile uint64_t ctl;

	ctl = oct_ep_read64(otx_ep->hw_addr + CNXK_EP_R_OUT_CONTROL(q_no));

	/* Clear interrupt mode and all relaxed-ordering / no-snoop /
	 * endian-swap bits for pointer (P), info (I) and data (D) accesses.
	 */
	ctl &= ~(CNXK_EP_R_OUT_CTL_IMODE |
		 CNXK_EP_R_OUT_CTL_ROR_P | CNXK_EP_R_OUT_CTL_NSR_P |
		 CNXK_EP_R_OUT_CTL_ROR_I | CNXK_EP_R_OUT_CTL_NSR_I |
		 CNXK_EP_R_OUT_CTL_ROR_D | CNXK_EP_R_OUT_CTL_NSR_D |
		 CNXK_EP_R_OUT_CTL_ES_I | CNXK_EP_R_OUT_CTL_ES_D);

	/* INFO/DATA ptr swap is required */
	ctl |= CNXK_EP_R_OUT_CTL_ES_P;

	oct_ep_write64(ctl, otx_ep->hw_addr + CNXK_EP_R_OUT_CONTROL(q_no));
}
static void
cnxk_ep_vf_setup_global_input_regs(struct otx_ep_device *otx_ep)
{
uint64_t q_no = 0ull;
for (q_no = 0; q_no < (otx_ep->sriov_info.rings_per_vf); q_no++)
cnxk_ep_vf_setup_global_iq_reg(otx_ep, q_no);
}
/* Program the global OUT_CONTROL register of every output ring owned by
 * this VF.
 */
static void
cnxk_ep_vf_setup_global_output_regs(struct otx_ep_device *otx_ep)
{
	uint32_t ring;

	for (ring = 0; ring < otx_ep->sriov_info.rings_per_vf; ring++)
		cnxk_ep_vf_setup_global_oq_reg(otx_ep, ring);
}
/* One-time global programming of all input and output ring control
 * registers for this VF.
 */
static void
cnxk_ep_vf_setup_device_regs(struct otx_ep_device *otx_ep)
{
	cnxk_ep_vf_setup_global_input_regs(otx_ep);
	cnxk_ep_vf_setup_global_output_regs(otx_ep);
}
/* Configure the hardware registers of input queue iq_no: wait for the ring
 * to go IDLE, program the descriptor ring base/size, cache the doorbell and
 * instruction-count register addresses, drain any stale instruction counts,
 * and mask the input interrupt.
 */
static void
cnxk_ep_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
{
	struct otx_ep_instr_queue *iq = otx_ep->instr_queue[iq_no];
	/* Signed: after a timeout 'loop--' leaves -1, which the checks below
	 * rely on.  With the original unsigned type the counter wrapped to
	 * UINT64_MAX on timeout and '!loop' could never fire, making the
	 * error paths dead code.
	 */
	int64_t loop = OTX_EP_BUSY_LOOP_COUNT;
	volatile uint64_t reg_val = 0ull;

	reg_val = oct_ep_read64(otx_ep->hw_addr + CNXK_EP_R_IN_CONTROL(iq_no));

	/* Wait till IDLE to set to 1, not supposed to configure BADDR
	 * as long as IDLE is 0
	 */
	if (!(reg_val & CNXK_EP_R_IN_CTL_IDLE)) {
		do {
			reg_val = oct_ep_read64(otx_ep->hw_addr + CNXK_EP_R_IN_CONTROL(iq_no));
			rte_delay_ms(1);
		} while ((!(reg_val & CNXK_EP_R_IN_CTL_IDLE)) && loop--);
	}

	if (loop < 0) {
		otx_ep_err("IDLE bit is not set\n");
		return;
	}

	/* Write the start of the input queue's ring and its size */
	oct_ep_write64(iq->base_addr_dma, otx_ep->hw_addr + CNXK_EP_R_IN_INSTR_BADDR(iq_no));
	oct_ep_write64(iq->nb_desc, otx_ep->hw_addr + CNXK_EP_R_IN_INSTR_RSIZE(iq_no));

	/* Remember the doorbell & instruction count register addr
	 * for this queue
	 */
	iq->doorbell_reg = (uint8_t *)otx_ep->hw_addr + CNXK_EP_R_IN_INSTR_DBELL(iq_no);
	iq->inst_cnt_reg = (uint8_t *)otx_ep->hw_addr + CNXK_EP_R_IN_CNTS(iq_no);
	otx_ep_dbg("InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p",
		   iq_no, iq->doorbell_reg, iq->inst_cnt_reg);

	/* Acknowledge stale instruction counts by writing the value back
	 * until the register reads zero.
	 */
	loop = OTX_EP_BUSY_LOOP_COUNT;
	do {
		reg_val = rte_read32(iq->inst_cnt_reg);
		rte_write32(reg_val, iq->inst_cnt_reg);
		rte_delay_ms(1);
	} while (reg_val != 0 && loop--);

	if (loop < 0) {
		otx_ep_err("INST CNT REGISTER is not zero\n");
		return;
	}

	/* IN INTR_THRESHOLD is set to max(FFFFFFFF) which disable the IN INTR
	 * to raise
	 */
	oct_ep_write64(OTX_EP_CLEAR_SDP_IN_INT_LVLS,
		       otx_ep->hw_addr + CNXK_EP_R_IN_INT_LEVELS(iq_no));
}
/* Configure the hardware registers of output queue oq_no: wait for IDLE,
 * program the descriptor ring base/size and buffer size, cache the
 * pkts_sent/pkts_credit register addresses, and clear stale doorbell and
 * counter state.
 */
static void
cnxk_ep_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
{
	volatile uint64_t reg_val = 0ull;
	uint64_t oq_ctl = 0ull;
	/* Signed so 'loop < 0' reliably detects timeout; the original
	 * unsigned counter wrapped and the '!loop' checks never fired.
	 */
	int64_t loop = OTX_EP_BUSY_LOOP_COUNT;
	struct otx_ep_droq *droq = otx_ep->droq[oq_no];

	/* Wait on IDLE to set to 1, supposed to configure BADDR
	 * as long as IDLE is 0
	 */
	reg_val = oct_ep_read64(otx_ep->hw_addr + CNXK_EP_R_OUT_CONTROL(oq_no));
	while ((!(reg_val & CNXK_EP_R_OUT_CTL_IDLE)) && loop--) {
		reg_val = oct_ep_read64(otx_ep->hw_addr + CNXK_EP_R_OUT_CONTROL(oq_no));
		rte_delay_ms(1);
	}

	if (loop < 0) {
		/* The old message ("OUT CNT REGISTER value is zero") described
		 * a different condition; the actual failure is IDLE never set.
		 */
		otx_ep_err("OUT IDLE bit is not set\n");
		return;
	}

	oct_ep_write64(droq->desc_ring_dma, otx_ep->hw_addr + CNXK_EP_R_OUT_SLIST_BADDR(oq_no));
	oct_ep_write64(droq->nb_desc, otx_ep->hw_addr + CNXK_EP_R_OUT_SLIST_RSIZE(oq_no));

	oq_ctl = oct_ep_read64(otx_ep->hw_addr + CNXK_EP_R_OUT_CONTROL(oq_no));

	/* Clear the ISIZE and BSIZE (22-0) */
	oq_ctl &= ~(OTX_EP_CLEAR_ISIZE_BSIZE);

	/* Populate the BSIZE (15-0) */
	oq_ctl |= (droq->buffer_size & OTX_EP_DROQ_BUFSZ_MASK);

	oct_ep_write64(oq_ctl, otx_ep->hw_addr + CNXK_EP_R_OUT_CONTROL(oq_no));

	/* Mapped address of the pkt_sent and pkts_credit regs */
	droq->pkts_sent_reg = (uint8_t *)otx_ep->hw_addr + CNXK_EP_R_OUT_CNTS(oq_no);
	droq->pkts_credit_reg = (uint8_t *)otx_ep->hw_addr + CNXK_EP_R_OUT_SLIST_DBELL(oq_no);

	rte_write64(OTX_EP_CLEAR_OUT_INT_LVLS, otx_ep->hw_addr + CNXK_EP_R_OUT_INT_LEVELS(oq_no));

	/* Clear PKT_CNT register */
	rte_write64(OTX_EP_CLEAR_SDP_OUT_PKT_CNT, (uint8_t *)otx_ep->hw_addr +
		    CNXK_EP_R_OUT_PKT_CNT(oq_no));

	/* Clear the OQ doorbell */
	rte_write32(OTX_EP_CLEAR_SLIST_DBELL, droq->pkts_credit_reg);
	loop = OTX_EP_BUSY_LOOP_COUNT;
	while ((rte_read32(droq->pkts_credit_reg) != 0ull) && loop--) {
		rte_write32(OTX_EP_CLEAR_SLIST_DBELL, droq->pkts_credit_reg);
		rte_delay_ms(1);
	}

	if (loop < 0) {
		otx_ep_err("Packets credit register value is not cleared\n");
		return;
	}
	otx_ep_dbg("SDP_R[%d]_credit:%x", oq_no, rte_read32(droq->pkts_credit_reg));

	/* Clear the OQ_OUT_CNTS doorbell */
	reg_val = rte_read32(droq->pkts_sent_reg);
	rte_write32((uint32_t)reg_val, droq->pkts_sent_reg);

	otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no, rte_read32(droq->pkts_sent_reg));

	loop = OTX_EP_BUSY_LOOP_COUNT;
	/* Bound this wait as well: the original loop never decremented 'loop'
	 * and could spin forever if the counter never cleared.
	 */
	while ((rte_read32(droq->pkts_sent_reg) != 0ull) && loop--) {
		reg_val = rte_read32(droq->pkts_sent_reg);
		rte_write32((uint32_t)reg_val, droq->pkts_sent_reg);
		rte_delay_ms(1);
	}

	if (loop < 0) {
		otx_ep_err("Packets sent register value is not cleared\n");
		return;
	}
	otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no, rte_read32(droq->pkts_sent_reg));
}
/* Enable input queue q_no: reset its doorbell, wait for the hardware to
 * acknowledge the reset, then set the ring enable bit.
 *
 * Returns 0 on success, -EIO if the doorbell never clears.
 */
static int
cnxk_ep_vf_enable_iq(struct otx_ep_device *otx_ep, uint32_t q_no)
{
	/* Signed so 'loop < 0' detects timeout; with the original unsigned
	 * counter the '!loop' check could never fire after a wrap.
	 */
	int64_t loop = OTX_EP_BUSY_LOOP_COUNT;
	uint64_t reg_val = 0ull;

	/* Resetting doorbells during IQ enabling also to handle abrupt
	 * guest reboot. IQ reset does not clear the doorbells.
	 */
	oct_ep_write64(0xFFFFFFFF, otx_ep->hw_addr + CNXK_EP_R_IN_INSTR_DBELL(q_no));

	while (((oct_ep_read64(otx_ep->hw_addr +
		 CNXK_EP_R_IN_INSTR_DBELL(q_no))) != 0ull) && loop--) {
		rte_delay_ms(1);
	}

	if (loop < 0) {
		otx_ep_err("INSTR DBELL not coming back to 0\n");
		return -EIO;
	}

	reg_val = oct_ep_read64(otx_ep->hw_addr + CNXK_EP_R_IN_ENABLE(q_no));
	reg_val |= 0x1ull;
	oct_ep_write64(reg_val, otx_ep->hw_addr + CNXK_EP_R_IN_ENABLE(q_no));

	otx_ep_info("IQ[%d] enable done", q_no);

	return 0;
}
/* Enable output queue q_no by setting bit 0 of its OUT_ENABLE register.
 * Always returns 0.
 */
static int
cnxk_ep_vf_enable_oq(struct otx_ep_device *otx_ep, uint32_t q_no)
{
	uint64_t enable;

	enable = oct_ep_read64(otx_ep->hw_addr + CNXK_EP_R_OUT_ENABLE(q_no)) | 0x1ull;
	oct_ep_write64(enable, otx_ep->hw_addr + CNXK_EP_R_OUT_ENABLE(q_no));

	otx_ep_info("OQ[%d] enable done", q_no);

	return 0;
}
/* Enable every configured input and output queue of this device.
 * Returns 0 on success or the first non-zero error from an enable routine.
 * The original silently discarded cnxk_ep_vf_enable_oq()'s return value;
 * check it for consistency with the IQ loop.
 */
static int
cnxk_ep_vf_enable_io_queues(struct otx_ep_device *otx_ep)
{
	uint32_t q_no = 0;
	int ret;

	for (q_no = 0; q_no < otx_ep->nb_tx_queues; q_no++) {
		ret = cnxk_ep_vf_enable_iq(otx_ep, q_no);
		if (ret)
			return ret;
	}

	for (q_no = 0; q_no < otx_ep->nb_rx_queues; q_no++) {
		ret = cnxk_ep_vf_enable_oq(otx_ep, q_no);
		if (ret)
			return ret;
	}

	return 0;
}
/* Disable input queue q_no by clearing bit 0 of its IN_ENABLE register.
 * NOTE(review): the code clears the enable bit, not the doorbell; the
 * original comment ("Reset the doorbell register") was misleading.
 */
static void
cnxk_ep_vf_disable_iq(struct otx_ep_device *otx_ep, uint32_t q_no)
{
	uint64_t reg_val = 0ull;

	/* Clear the ring enable bit for this Input Queue. */
	reg_val = oct_ep_read64(otx_ep->hw_addr + CNXK_EP_R_IN_ENABLE(q_no));
	reg_val &= ~0x1ull;
	oct_ep_write64(reg_val, otx_ep->hw_addr + CNXK_EP_R_IN_ENABLE(q_no));
}
/* Disable output queue q_no by clearing bit 0 of its OUT_ENABLE register. */
static void
cnxk_ep_vf_disable_oq(struct otx_ep_device *otx_ep, uint32_t q_no)
{
	volatile uint64_t enable;

	enable = oct_ep_read64(otx_ep->hw_addr + CNXK_EP_R_OUT_ENABLE(q_no));
	enable &= ~0x1ull;
	oct_ep_write64(enable, otx_ep->hw_addr + CNXK_EP_R_OUT_ENABLE(q_no));
}
/* Disable the input/output queue pair of every ring owned by this VF. */
static void
cnxk_ep_vf_disable_io_queues(struct otx_ep_device *otx_ep)
{
	uint32_t ring;

	for (ring = 0; ring < otx_ep->sriov_info.rings_per_vf; ring++) {
		cnxk_ep_vf_disable_iq(otx_ep, ring);
		cnxk_ep_vf_disable_oq(otx_ep, ring);
	}
}
/* Driver-default device configuration, used when the application does not
 * supply one (see cnxk_ep_vf_setup_device()).
 */
static const struct otx_ep_config default_cnxk_ep_conf = {
	/* IQ attributes */
	.iq = {
		.max_iqs = OTX_EP_CFG_IO_QUEUES,
		.instr_type = OTX_EP_64BYTE_INSTR,
		/* One pending-list slot per descriptor across all IQs */
		.pending_list_size = (OTX_EP_MAX_IQ_DESCRIPTORS *
				      OTX_EP_CFG_IO_QUEUES),
	},

	/* OQ attributes */
	.oq = {
		.max_oqs = OTX_EP_CFG_IO_QUEUES,
		.info_ptr = OTX_EP_OQ_INFOPTR_MODE,
		.refill_threshold = OTX_EP_OQ_REFIL_THRESHOLD,
	},

	/* Default descriptor-ring depths and OQ buffer size */
	.num_iqdef_descs = OTX_EP_MAX_IQ_DESCRIPTORS,
	.num_oqdef_descs = OTX_EP_MAX_OQ_DESCRIPTORS,
	.oqdef_buf_size = OTX_EP_OQ_BUF_SIZE,
};
/* Return the driver-default configuration; the device argument is unused
 * since all CNXK EP VFs share the same defaults.
 */
static const struct otx_ep_config*
cnxk_ep_get_defconf(struct otx_ep_device *otx_ep_dev __rte_unused)
{
	return &default_cnxk_ep_conf;
}
/* CN10K-specific device setup entry point: pick a configuration, read the
 * rings-per-VF count from hardware, and populate the function dispatch
 * table with the CNXK queue routines.
 *
 * Returns 0 on success, -ENOENT if no configuration is available.
 */
int
cnxk_ep_vf_setup_device(struct otx_ep_device *otx_ep)
{
	uint64_t reg_val = 0ull;

	/* If application does not provide its conf, use driver default conf */
	if (otx_ep->conf == NULL) {
		otx_ep->conf = cnxk_ep_get_defconf(otx_ep);
		if (otx_ep->conf == NULL) {
			otx_ep_err("SDP VF default config not found");
			return -ENOENT;
		}
		otx_ep_info("Default config is used");
	}

	/* Get IOQs (RPVF) count from the RPVF field of ring-0 IN_CONTROL */
	reg_val = oct_ep_read64(otx_ep->hw_addr + CNXK_EP_R_IN_CONTROL(0));

	otx_ep->sriov_info.rings_per_vf =
		((reg_val >> CNXK_EP_R_IN_CTL_RPVF_POS) & CNXK_EP_R_IN_CTL_RPVF_MASK);

	otx_ep_info("SDP RPVF: %d", otx_ep->sriov_info.rings_per_vf);

	/* Hook up the CNXK-specific queue setup/enable/disable routines */
	otx_ep->fn_list.setup_iq_regs = cnxk_ep_vf_setup_iq_regs;
	otx_ep->fn_list.setup_oq_regs = cnxk_ep_vf_setup_oq_regs;

	otx_ep->fn_list.setup_device_regs = cnxk_ep_vf_setup_device_regs;

	otx_ep->fn_list.enable_io_queues = cnxk_ep_vf_enable_io_queues;
	otx_ep->fn_list.disable_io_queues = cnxk_ep_vf_disable_io_queues;

	otx_ep->fn_list.enable_iq = cnxk_ep_vf_enable_iq;
	otx_ep->fn_list.disable_iq = cnxk_ep_vf_disable_iq;

	otx_ep->fn_list.enable_oq = cnxk_ep_vf_enable_oq;
	otx_ep->fn_list.disable_oq = cnxk_ep_vf_disable_oq;

	return 0;
}

View File

@ -0,0 +1,161 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2022 Marvell.
*/
#ifndef _CNXK_EP_VF_H_
#define _CNXK_EP_VF_H_
#include <rte_io.h>
#include "otx_ep_common.h"
/* PCI config-space register offsets */
#define CNXK_CONFIG_XPANSION_BAR 0x38
#define CNXK_CONFIG_PCIE_CAP 0x70
#define CNXK_CONFIG_PCIE_DEVCAP 0x74
#define CNXK_CONFIG_PCIE_DEVCTL 0x78
#define CNXK_CONFIG_PCIE_LINKCAP 0x7C
#define CNXK_CONFIG_PCIE_LINKCTL 0x80
#define CNXK_CONFIG_PCIE_SLOTCAP 0x84
#define CNXK_CONFIG_PCIE_SLOTCTL 0x88
#define CNXK_CONFIG_PCIE_FLTMSK 0x720

/* Byte stride between consecutive SDP ring register sets */
#define CNXK_EP_RING_OFFSET (0x1ULL << 17)

/* Input (host-to-device) ring register offsets for ring 0 */
#define CNXK_EP_R_IN_CONTROL_START 0x10000
#define CNXK_EP_R_IN_ENABLE_START 0x10010
#define CNXK_EP_R_IN_INSTR_BADDR_START 0x10020
#define CNXK_EP_R_IN_INSTR_RSIZE_START 0x10030
#define CNXK_EP_R_IN_INSTR_DBELL_START 0x10040
#define CNXK_EP_R_IN_CNTS_START 0x10050
#define CNXK_EP_R_IN_INT_LEVELS_START 0x10060
#define CNXK_EP_R_IN_PKT_CNT_START 0x10080
#define CNXK_EP_R_IN_BYTE_CNT_START 0x10090

/* Per-ring input register addresses (base + ring * stride) */
#define CNXK_EP_R_IN_CONTROL(ring) \
(CNXK_EP_R_IN_CONTROL_START + ((ring) * CNXK_EP_RING_OFFSET))

#define CNXK_EP_R_IN_ENABLE(ring) \
(CNXK_EP_R_IN_ENABLE_START + ((ring) * CNXK_EP_RING_OFFSET))

#define CNXK_EP_R_IN_INSTR_BADDR(ring) \
(CNXK_EP_R_IN_INSTR_BADDR_START + ((ring) * CNXK_EP_RING_OFFSET))

#define CNXK_EP_R_IN_INSTR_RSIZE(ring) \
(CNXK_EP_R_IN_INSTR_RSIZE_START + ((ring) * CNXK_EP_RING_OFFSET))

#define CNXK_EP_R_IN_INSTR_DBELL(ring) \
(CNXK_EP_R_IN_INSTR_DBELL_START + ((ring) * CNXK_EP_RING_OFFSET))

#define CNXK_EP_R_IN_CNTS(ring) \
(CNXK_EP_R_IN_CNTS_START + ((ring) * CNXK_EP_RING_OFFSET))

#define CNXK_EP_R_IN_INT_LEVELS(ring) \
(CNXK_EP_R_IN_INT_LEVELS_START + ((ring) * CNXK_EP_RING_OFFSET))

#define CNXK_EP_R_IN_PKT_CNT(ring) \
(CNXK_EP_R_IN_PKT_CNT_START + ((ring) * CNXK_EP_RING_OFFSET))

#define CNXK_EP_R_IN_BYTE_CNT(ring) \
(CNXK_EP_R_IN_BYTE_CNT_START + ((ring) * CNXK_EP_RING_OFFSET))
/** Rings per Virtual Function **/
#define CNXK_EP_R_IN_CTL_RPVF_MASK (0xF)
#define CNXK_EP_R_IN_CTL_RPVF_POS (48)

/* IN_CONTROL bit fields.
 * RDSIZE: number of instructions to be read in one MAC read request;
 * setting to Max value(4).
 */
#define CNXK_EP_R_IN_CTL_IDLE (0x1ULL << 28)
#define CNXK_EP_R_IN_CTL_RDSIZE (0x3ULL << 25)
#define CNXK_EP_R_IN_CTL_IS_64B (0x1ULL << 24)
#define CNXK_EP_R_IN_CTL_D_NSR (0x1ULL << 8)
#define CNXK_EP_R_IN_CTL_D_ROR (0x1ULL << 5)
#define CNXK_EP_R_IN_CTL_NSR (0x1ULL << 3)
#define CNXK_EP_R_IN_CTL_ROR (0x1ULL << 0)
#define CNXK_EP_R_IN_CTL_ESR (0x1ull << 1)

#define CNXK_EP_R_IN_CTL_MASK \
(CNXK_EP_R_IN_CTL_RDSIZE \
| CNXK_EP_R_IN_CTL_IS_64B)
/* Output (device-to-host) ring register offsets for ring 0 */
#define CNXK_EP_R_OUT_CNTS_START 0x10100
#define CNXK_EP_R_OUT_INT_LEVELS_START 0x10110
#define CNXK_EP_R_OUT_SLIST_BADDR_START 0x10120
#define CNXK_EP_R_OUT_SLIST_RSIZE_START 0x10130
#define CNXK_EP_R_OUT_SLIST_DBELL_START 0x10140
#define CNXK_EP_R_OUT_CONTROL_START 0x10150
/* WMARK need to be set; New in CN10K */
#define CNXK_EP_R_OUT_WMARK_START 0x10160
#define CNXK_EP_R_OUT_ENABLE_START 0x10170
#define CNXK_EP_R_OUT_PKT_CNT_START 0x10180
#define CNXK_EP_R_OUT_BYTE_CNT_START 0x10190

/* Per-ring output register addresses (base + ring * stride) */
#define CNXK_EP_R_OUT_CNTS(ring) \
(CNXK_EP_R_OUT_CNTS_START + ((ring) * CNXK_EP_RING_OFFSET))

#define CNXK_EP_R_OUT_INT_LEVELS(ring) \
(CNXK_EP_R_OUT_INT_LEVELS_START + ((ring) * CNXK_EP_RING_OFFSET))

#define CNXK_EP_R_OUT_SLIST_BADDR(ring) \
(CNXK_EP_R_OUT_SLIST_BADDR_START + ((ring) * CNXK_EP_RING_OFFSET))

#define CNXK_EP_R_OUT_SLIST_RSIZE(ring) \
(CNXK_EP_R_OUT_SLIST_RSIZE_START + ((ring) * CNXK_EP_RING_OFFSET))

#define CNXK_EP_R_OUT_SLIST_DBELL(ring) \
(CNXK_EP_R_OUT_SLIST_DBELL_START + ((ring) * CNXK_EP_RING_OFFSET))

#define CNXK_EP_R_OUT_CONTROL(ring) \
(CNXK_EP_R_OUT_CONTROL_START + ((ring) * CNXK_EP_RING_OFFSET))

#define CNXK_EP_R_OUT_ENABLE(ring) \
(CNXK_EP_R_OUT_ENABLE_START + ((ring) * CNXK_EP_RING_OFFSET))

#define CNXK_EP_R_OUT_WMARK(ring) \
(CNXK_EP_R_OUT_WMARK_START + ((ring) * CNXK_EP_RING_OFFSET))

#define CNXK_EP_R_OUT_PKT_CNT(ring) \
(CNXK_EP_R_OUT_PKT_CNT_START + ((ring) * CNXK_EP_RING_OFFSET))

#define CNXK_EP_R_OUT_BYTE_CNT(ring) \
(CNXK_EP_R_OUT_BYTE_CNT_START + ((ring) * CNXK_EP_RING_OFFSET))

/*------------------ R_OUT Masks ----------------*/
#define CNXK_EP_R_OUT_INT_LEVELS_BMODE (1ULL << 63)
#define CNXK_EP_R_OUT_INT_LEVELS_TIMET (32)

/* OUT_CONTROL bit fields: ES (endian swap), NSR (no snoop), ROR (relaxed
 * ordering) for pointer (P), info (I) and data (D) accesses; IMODE selects
 * interrupt mode.
 */
#define CNXK_EP_R_OUT_CTL_IDLE (1ULL << 40)
#define CNXK_EP_R_OUT_CTL_ES_I (1ull << 34)
#define CNXK_EP_R_OUT_CTL_NSR_I (1ULL << 33)
#define CNXK_EP_R_OUT_CTL_ROR_I (1ULL << 32)
#define CNXK_EP_R_OUT_CTL_ES_D (1ull << 30)
#define CNXK_EP_R_OUT_CTL_NSR_D (1ULL << 29)
#define CNXK_EP_R_OUT_CTL_ROR_D (1ULL << 28)
#define CNXK_EP_R_OUT_CTL_ES_P (1ull << 26)
#define CNXK_EP_R_OUT_CTL_NSR_P (1ULL << 25)
#define CNXK_EP_R_OUT_CTL_ROR_P (1ULL << 24)
#define CNXK_EP_R_OUT_CTL_IMODE (1ULL << 23)
/* PCI device ID of the CN10K (CNXK) SDP endpoint network VF */
#define PCI_DEVID_CNXK_EP_NET_VF 0xB903

int
cnxk_ep_vf_setup_device(struct otx_ep_device *sdpvf);

/* 64-byte instruction layout pushed to the input (instruction) ring. */
struct cnxk_ep_instr_64B {
	/* Pointer where the input data is available. */
	uint64_t dptr;

	/* OTX_EP Instruction Header. */
	union otx_ep_instr_ih ih;

	/** Pointer where the response for a RAW mode packet
	 * will be written by OCTEON TX.
	 */
	uint64_t rptr;

	/* Input Request Header. */
	union otx_ep_instr_irh irh;

	/* Additional headers available in a 64-byte instruction. */
	uint64_t exhdr[4];
};
#endif /*_CNXK_EP_VF_H_ */

View File

@ -7,4 +7,5 @@ sources = files(
'otx_ep_rxtx.c',
'otx_ep_vf.c',
'otx2_ep_vf.c',
'cnxk_ep_vf.c',
)

View File

@ -17,13 +17,13 @@ otx2_vf_setup_global_iq_reg(struct otx_ep_device *otx_ep, int q_no)
/* Select ES, RO, NS, RDSIZE,DPTR Format#0 for IQs
* IS_64B is by default enabled.
*/
reg_val = otx2_read64(otx_ep->hw_addr + SDP_VF_R_IN_CONTROL(q_no));
reg_val = oct_ep_read64(otx_ep->hw_addr + SDP_VF_R_IN_CONTROL(q_no));
reg_val |= SDP_VF_R_IN_CTL_RDSIZE;
reg_val |= SDP_VF_R_IN_CTL_IS_64B;
reg_val |= SDP_VF_R_IN_CTL_ESR;
otx2_write64(reg_val, otx_ep->hw_addr + SDP_VF_R_IN_CONTROL(q_no));
oct_ep_write64(reg_val, otx_ep->hw_addr + SDP_VF_R_IN_CONTROL(q_no));
}
static void
@ -31,7 +31,7 @@ otx2_vf_setup_global_oq_reg(struct otx_ep_device *otx_ep, int q_no)
{
volatile uint64_t reg_val = 0ull;
reg_val = otx2_read64(otx_ep->hw_addr + SDP_VF_R_OUT_CONTROL(q_no));
reg_val = oct_ep_read64(otx_ep->hw_addr + SDP_VF_R_OUT_CONTROL(q_no));
reg_val &= ~(SDP_VF_R_OUT_CTL_IMODE);
reg_val &= ~(SDP_VF_R_OUT_CTL_ROR_P);
@ -46,7 +46,7 @@ otx2_vf_setup_global_oq_reg(struct otx_ep_device *otx_ep, int q_no)
/* INFO/DATA ptr swap is required */
reg_val |= (SDP_VF_R_OUT_CTL_ES_P);
otx2_write64(reg_val, otx_ep->hw_addr + SDP_VF_R_OUT_CONTROL(q_no));
oct_ep_write64(reg_val, otx_ep->hw_addr + SDP_VF_R_OUT_CONTROL(q_no));
}
static void
@ -79,46 +79,52 @@ otx2_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
{
struct otx_ep_instr_queue *iq = otx_ep->instr_queue[iq_no];
volatile uint64_t reg_val = 0ull;
int loop = SDP_VF_BUSY_LOOP_COUNT;
reg_val = otx2_read64(otx_ep->hw_addr + SDP_VF_R_IN_CONTROL(iq_no));
reg_val = oct_ep_read64(otx_ep->hw_addr + SDP_VF_R_IN_CONTROL(iq_no));
/* Wait till IDLE to set to 1, not supposed to configure BADDR
* as long as IDLE is 0
*/
if (!(reg_val & SDP_VF_R_IN_CTL_IDLE)) {
do {
reg_val = otx2_read64(otx_ep->hw_addr +
SDP_VF_R_IN_CONTROL(iq_no));
} while (!(reg_val & SDP_VF_R_IN_CTL_IDLE));
reg_val = oct_ep_read64(otx_ep->hw_addr + SDP_VF_R_IN_CONTROL(iq_no));
} while ((!(reg_val & SDP_VF_R_IN_CTL_IDLE)) && loop--);
}
if (!loop) {
otx_ep_err("IDLE bit is not set\n");
return;
}
/* Write the start of the input queue's ring and its size */
otx2_write64(iq->base_addr_dma, otx_ep->hw_addr +
SDP_VF_R_IN_INSTR_BADDR(iq_no));
otx2_write64(iq->nb_desc, otx_ep->hw_addr +
SDP_VF_R_IN_INSTR_RSIZE(iq_no));
oct_ep_write64(iq->base_addr_dma, otx_ep->hw_addr + SDP_VF_R_IN_INSTR_BADDR(iq_no));
oct_ep_write64(iq->nb_desc, otx_ep->hw_addr + SDP_VF_R_IN_INSTR_RSIZE(iq_no));
/* Remember the doorbell & instruction count register addr
* for this queue
*/
iq->doorbell_reg = (uint8_t *)otx_ep->hw_addr +
SDP_VF_R_IN_INSTR_DBELL(iq_no);
iq->inst_cnt_reg = (uint8_t *)otx_ep->hw_addr +
SDP_VF_R_IN_CNTS(iq_no);
iq->doorbell_reg = (uint8_t *)otx_ep->hw_addr + SDP_VF_R_IN_INSTR_DBELL(iq_no);
iq->inst_cnt_reg = (uint8_t *)otx_ep->hw_addr + SDP_VF_R_IN_CNTS(iq_no);
otx_ep_dbg("InstQ[%d]:dbell reg @ 0x%p inst_cnt_reg @ 0x%p",
iq_no, iq->doorbell_reg, iq->inst_cnt_reg);
loop = SDP_VF_BUSY_LOOP_COUNT;
do {
reg_val = rte_read32(iq->inst_cnt_reg);
rte_write32(reg_val, iq->inst_cnt_reg);
} while (reg_val != 0);
} while (reg_val != 0 && loop--);
if (!loop) {
otx_ep_err("INST CNT REGISTER is not zero\n");
return;
}
/* IN INTR_THRESHOLD is set to max(FFFFFFFF) which disable the IN INTR
* to raise
*/
otx2_write64(OTX_EP_CLEAR_SDP_IN_INT_LVLS,
otx_ep->hw_addr + SDP_VF_R_IN_INT_LEVELS(iq_no));
oct_ep_write64(OTX_EP_CLEAR_SDP_IN_INT_LVLS,
otx_ep->hw_addr + SDP_VF_R_IN_INT_LEVELS(iq_no));
}
static void
@ -126,24 +132,28 @@ otx2_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
{
volatile uint64_t reg_val = 0ull;
uint64_t oq_ctl = 0ull;
uint64_t loop = OTX_EP_BUSY_LOOP_COUNT;
struct otx_ep_droq *droq = otx_ep->droq[oq_no];
/* Wait on IDLE to set to 1, supposed to configure BADDR
* as log as IDLE is 0
* as long as IDLE is 0
*/
reg_val = otx2_read64(otx_ep->hw_addr + SDP_VF_R_OUT_CONTROL(oq_no));
reg_val = oct_ep_read64(otx_ep->hw_addr + SDP_VF_R_OUT_CONTROL(oq_no));
while (!(reg_val & SDP_VF_R_OUT_CTL_IDLE)) {
reg_val = otx2_read64(otx_ep->hw_addr +
SDP_VF_R_OUT_CONTROL(oq_no));
while ((!(reg_val & SDP_VF_R_OUT_CTL_IDLE)) && loop--) {
reg_val = oct_ep_read64(otx_ep->hw_addr + SDP_VF_R_OUT_CONTROL(oq_no));
rte_delay_ms(1);
}
otx2_write64(droq->desc_ring_dma, otx_ep->hw_addr +
SDP_VF_R_OUT_SLIST_BADDR(oq_no));
otx2_write64(droq->nb_desc, otx_ep->hw_addr +
SDP_VF_R_OUT_SLIST_RSIZE(oq_no));
if (!loop) {
otx_ep_err("OUT CNT REGISTER value is zero\n");
return;
}
oq_ctl = otx2_read64(otx_ep->hw_addr + SDP_VF_R_OUT_CONTROL(oq_no));
oct_ep_write64(droq->desc_ring_dma, otx_ep->hw_addr + SDP_VF_R_OUT_SLIST_BADDR(oq_no));
oct_ep_write64(droq->nb_desc, otx_ep->hw_addr + SDP_VF_R_OUT_SLIST_RSIZE(oq_no));
oq_ctl = oct_ep_read64(otx_ep->hw_addr + SDP_VF_R_OUT_CONTROL(oq_no));
/* Clear the ISIZE and BSIZE (22-0) */
oq_ctl &= ~(OTX_EP_CLEAR_ISIZE_BSIZE);
@ -151,44 +161,50 @@ otx2_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
/* Populate the BSIZE (15-0) */
oq_ctl |= (droq->buffer_size & OTX_EP_DROQ_BUFSZ_MASK);
otx2_write64(oq_ctl, otx_ep->hw_addr + SDP_VF_R_OUT_CONTROL(oq_no));
oct_ep_write64(oq_ctl, otx_ep->hw_addr + SDP_VF_R_OUT_CONTROL(oq_no));
/* Mapped address of the pkt_sent and pkts_credit regs */
droq->pkts_sent_reg = (uint8_t *)otx_ep->hw_addr +
SDP_VF_R_OUT_CNTS(oq_no);
droq->pkts_credit_reg = (uint8_t *)otx_ep->hw_addr +
SDP_VF_R_OUT_SLIST_DBELL(oq_no);
droq->pkts_sent_reg = (uint8_t *)otx_ep->hw_addr + SDP_VF_R_OUT_CNTS(oq_no);
droq->pkts_credit_reg = (uint8_t *)otx_ep->hw_addr + SDP_VF_R_OUT_SLIST_DBELL(oq_no);
rte_write64(OTX_EP_CLEAR_OUT_INT_LVLS,
otx_ep->hw_addr + SDP_VF_R_OUT_INT_LEVELS(oq_no));
rte_write64(OTX_EP_CLEAR_OUT_INT_LVLS, otx_ep->hw_addr + SDP_VF_R_OUT_INT_LEVELS(oq_no));
/* Clear PKT_CNT register */
rte_write64(OTX_EP_CLEAR_SDP_OUT_PKT_CNT, (uint8_t *)otx_ep->hw_addr +
SDP_VF_R_OUT_PKT_CNT(oq_no));
loop = OTX_EP_BUSY_LOOP_COUNT;
/* Clear the OQ doorbell */
rte_write32(OTX_EP_CLEAR_SLIST_DBELL, droq->pkts_credit_reg);
while ((rte_read32(droq->pkts_credit_reg) != 0ull)) {
while ((rte_read32(droq->pkts_credit_reg) != 0ull) && loop--) {
rte_write32(OTX_EP_CLEAR_SLIST_DBELL, droq->pkts_credit_reg);
rte_delay_ms(1);
}
otx_ep_dbg("SDP_R[%d]_credit:%x", oq_no,
rte_read32(droq->pkts_credit_reg));
if (!loop) {
otx_ep_err("Packets credit register value is not cleared\n");
return;
}
otx_ep_dbg("SDP_R[%d]_credit:%x", oq_no, rte_read32(droq->pkts_credit_reg));
/* Clear the OQ_OUT_CNTS doorbell */
reg_val = rte_read32(droq->pkts_sent_reg);
rte_write32((uint32_t)reg_val, droq->pkts_sent_reg);
otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no,
rte_read32(droq->pkts_sent_reg));
otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no, rte_read32(droq->pkts_sent_reg));
while (((rte_read32(droq->pkts_sent_reg)) != 0ull)) {
loop = OTX_EP_BUSY_LOOP_COUNT;
while (((rte_read32(droq->pkts_sent_reg)) != 0ull) && loop--) {
reg_val = rte_read32(droq->pkts_sent_reg);
rte_write32((uint32_t)reg_val, droq->pkts_sent_reg);
rte_delay_ms(1);
}
otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no,
rte_read32(droq->pkts_sent_reg));
if (!loop) {
otx_ep_err("Packets sent register value is not cleared\n");
return;
}
otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no, rte_read32(droq->pkts_sent_reg));
}
static int
@ -200,10 +216,9 @@ otx2_vf_enable_iq(struct otx_ep_device *otx_ep, uint32_t q_no)
/* Resetting doorbells during IQ enabling also to handle abrupt
* guest reboot. IQ reset does not clear the doorbells.
*/
otx2_write64(0xFFFFFFFF, otx_ep->hw_addr +
SDP_VF_R_IN_INSTR_DBELL(q_no));
oct_ep_write64(0xFFFFFFFF, otx_ep->hw_addr + SDP_VF_R_IN_INSTR_DBELL(q_no));
while (((otx2_read64(otx_ep->hw_addr +
while (((oct_ep_read64(otx_ep->hw_addr +
SDP_VF_R_IN_INSTR_DBELL(q_no))) != 0ull) && loop--) {
rte_delay_ms(1);
}
@ -213,10 +228,10 @@ otx2_vf_enable_iq(struct otx_ep_device *otx_ep, uint32_t q_no)
return -EIO;
}
reg_val = otx2_read64(otx_ep->hw_addr + SDP_VF_R_IN_ENABLE(q_no));
reg_val = oct_ep_read64(otx_ep->hw_addr + SDP_VF_R_IN_ENABLE(q_no));
reg_val |= 0x1ull;
otx2_write64(reg_val, otx_ep->hw_addr + SDP_VF_R_IN_ENABLE(q_no));
oct_ep_write64(reg_val, otx_ep->hw_addr + SDP_VF_R_IN_ENABLE(q_no));
otx_ep_info("IQ[%d] enable done", q_no);
@ -228,9 +243,9 @@ otx2_vf_enable_oq(struct otx_ep_device *otx_ep, uint32_t q_no)
{
uint64_t reg_val = 0ull;
reg_val = otx2_read64(otx_ep->hw_addr + SDP_VF_R_OUT_ENABLE(q_no));
reg_val = oct_ep_read64(otx_ep->hw_addr + SDP_VF_R_OUT_ENABLE(q_no));
reg_val |= 0x1ull;
otx2_write64(reg_val, otx_ep->hw_addr + SDP_VF_R_OUT_ENABLE(q_no));
oct_ep_write64(reg_val, otx_ep->hw_addr + SDP_VF_R_OUT_ENABLE(q_no));
otx_ep_info("OQ[%d] enable done", q_no);
@ -261,10 +276,10 @@ otx2_vf_disable_iq(struct otx_ep_device *otx_ep, uint32_t q_no)
uint64_t reg_val = 0ull;
/* Reset the doorbell register for this Input Queue. */
reg_val = otx2_read64(otx_ep->hw_addr + SDP_VF_R_IN_ENABLE(q_no));
reg_val = oct_ep_read64(otx_ep->hw_addr + SDP_VF_R_IN_ENABLE(q_no));
reg_val &= ~0x1ull;
otx2_write64(reg_val, otx_ep->hw_addr + SDP_VF_R_IN_ENABLE(q_no));
oct_ep_write64(reg_val, otx_ep->hw_addr + SDP_VF_R_IN_ENABLE(q_no));
}
static void
@ -272,10 +287,10 @@ otx2_vf_disable_oq(struct otx_ep_device *otx_ep, uint32_t q_no)
{
volatile uint64_t reg_val = 0ull;
reg_val = otx2_read64(otx_ep->hw_addr + SDP_VF_R_OUT_ENABLE(q_no));
reg_val = oct_ep_read64(otx_ep->hw_addr + SDP_VF_R_OUT_ENABLE(q_no));
reg_val &= ~0x1ull;
otx2_write64(reg_val, otx_ep->hw_addr + SDP_VF_R_OUT_ENABLE(q_no));
oct_ep_write64(reg_val, otx_ep->hw_addr + SDP_VF_R_OUT_ENABLE(q_no));
}
static void
@ -336,7 +351,7 @@ otx2_ep_vf_setup_device(struct otx_ep_device *otx_ep)
}
/* Get IOQs (RPVF] count */
reg_val = otx2_read64(otx_ep->hw_addr + SDP_VF_R_IN_CONTROL(0));
reg_val = oct_ep_read64(otx_ep->hw_addr + SDP_VF_R_IN_CONTROL(0));
otx_ep->sriov_info.rings_per_vf = ((reg_val >> SDP_VF_R_IN_CTL_RPVF_POS)
& SDP_VF_R_IN_CTL_RPVF_MASK);

View File

@ -57,6 +57,10 @@
"%s():%u " fmt "\n", \
__func__, __LINE__, ##args)
/* IO Access */
#define oct_ep_read64(addr) rte_read64_relaxed((void *)(addr))
#define oct_ep_write64(val, addr) rte_write64_relaxed((val), (void *)(addr))
/* Input Request Header format */
union otx_ep_instr_irh {
uint64_t u64;

View File

@ -7,6 +7,7 @@
#include "otx_ep_common.h"
#include "otx_ep_vf.h"
#include "otx2_ep_vf.h"
#include "cnxk_ep_vf.h"
#include "otx_ep_rxtx.h"
#define OTX_EP_DEV(_eth_dev) \
@ -108,6 +109,11 @@ otx_ep_chip_specific_setup(struct otx_ep_device *otx_epvf)
ret = otx2_ep_vf_setup_device(otx_epvf);
otx_epvf->fn_list.disable_io_queues(otx_epvf);
break;
case PCI_DEVID_CNXK_EP_NET_VF:
otx_epvf->chip_id = dev_id;
ret = cnxk_ep_vf_setup_device(otx_epvf);
otx_epvf->fn_list.disable_io_queues(otx_epvf);
break;
default:
otx_ep_err("Unsupported device\n");
ret = -EINVAL;
@ -140,6 +146,8 @@ otx_epdev_init(struct otx_ep_device *otx_epvf)
else if (otx_epvf->chip_id == PCI_DEVID_CN9K_EP_NET_VF ||
otx_epvf->chip_id == PCI_DEVID_CN98XX_EP_NET_VF)
otx_epvf->eth_dev->tx_pkt_burst = &otx2_ep_xmit_pkts;
else if (otx_epvf->chip_id == PCI_DEVID_CNXK_EP_NET_VF)
otx_epvf->eth_dev->tx_pkt_burst = &otx2_ep_xmit_pkts;
ethdev_queues = (uint32_t)(otx_epvf->sriov_info.rings_per_vf);
otx_epvf->max_rx_queues = ethdev_queues;
otx_epvf->max_tx_queues = ethdev_queues;
@ -521,6 +529,7 @@ static const struct rte_pci_id pci_id_otx_ep_map[] = {
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX_EP_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN9K_EP_NET_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN98XX_EP_NET_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNXK_EP_NET_VF) },
{ .vendor_id = 0, /* sentinel */ }
};