net/bnxt: handle VF/PF initialization appropriately

1) For a VF, query the firmware to determine if a MAC address is
   already configured. If not configure a random default MAC address.

2) Do not initialize the default completion ring in
   bnxt_alloc_hwrm_rings().

3) While registering for async events with the firmware,
   use func_vf_cfg for a VF and use func_cfg for a PF.

4) Query the VNIC plcmode config using the bnxt_hwrm_vnic_plcmodes_qcfg
   before a VNIC is updated. Reconfigure the VNIC with the plcmode
   configuration queried earlier. Not doing this could overwrite
   the plcmodes in some cases.

5) Reorganize bnxt_handle_fwd_req to properly handle forwarded
   requests. The previous code did not handle them completely.

Signed-off-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
This commit is contained in:
Ajit Khaparde 2017-06-01 12:07:00 -05:00 committed by Ferruh Yigit
parent b7778e8a1c
commit 4535cad395
9 changed files with 293 additions and 87 deletions

View File

@ -10,7 +10,9 @@ Promiscuous mode = Y
Unicast MAC filter = Y
Multicast MAC filter = Y
RSS reta update = Y
SR-IOV = Y
Basic stats = Y
Extended stats = Y
Linux UIO = Y
Linux VFIO = Y
x86-64 = Y

View File

@ -65,56 +65,115 @@ void bnxt_handle_async_event(struct bnxt *bp,
void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
{
struct hwrm_exec_fwd_resp_input *fwreq;
struct hwrm_fwd_req_cmpl *fwd_cmpl = (struct hwrm_fwd_req_cmpl *)cmpl;
struct input *fwd_cmd;
uint16_t logical_vf_id, error_code;
uint16_t fw_vf_id;
uint16_t vf_id;
uint16_t req_len;
int rc;
if (bp->pf.active_vfs <= 0) {
RTE_LOG(ERR, PMD, "Forwarded VF with no active VFs\n");
return;
}
/* Qualify the fwd request */
if (fwd_cmpl->source_id < bp->pf.first_vf_id) {
RTE_LOG(ERR, PMD,
"FWD req's source_id 0x%x > first_vf_id 0x%x\n",
fwd_cmpl->source_id, bp->pf.first_vf_id);
error_code = HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED;
goto reject;
} else if (fwd_cmpl->req_len_type >> HWRM_FWD_REQ_CMPL_REQ_LEN_SFT >
128 - sizeof(struct input)) {
RTE_LOG(ERR, PMD,
"FWD req's cmd len 0x%x > 108 bytes allowed\n",
fwd_cmpl->req_len_type >> HWRM_FWD_REQ_CMPL_REQ_LEN_SFT);
error_code = HWRM_ERR_CODE_INVALID_PARAMS;
goto reject;
}
fw_vf_id = rte_le_to_cpu_16(fwd_cmpl->source_id);
vf_id = fw_vf_id - bp->pf.first_vf_id;
req_len = (rte_le_to_cpu_16(fwd_cmpl->req_len_type) &
HWRM_FWD_REQ_CMPL_REQ_LEN_MASK) >>
HWRM_FWD_REQ_CMPL_REQ_LEN_SFT;
if (req_len > sizeof(fwreq->encap_request))
req_len = sizeof(fwreq->encap_request);
/* Locate VF's forwarded command */
logical_vf_id = fwd_cmpl->source_id - bp->pf.first_vf_id;
fwd_cmd = (struct input *)((uint8_t *)bp->pf.vf_req_buf +
(logical_vf_id * 128));
fwd_cmd = (struct input *)bp->pf.vf_info[vf_id].req_buf;
/* Provision the request */
switch (fwd_cmd->req_type) {
case HWRM_CFA_L2_FILTER_ALLOC:
case HWRM_CFA_L2_FILTER_FREE:
case HWRM_CFA_L2_FILTER_CFG:
case HWRM_CFA_L2_SET_RX_MASK:
break;
default:
error_code = HWRM_ERR_CODE_INVALID_PARAMS;
if (fw_vf_id < bp->pf.first_vf_id ||
fw_vf_id >= (bp->pf.first_vf_id) + bp->pf.active_vfs) {
RTE_LOG(ERR, PMD,
"FWD req's source_id 0x%x out of range 0x%x - 0x%x (%d %d)\n",
fw_vf_id, bp->pf.first_vf_id,
(bp->pf.first_vf_id) + bp->pf.active_vfs - 1,
bp->pf.first_vf_id, bp->pf.active_vfs);
goto reject;
}
/* Forward */
fwd_cmd->target_id = fwd_cmpl->source_id;
bnxt_hwrm_exec_fwd_resp(bp, fwd_cmd);
return;
if (bnxt_rcv_msg_from_vf(bp, vf_id, fwd_cmd) == true) {
/*
* In older firmware versions, the MAC had to be all zeros for
* the VF to set it's MAC via hwrm_func_vf_cfg. Set to all
* zeros if it's being configured and has been ok'd by caller.
*/
if (fwd_cmd->req_type == HWRM_FUNC_VF_CFG) {
struct hwrm_func_vf_cfg_input *vfc = (void *)fwd_cmd;
if (vfc->enables &
HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR) {
bnxt_hwrm_func_vf_mac(bp, vf_id,
(const uint8_t *)"\x00\x00\x00\x00\x00");
}
}
if (fwd_cmd->req_type == HWRM_CFA_L2_SET_RX_MASK) {
struct hwrm_cfa_l2_set_rx_mask_input *srm =
(void *)fwd_cmd;
srm->vlan_tag_tbl_addr = rte_cpu_to_le_64(0);
srm->num_vlan_tags = rte_cpu_to_le_32(0);
srm->mask &= ~rte_cpu_to_le_32(
HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY |
HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN |
HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN);
}
/* Forward */
rc = bnxt_hwrm_exec_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
if (rc) {
RTE_LOG(ERR, PMD,
"Failed to send FWD req VF 0x%x, type 0x%x.\n",
fw_vf_id - bp->pf.first_vf_id,
rte_le_to_cpu_16(fwd_cmd->req_type));
}
return;
}
reject:
/* TODO: Encap the reject error resp into the hwrm_err_iput? */
/* Use the error_code for the reject cmd */
RTE_LOG(ERR, PMD,
"Error 0x%x found in the forward request\n", error_code);
rc = bnxt_hwrm_reject_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
if (rc) {
RTE_LOG(ERR, PMD,
"Failed to send REJECT req VF 0x%x, type 0x%x.\n",
fw_vf_id - bp->pf.first_vf_id,
rte_le_to_cpu_16(fwd_cmd->req_type));
}
return;
}
/*
 * Allocate the default completion ring with the firmware, map its
 * doorbell, and register it as the async-event ring (via func_cfg on a
 * PF, func_vf_cfg on a VF).  Returns 0 on success or the HWRM error.
 */
int bnxt_alloc_def_cp_ring(struct bnxt *bp)
{
	struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
	struct bnxt_ring *ring = cpr->cp_ring_struct;
	int ret;

	ret = bnxt_hwrm_ring_alloc(bp, ring,
				   HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
				   0, HWRM_NA_SIGNATURE);
	if (ret)
		return ret;

	/* Map the doorbell (BAR 2) and start with interrupts disarmed. */
	cpr->cp_doorbell = bp->pdev->mem_resource[2].addr;
	B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
	bp->grp_info[0].cp_fw_ring_id = ring->fw_ring_id;

	/* Tell the firmware to post async events to this ring. */
	return BNXT_PF(bp) ? bnxt_hwrm_func_cfg_def_cp(bp) :
			     bnxt_hwrm_vf_func_cfg_def_cp(bp);
}
void bnxt_free_def_cp_ring(struct bnxt *bp)
{
struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;

View File

@ -33,6 +33,7 @@
#ifndef _BNXT_CPR_H_
#define _BNXT_CPR_H_
#include <stdbool.h>
#include <rte_io.h>
@ -56,6 +57,19 @@
RING_CMP(((cpr)->cp_ring_struct), raw_cons)), \
((cpr)->cp_doorbell))
#define B_CP_DB_ARM(cpr) rte_write32((DB_KEY_CP), ((cpr)->cp_doorbell))
#define B_CP_DB_DISARM(cpr) (*(uint32_t *)((cpr)->cp_doorbell) = \
DB_KEY_CP | DB_IRQ_DIS)
#define B_CP_DB_IDX_ARM(cpr, cons) \
(*(uint32_t *)((cpr)->cp_doorbell) = (DB_CP_REARM_FLAGS | \
(cons)))
/*
 * Write the doorbell at the given consumer index with interrupts left
 * disarmed.  The wmb orders prior descriptor/ring updates before the
 * doorbell store.  Fixed: the store expression was missing a closing
 * parenthesis (compare B_CP_DB_IDX_ARM above, which ends with "))").
 */
#define B_CP_DB_IDX_DISARM(cpr, cons) do { \
	rte_smp_wmb(); \
	(*(uint32_t *)((cpr)->cp_doorbell) = (DB_CP_FLAGS | \
				(cons))); \
} while (0)
#define B_CP_DIS_DB(cpr, raw_cons) \
rte_write32((DB_CP_FLAGS | \
RING_CMP(((cpr)->cp_ring_struct), raw_cons)), \
@ -75,6 +89,8 @@ struct bnxt_cp_ring_info {
uint32_t hw_stats_ctx_id;
struct bnxt_ring *cp_ring_struct;
uint16_t cp_cons;
bool v;
};
#define RX_CMP_L2_ERRORS \
@ -82,6 +98,7 @@ struct bnxt_cp_ring_info {
struct bnxt;
int bnxt_alloc_def_cp_ring(struct bnxt *bp);
void bnxt_free_def_cp_ring(struct bnxt *bp);
int bnxt_init_def_ring_struct(struct bnxt *bp, unsigned int socket_id);
void bnxt_handle_async_event(struct bnxt *bp, struct cmpl_base *cmp);

View File

@ -228,28 +228,31 @@ static int bnxt_init_chip(struct bnxt *bp)
rc = bnxt_hwrm_vnic_alloc(bp, vnic);
if (rc) {
RTE_LOG(ERR, PMD, "HWRM vnic alloc failure rc: %x\n",
rc);
RTE_LOG(ERR, PMD, "HWRM vnic %d alloc failure rc: %x\n",
i, rc);
goto err_out;
}
rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
if (rc) {
RTE_LOG(ERR, PMD,
"HWRM vnic ctx alloc failure rc: %x\n", rc);
"HWRM vnic %d ctx alloc failure rc: %x\n",
i, rc);
goto err_out;
}
rc = bnxt_hwrm_vnic_cfg(bp, vnic);
if (rc) {
RTE_LOG(ERR, PMD, "HWRM vnic cfg failure rc: %x\n", rc);
RTE_LOG(ERR, PMD, "HWRM vnic %d cfg failure rc: %x\n",
i, rc);
goto err_out;
}
rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
if (rc) {
RTE_LOG(ERR, PMD, "HWRM vnic filter failure rc: %x\n",
rc);
RTE_LOG(ERR, PMD,
"HWRM vnic %d filter failure rc: %x\n",
i, rc);
goto err_out;
}
if (vnic->rss_table && vnic->hash_type) {
@ -269,8 +272,8 @@ static int bnxt_init_chip(struct bnxt *bp)
rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
if (rc) {
RTE_LOG(ERR, PMD,
"HWRM vnic set RSS failure rc: %x\n",
rc);
"HWRM vnic %d set RSS failure rc: %x\n",
i, rc);
goto err_out;
}
}
@ -638,7 +641,7 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
new.link_speed = ETH_LINK_SPEED_100M;
new.link_duplex = ETH_LINK_FULL_DUPLEX;
RTE_LOG(ERR, PMD,
"Failed to retrieve link rc = 0x%x!", rc);
"Failed to retrieve link rc = 0x%x!\n", rc);
goto out;
}
rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
@ -1010,7 +1013,8 @@ static bool bnxt_vf_pciid(uint16_t id)
if (id == BROADCOM_DEV_ID_57304_VF ||
id == BROADCOM_DEV_ID_57406_VF ||
id == BROADCOM_DEV_ID_5731X_VF ||
id == BROADCOM_DEV_ID_5741X_VF)
id == BROADCOM_DEV_ID_5741X_VF ||
id == BROADCOM_DEV_ID_57414_VF)
return true;
return false;
}
@ -1066,7 +1070,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
int rc;
if (version_printed++ == 0)
RTE_LOG(INFO, PMD, "%s", bnxt_version);
RTE_LOG(INFO, PMD, "%s\n", bnxt_version);
rte_eth_copy_pci_info(eth_dev, pci_dev);
eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
@ -1207,6 +1211,10 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
if (rc)
goto error_free_int;
rc = bnxt_alloc_def_cp_ring(bp);
if (rc)
goto error_free_int;
bnxt_enable_int(bp);
return 0;

View File

@ -126,7 +126,7 @@ void bnxt_free_filter_mem(struct bnxt *bp)
"HWRM filter cannot be freed rc = %d\n",
rc);
}
filter->fw_l2_filter_id = -1;
filter->fw_l2_filter_id = UINT64_MAX;
}
STAILQ_INIT(&bp->free_filter_list);

View File

@ -33,6 +33,8 @@
#include <unistd.h>
#include <unistd.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
@ -286,24 +288,6 @@ int bnxt_hwrm_set_filter(struct bnxt *bp,
return rc;
}
int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd)
{
int rc;
struct hwrm_exec_fwd_resp_input req = {.req_type = 0 };
struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);
memcpy(req.encap_request, fwd_cmd,
sizeof(req.encap_request));
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
return rc;
}
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
int rc = 0;
@ -905,12 +889,71 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
return rc;
}
/*
 * Query the current buffer placement-mode settings (jumbo threshold,
 * header-data-split offset/threshold, flags) of a VNIC into *pmode, so
 * they can be restored after a vnic_cfg call that would otherwise
 * overwrite them.  Returns 0 on success or the HWRM error code.
 *
 * NOTE(review): HWRM_PREP/HWRM_CHECK_RESULT appear to reference req,
 * resp and rc by name and may return early on error - verify against
 * the macro definitions before reordering anything here.
 */
static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
					struct bnxt_vnic_info *vnic,
					struct bnxt_plcmodes_cfg *pmode)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_PLCMODES_QCFG, -1, resp);

	req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	/* Convert each response field out of little-endian wire order. */
	pmode->flags = rte_le_to_cpu_32(resp->flags);
	/* dflt_vnic bit doesn't exist in the _cfg command */
	pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
	pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
	pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
	pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

	return rc;
}
/*
 * Program a VNIC's buffer placement-mode settings from a previously
 * captured bnxt_plcmodes_cfg snapshot - the write counterpart of
 * bnxt_hwrm_vnic_plcmodes_qcfg().  Returns 0 on success or the HWRM
 * error code.
 */
static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
				       struct bnxt_vnic_info *vnic,
				       struct bnxt_plcmodes_cfg *pmode)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);

	req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
	req.flags = rte_cpu_to_le_32(pmode->flags);
	req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
	req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
	req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
	/* Flag every field written above as valid so firmware applies all. */
	req.enables = rte_cpu_to_le_32(
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
	);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
int rc = 0;
struct hwrm_vnic_cfg_input req = {.req_type = 0 };
struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
uint32_t ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
struct bnxt_plcmodes_cfg pmodes;
rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
if (rc)
return rc;
HWRM_PREP(req, VNIC_CFG, -1, resp);
@ -954,6 +997,8 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
HWRM_CHECK_RESULT;
rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
return rc;
}
@ -1773,6 +1818,25 @@ static void populate_vf_func_cfg_req(struct bnxt *bp,
(num_vfs + 1));
}
static void add_random_mac_if_needed(struct bnxt *bp,
struct hwrm_func_cfg_input *cfg_req,
int vf)
{
struct ether_addr mac;
if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
return;
if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00", 6) == 0) {
cfg_req->enables |=
rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
eth_random_addr(cfg_req->dflt_mac_addr);
bp->pf.vf_info[vf].random_mac = true;
} else {
memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
}
}
static void reserve_resources_from_vf(struct bnxt *bp,
struct hwrm_func_cfg_input *cfg_req,
int vf)
@ -1914,6 +1978,8 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
bp->pf.active_vfs = 0;
for (i = 0; i < num_vfs; i++) {
add_random_mac_if_needed(bp, &req, i);
HWRM_PREP(req, FUNC_CFG, -1, resp);
req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
@ -2019,6 +2085,23 @@ int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
return rc;
}
/*
 * VF counterpart of bnxt_hwrm_func_cfg_def_cp(): register the default
 * completion ring with the firmware as the async-event ring, using the
 * FUNC_VF_CFG command (a VF may not issue FUNC_CFG).  Returns 0 on
 * success or the HWRM error code.
 */
int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
{
	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_vf_cfg_input req = {0};
	int rc;

	HWRM_PREP(req, FUNC_VF_CFG, -1, resp);
	/*
	 * NOTE(review): this uses the FUNC_CFG enables constant inside a
	 * FUNC_VF_CFG request - confirm the ASYNC_EVENT_CR bit position is
	 * identical in both commands, or switch to the FUNC_VF_CFG variant.
	 */
	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
	req.async_event_cr = rte_cpu_to_le_16(
			bp->def_cp_ring->cp_ring_struct->fw_ring_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	return rc;
}
int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
void *encaped, size_t ec_size)
{
@ -2040,3 +2123,42 @@ int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
return rc;
}
/*
 * Query the firmware for VF <vf>'s currently configured default MAC
 * address and copy it into *mac.  Callers treat an all-zero result as
 * "no MAC assigned yet".  Returns 0 on success or the HWRM error code;
 * *mac is only valid on success.
 */
int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
				       struct ether_addr *mac)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(req, FUNC_QCFG, -1, resp);
	/* Address the query to the VF's firmware function id. */
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
	return rc;
}
/*
 * Forward a vetted VF command (ec_size bytes at encaped) to the firmware
 * on behalf of function target_id via HWRM_EXEC_FWD_RESP.  Returns 0 on
 * success, -1 if the command is too large to encapsulate, or the HWRM
 * error code from the send.
 */
int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
			    void *encaped, size_t ec_size)
{
	int rc = 0;
	struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	/* Refuse commands that cannot fit in the encapsulation buffer. */
	if (ec_size > sizeof(req.encap_request))
		return -1;

	HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);

	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
	memcpy(req.encap_request, encaped, ec_size);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	return rc;
}

View File

@ -51,7 +51,8 @@ int bnxt_hwrm_clear_filter(struct bnxt *bp,
int bnxt_hwrm_set_filter(struct bnxt *bp,
struct bnxt_vnic_info *vnic,
struct bnxt_filter_info *filter);
int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd);
int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
void *encaped, size_t ec_size);
int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
void *encaped, size_t ec_size);
@ -62,6 +63,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp);
int bnxt_hwrm_func_reset(struct bnxt *bp);
int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags);
int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp);
int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp);
int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);

View File

@ -66,16 +66,26 @@ static void bnxt_int_handler(void *param)
/* Handle any async event */
bnxt_handle_async_event(bp, cmp);
break;
case CMPL_BASE_TYPE_HWRM_FWD_RESP:
case CMPL_BASE_TYPE_HWRM_FWD_REQ:
/* Handle HWRM forwarded responses */
bnxt_handle_fwd_req(bp, cmp);
break;
default:
/* Ignore any other events */
if (cmp->type & rte_cpu_to_le_16(0x01)) {
if (!CMP_VALID(cmp, raw_cons,
cpr->cp_ring_struct))
goto no_more;
}
RTE_LOG(INFO, PMD,
"Ignoring %02x completion\n", CMP_TYPE(cmp));
break;
}
raw_cons = NEXT_RAW_CMP(raw_cons);
}
};
no_more:
cpr->cp_raw_cons = raw_cons;
B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
}
@ -102,14 +112,15 @@ void bnxt_disable_int(struct bnxt *bp)
struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
/* Only the default completion ring */
B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
if (cpr != NULL && cpr->cp_doorbell != NULL)
B_CP_DB_DISARM(cpr);
}
void bnxt_enable_int(struct bnxt *bp)
{
struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
B_CP_DB_ARM(cpr);
}
int bnxt_setup_int(struct bnxt *bp)
@ -136,7 +147,7 @@ int bnxt_setup_int(struct bnxt *bp)
return 0;
setup_exit:
RTE_LOG(ERR, PMD, "bnxt_irq_tbl setup failed");
RTE_LOG(ERR, PMD, "bnxt_irq_tbl setup failed\n");
return rc;
}

View File

@ -213,21 +213,6 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
unsigned int i;
int rc = 0;
/* Default completion ring */
{
struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
0, HWRM_NA_SIGNATURE);
if (rc)
goto err_out;
cpr->cp_doorbell = pci_dev->mem_resource[2].addr;
B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
bp->grp_info[0].cp_fw_ring_id = cp_ring->fw_ring_id;
}
for (i = 0; i < bp->rx_cp_nr_rings; i++) {
struct bnxt_rx_queue *rxq = bp->rx_queues[i];
struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
@ -259,7 +244,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
bp->grp_info[idx].rx_fw_ring_id = ring->fw_ring_id;
B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
if (bnxt_init_one_rx_ring(rxq)) {
RTE_LOG(ERR, PMD, "bnxt_init_one_rx_ring failed!");
RTE_LOG(ERR, PMD, "bnxt_init_one_rx_ring failed!\n");
bnxt_rx_queue_release_op(rxq);
return -ENOMEM;
}