net/bnxt: use dynamic log type

This patch implements a driver-specific log type, doing away with the
usage of RTE_LOG() for logging.

Signed-off-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
This commit is contained in:
Ajit Khaparde 2018-01-26 09:31:55 -08:00 committed by Ferruh Yigit
parent c771d289cb
commit 3e92fd4e4e
13 changed files with 300 additions and 288 deletions

View File

@ -334,4 +334,12 @@ int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg);
bool is_bnxt_supported(struct rte_eth_dev *dev);
extern const struct rte_flow_ops bnxt_flow_ops;
extern int bnxt_logtype_driver;
/*
 * Driver logging macros: route messages through the dynamic log type
 * bnxt_logtype_driver (registered in bnxt_init_log) instead of the
 * static RTE_LOG()/PMD type.  PMD_DRV_LOG_RAW() prefixes every message
 * with the calling function's name via __func__; PMD_DRV_LOG() is the
 * variant callers use.  Uses GNU-style variadic macros (args...),
 * matching the rest of this driver.
 */
#define PMD_DRV_LOG_RAW(level, fmt, args...) \
rte_log(RTE_LOG_ ## level, bnxt_logtype_driver, "%s(): " fmt, \
__func__, ## args)
#define PMD_DRV_LOG(level, fmt, args...) \
PMD_DRV_LOG_RAW(level, fmt, ## args)
#endif

View File

@ -58,7 +58,7 @@ void bnxt_handle_async_event(struct bnxt *bp,
bnxt_link_update_op(bp->eth_dev, 1);
break;
default:
RTE_LOG(DEBUG, PMD, "handle_async_event id = 0x%x\n", event_id);
PMD_DRV_LOG(DEBUG, "handle_async_event id = 0x%x\n", event_id);
break;
}
}
@ -74,7 +74,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
int rc;
if (bp->pf.active_vfs <= 0) {
RTE_LOG(ERR, PMD, "Forwarded VF with no active VFs\n");
PMD_DRV_LOG(ERR, "Forwarded VF with no active VFs\n");
return;
}
@ -93,7 +93,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
if (fw_vf_id < bp->pf.first_vf_id ||
fw_vf_id >= (bp->pf.first_vf_id) + bp->pf.active_vfs) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"FWD req's source_id 0x%x out of range 0x%x - 0x%x (%d %d)\n",
fw_vf_id, bp->pf.first_vf_id,
(bp->pf.first_vf_id) + bp->pf.active_vfs - 1,
@ -130,7 +130,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
/* Forward */
rc = bnxt_hwrm_exec_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
if (rc) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Failed to send FWD req VF 0x%x, type 0x%x.\n",
fw_vf_id - bp->pf.first_vf_id,
rte_le_to_cpu_16(fwd_cmd->req_type));
@ -141,7 +141,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
reject:
rc = bnxt_hwrm_reject_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
if (rc) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Failed to send REJECT req VF 0x%x, type 0x%x.\n",
fw_vf_id - bp->pf.first_vf_id,
rte_le_to_cpu_16(fwd_cmd->req_type));

View File

@ -58,6 +58,7 @@
#define DRV_MODULE_NAME "bnxt"
static const char bnxt_version[] =
"Broadcom Cumulus driver " DRV_MODULE_NAME "\n";
int bnxt_logtype_driver;
#define PCI_VENDOR_ID_BROADCOM 0x14E4
@ -223,25 +224,25 @@ static int bnxt_init_chip(struct bnxt *bp)
rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
if (rc) {
RTE_LOG(ERR, PMD, "HWRM stat ctx alloc failure rc: %x\n", rc);
PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc);
goto err_out;
}
rc = bnxt_alloc_hwrm_rings(bp);
if (rc) {
RTE_LOG(ERR, PMD, "HWRM ring alloc failure rc: %x\n", rc);
PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc);
goto err_out;
}
rc = bnxt_alloc_all_hwrm_ring_grps(bp);
if (rc) {
RTE_LOG(ERR, PMD, "HWRM ring grp alloc failure: %x\n", rc);
PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc);
goto err_out;
}
rc = bnxt_mq_rx_configure(bp);
if (rc) {
RTE_LOG(ERR, PMD, "MQ mode configure failure rc: %x\n", rc);
PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc);
goto err_out;
}
@ -251,14 +252,14 @@ static int bnxt_init_chip(struct bnxt *bp)
rc = bnxt_hwrm_vnic_alloc(bp, vnic);
if (rc) {
RTE_LOG(ERR, PMD, "HWRM vnic %d alloc failure rc: %x\n",
PMD_DRV_LOG(ERR, "HWRM vnic %d alloc failure rc: %x\n",
i, rc);
goto err_out;
}
rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
if (rc) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"HWRM vnic %d ctx alloc failure rc: %x\n",
i, rc);
goto err_out;
@ -266,14 +267,14 @@ static int bnxt_init_chip(struct bnxt *bp)
rc = bnxt_hwrm_vnic_cfg(bp, vnic);
if (rc) {
RTE_LOG(ERR, PMD, "HWRM vnic %d cfg failure rc: %x\n",
PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n",
i, rc);
goto err_out;
}
rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
if (rc) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"HWRM vnic %d filter failure rc: %x\n",
i, rc);
goto err_out;
@ -294,7 +295,7 @@ static int bnxt_init_chip(struct bnxt *bp)
}
rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
if (rc) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"HWRM vnic %d set RSS failure rc: %x\n",
i, rc);
goto err_out;
@ -310,7 +311,7 @@ static int bnxt_init_chip(struct bnxt *bp)
}
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
if (rc) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"HWRM cfa l2 rx mask failure rc: %x\n", rc);
goto err_out;
}
@ -320,10 +321,9 @@ static int bnxt_init_chip(struct bnxt *bp)
!RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
intr_vector = bp->eth_dev->data->nb_rx_queues;
RTE_LOG(INFO, PMD, "%s(): intr_vector = %d\n", __func__,
intr_vector);
PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector);
if (intr_vector > bp->rx_cp_nr_rings) {
RTE_LOG(ERR, PMD, "At most %d intr queues supported",
PMD_DRV_LOG(ERR, "At most %d intr queues supported",
bp->rx_cp_nr_rings);
return -ENOTSUP;
}
@ -337,13 +337,13 @@ static int bnxt_init_chip(struct bnxt *bp)
bp->eth_dev->data->nb_rx_queues *
sizeof(int), 0);
if (intr_handle->intr_vec == NULL) {
RTE_LOG(ERR, PMD, "Failed to allocate %d rx_queues"
PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues"
" intr_vec", bp->eth_dev->data->nb_rx_queues);
return -ENOMEM;
}
RTE_LOG(DEBUG, PMD, "%s(): intr_handle->intr_vec = %p "
PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p "
"intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
__func__, intr_handle->intr_vec, intr_handle->nb_efd,
intr_handle->intr_vec, intr_handle->nb_efd,
intr_handle->max_intr);
}
@ -359,14 +359,14 @@ static int bnxt_init_chip(struct bnxt *bp)
rc = bnxt_get_hwrm_link_config(bp, &new);
if (rc) {
RTE_LOG(ERR, PMD, "HWRM Get link config failure rc: %x\n", rc);
PMD_DRV_LOG(ERR, "HWRM Get link config failure rc: %x\n", rc);
goto err_out;
}
if (!bp->link_info.link_up) {
rc = bnxt_set_hwrm_link_config(bp, true);
if (rc) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"HWRM link config failure rc: %x\n", rc);
goto err_out;
}
@ -537,13 +537,13 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
bp->max_stat_ctx ||
(uint32_t)(eth_dev->data->nb_rx_queues + 1) > bp->max_ring_grps) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Insufficient resources to support requested config\n");
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Num Queues Requested: Tx %d, Rx %d\n",
eth_dev->data->nb_tx_queues,
eth_dev->data->nb_rx_queues);
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Res available: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d\n",
bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
bp->max_stat_ctx, bp->max_ring_grps);
@ -567,13 +567,13 @@ static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
struct rte_eth_link *link = &eth_dev->data->dev_link;
if (link->link_status)
RTE_LOG(INFO, PMD, "Port %d Link Up - speed %u Mbps - %s\n",
PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
eth_dev->data->port_id,
(uint32_t)link->link_speed,
(link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
RTE_LOG(INFO, PMD, "Port %d Link Down\n",
PMD_DRV_LOG(INFO, "Port %d Link Down\n",
eth_dev->data->port_id);
}
@ -590,7 +590,7 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
int rc;
if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
}
@ -729,25 +729,25 @@ static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
struct bnxt_filter_info *filter;
if (BNXT_VF(bp)) {
RTE_LOG(ERR, PMD, "Cannot add MAC address to a VF interface\n");
PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
return -ENOTSUP;
}
if (!vnic) {
RTE_LOG(ERR, PMD, "VNIC not found for pool %d!\n", pool);
PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool);
return -EINVAL;
}
/* Attach requested MAC address to the new l2_filter */
STAILQ_FOREACH(filter, &vnic->filter, next) {
if (filter->mac_index == index) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"MAC addr already existed for pool %d\n", pool);
return -EINVAL;
}
}
filter = bnxt_alloc_filter(bp);
if (!filter) {
RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
return -ENODEV;
}
STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
@ -770,7 +770,7 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
if (rc) {
new.link_speed = ETH_LINK_SPEED_100M;
new.link_duplex = ETH_LINK_FULL_DUPLEX;
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Failed to retrieve link rc = 0x%x!\n", rc);
goto out;
}
@ -861,7 +861,7 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
return -EINVAL;
if (reta_size != HW_HASH_INDEX_SIZE) {
RTE_LOG(ERR, PMD, "The configured hash table lookup size "
PMD_DRV_LOG(ERR, "The configured hash table lookup size "
"(%d) must equal the size supported by the hardware "
"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
return -EINVAL;
@ -893,7 +893,7 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
return -EINVAL;
if (reta_size != HW_HASH_INDEX_SIZE) {
RTE_LOG(ERR, PMD, "The configured hash table lookup size "
PMD_DRV_LOG(ERR, "The configured hash table lookup size "
"(%d) must equal the size supported by the hardware "
"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
return -EINVAL;
@ -924,7 +924,7 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
*/
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
if (!rss_conf->rss_hf)
RTE_LOG(ERR, PMD, "Hash type NONE\n");
PMD_DRV_LOG(ERR, "Hash type NONE\n");
} else {
if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
return -EINVAL;
@ -1013,7 +1013,7 @@ static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
}
if (hash_types) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Unknwon RSS config from firmware (%08x), RSS disabled",
vnic->hash_type);
return -ENOTSUP;
@ -1062,7 +1062,7 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
RTE_LOG(ERR, PMD, "Flow Control Settings cannot be modified\n");
PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n");
return -ENOTSUP;
}
@ -1122,10 +1122,10 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
switch (udp_tunnel->prot_type) {
case RTE_TUNNEL_TYPE_VXLAN:
if (bp->vxlan_port_cnt) {
RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n",
PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
udp_tunnel->udp_port);
if (bp->vxlan_port != udp_tunnel->udp_port) {
RTE_LOG(ERR, PMD, "Only one port allowed\n");
PMD_DRV_LOG(ERR, "Only one port allowed\n");
return -ENOSPC;
}
bp->vxlan_port_cnt++;
@ -1137,10 +1137,10 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
break;
case RTE_TUNNEL_TYPE_GENEVE:
if (bp->geneve_port_cnt) {
RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n",
PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
udp_tunnel->udp_port);
if (bp->geneve_port != udp_tunnel->udp_port) {
RTE_LOG(ERR, PMD, "Only one port allowed\n");
PMD_DRV_LOG(ERR, "Only one port allowed\n");
return -ENOSPC;
}
bp->geneve_port_cnt++;
@ -1151,7 +1151,7 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
bp->geneve_port_cnt++;
break;
default:
RTE_LOG(ERR, PMD, "Tunnel type is not supported\n");
PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
return -ENOTSUP;
}
rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
@ -1171,11 +1171,11 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
switch (udp_tunnel->prot_type) {
case RTE_TUNNEL_TYPE_VXLAN:
if (!bp->vxlan_port_cnt) {
RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n");
PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
return -EINVAL;
}
if (bp->vxlan_port != udp_tunnel->udp_port) {
RTE_LOG(ERR, PMD, "Req Port: %d. Configured port: %d\n",
PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
udp_tunnel->udp_port, bp->vxlan_port);
return -EINVAL;
}
@ -1188,11 +1188,11 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
break;
case RTE_TUNNEL_TYPE_GENEVE:
if (!bp->geneve_port_cnt) {
RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n");
PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
return -EINVAL;
}
if (bp->geneve_port != udp_tunnel->udp_port) {
RTE_LOG(ERR, PMD, "Req Port: %d. Configured port: %d\n",
PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
udp_tunnel->udp_port, bp->geneve_port);
return -EINVAL;
}
@ -1204,7 +1204,7 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
port = bp->geneve_fw_dst_port_id;
break;
default:
RTE_LOG(ERR, PMD, "Tunnel type is not supported\n");
PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
return -ENOTSUP;
}
@ -1261,7 +1261,7 @@ static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
new_filter = bnxt_alloc_filter(bp);
if (!new_filter) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"MAC/VLAN filter alloc failed\n");
rc = -ENOMEM;
goto exit;
@ -1279,7 +1279,7 @@ static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
new_filter);
if (rc)
goto exit;
RTE_LOG(INFO, PMD,
PMD_DRV_LOG(INFO,
"Del Vlan filter for %d\n",
vlan_id);
}
@ -1334,7 +1334,7 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
}
new_filter = bnxt_alloc_filter(bp);
if (!new_filter) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"MAC/VLAN filter alloc failed\n");
rc = -ENOMEM;
goto exit;
@ -1354,7 +1354,7 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
new_filter);
if (rc)
goto exit;
RTE_LOG(INFO, PMD,
PMD_DRV_LOG(INFO,
"Added Vlan filter for %d\n", vlan_id);
cont:
filter = temp_filter;
@ -1389,7 +1389,7 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
for (i = 0; i < 4095; i++)
bnxt_del_vlan_filter(bp, i);
}
RTE_LOG(INFO, PMD, "VLAN Filtering: %d\n",
PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
dev->data->dev_conf.rxmode.hw_vlan_filter);
}
@ -1403,12 +1403,12 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
vnic->vlan_strip = false;
bnxt_hwrm_vnic_cfg(bp, vnic);
}
RTE_LOG(INFO, PMD, "VLAN Strip Offload: %d\n",
PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
dev->data->dev_conf.rxmode.hw_vlan_strip);
}
if (mask & ETH_VLAN_EXTEND_MASK)
RTE_LOG(ERR, PMD, "Extend VLAN Not supported\n");
PMD_DRV_LOG(ERR, "Extend VLAN Not supported\n");
return 0;
}
@ -1444,7 +1444,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
if (rc)
break;
filter->mac_index = 0;
RTE_LOG(DEBUG, PMD, "Set MAC addr\n");
PMD_DRV_LOG(DEBUG, "Set MAC addr\n");
}
}
@ -1547,7 +1547,7 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE * 2;
if (new_mtu < ETHER_MIN_MTU || new_mtu > max_dev_mtu) {
RTE_LOG(ERR, PMD, "MTU requested must be within (%d, %d)\n",
PMD_DRV_LOG(ERR, "MTU requested must be within (%d, %d)\n",
ETHER_MIN_MTU, max_dev_mtu);
return -EINVAL;
}
@ -1565,7 +1565,7 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
eth_dev->data->mtu = new_mtu;
RTE_LOG(INFO, PMD, "New MTU is %d\n", eth_dev->data->mtu);
PMD_DRV_LOG(INFO, "New MTU is %d\n", eth_dev->data->mtu);
for (i = 0; i < bp->nr_vnics; i++) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
@ -1592,7 +1592,7 @@ bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
int rc;
if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"PVID cannot be modified for this function\n");
return -ENOTSUP;
}
@ -1753,13 +1753,13 @@ bnxt_match_and_validate_ether_filter(struct bnxt *bp,
if (efilter->ether_type == ETHER_TYPE_IPv4 ||
efilter->ether_type == ETHER_TYPE_IPv6) {
RTE_LOG(ERR, PMD, "invalid ether_type(0x%04x) in"
PMD_DRV_LOG(ERR, "invalid ether_type(0x%04x) in"
" ethertype filter.", efilter->ether_type);
*ret = -EINVAL;
goto exit;
}
if (efilter->queue >= bp->rx_nr_rings) {
RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue);
PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
*ret = -EINVAL;
goto exit;
}
@ -1767,7 +1767,7 @@ bnxt_match_and_validate_ether_filter(struct bnxt *bp,
vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]);
if (vnic == NULL) {
RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue);
PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
*ret = -EINVAL;
goto exit;
}
@ -1818,7 +1818,7 @@ bnxt_ethertype_filter(struct rte_eth_dev *dev,
return 0;
if (arg == NULL) {
RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.",
PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
filter_op);
return -EINVAL;
}
@ -1835,7 +1835,7 @@ bnxt_ethertype_filter(struct rte_eth_dev *dev,
bfilter = bnxt_get_unused_filter(bp);
if (bfilter == NULL) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Not enough resources for a new filter.\n");
return -ENOMEM;
}
@ -1879,11 +1879,11 @@ bnxt_ethertype_filter(struct rte_eth_dev *dev,
next);
bnxt_free_filter(bp, filter1);
} else if (ret == 0) {
RTE_LOG(ERR, PMD, "No matching filter found\n");
PMD_DRV_LOG(ERR, "No matching filter found\n");
}
break;
default:
RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op);
PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
ret = -EINVAL;
goto error;
}
@ -1902,7 +1902,7 @@ parse_ntuple_filter(struct bnxt *bp,
uint32_t en = 0;
if (nfilter->queue >= bp->rx_nr_rings) {
RTE_LOG(ERR, PMD, "Invalid queue %d\n", nfilter->queue);
PMD_DRV_LOG(ERR, "Invalid queue %d\n", nfilter->queue);
return -EINVAL;
}
@ -1914,7 +1914,7 @@ parse_ntuple_filter(struct bnxt *bp,
NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
break;
default:
RTE_LOG(ERR, PMD, "invalid dst_port mask.");
PMD_DRV_LOG(ERR, "invalid dst_port mask.");
return -EINVAL;
}
@ -1932,7 +1932,7 @@ parse_ntuple_filter(struct bnxt *bp,
en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
break;
default:
RTE_LOG(ERR, PMD, "invalid protocol mask.");
PMD_DRV_LOG(ERR, "invalid protocol mask.");
return -EINVAL;
}
@ -1944,7 +1944,7 @@ parse_ntuple_filter(struct bnxt *bp,
NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
break;
default:
RTE_LOG(ERR, PMD, "invalid dst_ip mask.");
PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
return -EINVAL;
}
@ -1956,7 +1956,7 @@ parse_ntuple_filter(struct bnxt *bp,
NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
break;
default:
RTE_LOG(ERR, PMD, "invalid src_ip mask.");
PMD_DRV_LOG(ERR, "invalid src_ip mask.");
return -EINVAL;
}
@ -1968,7 +1968,7 @@ parse_ntuple_filter(struct bnxt *bp,
NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
break;
default:
RTE_LOG(ERR, PMD, "invalid src_port mask.");
PMD_DRV_LOG(ERR, "invalid src_port mask.");
return -EINVAL;
}
@ -2021,18 +2021,18 @@ bnxt_cfg_ntuple_filter(struct bnxt *bp,
int ret;
if (nfilter->flags != RTE_5TUPLE_FLAGS) {
RTE_LOG(ERR, PMD, "only 5tuple is supported.");
PMD_DRV_LOG(ERR, "only 5tuple is supported.");
return -EINVAL;
}
if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
RTE_LOG(ERR, PMD, "Ntuple filter: TCP flags not supported\n");
PMD_DRV_LOG(ERR, "Ntuple filter: TCP flags not supported\n");
return -EINVAL;
}
bfilter = bnxt_get_unused_filter(bp);
if (bfilter == NULL) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Not enough resources for a new filter.\n");
return -ENOMEM;
}
@ -2059,7 +2059,7 @@ bnxt_cfg_ntuple_filter(struct bnxt *bp,
if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
bfilter->dst_id == mfilter->dst_id) {
RTE_LOG(ERR, PMD, "filter exists.\n");
PMD_DRV_LOG(ERR, "filter exists.\n");
ret = -EEXIST;
goto free_filter;
} else if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
@ -2068,12 +2068,12 @@ bnxt_cfg_ntuple_filter(struct bnxt *bp,
ret = bnxt_hwrm_set_ntuple_filter(bp, mfilter->dst_id, mfilter);
STAILQ_REMOVE(&mvnic->filter, mfilter, bnxt_filter_info, next);
STAILQ_INSERT_TAIL(&vnic->filter, mfilter, next);
RTE_LOG(ERR, PMD, "filter with matching pattern exists.\n");
RTE_LOG(ERR, PMD, " Updated it to the new destination queue\n");
PMD_DRV_LOG(ERR, "filter with matching pattern exists.\n");
PMD_DRV_LOG(ERR, " Updated it to the new destination queue\n");
goto free_filter;
}
if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
RTE_LOG(ERR, PMD, "filter doesn't exist.");
PMD_DRV_LOG(ERR, "filter doesn't exist.");
ret = -ENOENT;
goto free_filter;
}
@ -2118,7 +2118,7 @@ bnxt_ntuple_filter(struct rte_eth_dev *dev,
return 0;
if (arg == NULL) {
RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.",
PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
filter_op);
return -EINVAL;
}
@ -2135,7 +2135,7 @@ bnxt_ntuple_filter(struct rte_eth_dev *dev,
filter_op);
break;
default:
RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op);
PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
ret = -EINVAL;
break;
}
@ -2337,7 +2337,7 @@ bnxt_parse_fdir_filter(struct bnxt *bp,
vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
vnic = STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);
if (vnic == NULL) {
RTE_LOG(ERR, PMD, "Invalid queue %d\n", fdir->action.rx_queue);
PMD_DRV_LOG(ERR, "Invalid queue %d\n", fdir->action.rx_queue);
return -EINVAL;
}
@ -2441,7 +2441,7 @@ bnxt_fdir_filter(struct rte_eth_dev *dev,
/* FALLTHROUGH */
filter = bnxt_get_unused_filter(bp);
if (filter == NULL) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Not enough resources for a new flow.\n");
return -ENOMEM;
}
@ -2453,12 +2453,12 @@ bnxt_fdir_filter(struct rte_eth_dev *dev,
match = bnxt_match_fdir(bp, filter);
if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) {
RTE_LOG(ERR, PMD, "Flow already exists.\n");
PMD_DRV_LOG(ERR, "Flow already exists.\n");
ret = -EEXIST;
goto free_filter;
}
if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
RTE_LOG(ERR, PMD, "Flow does not exist.\n");
PMD_DRV_LOG(ERR, "Flow does not exist.\n");
ret = -ENOENT;
goto free_filter;
}
@ -2505,10 +2505,10 @@ bnxt_fdir_filter(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_STATS:
case RTE_ETH_FILTER_INFO:
/* FALLTHROUGH */
RTE_LOG(ERR, PMD, "operation %u not implemented", filter_op);
PMD_DRV_LOG(ERR, "operation %u not implemented", filter_op);
break;
default:
RTE_LOG(ERR, PMD, "unknown operation %u", filter_op);
PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
ret = -EINVAL;
break;
}
@ -2529,7 +2529,7 @@ bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused,
switch (filter_type) {
case RTE_ETH_FILTER_TUNNEL:
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"filter type: %d: To be implemented\n", filter_type);
break;
case RTE_ETH_FILTER_FDIR:
@ -2547,7 +2547,7 @@ bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused,
*(const void **)arg = &bnxt_flow_ops;
break;
default:
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Filter type (%d) not supported", filter_type);
ret = -EINVAL;
break;
@ -2841,8 +2841,8 @@ bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
uint32_t dir_entries;
uint32_t entry_length;
RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x\n",
__func__, bp->pdev->addr.domain, bp->pdev->addr.bus,
PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x\n",
bp->pdev->addr.domain, bp->pdev->addr.bus,
bp->pdev->addr.devid, bp->pdev->addr.function);
rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
@ -2860,8 +2860,8 @@ bnxt_get_eeprom_op(struct rte_eth_dev *dev,
uint32_t index;
uint32_t offset;
RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d "
"len = %d\n", __func__, bp->pdev->addr.domain,
PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d "
"len = %d\n", bp->pdev->addr.domain,
bp->pdev->addr.bus, bp->pdev->addr.devid,
bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);
@ -2929,13 +2929,13 @@ bnxt_set_eeprom_op(struct rte_eth_dev *dev,
uint8_t index, dir_op;
uint16_t type, ext, ordinal, attr;
RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d "
"len = %d\n", __func__, bp->pdev->addr.domain,
PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d "
"len = %d\n", bp->pdev->addr.domain,
bp->pdev->addr.bus, bp->pdev->addr.devid,
bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);
if (!BNXT_PF(bp)) {
RTE_LOG(ERR, PMD, "NVM write not supported from a VF\n");
PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n");
return -EINVAL;
}
@ -3056,7 +3056,7 @@ static int bnxt_init_board(struct rte_eth_dev *eth_dev)
/* enable device (incl. PCI PM wakeup), and bus-mastering */
if (!pci_dev->mem_resource[0].addr) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Cannot find PCI device base address, aborting\n");
rc = -ENODEV;
goto init_err_disable;
@ -3067,7 +3067,7 @@ static int bnxt_init_board(struct rte_eth_dev *eth_dev)
bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
if (!bp->bar0) {
RTE_LOG(ERR, PMD, "Cannot map device registers, aborting\n");
PMD_DRV_LOG(ERR, "Cannot map device registers, aborting\n");
rc = -ENOMEM;
goto init_err_release;
}
@ -3103,7 +3103,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
int rc;
if (version_printed++ == 0)
RTE_LOG(INFO, PMD, "%s\n", bnxt_version);
PMD_DRV_LOG(INFO, "%s\n", bnxt_version);
rte_eth_copy_pci_info(eth_dev, pci_dev);
@ -3120,7 +3120,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
rc = bnxt_init_board(eth_dev);
if (rc) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Board initialization failed rc: %x\n", rc);
goto error;
}
@ -3151,13 +3151,13 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
memset(mz->addr, 0, mz->len);
mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
RTE_LOG(WARNING, PMD,
PMD_DRV_LOG(WARNING,
"Memzone physical address same as virtual.\n");
RTE_LOG(WARNING, PMD,
PMD_DRV_LOG(WARNING,
"Using rte_mem_virt2iova()\n");
mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"unable to map address to physical memory\n");
return -ENOMEM;
}
@ -3186,13 +3186,13 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
memset(mz->addr, 0, mz->len);
mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
RTE_LOG(WARNING, PMD,
PMD_DRV_LOG(WARNING,
"Memzone physical address same as virtual.\n");
RTE_LOG(WARNING, PMD,
PMD_DRV_LOG(WARNING,
"Using rte_mem_virt2iova()\n");
mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"unable to map address to physical memory\n");
return -ENOMEM;
}
@ -3207,7 +3207,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
rc = bnxt_alloc_hwrm_resources(bp);
if (rc) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"hwrm resource allocation failure rc: %x\n", rc);
goto error_free;
}
@ -3216,31 +3216,31 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
goto error_free;
rc = bnxt_hwrm_queue_qportcfg(bp);
if (rc) {
RTE_LOG(ERR, PMD, "hwrm queue qportcfg failed\n");
PMD_DRV_LOG(ERR, "hwrm queue qportcfg failed\n");
goto error_free;
}
rc = bnxt_hwrm_func_qcfg(bp);
if (rc) {
RTE_LOG(ERR, PMD, "hwrm func qcfg failed\n");
PMD_DRV_LOG(ERR, "hwrm func qcfg failed\n");
goto error_free;
}
/* Get the MAX capabilities for this function */
rc = bnxt_hwrm_func_qcaps(bp);
if (rc) {
RTE_LOG(ERR, PMD, "hwrm query capability failure rc: %x\n", rc);
PMD_DRV_LOG(ERR, "hwrm query capability failure rc: %x\n", rc);
goto error_free;
}
if (bp->max_tx_rings == 0) {
RTE_LOG(ERR, PMD, "No TX rings available!\n");
PMD_DRV_LOG(ERR, "No TX rings available!\n");
rc = -EBUSY;
goto error_free;
}
eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
ETHER_ADDR_LEN * bp->max_l2_ctx, 0);
if (eth_dev->data->mac_addrs == NULL) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Failed to alloc %u bytes needed to store MAC addr tbl",
ETHER_ADDR_LEN * bp->max_l2_ctx);
rc = -ENOMEM;
@ -3252,7 +3252,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
/* 1 ring is for default completion ring */
RTE_LOG(ERR, PMD, "Insufficient resource: Ring Group\n");
PMD_DRV_LOG(ERR, "Insufficient resource: Ring Group\n");
rc = -ENOSPC;
goto error_free;
}
@ -3260,7 +3260,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
bp->grp_info = rte_zmalloc("bnxt_grp_info",
sizeof(*bp->grp_info) * bp->max_ring_grps, 0);
if (!bp->grp_info) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Failed to alloc %zu bytes to store group info table\n",
sizeof(*bp->grp_info) * bp->max_ring_grps);
rc = -ENOMEM;
@ -3273,7 +3273,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd));
} else {
RTE_LOG(WARNING, PMD,
PMD_DRV_LOG(WARNING,
"Firmware too old for VF mailbox functionality\n");
memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd));
}
@ -3293,20 +3293,20 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
rc = bnxt_hwrm_func_driver_register(bp);
if (rc) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Failed to register driver");
rc = -EBUSY;
goto error_free;
}
RTE_LOG(INFO, PMD,
PMD_DRV_LOG(INFO,
DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %pM\n",
pci_dev->mem_resource[0].phys_addr,
pci_dev->mem_resource[0].addr);
rc = bnxt_hwrm_func_reset(bp);
if (rc) {
RTE_LOG(ERR, PMD, "hwrm chip reset failure rc: %x\n", rc);
PMD_DRV_LOG(ERR, "hwrm chip reset failure rc: %x\n", rc);
rc = -EIO;
goto error_free;
}
@ -3318,13 +3318,13 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
if (bp->pdev->max_vfs) {
rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
if (rc) {
RTE_LOG(ERR, PMD, "Failed to allocate VFs\n");
PMD_DRV_LOG(ERR, "Failed to allocate VFs\n");
goto error_free;
}
} else {
rc = bnxt_hwrm_allocate_pf_only(bp);
if (rc) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Failed to allocate PF resources\n");
goto error_free;
}
@ -3433,6 +3433,15 @@ bool is_bnxt_supported(struct rte_eth_dev *dev)
return is_device_supported(dev, &bnxt_rte_pmd);
}
/*
 * Constructor (runs at shared-object load via RTE_INIT) that registers
 * the bnxt driver's dynamic log type used by PMD_DRV_LOG().
 */
RTE_INIT(bnxt_init_log);
static void
bnxt_init_log(void)
{
/* Register under the conventional "pmd.<driver>.driver" namespace. */
bnxt_logtype_driver = rte_log_register("pmd.bnxt.driver");
/* On successful registration, default the level to NOTICE; users can
 * raise it at runtime (e.g. --log-level=pmd.bnxt.driver:debug).
 */
if (bnxt_logtype_driver >= 0)
rte_log_set_level(bnxt_logtype_driver, RTE_LOG_NOTICE);
}
RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");

View File

@ -56,7 +56,7 @@ struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp)
/* Find the 1st unused filter from the free_filter_list pool*/
filter = STAILQ_FIRST(&bp->free_filter_list);
if (!filter) {
RTE_LOG(ERR, PMD, "No more free filter resources\n");
PMD_DRV_LOG(ERR, "No more free filter resources\n");
return NULL;
}
STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);
@ -77,7 +77,7 @@ struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf)
filter = rte_zmalloc("bnxt_vf_filter_info", sizeof(*filter), 0);
if (!filter) {
RTE_LOG(ERR, PMD, "Failed to alloc memory for VF %hu filters\n",
PMD_DRV_LOG(ERR, "Failed to alloc memory for VF %hu filters\n",
vf);
return NULL;
}
@ -145,11 +145,11 @@ void bnxt_free_filter_mem(struct bnxt *bp)
for (i = 0; i < max_filters; i++) {
filter = &bp->filter_info[i];
if (filter->fw_l2_filter_id != ((uint64_t)-1)) {
RTE_LOG(ERR, PMD, "HWRM filter is not freed??\n");
PMD_DRV_LOG(ERR, "HWRM filter is not freed??\n");
/* Call HWRM to try to free filter again */
rc = bnxt_hwrm_clear_l2_filter(bp, filter);
if (rc)
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"HWRM filter cannot be freed rc = %d\n",
rc);
}
@ -172,7 +172,7 @@ int bnxt_alloc_filter_mem(struct bnxt *bp)
max_filters * sizeof(struct bnxt_filter_info),
0);
if (filter_mem == NULL) {
RTE_LOG(ERR, PMD, "Failed to alloc memory for %d filters",
PMD_DRV_LOG(ERR, "Failed to alloc memory for %d filters",
max_filters);
return -ENOMEM;
}
@ -187,7 +187,7 @@ struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp)
/* Find the 1st unused filter from the free_filter_list pool*/
filter = STAILQ_FIRST(&bp->free_filter_list);
if (!filter) {
RTE_LOG(ERR, PMD, "No more free filter resources\n");
PMD_DRV_LOG(ERR, "No more free filter resources\n");
return NULL;
}
STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);
@ -281,7 +281,7 @@ bnxt_filter_type_check(const struct rte_flow_item pattern[],
/* FALLTHROUGH */
/* need ntuple match, reset exact match */
if (!use_ntuple) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"VLAN flow cannot use NTUPLE filter\n");
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@ -292,7 +292,7 @@ bnxt_filter_type_check(const struct rte_flow_item pattern[],
use_ntuple |= 1;
break;
default:
RTE_LOG(ERR, PMD, "Unknown Flow type");
PMD_DRV_LOG(ERR, "Unknown Flow type");
use_ntuple |= 1;
}
item++;
@ -329,7 +329,7 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
int dflt_vnic;
use_ntuple = bnxt_filter_type_check(pattern, error);
RTE_LOG(DEBUG, PMD, "Use NTUPLE %d\n", use_ntuple);
PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
if (use_ntuple < 0)
return use_ntuple;
@ -791,7 +791,7 @@ bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
return f0;
//This flow needs DST MAC which is not same as port/l2
RTE_LOG(DEBUG, PMD, "Create L2 filter for DST MAC\n");
PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
filter1 = bnxt_get_unused_filter(bp);
if (filter1 == NULL)
return NULL;
@ -828,7 +828,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
int rc;
if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
RTE_LOG(ERR, PMD, "Cannot create flow on RSS queues\n");
PMD_DRV_LOG(ERR, "Cannot create flow on RSS queues\n");
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"Cannot create flow on RSS queues");
@ -857,7 +857,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
rc = -rte_errno;
goto ret;
}
RTE_LOG(DEBUG, PMD, "Queue index %d\n", act_q->index);
PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);
vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);
@ -875,7 +875,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
goto ret;
}
filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
RTE_LOG(DEBUG, PMD, "VNIC found\n");
PMD_DRV_LOG(DEBUG, "VNIC found\n");
break;
case RTE_FLOW_ACTION_TYPE_DROP:
vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
@ -990,7 +990,7 @@ bnxt_flow_validate(struct rte_eth_dev *dev,
filter = bnxt_get_unused_filter(bp);
if (filter == NULL) {
RTE_LOG(ERR, PMD, "Not enough resources for a new flow.\n");
PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
return -ENOMEM;
}
@ -1092,13 +1092,13 @@ bnxt_flow_create(struct rte_eth_dev *dev,
ret = bnxt_flow_agrs_validate(attr, pattern, actions, error);
if (ret != 0) {
RTE_LOG(ERR, PMD, "Not a validate flow.\n");
PMD_DRV_LOG(ERR, "Not a validate flow.\n");
goto free_flow;
}
filter = bnxt_get_unused_filter(bp);
if (filter == NULL) {
RTE_LOG(ERR, PMD, "Not enough resources for a new flow.\n");
PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
goto free_flow;
}
@ -1109,15 +1109,15 @@ bnxt_flow_create(struct rte_eth_dev *dev,
ret = bnxt_match_filter(bp, filter);
if (ret == -EEXIST) {
RTE_LOG(DEBUG, PMD, "Flow already exists.\n");
PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
/* Clear the filter that was created as part of
* validate_and_parse_flow() above
*/
bnxt_hwrm_clear_l2_filter(bp, filter);
goto free_filter;
} else if (ret == -EXDEV) {
RTE_LOG(DEBUG, PMD, "Flow with same pattern exists");
RTE_LOG(DEBUG, PMD, "Updating with different destination\n");
PMD_DRV_LOG(DEBUG, "Flow with same pattern exists");
PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
update_flow = true;
}
@ -1145,7 +1145,7 @@ bnxt_flow_create(struct rte_eth_dev *dev,
ret = -EXDEV;
goto free_flow;
}
RTE_LOG(ERR, PMD, "Successfully created flow.\n");
PMD_DRV_LOG(ERR, "Successfully created flow.\n");
STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
return flow;
}
@ -1181,7 +1181,7 @@ bnxt_flow_destroy(struct rte_eth_dev *dev,
ret = bnxt_match_filter(bp, filter);
if (ret == 0)
RTE_LOG(ERR, PMD, "Could not find matching flow\n");
PMD_DRV_LOG(ERR, "Could not find matching flow\n");
if (filter->filter_type == HWRM_CFA_EM_FILTER)
ret = bnxt_hwrm_clear_em_filter(bp, filter);
if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)

View File

@ -79,7 +79,7 @@ static int page_getenum(size_t size)
return 22;
if (size <= 1 << 30)
return 30;
RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
return sizeof(void *) * 8 - 1;
}
@ -161,7 +161,7 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
}
if (i >= HWRM_CMD_TIMEOUT) {
RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
req->req_type);
goto err_ret;
}
@ -194,8 +194,7 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
#define HWRM_CHECK_RESULT() do {\
if (rc) { \
RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
__func__, rc); \
PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
rte_spinlock_unlock(&bp->hwrm_lock); \
return rc; \
} \
@ -204,18 +203,15 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
if (resp->resp_len >= 16) { \
struct hwrm_err_output *tmp_hwrm_err_op = \
(void *)resp; \
RTE_LOG(ERR, PMD, \
"%s error %d:%d:%08x:%04x\n", \
__func__, \
PMD_DRV_LOG(ERR, \
"error %d:%d:%08x:%04x\n", \
rc, tmp_hwrm_err_op->cmd_err, \
rte_le_to_cpu_32(\
tmp_hwrm_err_op->opaque_0), \
rte_le_to_cpu_16(\
tmp_hwrm_err_op->opaque_1)); \
} \
else { \
RTE_LOG(ERR, PMD, \
"%s error %d\n", __func__, rc); \
} else { \
PMD_DRV_LOG(ERR, "error %d\n", rc); \
} \
rte_spinlock_unlock(&bp->hwrm_lock); \
return rc; \
@ -369,7 +365,7 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
//TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
conf->pool_map[j].pools & (1UL << j)) {
RTE_LOG(DEBUG, PMD,
PMD_DRV_LOG(DEBUG,
"Add vlan %u to vmdq pool %u\n",
conf->pool_map[j].vlan_id, j);
@ -545,7 +541,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
getpagesize(),
getpagesize());
if (bp->pf.vf_info[i].vlan_table == NULL)
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Fail to alloc VLAN table for VF %d\n",
i);
else
@ -556,7 +552,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
getpagesize(),
getpagesize());
if (bp->pf.vf_info[i].vlan_as_table == NULL)
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Alloc VLAN AS table for VF %d fail\n",
i);
else
@ -588,7 +584,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
RTE_LOG(INFO, PMD, "PTP SUPPORTED");
PMD_DRV_LOG(INFO, "PTP SUPPORTED\n");
HWRM_UNLOCK();
bnxt_hwrm_ptp_qcfg(bp);
}
@ -676,13 +672,13 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
HWRM_CHECK_RESULT();
RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
resp->hwrm_intf_maj, resp->hwrm_intf_min,
resp->hwrm_intf_upd,
resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
(resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
my_version = HWRM_VERSION_MAJOR << 16;
@ -694,28 +690,28 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
fw_version |= resp->hwrm_intf_upd;
if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
rc = -EINVAL;
goto error;
}
if (my_version != fw_version) {
RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
if (my_version < fw_version) {
RTE_LOG(INFO, PMD,
PMD_DRV_LOG(INFO,
"Firmware API version is newer than driver.\n");
RTE_LOG(INFO, PMD,
PMD_DRV_LOG(INFO,
"The driver may be missing features.\n");
} else {
RTE_LOG(INFO, PMD,
PMD_DRV_LOG(INFO,
"Firmware API version is older than driver.\n");
RTE_LOG(INFO, PMD,
PMD_DRV_LOG(INFO,
"Not all driver features may be functional.\n");
}
}
if (bp->max_req_len > resp->max_req_win_len) {
RTE_LOG(ERR, PMD, "Unsupported request length\n");
PMD_DRV_LOG(ERR, "Unsupported request length\n");
rc = -EINVAL;
}
bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
@ -738,7 +734,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
bp->hwrm_cmd_resp_dma_addr =
rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
if (bp->hwrm_cmd_resp_dma_addr == 0) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Unable to map response buffer to physical memory.\n");
rc = -ENOMEM;
goto error;
@ -750,7 +746,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
(dev_caps_cfg &
HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_INPUTUIRED)) {
RTE_LOG(DEBUG, PMD, "Short command supported\n");
PMD_DRV_LOG(DEBUG, "Short command supported\n");
rte_free(bp->hwrm_short_cmd_req_addr);
@ -765,7 +761,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
if (bp->hwrm_short_cmd_req_dma_addr == 0) {
rte_free(bp->hwrm_short_cmd_req_addr);
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Unable to map buffer to physical memory.\n");
rc = -ENOMEM;
goto error;
@ -814,7 +810,7 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
/* Setting Fixed Speed. But AutoNeg is ON, So disable it */
if (bp->link_info.auto_mode && conf->link_speed) {
req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
RTE_LOG(DEBUG, PMD, "Disabling AutoNeg\n");
PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
}
req.flags = rte_cpu_to_le_32(conf->phy_flags);
@ -853,7 +849,7 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
} else {
req.flags =
rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
RTE_LOG(INFO, PMD, "Force Link Down\n");
PMD_DRV_LOG(INFO, "Force Link Down\n");
}
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@ -971,7 +967,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
break;
default:
RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
ring_type);
HWRM_UNLOCK();
return -1;
@ -985,22 +981,22 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
rc = rte_le_to_cpu_16(resp->error_code);
switch (ring_type) {
case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"hwrm_ring_alloc cp failed. rc:%d\n", rc);
HWRM_UNLOCK();
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"hwrm_ring_alloc rx failed. rc:%d\n", rc);
HWRM_UNLOCK();
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"hwrm_ring_alloc tx failed. rc:%d\n", rc);
HWRM_UNLOCK();
return rc;
default:
RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
HWRM_UNLOCK();
return rc;
}
@ -1032,19 +1028,19 @@ int bnxt_hwrm_ring_free(struct bnxt *bp,
switch (ring_type) {
case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
rc);
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
rc);
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
rc);
return rc;
default:
RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
return rc;
}
}
@ -1168,7 +1164,7 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
/* map ring groups to this vnic */
RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
vnic->start_grp_id, vnic->end_grp_id);
for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
@ -1188,7 +1184,7 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
HWRM_UNLOCK();
RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
return rc;
}
@ -1258,7 +1254,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
struct bnxt_plcmodes_cfg pmodes;
if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
return rc;
}
@ -1323,7 +1319,7 @@ int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
RTE_LOG(DEBUG, PMD, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
return rc;
}
HWRM_PREP(req, VNIC_QCFG);
@ -1375,7 +1371,7 @@ int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
HWRM_UNLOCK();
RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
return rc;
}
@ -1388,7 +1384,7 @@ int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
bp->hwrm_cmd_resp_addr;
if (vnic->rss_rule == 0xffff) {
RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
return rc;
}
HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);
@ -1412,7 +1408,7 @@ int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
return rc;
}
@ -1854,7 +1850,7 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
bp->hwrm_cmd_resp_dma_addr =
rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
if (bp->hwrm_cmd_resp_dma_addr == 0) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
}
@ -1890,7 +1886,7 @@ bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
STAILQ_FOREACH(flow, &vnic->flow_list, next) {
filter = flow->filter;
RTE_LOG(ERR, PMD, "filter type %d\n", filter->filter_type);
PMD_DRV_LOG(ERR, "filter type %d\n", filter->filter_type);
if (filter->filter_type == HWRM_CFA_EM_FILTER)
rc = bnxt_hwrm_clear_em_filter(bp, filter);
else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
@ -2032,7 +2028,7 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
break;
default:
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Unsupported link speed %d; default to AUTO\n",
conf_link_speed);
break;
@ -2056,20 +2052,20 @@ static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
if (one_speed & (one_speed - 1)) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Invalid advertised speeds (%u) for port %u\n",
link_speed, port_id);
return -EINVAL;
}
if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Unsupported advertised speed (%u) for port %u\n",
link_speed, port_id);
return -EINVAL;
}
} else {
if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Unsupported advertised speeds (%u) for port %u\n",
link_speed, port_id);
return -EINVAL;
@ -2141,7 +2137,7 @@ static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
default:
RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
hw_link_speed);
break;
}
@ -2161,7 +2157,7 @@ static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
eth_link_duplex = ETH_LINK_HALF_DUPLEX;
break;
default:
RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
hw_link_duplex);
break;
}
@ -2175,7 +2171,7 @@ int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
if (rc) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Get link config failed with rc %d\n", rc);
goto exit;
}
@ -2229,7 +2225,7 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
bp->link_info.media_type ==
HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
RTE_LOG(ERR, PMD, "10GBase-T devices must autoneg\n");
PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
return -EINVAL;
}
@ -2243,7 +2239,7 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
port_phy_cfg:
rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
if (rc) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Set link config failed with rc %d\n", rc);
}
@ -2420,11 +2416,11 @@ static void reserve_resources_from_vf(struct bnxt *bp,
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
if (rc) {
RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
copy_func_cfg_to_qcaps(cfg_req, resp);
} else if (resp->error_code) {
rc = rte_le_to_cpu_16(resp->error_code);
RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
copy_func_cfg_to_qcaps(cfg_req, resp);
}
@ -2455,11 +2451,11 @@ int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
if (rc) {
RTE_LOG(ERR, PMD, "hwrm_func_qcfg failed rc:%d\n", rc);
PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
return -1;
} else if (resp->error_code) {
rc = rte_le_to_cpu_16(resp->error_code);
RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc);
PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
return -1;
}
rc = rte_le_to_cpu_16(resp->vlan);
@ -2495,7 +2491,7 @@ int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
int rc;
if (!BNXT_PF(bp)) {
RTE_LOG(ERR, PMD, "Attempt to allcoate VFs on a VF!\n");
PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
return -1;
}
@ -2522,7 +2518,7 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
size_t req_buf_sz;
if (!BNXT_PF(bp)) {
RTE_LOG(ERR, PMD, "Attempt to allcoate VFs on a VF!\n");
PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
return -1;
}
@ -2588,9 +2584,9 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
if (rc || resp->error_code) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Failed to initizlie VF %d\n", i);
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Not all VFs available. (%d, %d)\n",
rc, resp->error_code);
HWRM_UNLOCK();
@ -2740,7 +2736,7 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
req.req_buf_page_addr[0] =
rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
if (req.req_buf_page_addr[0] == 0) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"unable to map buffer address to physical memory\n");
return -ENOMEM;
}
@ -3162,7 +3158,7 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
return -ENOMEM;
dma_handle = rte_mem_virt2iova(buf);
if (dma_handle == 0) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
}
@ -3198,7 +3194,7 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
dma_handle = rte_mem_virt2iova(buf);
if (dma_handle == 0) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
}
@ -3259,7 +3255,7 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
dma_handle = rte_mem_virt2iova(buf);
if (dma_handle == 0) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
}
@ -3316,19 +3312,19 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
if (req.vnic_id_tbl_addr == 0) {
HWRM_UNLOCK();
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"unable to map VNIC ID table address to physical memory\n");
return -ENOMEM;
}
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
if (rc) {
HWRM_UNLOCK();
RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
return -1;
} else if (resp->error_code) {
rc = rte_le_to_cpu_16(resp->error_code);
HWRM_UNLOCK();
RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);
PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
return -1;
}
rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
@ -3459,7 +3455,7 @@ int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
}
}
/* Could not find a default VNIC. */
RTE_LOG(ERR, PMD, "No default VNIC\n");
PMD_DRV_LOG(ERR, "No default VNIC\n");
exit:
rte_free(vnic_ids);
return -1;
@ -3549,7 +3545,7 @@ int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
if (filter->fw_em_filter_id == UINT64_MAX)
return 0;
RTE_LOG(ERR, PMD, "Clear EM filter\n");
PMD_DRV_LOG(ERR, "Clear EM filter\n");
HWRM_PREP(req, CFA_EM_FLOW_FREE);
req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);

View File

@ -84,7 +84,7 @@ static void bnxt_int_handler(void *param)
cpr->cp_ring_struct))
goto no_more;
}
RTE_LOG(INFO, PMD,
PMD_DRV_LOG(INFO,
"Ignoring %02x completion\n", CMP_TYPE(cmp));
break;
}
@ -154,7 +154,7 @@ int bnxt_setup_int(struct bnxt *bp)
return 0;
setup_exit:
RTE_LOG(ERR, PMD, "bnxt_irq_tbl setup failed\n");
PMD_DRV_LOG(ERR, "bnxt_irq_tbl setup failed\n");
return rc;
}

View File

@ -176,15 +176,15 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
memset(mz->addr, 0, mz->len);
mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
RTE_LOG(WARNING, PMD,
PMD_DRV_LOG(WARNING,
"Memzone physical address same as virtual.\n");
RTE_LOG(WARNING, PMD,
PMD_DRV_LOG(WARNING,
"Using rte_mem_virt2iova()\n");
for (sz = 0; sz < total_alloc_len; sz += getpagesize())
rte_mem_lock_page(((char *)mz->addr) + sz);
mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"unable to map ring address to physical memory\n");
return -ENOMEM;
}
@ -326,7 +326,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
ring = rxr->ag_ring_struct;
/* Agg ring */
if (ring == NULL) {
RTE_LOG(ERR, PMD, "Alloc AGG Ring is NULL!\n");
PMD_DRV_LOG(ERR, "Alloc AGG Ring is NULL!\n");
goto err_out;
}
@ -336,7 +336,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
cp_ring->fw_ring_id);
if (rc)
goto err_out;
RTE_LOG(DEBUG, PMD, "Alloc AGG Done!\n");
PMD_DRV_LOG(DEBUG, "Alloc AGG Done!\n");
rxr->ag_prod = 0;
rxr->ag_doorbell =
(char *)pci_dev->mem_resource[2].addr +
@ -347,7 +347,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
rxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN +
ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);
if (bnxt_init_one_rx_ring(rxq)) {
RTE_LOG(ERR, PMD, "bnxt_init_one_rx_ring failed!\n");
PMD_DRV_LOG(ERR, "bnxt_init_one_rx_ring failed!\n");
bnxt_rx_queue_release_op(rxq);
return -ENOMEM;
}

View File

@ -75,7 +75,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
if (bp->rx_cp_nr_rings < 2) {
vnic = bnxt_alloc_vnic(bp);
if (!vnic) {
RTE_LOG(ERR, PMD, "VNIC alloc failed\n");
PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
rc = -ENOMEM;
goto err_out;
}
@ -92,7 +92,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
vnic->end_grp_id = vnic->start_grp_id;
filter = bnxt_alloc_filter(bp);
if (!filter) {
RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
rc = -ENOMEM;
goto err_out;
}
@ -121,7 +121,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
pools = bp->rx_cp_nr_rings;
break;
default:
RTE_LOG(ERR, PMD, "Unsupported mq_mod %d\n",
PMD_DRV_LOG(ERR, "Unsupported mq_mod %d\n",
dev_conf->rxmode.mq_mode);
rc = -EINVAL;
goto err_out;
@ -135,7 +135,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
for (i = 0; i < pools; i++) {
vnic = bnxt_alloc_vnic(bp);
if (!vnic) {
RTE_LOG(ERR, PMD, "VNIC alloc failed\n");
PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
rc = -ENOMEM;
goto err_out;
}
@ -166,7 +166,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
}
filter = bnxt_alloc_filter(bp);
if (!filter) {
RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
rc = -ENOMEM;
goto err_out;
}
@ -312,14 +312,14 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
int rc = 0;
if (queue_idx >= bp->max_rx_rings) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Cannot create Rx ring %d. Only %d rings available\n",
queue_idx, bp->max_rx_rings);
return -ENOSPC;
}
if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
RTE_LOG(ERR, PMD, "nb_desc %d is invalid\n", nb_desc);
PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
rc = -EINVAL;
goto out;
}
@ -332,7 +332,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (!rxq) {
RTE_LOG(ERR, PMD, "bnxt_rx_queue allocation failed!\n");
PMD_DRV_LOG(ERR, "bnxt_rx_queue allocation failed!\n");
rc = -ENOMEM;
goto out;
}
@ -341,8 +341,8 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
rxq->nb_rx_desc = nb_desc;
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
RTE_LOG(DEBUG, PMD, "RX Buf size is %d\n", rxq->rx_buf_use_size);
RTE_LOG(DEBUG, PMD, "RX Buf MTU %d\n", eth_dev->data->mtu);
PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_use_size);
PMD_DRV_LOG(DEBUG, "RX Buf MTU %d\n", eth_dev->data->mtu);
rc = bnxt_init_rx_ring_struct(rxq, socket_id);
if (rc)
@ -357,7 +357,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
/* Allocate RX ring hardware descriptors */
if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq->rx_ring, rxq->cp_ring,
"rxr")) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"ring_dma_zone_reserve for rx_ring failed!\n");
bnxt_rx_queue_release_op(rxq);
rc = -ENOMEM;

View File

@ -95,9 +95,9 @@ static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
}
if (rxbd == NULL)
RTE_LOG(ERR, PMD, "Jumbo Frame. rxbd is NULL\n");
PMD_DRV_LOG(ERR, "Jumbo Frame. rxbd is NULL\n");
if (rx_buf == NULL)
RTE_LOG(ERR, PMD, "Jumbo Frame. rx_buf is NULL\n");
PMD_DRV_LOG(ERR, "Jumbo Frame. rx_buf is NULL\n");
rx_buf->mbuf = mbuf;
@ -234,7 +234,7 @@ static int bnxt_prod_ag_mbuf(struct bnxt_rx_queue *rxq)
/* TODO batch allocation for better performance */
while (rte_bitmap_get(rxr->ag_bitmap, next)) {
if (unlikely(bnxt_alloc_ag_data(rxq, rxr, next))) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"agg mbuf alloc failed: prod=0x%x\n", next);
break;
}
@ -512,7 +512,7 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
*/
prod = RING_NEXT(rxr->rx_ring_struct, prod);
if (bnxt_alloc_rx_data(rxq, rxr, prod)) {
RTE_LOG(ERR, PMD, "mbuf alloc failed with prod=0x%x\n", prod);
PMD_DRV_LOG(ERR, "mbuf alloc failed with prod=0x%x\n", prod);
rc = -ENOMEM;
goto rx;
}
@ -601,7 +601,7 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxr->rx_prod = i;
B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
} else {
RTE_LOG(ERR, PMD, "Alloc mbuf failed\n");
PMD_DRV_LOG(ERR, "Alloc mbuf failed\n");
break;
}
}
@ -744,7 +744,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
prod = rxr->rx_prod;
for (i = 0; i < ring->ring_size; i++) {
if (bnxt_alloc_rx_data(rxq, rxr, prod) != 0) {
RTE_LOG(WARNING, PMD,
PMD_DRV_LOG(WARNING,
"init'ed rx ring %d with %d/%d mbufs only\n",
rxq->queue_id, i, ring->ring_size);
break;
@ -752,7 +752,6 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
rxr->rx_prod = prod;
prod = RING_NEXT(rxr->rx_ring_struct, prod);
}
RTE_LOG(DEBUG, PMD, "%s\n", __func__);
ring = rxr->ag_ring_struct;
type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
@ -761,7 +760,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
for (i = 0; i < ring->ring_size; i++) {
if (bnxt_alloc_ag_data(rxq, rxr, prod) != 0) {
RTE_LOG(WARNING, PMD,
PMD_DRV_LOG(WARNING,
"init'ed AG ring %d with %d/%d mbufs only\n",
rxq->queue_id, i, ring->ring_size);
break;
@ -769,7 +768,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
rxr->ag_prod = prod;
prod = RING_NEXT(rxr->ag_ring_struct, prod);
}
RTE_LOG(DEBUG, PMD, "%s AGG Done!\n", __func__);
PMD_DRV_LOG(DEBUG, "AGG Done!\n");
if (rxr->tpa_info) {
for (i = 0; i < BNXT_TPA_MAX; i++) {
@ -781,7 +780,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
}
}
}
RTE_LOG(DEBUG, PMD, "%s TPA alloc Done!\n", __func__);
PMD_DRV_LOG(DEBUG, "TPA alloc Done!\n");
return 0;
}

View File

@ -237,7 +237,7 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
memset(bnxt_stats, 0, sizeof(*bnxt_stats));
if (!(bp->flags & BNXT_FLAG_INIT_DONE)) {
RTE_LOG(ERR, PMD, "Device Initialization not complete!\n");
PMD_DRV_LOG(ERR, "Device Initialization not complete!\n");
return 0;
}
@ -272,7 +272,7 @@ void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
if (!(bp->flags & BNXT_FLAG_INIT_DONE)) {
RTE_LOG(ERR, PMD, "Device Initialization not complete!\n");
PMD_DRV_LOG(ERR, "Device Initialization not complete!\n");
return;
}
@ -289,7 +289,7 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
uint64_t tx_drop_pkts;
if (!(bp->flags & BNXT_FLAG_PORT_STATS)) {
RTE_LOG(ERR, PMD, "xstats not supported for VF\n");
PMD_DRV_LOG(ERR, "xstats not supported for VF\n");
return 0;
}
@ -371,11 +371,11 @@ void bnxt_dev_xstats_reset_op(struct rte_eth_dev *eth_dev)
bnxt_hwrm_port_clr_stats(bp);
if (BNXT_VF(bp))
RTE_LOG(ERR, PMD, "Operation not supported on a VF device\n");
PMD_DRV_LOG(ERR, "Operation not supported on a VF device\n");
if (!BNXT_SINGLE_PF(bp))
RTE_LOG(ERR, PMD, "Operation not supported on a MF device\n");
PMD_DRV_LOG(ERR, "Operation not supported on a MF device\n");
if (!(bp->flags & BNXT_FLAG_PORT_STATS))
RTE_LOG(ERR, PMD, "Operation not supported\n");
PMD_DRV_LOG(ERR, "Operation not supported\n");
}
int bnxt_dev_xstats_get_by_id_op(struct rte_eth_dev *dev, const uint64_t *ids,
@ -394,7 +394,7 @@ int bnxt_dev_xstats_get_by_id_op(struct rte_eth_dev *dev, const uint64_t *ids,
bnxt_dev_xstats_get_by_id_op(dev, NULL, values_copy, stat_cnt);
for (i = 0; i < limit; i++) {
if (ids[i] >= stat_cnt) {
RTE_LOG(ERR, PMD, "id value isn't valid");
PMD_DRV_LOG(ERR, "id value isn't valid");
return -1;
}
values[i] = values_copy[ids[i]];
@ -420,7 +420,7 @@ int bnxt_dev_xstats_get_names_by_id_op(struct rte_eth_dev *dev,
for (i = 0; i < limit; i++) {
if (ids[i] >= stat_cnt) {
RTE_LOG(ERR, PMD, "id value isn't valid");
PMD_DRV_LOG(ERR, "id value isn't valid");
return -1;
}
strcpy(xstats_names[i].name,

View File

@ -109,14 +109,14 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
int rc = 0;
if (queue_idx >= bp->max_tx_rings) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Cannot create Tx ring %d. Only %d rings available\n",
queue_idx, bp->max_tx_rings);
return -ENOSPC;
}
if (!nb_desc || nb_desc > MAX_TX_DESC_CNT) {
RTE_LOG(ERR, PMD, "nb_desc %d is invalid", nb_desc);
PMD_DRV_LOG(ERR, "nb_desc %d is invalid", nb_desc);
rc = -EINVAL;
goto out;
}
@ -131,7 +131,7 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
txq = rte_zmalloc_socket("bnxt_tx_queue", sizeof(struct bnxt_tx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (!txq) {
RTE_LOG(ERR, PMD, "bnxt_tx_queue allocation failed!");
PMD_DRV_LOG(ERR, "bnxt_tx_queue allocation failed!");
rc = -ENOMEM;
goto out;
}
@ -149,14 +149,14 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
/* Allocate TX ring hardware descriptors */
if (bnxt_alloc_rings(bp, queue_idx, txq->tx_ring, NULL, txq->cp_ring,
"txr")) {
RTE_LOG(ERR, PMD, "ring_dma_zone_reserve for tx_ring failed!");
PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for tx_ring failed!");
bnxt_tx_queue_release_op(txq);
rc = -ENOMEM;
goto out;
}
if (bnxt_init_one_tx_ring(txq)) {
RTE_LOG(ERR, PMD, "bnxt_init_one_tx_ring failed!");
PMD_DRV_LOG(ERR, "bnxt_init_one_tx_ring failed!");
bnxt_tx_queue_release_op(txq);
rc = -ENOMEM;
goto out;

View File

@ -107,7 +107,7 @@ int bnxt_free_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic,
}
temp = STAILQ_NEXT(temp, next);
}
RTE_LOG(ERR, PMD, "VNIC %p is not found in pool[%d]\n", vnic, pool);
PMD_DRV_LOG(ERR, "VNIC %p is not found in pool[%d]\n", vnic, pool);
return -EINVAL;
}
@ -118,7 +118,7 @@ struct bnxt_vnic_info *bnxt_alloc_vnic(struct bnxt *bp)
/* Find the 1st unused vnic from the free_vnic_list pool*/
vnic = STAILQ_FIRST(&bp->free_vnic_list);
if (!vnic) {
RTE_LOG(ERR, PMD, "No more free VNIC resources\n");
PMD_DRV_LOG(ERR, "No more free VNIC resources\n");
return NULL;
}
STAILQ_REMOVE_HEAD(&bp->free_vnic_list, next);
@@ -194,13 +194,13 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
}
mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
RTE_LOG(WARNING, PMD,
PMD_DRV_LOG(WARNING,
"Memzone physical address same as virtual.\n");
RTE_LOG(WARNING, PMD,
PMD_DRV_LOG(WARNING,
"Using rte_mem_virt2iova()\n");
mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"unable to map vnic address to physical memory\n");
return -ENOMEM;
}
@@ -241,7 +241,7 @@ void bnxt_free_vnic_mem(struct bnxt *bp)
for (i = 0; i < max_vnics; i++) {
vnic = &bp->vnic_info[i];
if (vnic->fw_vnic_id != (uint16_t)HWRM_NA_SIGNATURE) {
RTE_LOG(ERR, PMD, "VNIC is not freed yet!\n");
PMD_DRV_LOG(ERR, "VNIC is not freed yet!\n");
/* TODO Call HWRM to free VNIC */
}
}
@@ -260,7 +260,7 @@ int bnxt_alloc_vnic_mem(struct bnxt *bp)
vnic_mem = rte_zmalloc("bnxt_vnic_info",
max_vnics * sizeof(struct bnxt_vnic_info), 0);
if (vnic_mem == NULL) {
RTE_LOG(ERR, PMD, "Failed to alloc memory for %d VNICs",
PMD_DRV_LOG(ERR, "Failed to alloc memory for %d VNICs",
max_vnics);
return -ENOMEM;
}

View File

@@ -85,7 +85,7 @@ int rte_pmd_bnxt_set_tx_loopback(uint16_t port, uint8_t on)
bp = (struct bnxt *)eth_dev->data->dev_private;
if (!BNXT_PF(bp)) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Attempt to set Tx loopback on non-PF port %d!\n",
port);
return -ENOTSUP;
@@ -127,7 +127,7 @@ int rte_pmd_bnxt_set_all_queues_drop_en(uint16_t port, uint8_t on)
bp = (struct bnxt *)eth_dev->data->dev_private;
if (!BNXT_PF(bp)) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Attempt to set all queues drop on non-PF port!\n");
return -ENOTSUP;
}
@@ -140,7 +140,7 @@ int rte_pmd_bnxt_set_all_queues_drop_en(uint16_t port, uint8_t on)
bp->vnic_info[i].bd_stall = !on;
rc = bnxt_hwrm_vnic_cfg(bp, &bp->vnic_info[i]);
if (rc) {
RTE_LOG(ERR, PMD, "Failed to update PF VNIC %d.\n", i);
PMD_DRV_LOG(ERR, "Failed to update PF VNIC %d.\n", i);
return rc;
}
}
@@ -151,7 +151,7 @@ int rte_pmd_bnxt_set_all_queues_drop_en(uint16_t port, uint8_t on)
rte_pmd_bnxt_set_all_queues_drop_en_cb, &on,
bnxt_hwrm_vnic_cfg);
if (rc) {
RTE_LOG(ERR, PMD, "Failed to update VF VNIC %d.\n", i);
PMD_DRV_LOG(ERR, "Failed to update VF VNIC %d.\n", i);
break;
}
}
@@ -180,7 +180,7 @@ int rte_pmd_bnxt_set_vf_mac_addr(uint16_t port, uint16_t vf,
return -EINVAL;
if (!BNXT_PF(bp)) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Attempt to set VF %d mac address on non-PF port %d!\n",
vf, port);
return -ENOTSUP;
@@ -224,7 +224,7 @@ int rte_pmd_bnxt_set_vf_rate_limit(uint16_t port, uint16_t vf,
/* Requested BW can't be greater than link speed */
if (tot_rate > eth_dev->data->dev_link.link_speed) {
RTE_LOG(ERR, PMD, "Rate > Link speed. Set to %d\n", tot_rate);
PMD_DRV_LOG(ERR, "Rate > Link speed. Set to %d\n", tot_rate);
return -EINVAL;
}
@@ -262,7 +262,7 @@ int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
bp = (struct bnxt *)dev->data->dev_private;
if (!BNXT_PF(bp)) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Attempt to set mac spoof on non-PF port %d!\n", port);
return -EINVAL;
}
@@ -314,7 +314,7 @@ int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
bp = (struct bnxt *)dev->data->dev_private;
if (!BNXT_PF(bp)) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Attempt to set VLAN spoof on non-PF port %d!\n", port);
return -EINVAL;
}
@@ -333,7 +333,7 @@ int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
rc = -1;
}
} else {
RTE_LOG(ERR, PMD, "Failed to update VF VNIC %d.\n", vf);
PMD_DRV_LOG(ERR, "Failed to update VF VNIC %d.\n", vf);
}
return rc;
@@ -367,7 +367,7 @@ rte_pmd_bnxt_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
return -EINVAL;
if (!BNXT_PF(bp)) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Attempt to set VF %d stripq on non-PF port %d!\n",
vf, port);
return -ENOTSUP;
@@ -377,7 +377,7 @@ rte_pmd_bnxt_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
rte_pmd_bnxt_set_vf_vlan_stripq_cb, &on,
bnxt_hwrm_vnic_cfg);
if (rc)
RTE_LOG(ERR, PMD, "Failed to update VF VNIC %d.\n", vf);
PMD_DRV_LOG(ERR, "Failed to update VF VNIC %d.\n", vf);
return rc;
}
@@ -407,7 +407,7 @@ int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf,
return -EINVAL;
if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG) {
RTE_LOG(ERR, PMD, "Currently cannot toggle this setting\n");
PMD_DRV_LOG(ERR, "Currently cannot toggle this setting\n");
return -ENOTSUP;
}
@@ -430,7 +430,7 @@ int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf,
&bp->pf.vf_info[vf].l2_rx_mask,
bnxt_set_rx_mask_no_vlan);
if (rc)
RTE_LOG(ERR, PMD, "bnxt_hwrm_func_vf_vnic_set_rxmask failed\n");
PMD_DRV_LOG(ERR, "bnxt_hwrm_func_vf_vnic_set_rxmask failed\n");
return rc;
}
@@ -442,7 +442,7 @@ static int bnxt_set_vf_table(struct bnxt *bp, uint16_t vf)
struct bnxt_vnic_info vnic;
if (!BNXT_PF(bp)) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Attempt to set VLAN table on non-PF port!\n");
return -EINVAL;
}
@@ -455,7 +455,7 @@ static int bnxt_set_vf_table(struct bnxt *bp, uint16_t vf)
/* This simply indicates there's no driver loaded.
* This is not an error.
*/
RTE_LOG(ERR, PMD, "Unable to get default VNIC for VF %d\n", vf);
PMD_DRV_LOG(ERR, "Unable to get default VNIC for VF %d\n", vf);
} else {
memset(&vnic, 0, sizeof(vnic));
vnic.fw_vnic_id = dflt_vnic;
@@ -518,9 +518,9 @@ int rte_pmd_bnxt_set_vf_vlan_filter(uint16_t port, uint16_t vlan,
/* Now check that there's space */
if (cnt == getpagesize() / sizeof(struct
bnxt_vlan_antispoof_table_entry)) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"VLAN anti-spoof table is full\n");
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"VF %d cannot add VLAN %u\n",
i, vlan);
rc = -1;
@@ -585,7 +585,7 @@ int rte_pmd_bnxt_get_vf_stats(uint16_t port,
return -EINVAL;
if (!BNXT_PF(bp)) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Attempt to get VF %d stats on non-PF port %d!\n",
vf_id, port);
return -ENOTSUP;
@@ -612,7 +612,7 @@ int rte_pmd_bnxt_reset_vf_stats(uint16_t port,
return -EINVAL;
if (!BNXT_PF(bp)) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Attempt to reset VF %d stats on non-PF port %d!\n",
vf_id, port);
return -ENOTSUP;
@@ -638,7 +638,7 @@ int rte_pmd_bnxt_get_vf_rx_status(uint16_t port, uint16_t vf_id)
return -EINVAL;
if (!BNXT_PF(bp)) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Attempt to query VF %d RX stats on non-PF port %d!\n",
vf_id, port);
return -ENOTSUP;
@@ -665,7 +665,7 @@ int rte_pmd_bnxt_get_vf_tx_drop_count(uint16_t port, uint16_t vf_id,
return -EINVAL;
if (!BNXT_PF(bp)) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Attempt to query VF %d TX drops on non-PF port %d!\n",
vf_id, port);
return -ENOTSUP;
@@ -697,7 +697,7 @@ int rte_pmd_bnxt_mac_addr_add(uint16_t port, struct ether_addr *addr,
return -EINVAL;
if (!BNXT_PF(bp)) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Attempt to config VF %d MAC on non-PF port %d!\n",
vf_id, port);
return -ENOTSUP;
@@ -773,7 +773,7 @@ rte_pmd_bnxt_set_vf_vlan_insert(uint16_t port, uint16_t vf,
return -EINVAL;
if (!BNXT_PF(bp)) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Attempt to set VF %d vlan insert on non-PF port %d!\n",
vf, port);
return -ENOTSUP;
@@ -807,7 +807,7 @@ int rte_pmd_bnxt_set_vf_persist_stats(uint16_t port, uint16_t vf, uint8_t on)
bp = (struct bnxt *)dev->data->dev_private;
if (!BNXT_PF(bp)) {
RTE_LOG(ERR, PMD,
PMD_DRV_LOG(ERR,
"Attempt to set persist stats on non-PF port %d!\n",
port);
return -EINVAL;