Unify prints in mlx5core.

All prints in mlx5core should use one of the macros:
mlx5_core_err/dbg/warn

Submitted by:	slavash@
MFC after:	3 days
Sponsored by:	Mellanox Technologies
This commit is contained in:
Hans Petter Selasky 2019-10-02 09:48:01 +00:00
parent c9bb26aef1
commit a2f4f59ca8
10 changed files with 186 additions and 141 deletions

View File

@ -1489,7 +1489,9 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
memset(cmd, 0, sizeof(*cmd));
cmd_if_rev = cmdif_rev_get(dev);
if (cmd_if_rev != CMD_IF_REV) {
device_printf((&dev->pdev->dev)->bsddev, "ERR: ""Driver cmdif rev(%d) differs from firmware's(%d)\n", CMD_IF_REV, cmd_if_rev);
mlx5_core_err(dev,
"Driver cmdif rev(%d) differs from firmware's(%d)\n",
CMD_IF_REV, cmd_if_rev);
return -EINVAL;
}
@ -1501,13 +1503,16 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
cmd->log_sz = cmd_l >> 4 & 0xf;
cmd->log_stride = cmd_l & 0xf;
if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
device_printf((&dev->pdev->dev)->bsddev, "ERR: ""firmware reports too many outstanding commands %d\n", 1 << cmd->log_sz);
mlx5_core_err(dev,
"firmware reports too many outstanding commands %d\n",
1 << cmd->log_sz);
err = -EINVAL;
goto err_free_page;
}
if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
device_printf((&dev->pdev->dev)->bsddev, "ERR: ""command queue size overflow\n");
mlx5_core_err(dev,
"command queue size overflow\n");
err = -EINVAL;
goto err_free_page;
}
@ -1518,7 +1523,9 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
if (cmd->cmdif_rev > CMD_IF_REV) {
device_printf((&dev->pdev->dev)->bsddev, "ERR: ""driver does not support command interface version. driver %d, firmware %d\n", CMD_IF_REV, cmd->cmdif_rev);
mlx5_core_err(dev,
"driver does not support command interface version. driver %d, firmware %d\n",
CMD_IF_REV, cmd->cmdif_rev);
err = -ENOTSUPP;
goto err_free_page;
}
@ -1534,7 +1541,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
cmd_h = (u32)((u64)(cmd->dma) >> 32);
cmd_l = (u32)(cmd->dma);
if (cmd_l & 0xfff) {
device_printf((&dev->pdev->dev)->bsddev, "ERR: ""invalid command queue address\n");
mlx5_core_err(dev, "invalid command queue address\n");
err = -ENOMEM;
goto err_free_page;
}
@ -1551,7 +1558,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
err = create_msg_cache(dev);
if (err) {
device_printf((&dev->pdev->dev)->bsddev, "ERR: ""failed to create command cache\n");
mlx5_core_err(dev, "failed to create command cache\n");
goto err_free_page;
}
return 0;

View File

@ -53,13 +53,18 @@ do { \
mlx5_core_dbg(dev, format, ##__VA_ARGS__); \
} while (0)
#define mlx5_core_err(_dev, format, ...) \
device_printf((&(_dev)->pdev->dev)->bsddev, "ERR: ""%s:%d:(pid %d): " format, \
#define mlx5_core_err(_dev, format, ...) \
device_printf((_dev)->pdev->dev.bsddev, "ERR: ""%s:%d:(pid %d): " format, \
__func__, __LINE__, curthread->td_proc->p_pid, \
##__VA_ARGS__)
#define mlx5_core_warn(_dev, format, ...) \
device_printf((&(_dev)->pdev->dev)->bsddev, "WARN: ""%s:%d:(pid %d): " format, \
#define mlx5_core_warn(_dev, format, ...) \
device_printf((_dev)->pdev->dev.bsddev, "WARN: ""%s:%d:(pid %d): " format, \
__func__, __LINE__, curthread->td_proc->p_pid, \
##__VA_ARGS__)
#define mlx5_core_info(_dev, format, ...) \
device_printf((_dev)->pdev->dev.bsddev, "INFO: ""%s:%d:(pid %d): " format, \
__func__, __LINE__, curthread->td_proc->p_pid, \
##__VA_ARGS__)

View File

@ -673,7 +673,6 @@ static void mlx5_port_module_event(struct mlx5_core_dev *dev,
unsigned int module_status;
unsigned int error_type;
struct mlx5_eqe_port_module_event *module_event_eqe;
struct pci_dev *pdev = dev->pdev;
module_event_eqe = &eqe->data.port_module_event;
@ -687,19 +686,19 @@ static void mlx5_port_module_event(struct mlx5_core_dev *dev,
dev->priv.pme_stats.status_counters[module_status]++;
switch (module_status) {
case MLX5_MODULE_STATUS_PLUGGED_ENABLED:
device_printf((&pdev->dev)->bsddev,
"INFO: Module %u, status: plugged and enabled\n",
mlx5_core_info(dev,
"Module %u, status: plugged and enabled\n",
module_num);
break;
case MLX5_MODULE_STATUS_UNPLUGGED:
device_printf((&pdev->dev)->bsddev,
"INFO: Module %u, status: unplugged\n", module_num);
mlx5_core_info(dev,
"Module %u, status: unplugged\n", module_num);
break;
case MLX5_MODULE_STATUS_ERROR:
device_printf((&pdev->dev)->bsddev,
"ERROR: Module %u, status: error, %s\n",
mlx5_core_err(dev,
"Module %u, status: error, %s\n",
module_num,
mlx5_port_module_event_error_type_to_string(error_type));
if (error_type < MLX5_MODULE_EVENT_ERROR_NUM)
@ -707,8 +706,8 @@ static void mlx5_port_module_event(struct mlx5_core_dev *dev,
break;
default:
device_printf((&pdev->dev)->bsddev,
"INFO: Module %u, unknown status\n", module_num);
mlx5_core_info(dev,
"Module %u, unknown status\n", module_num);
}
/* store module status */
if (module_num < MLX5_MAX_PORTS)

View File

@ -139,7 +139,6 @@ static struct mlx5_flow_root_namespace *find_root(struct fs_base *node)
node = parent;
if (node->type != FS_TYPE_NAMESPACE) {
printf("mlx5_core: WARN: ""mlx5: flow steering node %s is not in tree or garbaged\n", node->name);
return NULL;
}
@ -477,7 +476,7 @@ static int connect_prev_fts(struct fs_prio *locked_prio,
err = fs_set_star_rule(dev, iter, next_ft);
if (err) {
mlx5_core_warn(dev,
"mlx5: flow steering can't connect prev and next\n");
"mlx5: flow steering can't connect prev and next\n");
goto unlock;
} else {
/* Assume ft's prio is locked */
@ -605,7 +604,9 @@ static void destroy_star_rule(struct mlx5_flow_table *ft, struct fs_prio *prio)
root = find_root(&prio->base);
if (!root)
printf("mlx5_core: ERR: ""mlx5: flow steering failed to find root of priority %s", prio->base.name);
mlx5_core_err(dev,
"flow steering failed to find root of priority %s",
prio->base.name);
/* In order to ensure atomic deletion, first update
* prev ft to point on the next ft.
@ -765,11 +766,13 @@ static struct mlx5_flow_table *_create_ft_common(struct mlx5_flow_namespace *ns,
int log_table_sz;
int ft_size;
char gen_name[20];
struct mlx5_flow_root_namespace *root =
find_root(&ns->base);
struct mlx5_flow_root_namespace *root = find_root(&ns->base);
struct mlx5_core_dev *dev = fs_get_dev(&ns->base);
if (!root) {
printf("mlx5_core: ERR: ""mlx5: flow steering failed to find root of namespace %s", ns->base.name);
mlx5_core_err(dev,
"flow steering failed to find root of namespace %s",
ns->base.name);
return ERR_PTR(-ENODEV);
}
@ -987,12 +990,16 @@ int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
struct fs_prio *prio;
struct mlx5_flow_root_namespace *root;
bool is_shared_prio;
struct mlx5_core_dev *dev;
fs_get_parent(prio, ft);
root = find_root(&prio->base);
dev = fs_get_dev(&prio->base);
if (!root) {
printf("mlx5_core: ERR: ""mlx5: flow steering failed to find root of priority %s", prio->base.name);
mlx5_core_err(dev,
"flow steering failed to find root of priority %s",
prio->base.name);
return -ENODEV;
}

View File

@ -324,7 +324,7 @@ int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
} while (!time_after(jiffies, end));
if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
dev_err(&dev->pdev->dev, "NIC IFC still %d after %ums.\n",
mlx5_core_err(dev, "NIC IFC still %d after %ums.\n",
mlx5_get_nic_state(dev), delay_ms);
return -EIO;
}

View File

@ -69,7 +69,7 @@ mlx5_fwdump_prep(struct mlx5_core_dev *mdev)
error = mlx5_vsc_find_cap(mdev);
if (error != 0) {
/* Inability to create a firmware dump is not fatal. */
device_printf((&mdev->pdev->dev)->bsddev, "WARN: "
mlx5_core_warn(mdev,
"mlx5_fwdump_prep failed %d\n", error);
return;
}
@ -153,13 +153,13 @@ mlx5_fwdump(struct mlx5_core_dev *mdev)
uint32_t i, ri;
int error;
dev_info(&mdev->pdev->dev, "Issuing FW dump\n");
mlx5_core_info(mdev, "Issuing FW dump\n");
mtx_lock(&mdev->dump_lock);
if (mdev->dump_data == NULL)
goto failed;
if (mdev->dump_valid) {
/* only one dump */
dev_warn(&mdev->pdev->dev,
mlx5_core_warn(mdev,
"Only one FW dump can be captured aborting FW dump\n");
goto failed;
}

View File

@ -78,9 +78,11 @@ static int lock_sem_sw_reset(struct mlx5_core_dev *dev)
ret = -mlx5_vsc_lock_addr_space(dev, MLX5_SEMAPHORE_SW_RESET);
if (ret) {
if (ret == -EBUSY)
mlx5_core_dbg(dev, "SW reset FW semaphore already locked, another function will handle the reset\n");
mlx5_core_dbg(dev,
"SW reset FW semaphore already locked, another function will handle the reset\n");
else
mlx5_core_warn(dev, "SW reset semaphore lock return %d\n", ret);
mlx5_core_warn(dev,
"SW reset semaphore lock return %d\n", ret);
}
/* Unlock GW access */
@ -216,11 +218,12 @@ static void reset_fw_if_needed(struct mlx5_core_dev *dev)
if (fatal_error == MLX5_SENSOR_PCI_COMM_ERR ||
fatal_error == MLX5_SENSOR_NIC_DISABLED ||
fatal_error == MLX5_SENSOR_NIC_SW_RESET) {
mlx5_core_warn(dev, "Not issuing FW reset. Either it's already done or won't help.\n");
mlx5_core_warn(dev,
"Not issuing FW reset. Either it's already done or won't help.\n");
return;
}
mlx5_core_warn(dev, "Issuing FW Reset\n");
mlx5_core_info(dev, "Issuing FW Reset\n");
/* Write the NIC interface field to initiate the reset, the command
* interface address also resides here, don't overwrite it.
*/
@ -251,8 +254,8 @@ mlx5_health_allow_reset(struct mlx5_core_dev *dev)
*/
health->last_reset_req = ticks ? : -1;
if (!ret)
mlx5_core_warn(dev, "Firmware reset elided due to "
"auto-reset frequency threshold.\n");
mlx5_core_warn(dev,
"Firmware reset elided due to auto-reset frequency threshold.\n");
return (ret);
}
@ -313,7 +316,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
} while (!time_after(jiffies, end));
if (!sensor_nic_disabled(dev)) {
dev_err(&dev->pdev->dev, "NIC IFC still %d after %ums.\n",
mlx5_core_err(dev, "NIC IFC still %d after %ums.\n",
mlx5_get_nic_state(dev), delay_ms);
}
@ -321,7 +324,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
if (!lock)
unlock_sem_sw_reset(dev);
mlx5_core_err(dev, "system error event triggered\n");
mlx5_core_info(dev, "System error event triggered\n");
err_state_done:
mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 1);
@ -342,9 +345,11 @@ static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
* MLX5_NIC_IFC_DISABLED.
*/
if (dev->priv.health.fatal_error != MLX5_SENSOR_PCI_COMM_ERR)
mlx5_core_warn(dev, "NIC SW reset is already progress\n");
mlx5_core_warn(dev,
"NIC SW reset is already progress\n");
else
mlx5_core_warn(dev, "Communication with FW over the PCI link is down\n");
mlx5_core_warn(dev,
"Communication with FW over the PCI link is down\n");
} else {
mlx5_core_warn(dev, "NIC mode %d\n", nic_mode);
}
@ -372,7 +377,8 @@ static void health_recover(struct work_struct *work)
mtx_lock(&Giant); /* XXX newbus needs this */
if (sensor_pci_no_comm(dev)) {
dev_err(&dev->pdev->dev, "health recovery flow aborted, PCI reads still not working\n");
mlx5_core_err(dev,
"health recovery flow aborted, PCI reads still not working\n");
recover = false;
}
@ -384,13 +390,14 @@ static void health_recover(struct work_struct *work)
}
if (nic_mode != MLX5_NIC_IFC_DISABLED) {
dev_err(&dev->pdev->dev, "health recovery flow aborted, unexpected NIC IFC mode %d.\n",
nic_mode);
mlx5_core_err(dev,
"health recovery flow aborted, unexpected NIC IFC mode %d.\n",
nic_mode);
recover = false;
}
if (recover) {
dev_err(&dev->pdev->dev, "starting health recovery flow\n");
mlx5_core_info(dev, "Starting health recovery flow\n");
mlx5_recover_device(dev);
}
@ -425,12 +432,13 @@ static void health_care(struct work_struct *work)
spin_lock_irqsave(&health->wq_lock, flags);
if (!test_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags)) {
mlx5_core_warn(dev, "Scheduling recovery work with %lums delay\n",
recover_delay);
mlx5_core_warn(dev,
"Scheduling recovery work with %lums delay\n",
recover_delay);
schedule_delayed_work(&health->recover_work, recover_delay);
} else {
dev_err(&dev->pdev->dev,
"new health works are not permitted at this stage\n");
mlx5_core_err(dev,
"new health works are not permitted at this stage\n");
}
spin_unlock_irqrestore(&health->wq_lock, flags);
}
@ -455,7 +463,7 @@ void mlx5_trigger_health_work(struct mlx5_core_dev *dev)
if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
queue_work(health->wq, &health->work);
else
dev_err(&dev->pdev->dev,
mlx5_core_err(dev,
"new health works are not permitted at this stage\n");
spin_unlock_irqrestore(&health->wq_lock, flags);
}
@ -509,18 +517,23 @@ print_health_info(struct mlx5_core_dev *dev)
return (0);
for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
printf("mlx5_core: INFO: ""assert_var[%d] 0x%08x\n", i, ioread32be(h->assert_var + i));
mlx5_core_info(dev, "assert_var[%d] 0x%08x\n", i,
ioread32be(h->assert_var + i));
printf("mlx5_core: INFO: ""assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr));
printf("mlx5_core: INFO: ""assert_callra 0x%08x\n", ioread32be(&h->assert_callra));
snprintf(fw_str, sizeof(fw_str), "%d.%d.%d", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
printf("mlx5_core: INFO: ""fw_ver %s\n", fw_str);
printf("mlx5_core: INFO: ""hw_id 0x%08x\n", ioread32be(&h->hw_id));
printf("mlx5_core: INFO: ""irisc_index %d\n", ioread8(&h->irisc_index));
printf("mlx5_core: INFO: ""synd 0x%x: %s\n", synd, hsynd_str(synd));
printf("mlx5_core: INFO: ""ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
mlx5_core_info(dev, "assert_exit_ptr 0x%08x\n",
ioread32be(&h->assert_exit_ptr));
mlx5_core_info(dev, "assert_callra 0x%08x\n",
ioread32be(&h->assert_callra));
snprintf(fw_str, sizeof(fw_str), "%d.%d.%d",
fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
mlx5_core_info(dev, "fw_ver %s\n", fw_str);
mlx5_core_info(dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
mlx5_core_info(dev, "irisc_index %d\n", ioread8(&h->irisc_index));
mlx5_core_info(dev, "synd 0x%x: %s\n",
ioread8(&h->synd), hsynd_str(ioread8(&h->synd)));
mlx5_core_info(dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
fw = ioread32be(&h->fw_ver);
printf("mlx5_core: INFO: ""raw fw_ver 0x%08x\n", fw);
mlx5_core_info(dev, "raw fw_ver 0x%08x\n", fw);
return synd;
}
@ -540,31 +553,38 @@ static void health_watchdog(struct work_struct *work)
err = mlx5_pci_read_power_status(dev, &power, &status);
if (err < 0) {
mlx5_core_warn(dev, "Failed reading power status: %d\n", err);
mlx5_core_warn(dev, "Failed reading power status: %d\n",
err);
return;
}
dev->pwr_value = power;
if (dev->pwr_status != status) {
device_t bsddev = dev->pdev->dev.bsddev;
switch (status) {
case 0:
dev->pwr_status = status;
device_printf(bsddev, "PCI power is not published by the PCIe slot.\n");
mlx5_core_info(dev,
"PCI power is not published by the PCIe slot.\n");
break;
case 1:
dev->pwr_status = status;
device_printf(bsddev, "PCIe slot advertised sufficient power (%uW).\n", power);
mlx5_core_info(dev,
"PCIe slot advertised sufficient power (%uW).\n",
power);
break;
case 2:
dev->pwr_status = status;
device_printf(bsddev, "WARN: Detected insufficient power on the PCIe slot (%uW).\n", power);
mlx5_core_warn(dev,
"Detected insufficient power on the PCIe slot (%uW).\n",
power);
break;
default:
dev->pwr_status = 0;
device_printf(bsddev, "WARN: Unknown power state detected(%d).\n", status);
mlx5_core_warn(dev,
"Unknown power state detected(%d).\n",
status);
break;
}
}
@ -580,8 +600,8 @@ mlx5_trigger_health_watchdog(struct mlx5_core_dev *dev)
if (!test_bit(MLX5_DROP_NEW_WATCHDOG_WORK, &health->flags))
queue_work(health->wq_watchdog, &health->work_watchdog);
else
dev_err(&dev->pdev->dev,
"scheduling watchdog is not permitted at this stage\n");
mlx5_core_err(dev,
"scheduling watchdog is not permitted at this stage\n");
spin_unlock_irqrestore(&health->wq_lock, flags);
}
@ -611,7 +631,8 @@ static void poll_health(unsigned long data)
fatal_error = check_fatal_sensors(dev);
if (fatal_error && !health->fatal_error) {
mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
mlx5_core_err(dev,
"Fatal error %u detected\n", fatal_error);
dev->priv.health.fatal_error = fatal_error;
print_health_info(dev);
mlx5_trigger_health_work(dev);

View File

@ -172,24 +172,25 @@ static struct mlx5_profile profiles[] = {
static int set_dma_caps(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
int err;
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
device_printf((&pdev->dev)->bsddev, "WARN: ""Warning: couldn't set 64-bit PCI DMA mask\n");
mlx5_core_warn(dev, "couldn't set 64-bit PCI DMA mask\n");
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
device_printf((&pdev->dev)->bsddev, "ERR: ""Can't set PCI DMA mask, aborting\n");
mlx5_core_err(dev, "Can't set PCI DMA mask, aborting\n");
return err;
}
}
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
device_printf((&pdev->dev)->bsddev, "WARN: ""Warning: couldn't set 64-bit consistent PCI DMA mask\n");
mlx5_core_warn(dev, "couldn't set 64-bit consistent PCI DMA mask\n");
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
device_printf((&pdev->dev)->bsddev, "ERR: ""Can't set consistent PCI DMA mask, aborting\n");
mlx5_core_err(dev, "Can't set consistent PCI DMA mask, aborting\n");
return err;
}
}
@ -243,16 +244,17 @@ static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
static int request_bar(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
int err = 0;
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
device_printf((&pdev->dev)->bsddev, "ERR: ""Missing registers BAR, aborting\n");
mlx5_core_err(dev, "Missing registers BAR, aborting\n");
return -ENODEV;
}
err = pci_request_regions(pdev, DRIVER_NAME);
if (err)
device_printf((&pdev->dev)->bsddev, "ERR: ""Couldn't get PCI resources, aborting\n");
mlx5_core_err(dev, "Couldn't get PCI resources, aborting\n");
return err;
}
@ -319,7 +321,7 @@ enum {
MLX5_DEV_CAP_FLAG_DRAIN_SIGERR,
};
static u16 to_fw_pkey_sz(u32 size)
static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
{
switch (size) {
case 128:
@ -335,7 +337,7 @@ static u16 to_fw_pkey_sz(u32 size)
case 4096:
return 5;
default:
printf("mlx5_core: WARN: ""invalid pkey table size %d\n", size);
mlx5_core_warn(dev, "invalid pkey table size %d\n", size);
return 0;
}
}
@ -430,7 +432,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
128);
/* we limit the size of the pkey table to 128 entries for now */
MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
to_fw_pkey_sz(128));
to_fw_pkey_sz(dev, 128));
if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
@ -544,11 +546,11 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
mlx5_cmd_mbox_status(query_out, &status, &syndrome);
if (status == MLX5_CMD_STAT_BAD_OP_ERR) {
pr_debug("Only ISSI 0 is supported\n");
mlx5_core_dbg(dev, "Only ISSI 0 is supported\n");
return 0;
}
printf("mlx5_core: ERR: ""failed to query ISSI\n");
mlx5_core_err(dev, "failed to query ISSI\n");
return err;
}
@ -563,7 +565,7 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
err = mlx5_cmd_exec(dev, set_in, sizeof(set_in), set_out, sizeof(set_out));
if (err) {
printf("mlx5_core: ERR: ""failed to set ISSI=1 err(%d)\n", err);
mlx5_core_err(dev, "failed to set ISSI=1 err(%d)\n", err);
return err;
}
@ -850,13 +852,13 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
err = mlx5_pci_enable_device(dev);
if (err) {
device_printf((&pdev->dev)->bsddev, "ERR: ""Cannot enable PCI device, aborting\n");
mlx5_core_err(dev, "Cannot enable PCI device, aborting\n");
goto err_dbg;
}
err = request_bar(pdev);
if (err) {
device_printf((&pdev->dev)->bsddev, "ERR: ""error requesting BARs, aborting\n");
mlx5_core_err(dev, "error requesting BARs, aborting\n");
goto err_disable;
}
@ -864,7 +866,7 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
err = set_dma_caps(pdev);
if (err) {
device_printf((&pdev->dev)->bsddev, "ERR: ""Failed setting DMA capabilities mask, aborting\n");
mlx5_core_err(dev, "Failed setting DMA capabilities mask, aborting\n");
goto err_clr_master;
}
@ -872,7 +874,7 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
if (!dev->iseg) {
err = -ENOMEM;
device_printf((&pdev->dev)->bsddev, "ERR: ""Failed mapping initialization segment, aborting\n");
mlx5_core_err(dev, "Failed mapping initialization segment, aborting\n");
goto err_clr_master;
}
@ -895,28 +897,27 @@ static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
struct pci_dev *pdev = dev->pdev;
int err;
err = mlx5_vsc_find_cap(dev);
if (err)
dev_err(&pdev->dev, "Unable to find vendor specific capabilities\n");
mlx5_core_err(dev, "Unable to find vendor specific capabilities\n");
err = mlx5_query_hca_caps(dev);
if (err) {
dev_err(&pdev->dev, "query hca failed\n");
mlx5_core_err(dev, "query hca failed\n");
goto out;
}
err = mlx5_query_board_id(dev);
if (err) {
dev_err(&pdev->dev, "query board id failed\n");
mlx5_core_err(dev, "query board id failed\n");
goto out;
}
err = mlx5_eq_init(dev);
if (err) {
dev_err(&pdev->dev, "failed to initialize eq\n");
mlx5_core_err(dev, "failed to initialize eq\n");
goto out;
}
@ -924,7 +925,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
err = mlx5_init_cq_table(dev);
if (err) {
dev_err(&pdev->dev, "failed to initialize cq table\n");
mlx5_core_err(dev, "failed to initialize cq table\n");
goto err_eq_cleanup;
}
@ -938,7 +939,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
#ifdef RATELIMIT
err = mlx5_init_rl_table(dev);
if (err) {
dev_err(&pdev->dev, "Failed to init rate limiting\n");
mlx5_core_err(dev, "Failed to init rate limiting\n");
goto err_tables_cleanup;
}
#endif
@ -976,17 +977,16 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
bool boot)
{
struct pci_dev *pdev = dev->pdev;
int err;
mutex_lock(&dev->intf_state_mutex);
if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
__func__);
mlx5_core_warn(dev, "interface is up, NOP\n");
goto out;
}
device_printf((&pdev->dev)->bsddev, "INFO: ""firmware version: %d.%d.%d\n", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
mlx5_core_dbg(dev, "firmware version: %d.%d.%d\n",
fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
/*
* On load removing any previous indication of internal error,
@ -996,103 +996,103 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
err = mlx5_cmd_init(dev);
if (err) {
device_printf((&pdev->dev)->bsddev, "ERR: ""Failed initializing command interface, aborting\n");
mlx5_core_err(dev, "Failed initializing command interface, aborting\n");
goto out_err;
}
err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI);
if (err) {
device_printf((&dev->pdev->dev)->bsddev, "ERR: ""Firmware over %d MS in initializing state, aborting\n", FW_INIT_TIMEOUT_MILI);
mlx5_core_err(dev, "Firmware over %d MS in initializing state, aborting\n", FW_INIT_TIMEOUT_MILI);
goto err_cmd_cleanup;
}
err = mlx5_core_enable_hca(dev);
if (err) {
device_printf((&pdev->dev)->bsddev, "ERR: ""enable hca failed\n");
mlx5_core_err(dev, "enable hca failed\n");
goto err_cmd_cleanup;
}
err = mlx5_core_set_issi(dev);
if (err) {
device_printf((&pdev->dev)->bsddev, "ERR: ""failed to set issi\n");
mlx5_core_err(dev, "failed to set issi\n");
goto err_disable_hca;
}
err = mlx5_pagealloc_start(dev);
if (err) {
device_printf((&pdev->dev)->bsddev, "ERR: ""mlx5_pagealloc_start failed\n");
mlx5_core_err(dev, "mlx5_pagealloc_start failed\n");
goto err_disable_hca;
}
err = mlx5_satisfy_startup_pages(dev, 1);
if (err) {
device_printf((&pdev->dev)->bsddev, "ERR: ""failed to allocate boot pages\n");
mlx5_core_err(dev, "failed to allocate boot pages\n");
goto err_pagealloc_stop;
}
err = set_hca_ctrl(dev);
if (err) {
device_printf((&pdev->dev)->bsddev, "ERR: ""set_hca_ctrl failed\n");
mlx5_core_err(dev, "set_hca_ctrl failed\n");
goto reclaim_boot_pages;
}
err = handle_hca_cap(dev);
if (err) {
device_printf((&pdev->dev)->bsddev, "ERR: ""handle_hca_cap failed\n");
mlx5_core_err(dev, "handle_hca_cap failed\n");
goto reclaim_boot_pages;
}
err = handle_hca_cap_atomic(dev);
if (err) {
device_printf((&pdev->dev)->bsddev, "ERR: ""handle_hca_cap_atomic failed\n");
mlx5_core_err(dev, "handle_hca_cap_atomic failed\n");
goto reclaim_boot_pages;
}
err = mlx5_satisfy_startup_pages(dev, 0);
if (err) {
device_printf((&pdev->dev)->bsddev, "ERR: ""failed to allocate init pages\n");
mlx5_core_err(dev, "failed to allocate init pages\n");
goto reclaim_boot_pages;
}
err = mlx5_cmd_init_hca(dev);
if (err) {
device_printf((&pdev->dev)->bsddev, "ERR: ""init hca failed\n");
mlx5_core_err(dev, "init hca failed\n");
goto reclaim_boot_pages;
}
mlx5_start_health_poll(dev);
if (boot && mlx5_init_once(dev, priv)) {
dev_err(&pdev->dev, "sw objs init failed\n");
mlx5_core_err(dev, "sw objs init failed\n");
goto err_stop_poll;
}
err = mlx5_enable_msix(dev);
if (err) {
device_printf((&pdev->dev)->bsddev, "ERR: ""enable msix failed\n");
mlx5_core_err(dev, "enable msix failed\n");
goto err_cleanup_once;
}
err = mlx5_alloc_uuars(dev, &priv->uuari);
if (err) {
device_printf((&pdev->dev)->bsddev, "ERR: ""Failed allocating uar, aborting\n");
mlx5_core_err(dev, "Failed allocating uar, aborting\n");
goto err_disable_msix;
}
err = mlx5_start_eqs(dev);
if (err) {
device_printf((&pdev->dev)->bsddev, "ERR: ""Failed to start pages and async EQs\n");
mlx5_core_err(dev, "Failed to start pages and async EQs\n");
goto err_free_uar;
}
err = alloc_comp_eqs(dev);
if (err) {
device_printf((&pdev->dev)->bsddev, "ERR: ""Failed to alloc completion EQs\n");
mlx5_core_err(dev, "Failed to alloc completion EQs\n");
goto err_stop_eqs;
}
if (map_bf_area(dev))
device_printf((&pdev->dev)->bsddev, "ERR: ""Failed to map blue flame area\n");
mlx5_core_err(dev, "Failed to map blue flame area\n");
err = mlx5_init_fs(dev);
if (err) {
@ -1108,13 +1108,13 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
err = mlx5_fpga_device_start(dev);
if (err) {
dev_err(&pdev->dev, "fpga device start failed %d\n", err);
mlx5_core_err(dev, "fpga device start failed %d\n", err);
goto err_mpfs;
}
err = mlx5_register_device(dev);
if (err) {
dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
mlx5_core_err(dev, "mlx5_register_device failed %d\n", err);
goto err_fpga;
}
@ -1153,7 +1153,7 @@ err_cleanup_once:
err_stop_poll:
mlx5_stop_health_poll(dev, boot);
if (mlx5_cmd_teardown_hca(dev)) {
device_printf((&dev->pdev->dev)->bsddev, "ERR: ""tear_down_hca failed, skip cleanup\n");
mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n");
goto out_err;
}
@ -1186,7 +1186,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mutex_lock(&dev->intf_state_mutex);
if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n", __func__);
mlx5_core_warn(dev, "%s: interface is down, NOP\n", __func__);
if (cleanup)
mlx5_cleanup_once(dev);
goto out;
@ -1208,7 +1208,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mlx5_stop_health_poll(dev, cleanup);
err = mlx5_cmd_teardown_hca(dev);
if (err) {
device_printf((&dev->pdev->dev)->bsddev, "ERR: ""tear_down_hca failed, skip cleanup\n");
mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n");
goto out;
}
mlx5_pagealloc_stop(dev);
@ -1276,7 +1276,9 @@ static int init_one(struct pci_dev *pdev,
priv->pci_dev_data = id->driver_data;
if (mlx5_prof_sel < 0 || mlx5_prof_sel >= ARRAY_SIZE(profiles)) {
device_printf(bsddev, "WARN: selected profile out of range, selecting default (%d)\n", MLX5_DEFAULT_PROF);
device_printf(bsddev,
"WARN: selected profile out of range, selecting default (%d)\n",
MLX5_DEFAULT_PROF);
mlx5_prof_sel = MLX5_DEFAULT_PROF;
}
dev->profile = &profiles[mlx5_prof_sel];
@ -1342,13 +1344,13 @@ static int init_one(struct pci_dev *pdev,
mtx_init(&dev->dump_lock, "mlx5dmp", NULL, MTX_DEF | MTX_NEW);
err = mlx5_pci_init(dev, priv);
if (err) {
device_printf(bsddev, "ERR: mlx5_pci_init failed %d\n", err);
mlx5_core_err(dev, "mlx5_pci_init failed %d\n", err);
goto clean_dev;
}
err = mlx5_health_init(dev);
if (err) {
device_printf(bsddev, "ERR: mlx5_health_init failed %d\n", err);
mlx5_core_err(dev, "mlx5_health_init failed %d\n", err);
goto close_pci;
}
@ -1356,7 +1358,7 @@ static int init_one(struct pci_dev *pdev,
err = mlx5_load_one(dev, priv, true);
if (err) {
device_printf(bsddev, "ERR: mlx5_load_one failed %d\n", err);
mlx5_core_err(dev, "mlx5_load_one failed %d\n", err);
goto clean_health;
}
@ -1386,7 +1388,7 @@ static void remove_one(struct pci_dev *pdev)
struct mlx5_priv *priv = &dev->priv;
if (mlx5_unload_one(dev, priv, true)) {
dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n");
mlx5_core_err(dev, "mlx5_unload_one failed\n");
mlx5_health_cleanup(dev);
return;
}
@ -1407,7 +1409,7 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct mlx5_priv *priv = &dev->priv;
dev_info(&pdev->dev, "%s was called\n", __func__);
mlx5_core_info(dev, "%s was called\n", __func__);
mlx5_enter_error_state(dev, false);
mlx5_unload_one(dev, priv, false);
@ -1425,12 +1427,12 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
int err = 0;
dev_info(&pdev->dev, "%s was called\n", __func__);
mlx5_core_info(dev,"%s was called\n", __func__);
err = mlx5_pci_enable_device(dev);
if (err) {
dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n"
, __func__, err);
mlx5_core_err(dev, "mlx5_pci_enable_device failed with error code: %d\n"
,err);
return PCI_ERS_RESULT_DISCONNECT;
}
pci_set_master(pdev);
@ -1458,29 +1460,31 @@ static void wait_vital(struct pci_dev *pdev)
msleep(1000);
for (i = 0; i < niter; i++) {
if (pci_read_config_word(pdev, 2, &did)) {
dev_warn(&pdev->dev, "failed reading config word\n");
mlx5_core_warn(dev, "failed reading config word\n");
break;
}
if (did == pdev->device) {
dev_info(&pdev->dev, "device ID correctly read after %d iterations\n", i);
mlx5_core_info(dev,
"device ID correctly read after %d iterations\n", i);
break;
}
msleep(50);
}
if (i == niter)
dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
mlx5_core_warn(dev, "could not read device ID\n");
for (i = 0; i < niter; i++) {
count = ioread32be(health->health_counter);
if (count && count != 0xffffffff) {
dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
mlx5_core_info(dev,
"Counter value 0x%x after %d iterations\n", count, i);
break;
}
msleep(50);
}
if (i == niter)
dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
mlx5_core_warn(dev, "could not read device ID\n");
}
static void mlx5_pci_resume(struct pci_dev *pdev)
@ -1489,16 +1493,16 @@ static void mlx5_pci_resume(struct pci_dev *pdev)
struct mlx5_priv *priv = &dev->priv;
int err;
dev_info(&pdev->dev, "%s was called\n", __func__);
mlx5_core_info(dev,"%s was called\n", __func__);
wait_vital(pdev);
err = mlx5_load_one(dev, priv, false);
if (err)
dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n"
, __func__, err);
mlx5_core_err(dev,
"mlx5_load_one failed with error code: %d\n" ,err);
else
dev_info(&pdev->dev, "%s: device recovered\n", __func__);
mlx5_core_info(dev,"device recovered\n");
}
static const struct pci_error_handlers mlx5_err_handler = {

View File

@ -33,6 +33,8 @@
#include <dev/mlx5/mpfs.h>
#include <dev/mlx5/driver.h>
#include "mlx5_core.h"
#define MPFS_LOCK(dev) spin_lock(&(dev)->mpfs.spinlock)
#define MPFS_UNLOCK(dev) spin_unlock(&(dev)->mpfs.spinlock)
@ -119,7 +121,7 @@ mlx5_mpfs_destroy(struct mlx5_core_dev *dev)
num = bitmap_weight(dev->mpfs.bitmap, MLX5_MPFS_TABLE_MAX);
if (num != 0)
dev_err(&dev->pdev->dev, "Leaking %u MPFS MAC table entries\n", num);
mlx5_core_err(dev, "Leaking %u MPFS MAC table entries\n", num);
spin_lock_destroy(&dev->mpfs.spinlock);
}

View File

@ -618,7 +618,7 @@ out:
}
EXPORT_SYMBOL_GPL(mlx5_core_access_ptys);
static int mtu_to_ib_mtu(int mtu)
static int mtu_to_ib_mtu(struct mlx5_core_dev *dev, int mtu)
{
switch (mtu) {
case 256: return 1;
@ -627,7 +627,7 @@ static int mtu_to_ib_mtu(int mtu)
case 2048: return 4;
case 4096: return 5;
default:
printf("mlx5_core: WARN: ""invalid mtu\n");
mlx5_core_warn(dev, "invalid mtu\n");
return -1;
}
}
@ -661,11 +661,11 @@ int mlx5_core_access_pmtu(struct mlx5_core_dev *dev,
if (!write) {
pmtu->local_port = MLX5_GET(pmtu_reg, out, local_port);
pmtu->max_mtu = mtu_to_ib_mtu(MLX5_GET(pmtu_reg, out,
pmtu->max_mtu = mtu_to_ib_mtu(dev, MLX5_GET(pmtu_reg, out,
max_mtu));
pmtu->admin_mtu = mtu_to_ib_mtu(MLX5_GET(pmtu_reg, out,
pmtu->admin_mtu = mtu_to_ib_mtu(dev, MLX5_GET(pmtu_reg, out,
admin_mtu));
pmtu->oper_mtu = mtu_to_ib_mtu(MLX5_GET(pmtu_reg, out,
pmtu->oper_mtu = mtu_to_ib_mtu(dev, MLX5_GET(pmtu_reg, out,
oper_mtu));
}