net/ena/base: add device argument to logging macros

Some platforms may use the device argument to make the logs more verbose and
specific to the device in question.

As the ENA DPDK PMD does not use the device argument for logging, the type
is defined but never used.

This may be revisited in the future by adding the port ID to the log
messages, but for now the logging behavior does not change.
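
For illustration, a minimal before/after sketch of a call site (the pattern
comes straight from the diff below; ena_dev is the device handle most
callers already hold):

    /* Before: the logging macros took only the format string. */
    ena_trc_err("Memory allocation failed\n");

    /* After: the device is the first argument. Platforms that want
     * per-device logs can use it; the DPDK PMD discards it. */
    ena_trc_err(ena_dev, "Memory allocation failed\n");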

Signed-off-by: Michal Krawczyk <mk@semihalf.com>
Reviewed-by: Igor Chauskin <igorch@amazon.com>
Reviewed-by: Guy Tzalik <gtzalik@amazon.com>
Commit: ac2fd8a5ea (parent: b4f8decd38)
Author: Michal Krawczyk, 2021-05-11 08:45:38 +02:00
Committed by: Ferruh Yigit
5 changed files with 236 additions and 161 deletions

drivers/net/ena/base/ena_com.c

@ -70,7 +70,7 @@ static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
dma_addr_t addr)
{
if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
ena_trc_err("dma address has more bits that the device supports\n");
ena_trc_err(ena_dev, "DMA address has more bits than the device supports\n");
return ENA_COM_INVAL;
}
@ -82,6 +82,7 @@ static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
{
struct ena_com_dev *ena_dev = admin_queue->ena_dev;
struct ena_com_admin_sq *sq = &admin_queue->sq;
u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
@ -89,7 +90,7 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
sq->mem_handle);
if (!sq->entries) {
ena_trc_err("memory allocation failed\n");
ena_trc_err(ena_dev, "Memory allocation failed\n");
return ENA_COM_NO_MEM;
}
@ -104,6 +105,7 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
{
struct ena_com_dev *ena_dev = admin_queue->ena_dev;
struct ena_com_admin_cq *cq = &admin_queue->cq;
u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
@ -111,7 +113,7 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
cq->mem_handle);
if (!cq->entries) {
ena_trc_err("memory allocation failed\n");
ena_trc_err(ena_dev, "Memory allocation failed\n");
return ENA_COM_NO_MEM;
}
@ -136,7 +138,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
aenq->mem_handle);
if (!aenq->entries) {
ena_trc_err("memory allocation failed\n");
ena_trc_err(ena_dev, "Memory allocation failed\n");
return ENA_COM_NO_MEM;
}
@ -157,7 +159,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
ENA_REG_WRITE32(ena_dev->bus, aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
if (unlikely(!aenq_handlers)) {
ena_trc_err("aenq handlers pointer is NULL\n");
ena_trc_err(ena_dev, "AENQ handlers pointer is NULL\n");
return ENA_COM_INVAL;
}
@ -177,18 +179,21 @@ static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queu
u16 command_id, bool capture)
{
if (unlikely(command_id >= admin_queue->q_depth)) {
ena_trc_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
ena_trc_err(admin_queue->ena_dev,
"Command id is larger than the queue size. cmd_id: %u queue size %d\n",
command_id, admin_queue->q_depth);
return NULL;
}
if (unlikely(!admin_queue->comp_ctx)) {
ena_trc_err("Completion context is NULL\n");
ena_trc_err(admin_queue->ena_dev,
"Completion context is NULL\n");
return NULL;
}
if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
ena_trc_err("Completion context is occupied\n");
ena_trc_err(admin_queue->ena_dev,
"Completion context is occupied\n");
return NULL;
}
@ -218,7 +223,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
/* In case of queue FULL */
cnt = (u16)ATOMIC32_READ(&admin_queue->outstanding_cmds);
if (cnt >= admin_queue->q_depth) {
ena_trc_dbg("admin queue is full.\n");
ena_trc_dbg(admin_queue->ena_dev, "Admin queue is full.\n");
admin_queue->stats.out_of_space++;
return ERR_PTR(ENA_COM_NO_SPACE);
}
@ -262,13 +267,14 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
{
struct ena_com_dev *ena_dev = admin_queue->ena_dev;
size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx);
struct ena_comp_ctx *comp_ctx;
u16 i;
admin_queue->comp_ctx = ENA_MEM_ALLOC(admin_queue->q_dmadev, size);
if (unlikely(!admin_queue->comp_ctx)) {
ena_trc_err("memory allocation failed\n");
ena_trc_err(ena_dev, "Memory allocation failed\n");
return ENA_COM_NO_MEM;
}
@ -341,7 +347,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
}
if (!io_sq->desc_addr.virt_addr) {
ena_trc_err("memory allocation failed\n");
ena_trc_err(ena_dev, "Memory allocation failed\n");
return ENA_COM_NO_MEM;
}
}
@ -366,7 +372,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size);
if (!io_sq->bounce_buf_ctrl.base_buffer) {
ena_trc_err("bounce buffer memory allocation failed\n");
ena_trc_err(ena_dev, "Bounce buffer memory allocation failed\n");
return ENA_COM_NO_MEM;
}
@ -431,7 +437,7 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
}
if (!io_cq->cdesc_addr.virt_addr) {
ena_trc_err("memory allocation failed\n");
ena_trc_err(ena_dev, "Memory allocation failed\n");
return ENA_COM_NO_MEM;
}
@ -452,7 +458,8 @@ static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *a
comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
if (unlikely(!comp_ctx)) {
ena_trc_err("comp_ctx is NULL. Changing the admin queue running state\n");
ena_trc_err(admin_queue->ena_dev,
"comp_ctx is NULL. Changing the admin queue running state\n");
admin_queue->running_state = false;
return;
}
@ -504,10 +511,12 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu
admin_queue->stats.completed_cmd += comp_num;
}
static int ena_com_comp_status_to_errno(u8 comp_status)
static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue,
u8 comp_status)
{
if (unlikely(comp_status != 0))
ena_trc_err("admin command failed[%u]\n", comp_status);
ena_trc_err(admin_queue->ena_dev,
"Admin command failed[%u]\n", comp_status);
switch (comp_status) {
case ENA_ADMIN_SUCCESS:
@ -554,7 +563,8 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
break;
if (ENA_TIME_EXPIRE(timeout)) {
ena_trc_err("Wait for completion (polling) timeout\n");
ena_trc_err(admin_queue->ena_dev,
"Wait for completion (polling) timeout\n");
/* ENA didn't have any completion */
ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
admin_queue->stats.no_completion++;
@ -570,7 +580,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
}
if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
ena_trc_err("Command was aborted\n");
ena_trc_err(admin_queue->ena_dev, "Command was aborted\n");
ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
admin_queue->stats.aborted_cmd++;
ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
@ -579,9 +589,10 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
}
ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED,
"Invalid comp status %d\n", comp_ctx->status);
admin_queue->ena_dev, "Invalid comp status %d\n",
comp_ctx->status);
ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
err:
comp_ctxt_release(admin_queue, comp_ctx);
return ret;
@ -623,7 +634,7 @@ static int ena_com_set_llq(struct ena_com_dev *ena_dev)
sizeof(resp));
if (unlikely(ret))
ena_trc_err("Failed to set LLQ configurations: %d\n", ret);
ena_trc_err(ena_dev, "Failed to set LLQ configurations: %d\n", ret);
return ret;
}
@ -645,7 +656,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
llq_info->header_location_ctrl =
llq_default_cfg->llq_header_location;
} else {
ena_trc_err("Invalid header location control, supported: 0x%x\n",
ena_trc_err(ena_dev, "Invalid header location control, supported: 0x%x\n",
supported_feat);
return -EINVAL;
}
@ -660,12 +671,12 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
} else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
} else {
ena_trc_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
ena_trc_err(ena_dev, "Invalid desc_stride_ctrl, supported: 0x%x\n",
supported_feat);
return -EINVAL;
}
ena_trc_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
ena_trc_err(ena_dev, "Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
llq_default_cfg->llq_stride_ctrl,
supported_feat,
llq_info->desc_stride_ctrl);
@ -689,11 +700,12 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
llq_info->desc_list_entry_size = 256;
} else {
ena_trc_err("Invalid entry_size_ctrl, supported: 0x%x\n", supported_feat);
ena_trc_err(ena_dev, "Invalid entry_size_ctrl, supported: 0x%x\n",
supported_feat);
return -EINVAL;
}
ena_trc_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
ena_trc_err(ena_dev, "Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
llq_default_cfg->llq_ring_entry_size,
supported_feat,
llq_info->desc_list_entry_size);
@ -702,7 +714,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
/* The desc list entry size should be whole multiply of 8
* This requirement comes from __iowrite64_copy()
*/
ena_trc_err("illegal entry size %d\n",
ena_trc_err(ena_dev, "Illegal entry size %d\n",
llq_info->desc_list_entry_size);
return -EINVAL;
}
@ -726,12 +738,12 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
} else {
ena_trc_err("Invalid descs_num_before_header, supported: 0x%x\n",
ena_trc_err(ena_dev, "Invalid descs_num_before_header, supported: 0x%x\n",
supported_feat);
return -EINVAL;
}
ena_trc_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
ena_trc_err(ena_dev, "Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
llq_default_cfg->llq_num_decs_before_header,
supported_feat,
llq_info->descs_num_before_header);
@ -750,7 +762,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
rc = ena_com_set_llq(ena_dev);
if (rc)
ena_trc_err("Cannot set LLQ configuration: %d\n", rc);
ena_trc_err(ena_dev, "Cannot set LLQ configuration: %d\n", rc);
return rc;
}
@ -776,13 +788,15 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
if (comp_ctx->status == ENA_CMD_COMPLETED) {
ena_trc_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
ena_trc_err(admin_queue->ena_dev,
"The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
/* Check if fallback to polling is enabled */
if (admin_queue->auto_polling)
admin_queue->polling = true;
} else {
ena_trc_err("The ena device didn't send a completion for the admin cmd %d status %d\n",
ena_trc_err(admin_queue->ena_dev,
"The ena device didn't send a completion for the admin cmd %d status %d\n",
comp_ctx->cmd_opcode, comp_ctx->status);
}
/* Check if shifted to polling mode.
@ -796,7 +810,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
}
}
ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
err:
comp_ctxt_release(admin_queue, comp_ctx);
return ret;
@ -844,7 +858,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
}
if (unlikely(i == timeout)) {
ena_trc_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
ena_trc_err(ena_dev, "Reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
mmio_read->seq_num,
offset,
read_resp->req_id,
@ -854,7 +868,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
}
if (read_resp->reg_off != offset) {
ena_trc_err("Read failure: wrong offset provided\n");
ena_trc_err(ena_dev, "Read failure: wrong offset provided\n");
ret = ENA_MMIO_READ_TIMEOUT;
} else {
ret = read_resp->reg_val;
@ -913,7 +927,7 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
sizeof(destroy_resp));
if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
ena_trc_err("failed to destroy io sq error: %d\n", ret);
ena_trc_err(ena_dev, "Failed to destroy io sq error: %d\n", ret);
return ret;
}
@ -969,7 +983,7 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
ena_trc_err("Reg read timeout occurred\n");
ena_trc_err(ena_dev, "Reg read timeout occurred\n");
return ENA_COM_TIMER_EXPIRED;
}
@ -1009,7 +1023,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
int ret;
if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
ena_trc_dbg("Feature %d isn't supported\n", feature_id);
ena_trc_dbg(ena_dev, "Feature %d isn't supported\n", feature_id);
return ENA_COM_UNSUPPORTED;
}
@ -1028,7 +1042,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
&get_cmd.control_buffer.address,
control_buf_dma_addr);
if (unlikely(ret)) {
ena_trc_err("memory address set failed\n");
ena_trc_err(ena_dev, "Memory address set failed\n");
return ret;
}
@ -1045,7 +1059,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
sizeof(*get_resp));
if (unlikely(ret))
ena_trc_err("Failed to submit get_feature command %d error: %d\n",
ena_trc_err(ena_dev, "Failed to submit get_feature command %d error: %d\n",
feature_id, ret);
return ret;
@ -1157,7 +1171,7 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
if ((get_resp.u.ind_table.min_size > log_size) ||
(get_resp.u.ind_table.max_size < log_size)) {
ena_trc_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
ena_trc_err(ena_dev, "Indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
1 << log_size,
1 << get_resp.u.ind_table.min_size,
1 << get_resp.u.ind_table.max_size);
@ -1261,7 +1275,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
&create_cmd.sq_ba,
io_sq->desc_addr.phys_addr);
if (unlikely(ret)) {
ena_trc_err("memory address set failed\n");
ena_trc_err(ena_dev, "Memory address set failed\n");
return ret;
}
}
@ -1272,7 +1286,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
(struct ena_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion));
if (unlikely(ret)) {
ena_trc_err("Failed to create IO SQ. error: %d\n", ret);
ena_trc_err(ena_dev, "Failed to create IO SQ. error: %d\n", ret);
return ret;
}
@ -1290,7 +1304,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
cmd_completion.llq_descriptors_offset);
}
ena_trc_dbg("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
ena_trc_dbg(ena_dev, "Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
return ret;
}
@ -1324,7 +1338,7 @@ static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution;
if (unlikely(!intr_delay_resolution)) {
ena_trc_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
ena_trc_err(ena_dev, "Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
}
@ -1360,10 +1374,12 @@ int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
comp, comp_size);
if (IS_ERR(comp_ctx)) {
if (comp_ctx == ERR_PTR(ENA_COM_NO_DEVICE))
ena_trc_dbg("Failed to submit command [%ld]\n",
ena_trc_dbg(admin_queue->ena_dev,
"Failed to submit command [%ld]\n",
PTR_ERR(comp_ctx));
else
ena_trc_err("Failed to submit command [%ld]\n",
ena_trc_err(admin_queue->ena_dev,
"Failed to submit command [%ld]\n",
PTR_ERR(comp_ctx));
return PTR_ERR(comp_ctx);
@ -1372,11 +1388,11 @@ int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
if (unlikely(ret)) {
if (admin_queue->running_state)
ena_trc_err("Failed to process command. ret = %d\n",
ret);
ena_trc_err(admin_queue->ena_dev,
"Failed to process command. ret = %d\n", ret);
else
ena_trc_dbg("Failed to process command. ret = %d\n",
ret);
ena_trc_dbg(admin_queue->ena_dev,
"Failed to process command. ret = %d\n", ret);
}
return ret;
}
@ -1405,7 +1421,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
&create_cmd.cq_ba,
io_cq->cdesc_addr.phys_addr);
if (unlikely(ret)) {
ena_trc_err("memory address set failed\n");
ena_trc_err(ena_dev, "Memory address set failed\n");
return ret;
}
@ -1415,7 +1431,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
(struct ena_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion));
if (unlikely(ret)) {
ena_trc_err("Failed to create IO CQ. error: %d\n", ret);
ena_trc_err(ena_dev, "Failed to create IO CQ. error: %d\n", ret);
return ret;
}
@ -1434,7 +1450,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
cmd_completion.numa_node_register_offset);
ena_trc_dbg("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
ena_trc_dbg(ena_dev, "Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
return ret;
}
@ -1444,7 +1460,7 @@ int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
struct ena_com_io_cq **io_cq)
{
if (qid >= ENA_TOTAL_NUM_QUEUES) {
ena_trc_err("Invalid queue number %d but the max is %d\n",
ena_trc_err(ena_dev, "Invalid queue number %d but the max is %d\n",
qid, ENA_TOTAL_NUM_QUEUES);
return ENA_COM_INVAL;
}
@ -1510,7 +1526,7 @@ int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
sizeof(destroy_resp));
if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
ena_trc_err("Failed to destroy IO CQ. error: %d\n", ret);
ena_trc_err(ena_dev, "Failed to destroy IO CQ. error: %d\n", ret);
return ret;
}
@ -1534,7 +1550,7 @@ void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
{
u16 depth = ena_dev->aenq.q_depth;
ENA_WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
ENA_WARN(ena_dev->aenq.head != depth, ena_dev, "Invalid AENQ state\n");
/* Init head_db to mark that all entries in the queue
* are initially available
@ -1552,12 +1568,12 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
if (ret) {
ena_trc_info("Can't get aenq configuration\n");
ena_trc_info(ena_dev, "Can't get aenq configuration\n");
return ret;
}
if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
ena_trc_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
ena_trc_warn(ena_dev, "Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
get_resp.u.aenq.supported_groups,
groups_flag);
return ENA_COM_UNSUPPORTED;
@ -1578,7 +1594,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
sizeof(resp));
if (unlikely(ret))
ena_trc_err("Failed to config AENQ ret: %d\n", ret);
ena_trc_err(ena_dev, "Failed to config AENQ ret: %d\n", ret);
return ret;
}
@ -1589,17 +1605,17 @@ int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
int width;
if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
ena_trc_err("Reg read timeout occurred\n");
ena_trc_err(ena_dev, "Reg read timeout occurred\n");
return ENA_COM_TIMER_EXPIRED;
}
width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
ena_trc_dbg("ENA dma width: %d\n", width);
ena_trc_dbg(ena_dev, "ENA dma width: %d\n", width);
if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
ena_trc_err("DMA width illegal value: %d\n", width);
ena_trc_err(ena_dev, "DMA width illegal value: %d\n", width);
return ENA_COM_INVAL;
}
@ -1623,16 +1639,16 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
(ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
ena_trc_err("Reg read timeout occurred\n");
ena_trc_err(ena_dev, "Reg read timeout occurred\n");
return ENA_COM_TIMER_EXPIRED;
}
ena_trc_info("ena device version: %d.%d\n",
ena_trc_info(ena_dev, "ENA device version: %d.%d\n",
(ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
ena_trc_info("ena controller version: %d.%d.%d implementation version %d\n",
ena_trc_info(ena_dev, "ENA controller version: %d.%d.%d implementation version %d\n",
(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK)
>> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK)
@ -1648,7 +1664,7 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
/* Validate the ctrl version without the implementation ID */
if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
ena_trc_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
ena_trc_err(ena_dev, "ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
return -1;
}
@ -1786,12 +1802,12 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
ena_trc_err("Reg read timeout occurred\n");
ena_trc_err(ena_dev, "Reg read timeout occurred\n");
return ENA_COM_TIMER_EXPIRED;
}
if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
ena_trc_err("Device isn't ready, abort com init\n");
ena_trc_err(ena_dev, "Device isn't ready, abort com init\n");
return ENA_COM_NO_DEVICE;
}
@ -1869,7 +1885,7 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
int ret;
if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
ena_trc_err(ena_dev, "Qid (%d) is bigger than max num of queues (%d)\n",
ctx->qid, ENA_TOTAL_NUM_QUEUES);
return ENA_COM_INVAL;
}
@ -1928,7 +1944,7 @@ void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
struct ena_com_io_cq *io_cq;
if (qid >= ENA_TOTAL_NUM_QUEUES) {
ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
ena_trc_err(ena_dev, "Qid (%d) is bigger than max num of queues (%d)\n",
qid, ENA_TOTAL_NUM_QUEUES);
return;
}
@ -2090,7 +2106,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
timestamp = (u64)aenq_common->timestamp_low |
((u64)aenq_common->timestamp_high << 32);
ENA_TOUCH(timestamp); /* In case debug is disabled */
ena_trc_dbg("AENQ! Group[%x] Syndrom[%x] timestamp: [%" ENA_PRIu64 "s]\n",
ena_trc_dbg(ena_dev, "AENQ! Group[%x] Syndrom[%x] timestamp: [%" ENA_PRIu64 "s]\n",
aenq_common->group,
aenq_common->syndrom,
timestamp);
@ -2137,19 +2153,19 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
(cap == ENA_MMIO_READ_TIMEOUT))) {
ena_trc_err("Reg read32 timeout occurred\n");
ena_trc_err(ena_dev, "Reg read32 timeout occurred\n");
return ENA_COM_TIMER_EXPIRED;
}
if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
ena_trc_err("Device isn't ready, can't reset device\n");
ena_trc_err(ena_dev, "Device isn't ready, can't reset device\n");
return ENA_COM_INVAL;
}
timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
if (timeout == 0) {
ena_trc_err("Invalid timeout value\n");
ena_trc_err(ena_dev, "Invalid timeout value\n");
return ENA_COM_INVAL;
}
@ -2165,7 +2181,7 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
rc = wait_for_reset_state(ena_dev, timeout,
ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
if (rc != 0) {
ena_trc_err("Reset indication didn't turn on\n");
ena_trc_err(ena_dev, "Reset indication didn't turn on\n");
return rc;
}
@ -2173,7 +2189,7 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
rc = wait_for_reset_state(ena_dev, timeout, 0);
if (rc != 0) {
ena_trc_err("Reset indication didn't turn off\n");
ena_trc_err(ena_dev, "Reset indication didn't turn off\n");
return rc;
}
@ -2210,7 +2226,7 @@ static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
sizeof(*get_resp));
if (unlikely(ret))
ena_trc_err("Failed to get stats. error: %d\n", ret);
ena_trc_err(ena_dev, "Failed to get stats. error: %d\n", ret);
return ret;
}
@ -2253,7 +2269,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
int ret;
if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
ena_trc_dbg("Feature %d isn't supported\n", ENA_ADMIN_MTU);
ena_trc_dbg(ena_dev, "Feature %d isn't supported\n", ENA_ADMIN_MTU);
return ENA_COM_UNSUPPORTED;
}
@ -2272,7 +2288,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
sizeof(resp));
if (unlikely(ret))
ena_trc_err("Failed to set mtu %d. error: %d\n", mtu, ret);
ena_trc_err(ena_dev, "Failed to set mtu %d. error: %d\n", mtu, ret);
return ret;
}
@ -2286,7 +2302,7 @@ int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
ret = ena_com_get_feature(ena_dev, &resp,
ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
if (unlikely(ret)) {
ena_trc_err("Failed to get offload capabilities %d\n", ret);
ena_trc_err(ena_dev, "Failed to get offload capabilities %d\n", ret);
return ret;
}
@ -2306,7 +2322,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
if (!ena_com_check_supported_feature_id(ena_dev,
ENA_ADMIN_RSS_HASH_FUNCTION)) {
ena_trc_dbg("Feature %d isn't supported\n",
ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
ENA_ADMIN_RSS_HASH_FUNCTION);
return ENA_COM_UNSUPPORTED;
}
@ -2318,7 +2334,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
return ret;
if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
ena_trc_err("Func hash %d isn't supported by device, abort\n",
ena_trc_err(ena_dev, "Func hash %d isn't supported by device, abort\n",
rss->hash_func);
return ENA_COM_UNSUPPORTED;
}
@ -2336,7 +2352,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
&cmd.control_buffer.address,
rss->hash_key_dma_addr);
if (unlikely(ret)) {
ena_trc_err("memory address set failed\n");
ena_trc_err(ena_dev, "Memory address set failed\n");
return ret;
}
@ -2348,7 +2364,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
(struct ena_admin_acq_entry *)&resp,
sizeof(resp));
if (unlikely(ret)) {
ena_trc_err("Failed to set hash function %d. error: %d\n",
ena_trc_err(ena_dev, "Failed to set hash function %d. error: %d\n",
rss->hash_func, ret);
return ENA_COM_INVAL;
}
@ -2380,7 +2396,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
return rc;
if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
ena_trc_err("Flow hash function %d isn't supported\n", func);
ena_trc_err(ena_dev, "Flow hash function %d isn't supported\n", func);
return ENA_COM_UNSUPPORTED;
}
@ -2388,7 +2404,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
case ENA_ADMIN_TOEPLITZ:
if (key) {
if (key_len != sizeof(hash_key->key)) {
ena_trc_err("key len (%hu) doesn't equal the supported size (%zu)\n",
ena_trc_err(ena_dev, "key len (%hu) doesn't equal the supported size (%zu)\n",
key_len, sizeof(hash_key->key));
return ENA_COM_INVAL;
}
@ -2401,7 +2417,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
rss->hash_init_val = init_val;
break;
default:
ena_trc_err("Invalid hash function (%d)\n", func);
ena_trc_err(ena_dev, "Invalid hash function (%d)\n", func);
return ENA_COM_INVAL;
}
@ -2486,7 +2502,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
if (!ena_com_check_supported_feature_id(ena_dev,
ENA_ADMIN_RSS_HASH_INPUT)) {
ena_trc_dbg("Feature %d isn't supported\n",
ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
ENA_ADMIN_RSS_HASH_INPUT);
return ENA_COM_UNSUPPORTED;
}
@ -2505,7 +2521,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
&cmd.control_buffer.address,
rss->hash_ctrl_dma_addr);
if (unlikely(ret)) {
ena_trc_err("memory address set failed\n");
ena_trc_err(ena_dev, "Memory address set failed\n");
return ret;
}
cmd.control_buffer.length = sizeof(*hash_ctrl);
@ -2516,7 +2532,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
(struct ena_admin_acq_entry *)&resp,
sizeof(resp));
if (unlikely(ret))
ena_trc_err("Failed to set hash input. error: %d\n", ret);
ena_trc_err(ena_dev, "Failed to set hash input. error: %d\n", ret);
return ret;
}
@ -2566,7 +2582,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
available_fields = hash_ctrl->selected_fields[i].fields &
hash_ctrl->supported_fields[i].fields;
if (available_fields != hash_ctrl->selected_fields[i].fields) {
ena_trc_err("hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
ena_trc_err(ena_dev, "Hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
i, hash_ctrl->supported_fields[i].fields,
hash_ctrl->selected_fields[i].fields);
return ENA_COM_UNSUPPORTED;
@ -2592,7 +2608,7 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
int rc;
if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
ena_trc_err("Invalid proto num (%u)\n", proto);
ena_trc_err(ena_dev, "Invalid proto num (%u)\n", proto);
return ENA_COM_INVAL;
}
@ -2604,7 +2620,7 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
/* Make sure all the fields are supported */
supported_fields = hash_ctrl->supported_fields[proto].fields;
if ((hash_fields & supported_fields) != hash_fields) {
ena_trc_err("proto %d doesn't support the required fields %x. supports only: %x\n",
ena_trc_err(ena_dev, "Proto %d doesn't support the required fields %x. supports only: %x\n",
proto, hash_fields, supported_fields);
}
@ -2645,14 +2661,14 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
if (!ena_com_check_supported_feature_id(ena_dev,
ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
ena_trc_dbg("Feature %d isn't supported\n",
ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
return ENA_COM_UNSUPPORTED;
}
ret = ena_com_ind_tbl_convert_to_device(ena_dev);
if (ret) {
ena_trc_err("Failed to convert host indirection table to device table\n");
ena_trc_err(ena_dev, "Failed to convert host indirection table to device table\n");
return ret;
}
@ -2669,7 +2685,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
&cmd.control_buffer.address,
rss->rss_ind_tbl_dma_addr);
if (unlikely(ret)) {
ena_trc_err("memory address set failed\n");
ena_trc_err(ena_dev, "Memory address set failed\n");
return ret;
}
@ -2683,7 +2699,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
sizeof(resp));
if (unlikely(ret))
ena_trc_err("Failed to set indirect table. error: %d\n", ret);
ena_trc_err(ena_dev, "Failed to set indirect table. error: %d\n", ret);
return ret;
}
@ -2848,7 +2864,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
&cmd.u.host_attr.debug_ba,
host_attr->debug_area_dma_addr);
if (unlikely(ret)) {
ena_trc_err("memory address set failed\n");
ena_trc_err(ena_dev, "Memory address set failed\n");
return ret;
}
@ -2856,7 +2872,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
&cmd.u.host_attr.os_info_ba,
host_attr->host_info_dma_addr);
if (unlikely(ret)) {
ena_trc_err("memory address set failed\n");
ena_trc_err(ena_dev, "Memory address set failed\n");
return ret;
}
@ -2869,7 +2885,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
sizeof(resp));
if (unlikely(ret))
ena_trc_err("Failed to set host attributes: %d\n", ret);
ena_trc_err(ena_dev, "Failed to set host attributes: %d\n", ret);
return ret;
}
@ -2881,12 +2897,13 @@ bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
ENA_ADMIN_INTERRUPT_MODERATION);
}
static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs,
static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *ena_dev,
u32 coalesce_usecs,
u32 intr_delay_resolution,
u32 *intr_moder_interval)
{
if (!intr_delay_resolution) {
ena_trc_err("Illegal interrupt delay granularity value\n");
ena_trc_err(ena_dev, "Illegal interrupt delay granularity value\n");
return ENA_COM_FAULT;
}
@ -2895,11 +2912,11 @@ static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs,
return 0;
}
int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
u32 tx_coalesce_usecs)
{
return ena_com_update_nonadaptive_moderation_interval(tx_coalesce_usecs,
return ena_com_update_nonadaptive_moderation_interval(ena_dev,
tx_coalesce_usecs,
ena_dev->intr_delay_resolution,
&ena_dev->intr_moder_tx_interval);
}
@ -2907,7 +2924,8 @@ int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_de
int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
u32 rx_coalesce_usecs)
{
return ena_com_update_nonadaptive_moderation_interval(rx_coalesce_usecs,
return ena_com_update_nonadaptive_moderation_interval(ena_dev,
rx_coalesce_usecs,
ena_dev->intr_delay_resolution,
&ena_dev->intr_moder_rx_interval);
}
@ -2923,12 +2941,12 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
if (rc) {
if (rc == ENA_COM_UNSUPPORTED) {
ena_trc_dbg("Feature %d isn't supported\n",
ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
ENA_ADMIN_INTERRUPT_MODERATION);
rc = 0;
} else {
ena_trc_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
rc);
ena_trc_err(ena_dev,
"Failed to get interrupt moderation admin cmd. rc: %d\n", rc);
}
/* no moderation supported, disable adaptive support */
@ -2976,7 +2994,7 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
if (unlikely(ena_dev->tx_max_header_size == 0)) {
ena_trc_err("the size of the LLQ entry is smaller than needed\n");
ena_trc_err(ena_dev, "The size of the LLQ entry is smaller than needed\n");
return -EINVAL;
}

drivers/net/ena/base/ena_com.h

@ -300,6 +300,7 @@ struct ena_com_dev {
void __iomem *mem_bar;
void *dmadev;
void *bus;
ena_netdev *net_device;
enum ena_admin_placement_policy_type tx_mem_queue_type;
u32 tx_max_header_size;
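
For context, a hedged sketch of what a platform that does use the argument
might do with the new net_device field (platform_log_err is hypothetical;
the DPDK PMD does nothing of the sort):

    /* Hypothetical platform-side definition: route messages through the
     * attached net_device so logs identify the owning interface. */
    #define ena_trc_err(dev, format, arg...) \
            platform_log_err((dev)->net_device, format, ##arg)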
@ -944,6 +945,26 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
struct ena_admin_feature_llq_desc *llq_features,
struct ena_llq_configurations *llq_default_config);
/* ena_com_io_sq_to_ena_dev - Extract ena_com_dev using contained field io_sq.
* @io_sq: IO submit queue struct
*
* @return - ena_com_dev struct extracted from io_sq
*/
static inline struct ena_com_dev *ena_com_io_sq_to_ena_dev(struct ena_com_io_sq *io_sq)
{
return container_of(io_sq, struct ena_com_dev, io_sq_queues[io_sq->qid]);
}
/* ena_com_io_cq_to_ena_dev - Extract ena_com_dev using contained field io_cq.
* @io_cq: IO completion queue struct
*
* @return - ena_com_dev struct extracted from io_cq
*/
static inline struct ena_com_dev *ena_com_io_cq_to_ena_dev(struct ena_com_io_cq *io_cq)
{
return container_of(io_cq, struct ena_com_dev, io_cq_queues[io_cq->qid]);
}
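
As a side note, a self-contained sketch of the container_of pattern these
helpers rely on (the toy struct names are assumptions; the real definitions
live in this header):

    #include <stdio.h>
    #include <stddef.h>

    /* Toy stand-ins for ena_com_io_sq / ena_com_dev. */
    struct toy_io_sq { unsigned qid; };
    struct toy_dev {
            int id;
            struct toy_io_sq io_sq_queues[4];
    };

    /* Classic container_of: subtract the member's offset from the
     * member's address to recover the enclosing struct. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    int main(void)
    {
            struct toy_dev dev = { .id = 7 };
            struct toy_io_sq *sq = &dev.io_sq_queues[2];
            sq->qid = 2;

            /* Equivalent to the helpers above: step back to element 0,
             * then subtract the array's offset within the struct. */
            struct toy_dev *d = container_of(sq - sq->qid, struct toy_dev,
                                             io_sq_queues[0]);
            printf("recovered dev id: %d\n", d->id); /* prints 7 */
            return 0;
    }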
static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
{
return ena_dev->adaptive_coalescing;

drivers/net/ena/base/ena_eth_com.c

@ -57,12 +57,14 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
if (is_llq_max_tx_burst_exists(io_sq)) {
if (unlikely(!io_sq->entries_in_tx_burst_left)) {
ena_trc_err("Error: trying to send more packets than tx burst allows\n");
ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
"Error: trying to send more packets than tx burst allows\n");
return ENA_COM_NO_SPACE;
}
io_sq->entries_in_tx_burst_left--;
ena_trc_dbg("decreasing entries_in_tx_burst_left of queue %d to %d\n",
ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
"Decreasing entries_in_tx_burst_left of queue %d to %d\n",
io_sq->qid, io_sq->entries_in_tx_burst_left);
}
@ -101,12 +103,14 @@ static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
llq_info->descs_num_before_header * io_sq->desc_entry_size;
if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) {
ena_trc_err("trying to write header larger than llq entry can accommodate\n");
ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
"Trying to write header larger than llq entry can accommodate\n");
return ENA_COM_FAULT;
}
if (unlikely(!bounce_buffer)) {
ena_trc_err("bounce buffer is NULL\n");
ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
"Bounce buffer is NULL\n");
return ENA_COM_FAULT;
}
@ -124,7 +128,8 @@ static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
bounce_buffer = pkt_ctrl->curr_bounce_buf;
if (unlikely(!bounce_buffer)) {
ena_trc_err("bounce buffer is NULL\n");
ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
"Bounce buffer is NULL\n");
return NULL;
}
@ -149,7 +154,8 @@ static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
rc = ena_com_write_bounce_buffer_to_dev(io_sq,
pkt_ctrl->curr_bounce_buf);
if (unlikely(rc)) {
ena_trc_err("failed to write bounce buffer to device\n");
ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
"Failed to write bounce buffer to device\n");
return rc;
}
@ -182,7 +188,8 @@ static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
rc = ena_com_write_bounce_buffer_to_dev(io_sq,
pkt_ctrl->curr_bounce_buf);
if (unlikely(rc)) {
ena_trc_err("failed to write bounce buffer to device\n");
ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
"Failed to write bounce buffer to device\n");
return rc;
}
@ -252,7 +259,8 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
io_cq->cur_rx_pkt_cdesc_count = 0;
io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;
ena_trc_dbg("ena q_id: %d packets were completed. first desc idx %u descs# %d\n",
ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
"ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
io_cq->qid, *first_cdesc_idx, count);
} else {
io_cq->cur_rx_pkt_cdesc_count += count;
@ -335,8 +343,9 @@ static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
}
}
static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
struct ena_eth_io_rx_cdesc_base *cdesc)
static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq,
struct ena_com_rx_ctx *ena_rx_ctx,
struct ena_eth_io_rx_cdesc_base *cdesc)
{
ena_rx_ctx->l3_proto = cdesc->status &
ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
@ -357,7 +366,8 @@ static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;
ena_trc_dbg("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash frag %d frag: %d cdesc_status: %x\n",
ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
"l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
ena_rx_ctx->l3_proto,
ena_rx_ctx->l4_proto,
ena_rx_ctx->l3_csum_err,
@ -386,23 +396,26 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
u64 addr_hi;
ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX,
"wrong Q type");
ena_com_io_sq_to_ena_dev(io_sq), "wrong Q type");
/* num_bufs +1 for potential meta desc */
if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
ena_trc_dbg("Not enough space in the tx queue\n");
ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
"Not enough space in the tx queue\n");
return ENA_COM_NO_MEM;
}
if (unlikely(header_len > io_sq->tx_max_header_size)) {
ena_trc_err("header size is too large %d max header: %d\n",
ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
"Header size is too large %d max header: %d\n",
header_len, io_sq->tx_max_header_size);
return ENA_COM_INVAL;
}
if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV
&& !buffer_to_push)) {
ena_trc_err("push header wasn't provided on LLQ mode\n");
ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
"Push header wasn't provided on LLQ mode\n");
return ENA_COM_INVAL;
}
@ -412,7 +425,8 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
if (unlikely(rc)) {
ena_trc_err("failed to create and store tx meta desc\n");
ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
"Failed to create and store tx meta desc\n");
return rc;
}
@ -420,7 +434,8 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
if (unlikely(!num_bufs && !header_len)) {
rc = ena_com_close_bounce_buffer(io_sq);
if (rc)
ena_trc_err("failed to write buffers to LLQ\n");
ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
"Failed to write buffers to LLQ\n");
*nb_hw_desc = io_sq->tail - start_tail;
return rc;
}
@ -481,7 +496,8 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
if (likely(i != 0)) {
rc = ena_com_sq_update_tail(io_sq);
if (unlikely(rc)) {
ena_trc_err("failed to update sq tail\n");
ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
"Failed to update sq tail\n");
return rc;
}
@ -513,13 +529,15 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
rc = ena_com_sq_update_tail(io_sq);
if (unlikely(rc)) {
ena_trc_err("failed to update sq tail of the last descriptor\n");
ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
"Failed to update sq tail of the last descriptor\n");
return rc;
}
rc = ena_com_close_bounce_buffer(io_sq);
if (rc)
ena_trc_err("failed when closing bounce buffer\n");
ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq),
"Failed when closing bounce buffer\n");
*nb_hw_desc = io_sq->tail - start_tail;
return rc;
@ -537,7 +555,7 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
u16 i = 0;
ENA_WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
"wrong Q type");
ena_com_io_cq_to_ena_dev(io_cq), "wrong Q type");
nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
if (nb_hw_desc == 0) {
@ -545,11 +563,13 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
return 0;
}
ena_trc_dbg("fetch rx packet: queue %d completed desc: %d\n",
ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
"Fetch rx packet: queue %d completed desc: %d\n",
io_cq->qid, nb_hw_desc);
if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
ena_trc_err("Too many RX cdescs (%d) > MAX(%d)\n",
ena_trc_err(ena_com_io_cq_to_ena_dev(io_cq),
"Too many RX cdescs (%d) > MAX(%d)\n",
nb_hw_desc, ena_rx_ctx->max_bufs);
return ENA_COM_NO_SPACE;
}
@ -573,13 +593,15 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
/* Update SQ head ptr */
io_sq->next_to_comp += nb_hw_desc;
ena_trc_dbg("[%s][QID#%d] Updating SQ head to: %d\n", __func__,
ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
"[%s][QID#%d] Updating SQ head to: %d\n", __func__,
io_sq->qid, io_sq->next_to_comp);
/* Get rx flags from the last pkt */
ena_com_rx_set_flags(ena_rx_ctx, cdesc);
ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc);
ena_rx_ctx->descs = nb_hw_desc;
return 0;
}
@ -590,7 +612,7 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
struct ena_eth_io_rx_desc *desc;
ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
"wrong Q type");
ena_com_io_sq_to_ena_dev(io_sq), "wrong Q type");
if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
return ENA_COM_NO_SPACE;
@ -610,6 +632,10 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
desc->req_id = req_id;
ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
"[%s] Adding single RX desc, Queue: %u, req_id: %u\n",
__func__, io_sq->qid, req_id);
desc->buff_addr_lo = (u32)ena_buf->paddr;
desc->buff_addr_hi =
((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

drivers/net/ena/base/ena_eth_com.h

@ -143,7 +143,8 @@ static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
llq_info->descs_per_entry);
}
ena_trc_dbg("queue: %d num_descs: %d num_entries_needed: %d\n",
ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
"Queue: %d num_descs: %d num_entries_needed: %d\n",
io_sq->qid, num_descs, num_entries_needed);
return num_entries_needed > io_sq->entries_in_tx_burst_left;
@ -154,14 +155,16 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;
u16 tail = io_sq->tail;
ena_trc_dbg("write submission queue doorbell for queue: %d tail: %d\n",
ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
"Write submission queue doorbell for queue: %d tail: %d\n",
io_sq->qid, tail);
ENA_REG_WRITE32(io_sq->bus, tail, io_sq->db_addr);
if (is_llq_max_tx_burst_exists(io_sq)) {
ena_trc_dbg("reset available entries in tx burst for queue %d to %d\n",
io_sq->qid, max_entries_in_tx_burst);
ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
"Reset available entries in tx burst for queue %d to %d\n",
io_sq->qid, max_entries_in_tx_burst);
io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
}
@ -179,7 +182,8 @@ static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);
if (unlikely(need_update)) {
ena_trc_dbg("Write completion queue doorbell for queue %d: head: %d\n",
ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq),
"Write completion queue doorbell for queue %d: head: %d\n",
io_cq->qid, head);
ENA_REG_WRITE32(io_cq->bus, head, io_cq->cq_head_db_reg);
io_cq->last_head_update = head;
@ -243,7 +247,8 @@ static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
*req_id = READ_ONCE16(cdesc->req_id);
if (unlikely(*req_id >= io_cq->q_depth)) {
ena_trc_err("Invalid req id %d\n", cdesc->req_id);
ena_trc_err(ena_com_io_cq_to_ena_dev(io_cq),
"Invalid req id %d\n", cdesc->req_id);
return ENA_COM_INVAL;
}

drivers/net/ena/base/ena_plat_dpdk.h

@ -32,6 +32,7 @@ typedef uint32_t u32;
typedef uint16_t u16;
typedef uint8_t u8;
typedef struct rte_eth_dev ena_netdev;
typedef uint64_t dma_addr_t;
#ifndef ETIME
#define ETIME ETIMEDOUT
@ -98,29 +99,33 @@ extern int ena_logtype_com;
(~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
#ifdef RTE_LIBRTE_ENA_COM_DEBUG
#define ena_trc_log(level, fmt, arg...) \
rte_log(RTE_LOG_ ## level, ena_logtype_com, \
"[ENA_COM: %s]" fmt, __func__, ##arg)
#define ena_trc_log(dev, level, fmt, arg...) \
( \
ENA_TOUCH(dev), \
rte_log(RTE_LOG_ ## level, ena_logtype_com, \
"[ENA_COM: %s]" fmt, __func__, ##arg) \
)
#define ena_trc_dbg(format, arg...) ena_trc_log(DEBUG, format, ##arg)
#define ena_trc_info(format, arg...) ena_trc_log(INFO, format, ##arg)
#define ena_trc_warn(format, arg...) ena_trc_log(WARNING, format, ##arg)
#define ena_trc_err(format, arg...) ena_trc_log(ERR, format, ##arg)
#define ena_trc_dbg(dev, format, arg...) ena_trc_log(dev, DEBUG, format, ##arg)
#define ena_trc_info(dev, format, arg...) ena_trc_log(dev, INFO, format, ##arg)
#define ena_trc_warn(dev, format, arg...) \
ena_trc_log(dev, WARNING, format, ##arg)
#define ena_trc_err(dev, format, arg...) ena_trc_log(dev, ERR, format, ##arg)
#else
#define ena_trc_dbg(format, arg...) do { } while (0)
#define ena_trc_info(format, arg...) do { } while (0)
#define ena_trc_warn(format, arg...) do { } while (0)
#define ena_trc_err(format, arg...) do { } while (0)
#define ena_trc_dbg(dev, format, arg...) ENA_TOUCH(dev)
#define ena_trc_info(dev, format, arg...) ENA_TOUCH(dev)
#define ena_trc_warn(dev, format, arg...) ENA_TOUCH(dev)
#define ena_trc_err(dev, format, arg...) ENA_TOUCH(dev)
#endif /* RTE_LIBRTE_ENA_COM_DEBUG */
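
Note how both branches keep the dev argument "used": in the debug build the
comma operator sequences ENA_TOUCH(dev) before rte_log() inside a single
expression, and in the non-debug build each macro reduces to ENA_TOUCH(dev)
alone. A minimal sketch of why that matters (assuming ENA_TOUCH is the
usual ((void)(x)) cast defined elsewhere in this header):

    #define ENA_TOUCH(x) ((void)(x))
    #define trc_err_disabled(dev, format, arg...) ENA_TOUCH(dev)

    static void demo(void *ena_dev)
    {
            /* Without the (void) cast, a caller whose only use of ena_dev
             * is a compiled-out log line would warn under
             * -Wunused-parameter. */
            trc_err_disabled(ena_dev, "error: %d\n", -1);
    }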
#define ENA_WARN(cond, format, arg...) \
do { \
if (unlikely(cond)) { \
ena_trc_err( \
"Warn failed on %s:%s:%d:" format, \
__FILE__, __func__, __LINE__, ##arg); \
} \
} while (0)
#define ENA_WARN(cond, dev, format, arg...) \
do { \
if (unlikely(cond)) { \
ena_trc_err(dev, \
"Warn failed on %s:%s:%d:" format, \
__FILE__, __func__, __LINE__, ##arg); \
} \
} while (0)
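
A sketch of an ENA_WARN call site after this change (mirroring the AENQ
check updated in ena_com.c above):

    ENA_WARN(aenq->head != depth, ena_dev, "Invalid AENQ state\n");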
/* Spinlock related methods */
#define ena_spinlock_t rte_spinlock_t