diff --git a/sys/contrib/ena-com/ena_com.c b/sys/contrib/ena-com/ena_com.c
index 266e39859102..8c63c1a03f76 100644
--- a/sys/contrib/ena-com/ena_com.c
+++ b/sys/contrib/ena-com/ena_com.c
@@ -1,5 +1,5 @@
 /*-
- * BSD LICENSE
+ * SPDX-License-Identifier: BSD-3-Clause
  *
  * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
  * All rights reserved.
@@ -70,9 +70,9 @@
 
 #define ENA_REGS_ADMIN_INTR_MASK 1
 
-#define ENA_MIN_POLL_US 100
+#define ENA_MIN_ADMIN_POLL_US 100
 
-#define ENA_MAX_POLL_US 5000
+#define ENA_MAX_ADMIN_POLL_US 5000
 
 /*****************************************************************************/
 /*****************************************************************************/
@@ -106,7 +106,7 @@ static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
 				dma_addr_t addr)
 {
 	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
-		ena_trc_err("dma address has more bits that the device supports\n");
+		ena_trc_err(ena_dev, "DMA address has more bits than the device supports\n");
 		return ENA_COM_INVAL;
 	}
@@ -116,16 +116,17 @@ static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
 	return 0;
 }
 
-static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
+static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
 {
-	struct ena_com_admin_sq *sq = &queue->sq;
-	u16 size = ADMIN_SQ_SIZE(queue->q_depth);
+	struct ena_com_dev *ena_dev = admin_queue->ena_dev;
+	struct ena_com_admin_sq *sq = &admin_queue->sq;
+	u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
 
-	ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, sq->entries, sq->dma_addr,
+	ENA_MEM_ALLOC_COHERENT(admin_queue->q_dmadev, size, sq->entries, sq->dma_addr,
 			       sq->mem_handle);
 
 	if (!sq->entries) {
-		ena_trc_err("memory allocation failed\n");
+		ena_trc_err(ena_dev, "Memory allocation failed\n");
 		return ENA_COM_NO_MEM;
 	}
@@ -138,16 +139,17 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
 	return 0;
 }
 
-static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
+static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
 {
-	struct ena_com_admin_cq *cq = &queue->cq;
-	u16 size = ADMIN_CQ_SIZE(queue->q_depth);
+	struct ena_com_dev *ena_dev = admin_queue->ena_dev;
+	struct ena_com_admin_cq *cq = &admin_queue->cq;
+	u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
 
-	ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, cq->entries, cq->dma_addr,
+	ENA_MEM_ALLOC_COHERENT(admin_queue->q_dmadev, size, cq->entries, cq->dma_addr,
 			       cq->mem_handle);
 
 	if (!cq->entries) {
-		ena_trc_err("memory allocation failed\n");
+		ena_trc_err(ena_dev, "Memory allocation failed\n");
 		return ENA_COM_NO_MEM;
 	}
@@ -157,22 +159,22 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
 	return 0;
 }
 
-static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
+static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
 				   struct ena_aenq_handlers *aenq_handlers)
 {
-	struct ena_com_aenq *aenq = &dev->aenq;
+	struct ena_com_aenq *aenq = &ena_dev->aenq;
 	u32 addr_low, addr_high, aenq_caps;
 	u16 size;
 
-	dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
+	ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
 	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
-	ENA_MEM_ALLOC_COHERENT(dev->dmadev, size,
+	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, size,
 			       aenq->entries, aenq->dma_addr,
 			       aenq->mem_handle);
 
 	if (!aenq->entries) {
-		ena_trc_err("memory allocation failed\n");
+		ena_trc_err(ena_dev, "Memory allocation failed\n");
 		return ENA_COM_NO_MEM;
 	}
@@ -182,18 +184,18 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
 	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
 	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
 
-	ENA_REG_WRITE32(dev->bus, addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
-	ENA_REG_WRITE32(dev->bus, addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
+	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
+	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
 
 	aenq_caps = 0;
-	aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
+	aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
 	aenq_caps |= (sizeof(struct ena_admin_aenq_entry) <<
 		      ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
 		     ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
-	ENA_REG_WRITE32(dev->bus, aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
+	ENA_REG_WRITE32(ena_dev->bus, aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
 
 	if (unlikely(!aenq_handlers)) {
-		ena_trc_err("aenq handlers pointer is NULL\n");
+		ena_trc_err(ena_dev, "AENQ handlers pointer is NULL\n");
 		return ENA_COM_INVAL;
 	}
@@ -209,31 +211,34 @@ static void comp_ctxt_release(struct ena_com_admin_queue *queue,
 	ATOMIC32_DEC(&queue->outstanding_cmds);
 }
 
-static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
+static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queue,
 					  u16 command_id, bool capture)
 {
-	if (unlikely(command_id >= queue->q_depth)) {
-		ena_trc_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
-			    command_id, queue->q_depth);
+	if (unlikely(command_id >= admin_queue->q_depth)) {
+		ena_trc_err(admin_queue->ena_dev,
+			    "Command id is larger than the queue size. cmd_id: %u queue size %d\n",
+			    command_id, admin_queue->q_depth);
 		return NULL;
 	}
 
-	if (unlikely(!queue->comp_ctx)) {
-		ena_trc_err("Completion context is NULL\n");
+	if (unlikely(!admin_queue->comp_ctx)) {
+		ena_trc_err(admin_queue->ena_dev,
+			    "Completion context is NULL\n");
 		return NULL;
 	}
 
-	if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
-		ena_trc_err("Completion context is occupied\n");
+	if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
+		ena_trc_err(admin_queue->ena_dev,
+			    "Completion context is occupied\n");
 		return NULL;
 	}
 
 	if (capture) {
-		ATOMIC32_INC(&queue->outstanding_cmds);
-		queue->comp_ctx[command_id].occupied = true;
+		ATOMIC32_INC(&admin_queue->outstanding_cmds);
+		admin_queue->comp_ctx[command_id].occupied = true;
 	}
 
-	return &queue->comp_ctx[command_id];
+	return &admin_queue->comp_ctx[command_id];
 }
 
 static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
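For readers unfamiliar with the GENMASK_ULL idiom used in ena_com_mem_addr_set above: the driver rejects any DMA address that has bits set above the device's reported address width. A minimal standalone sketch of that check, with an assumed 48-bit device (plain C, no driver headers; MASK_LOW_BITS and dma_addr_fits are illustrative names, not part of ena-com):

#include <stdint.h>
#include <stdio.h>

/* Build a mask of the low `bits` bits, as GENMASK_ULL(bits - 1, 0) does. */
#define MASK_LOW_BITS(bits) (((bits) >= 64) ? ~0ULL : ((1ULL << (bits)) - 1))

/* Returns 1 when addr fits in the device's addressable range. */
static int dma_addr_fits(uint64_t addr, unsigned int dma_addr_bits)
{
	return (addr & MASK_LOW_BITS(dma_addr_bits)) == addr;
}

int main(void)
{
	printf("%d\n", dma_addr_fits(0x0000ffffffffffffULL, 48)); /* 1: fits */
	printf("%d\n", dma_addr_fits(0x0001000000000000ULL, 48)); /* 0: too wide */
	return 0;
}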
@@ -254,7 +259,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
 	/* In case of queue FULL */
 	cnt = (u16)ATOMIC32_READ(&admin_queue->outstanding_cmds);
 	if (cnt >= admin_queue->q_depth) {
-		ena_trc_dbg("admin queue is full.\n");
+		ena_trc_dbg(admin_queue->ena_dev, "Admin queue is full.\n");
 		admin_queue->stats.out_of_space++;
 		return ERR_PTR(ENA_COM_NO_SPACE);
 	}
@@ -296,20 +301,21 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
 	return comp_ctx;
 }
 
-static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
+static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
 {
-	size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
+	struct ena_com_dev *ena_dev = admin_queue->ena_dev;
+	size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx);
 	struct ena_comp_ctx *comp_ctx;
 	u16 i;
 
-	queue->comp_ctx = ENA_MEM_ALLOC(queue->q_dmadev, size);
-	if (unlikely(!queue->comp_ctx)) {
-		ena_trc_err("memory allocation failed\n");
+	admin_queue->comp_ctx = ENA_MEM_ALLOC(admin_queue->q_dmadev, size);
+	if (unlikely(!admin_queue->comp_ctx)) {
+		ena_trc_err(ena_dev, "Memory allocation failed\n");
 		return ENA_COM_NO_MEM;
 	}
 
-	for (i = 0; i < queue->q_depth; i++) {
-		comp_ctx = get_comp_ctxt(queue, i, false);
+	for (i = 0; i < admin_queue->q_depth; i++) {
+		comp_ctx = get_comp_ctxt(admin_queue, i, false);
 		if (comp_ctx)
 			ENA_WAIT_EVENT_INIT(comp_ctx->wait_event);
 	}
@@ -377,7 +383,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
 		}
 
 		if (!io_sq->desc_addr.virt_addr) {
-			ena_trc_err("memory allocation failed\n");
+			ena_trc_err(ena_dev, "Memory allocation failed\n");
 			return ENA_COM_NO_MEM;
 		}
 	}
@@ -402,7 +408,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
 		io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size);
 
 		if (!io_sq->bounce_buf_ctrl.base_buffer) {
-			ena_trc_err("bounce buffer memory allocation failed\n");
+			ena_trc_err(ena_dev, "Bounce buffer memory allocation failed\n");
 			return ENA_COM_NO_MEM;
 		}
@@ -467,7 +473,7 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
 	}
 
 	if (!io_cq->cdesc_addr.virt_addr) {
-		ena_trc_err("memory allocation failed\n");
+		ena_trc_err(ena_dev, "Memory allocation failed\n");
 		return ENA_COM_NO_MEM;
 	}
@@ -488,7 +494,8 @@ static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *a
 	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
 	if (unlikely(!comp_ctx)) {
-		ena_trc_err("comp_ctx is NULL. Changing the admin queue running state\n");
+		ena_trc_err(admin_queue->ena_dev,
+			    "comp_ctx is NULL. Changing the admin queue running state\n");
 		admin_queue->running_state = false;
 		return;
 	}
@@ -540,10 +547,12 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu
 	admin_queue->stats.completed_cmd += comp_num;
 }
 
-static int ena_com_comp_status_to_errno(u8 comp_status)
+static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue,
+					u8 comp_status)
 {
 	if (unlikely(comp_status != 0))
-		ena_trc_err("admin command failed[%u]\n", comp_status);
+		ena_trc_err(admin_queue->ena_dev,
+			    "Admin command failed[%u]\n", comp_status);
 
 	switch (comp_status) {
 	case ENA_ADMIN_SUCCESS:
@@ -557,15 +566,17 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
 	case ENA_ADMIN_ILLEGAL_PARAMETER:
 	case ENA_ADMIN_UNKNOWN_ERROR:
 		return ENA_COM_INVAL;
+	case ENA_ADMIN_RESOURCE_BUSY:
+		return ENA_COM_TRY_AGAIN;
 	}
 
 	return ENA_COM_INVAL;
 }
 
-static inline void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
+static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
 {
-	delay_us = ENA_MAX32(ENA_MIN_POLL_US, delay_us);
-	delay_us = ENA_MIN32(delay_us * (1 << exp), ENA_MAX_POLL_US);
+	delay_us = ENA_MAX32(ENA_MIN_ADMIN_POLL_US, delay_us);
+	delay_us = ENA_MIN32(delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US);
 	ENA_USLEEP(delay_us);
 }
@@ -588,7 +599,8 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
 			break;
 
 		if (ENA_TIME_EXPIRE(timeout)) {
-			ena_trc_err("Wait for completion (polling) timeout\n");
+			ena_trc_err(admin_queue->ena_dev,
+				    "Wait for completion (polling) timeout\n");
 			/* ENA didn't have any completion */
 			ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
 			admin_queue->stats.no_completion++;
@@ -599,11 +611,12 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
 			goto err;
 		}
 
-		ena_delay_exponential_backoff_us(exp++, admin_queue->ena_dev->ena_min_poll_delay_us);
+		ena_delay_exponential_backoff_us(exp++,
+						 admin_queue->ena_dev->ena_min_poll_delay_us);
 	}
 
 	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
-		ena_trc_err("Command was aborted\n");
+		ena_trc_err(admin_queue->ena_dev, "Command was aborted\n");
 		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
 		admin_queue->stats.aborted_cmd++;
 		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
@@ -612,15 +625,16 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
 	}
 
 	ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED,
-		 "Invalid comp status %d\n", comp_ctx->status);
+		 admin_queue->ena_dev, "Invalid comp status %d\n",
+		 comp_ctx->status);
 
-	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
+	ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
 err:
 	comp_ctxt_release(admin_queue, comp_ctx);
 	return ret;
 }
 
-/**
+/*
  * Set the LLQ configurations of the firmware
 *
 * The driver provides only the enabled feature values to the device,
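The polling loop above sleeps between admin-completion polls with a capped exponential backoff. A minimal user-space sketch of the same policy, mirroring ena_delay_exponential_backoff_us() with the renamed ENA_MIN_ADMIN_POLL_US/ENA_MAX_ADMIN_POLL_US bounds (usleep stands in for ENA_USLEEP; not driver code):

#include <stdint.h>
#include <unistd.h>

#define ENA_MIN_ADMIN_POLL_US 100
#define ENA_MAX_ADMIN_POLL_US 5000

/* Sleep for delay_us * 2^exp microseconds, clamped to the
 * [ENA_MIN_ADMIN_POLL_US, ENA_MAX_ADMIN_POLL_US] window.
 */
static void delay_exponential_backoff_us(uint32_t exp, uint32_t delay_us)
{
	if (delay_us < ENA_MIN_ADMIN_POLL_US)
		delay_us = ENA_MIN_ADMIN_POLL_US;
	delay_us *= 1U << exp;
	if (delay_us > ENA_MAX_ADMIN_POLL_US)
		delay_us = ENA_MAX_ADMIN_POLL_US;
	usleep(delay_us);
}

With exp incremented on each retry (as in the loop above) and a 100 us floor, the delays run 100, 200, 400, 800, ... and saturate at 5000 us.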
ena_trc_err("Invalid entry_size_ctrl, supported: 0x%x\n", supported_feat); + ena_trc_err(ena_dev, "Invalid entry_size_ctrl, supported: 0x%x\n", + supported_feat); return -EINVAL; } - ena_trc_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", + ena_trc_err(ena_dev, "Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", llq_default_cfg->llq_ring_entry_size, supported_feat, llq_info->desc_list_entry_size); @@ -738,7 +750,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev, /* The desc list entry size should be whole multiply of 8 * This requirement comes from __iowrite64_copy() */ - ena_trc_err("illegal entry size %d\n", + ena_trc_err(ena_dev, "Illegal entry size %d\n", llq_info->desc_list_entry_size); return -EINVAL; } @@ -762,29 +774,31 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev, } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) { llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8; } else { - ena_trc_err("Invalid descs_num_before_header, supported: 0x%x\n", + ena_trc_err(ena_dev, "Invalid descs_num_before_header, supported: 0x%x\n", supported_feat); return -EINVAL; } - ena_trc_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", + ena_trc_err(ena_dev, "Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", llq_default_cfg->llq_num_decs_before_header, supported_feat, llq_info->descs_num_before_header); } /* Check for accelerated queue supported */ - llq_info->disable_meta_caching = - llq_features->accel_mode.u.get.supported_flags & - BIT(ENA_ADMIN_DISABLE_META_CACHING); + llq_accel_mode_get = llq_features->accel_mode.u.get; - if (llq_features->accel_mode.u.get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST)) + llq_info->disable_meta_caching = + !!(llq_accel_mode_get.supported_flags & + BIT(ENA_ADMIN_DISABLE_META_CACHING)); + + if (llq_accel_mode_get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST)) llq_info->max_entries_in_tx_burst = - llq_features->accel_mode.u.get.max_tx_burst_size / + llq_accel_mode_get.max_tx_burst_size / llq_default_cfg->llq_ring_entry_size_value; rc = ena_com_set_llq(ena_dev); if (rc) - ena_trc_err("Cannot set LLQ configuration: %d\n", rc); + ena_trc_err(ena_dev, "Cannot set LLQ configuration: %d\n", rc); return rc; } @@ -810,13 +824,15 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); if (comp_ctx->status == ENA_CMD_COMPLETED) { - ena_trc_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n", + ena_trc_err(admin_queue->ena_dev, + "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n", comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF"); /* Check if fallback to polling is enabled */ if (admin_queue->auto_polling) admin_queue->polling = true; } else { - ena_trc_err("The ena device didn't send a completion for the admin cmd %d status %d\n", + ena_trc_err(admin_queue->ena_dev, + "The ena device didn't send a completion for the admin cmd %d status %d\n", comp_ctx->cmd_opcode, comp_ctx->status); } /* Check if shifted to polling mode. 
@@ -830,7 +846,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com } } - ret = ena_com_comp_status_to_errno(comp_ctx->comp_status); + ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status); err: comp_ctxt_release(admin_queue, comp_ctx); return ret; @@ -878,7 +894,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset) } if (unlikely(i == timeout)) { - ena_trc_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n", + ena_trc_err(ena_dev, "Reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n", mmio_read->seq_num, offset, read_resp->req_id, @@ -888,7 +904,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset) } if (read_resp->reg_off != offset) { - ena_trc_err("Read failure: wrong offset provided\n"); + ena_trc_err(ena_dev, "Read failure: wrong offset provided\n"); ret = ENA_MMIO_READ_TIMEOUT; } else { ret = read_resp->reg_val; @@ -947,7 +963,7 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev, sizeof(destroy_resp)); if (unlikely(ret && (ret != ENA_COM_NO_DEVICE))) - ena_trc_err("failed to destroy io sq error: %d\n", ret); + ena_trc_err(ena_dev, "Failed to destroy io sq error: %d\n", ret); return ret; } @@ -1003,7 +1019,7 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout, val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) { - ena_trc_err("Reg read timeout occurred\n"); + ena_trc_err(ena_dev, "Reg read timeout occurred\n"); return ENA_COM_TIMER_EXPIRED; } @@ -1043,7 +1059,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev, int ret; if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) { - ena_trc_dbg("Feature %d isn't supported\n", feature_id); + ena_trc_dbg(ena_dev, "Feature %d isn't supported\n", feature_id); return ENA_COM_UNSUPPORTED; } @@ -1062,7 +1078,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev, &get_cmd.control_buffer.address, control_buf_dma_addr); if (unlikely(ret)) { - ena_trc_err("memory address set failed\n"); + ena_trc_err(ena_dev, "Memory address set failed\n"); return ret; } @@ -1079,7 +1095,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev, sizeof(*get_resp)); if (unlikely(ret)) - ena_trc_err("Failed to submit get_feature command %d error: %d\n", + ena_trc_err(ena_dev, "Failed to submit get_feature command %d error: %d\n", feature_id, ret); return ret; @@ -1110,13 +1126,9 @@ static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev) ENA_RSS_FILL_KEY(&hash_key->key, sizeof(hash_key->key)); /* The key buffer is stored in the device in an array of - * uint32 elements. Therefore the number of elements can be derived - * by dividing the buffer length by the size of each array element. - * In current implementation each element is sized at uint32_t - * so it's actually a division by 4 but if the element size changes, - * there is no need to rewrite this code. + * uint32 elements. 
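The accelerated-mode block above derives the TX burst budget by dividing the device-reported max_tx_burst_size by the configured LLQ ring entry size. A toy standalone calculation under assumed values (128-byte entries, 8 KiB burst budget; local names are illustrative only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Values a device might report / a driver might configure (assumed). */
	uint32_t max_tx_burst_size = 8192; /* bytes the device allows per burst */
	uint32_t ring_entry_size = 128;    /* bytes per LLQ descriptor list entry */

	/* Same arithmetic as the max_entries_in_tx_burst assignment above. */
	uint32_t max_entries_in_tx_burst = max_tx_burst_size / ring_entry_size;

	printf("up to %u LLQ entries per doorbell burst\n",
	       max_entries_in_tx_burst); /* prints 64 */
	return 0;
}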
@@ -810,13 +824,15 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
 		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
 
 		if (comp_ctx->status == ENA_CMD_COMPLETED) {
-			ena_trc_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
+			ena_trc_err(admin_queue->ena_dev,
+				    "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
 				    comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
 			/* Check if fallback to polling is enabled */
 			if (admin_queue->auto_polling)
 				admin_queue->polling = true;
 		} else {
-			ena_trc_err("The ena device didn't send a completion for the admin cmd %d status %d\n",
+			ena_trc_err(admin_queue->ena_dev,
+				    "The ena device didn't send a completion for the admin cmd %d status %d\n",
 				    comp_ctx->cmd_opcode, comp_ctx->status);
 		}
 		/* Check if shifted to polling mode.
@@ -830,7 +846,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
 		}
 	}
 
-	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
+	ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
 err:
 	comp_ctxt_release(admin_queue, comp_ctx);
 	return ret;
@@ -878,7 +894,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
 	}
 
 	if (unlikely(i == timeout)) {
-		ena_trc_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
+		ena_trc_err(ena_dev, "Reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
 			    mmio_read->seq_num,
 			    offset,
 			    read_resp->req_id,
@@ -888,7 +904,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
 	}
 
 	if (read_resp->reg_off != offset) {
-		ena_trc_err("Read failure: wrong offset provided\n");
+		ena_trc_err(ena_dev, "Read failure: wrong offset provided\n");
 		ret = ENA_MMIO_READ_TIMEOUT;
 	} else {
 		ret = read_resp->reg_val;
@@ -947,7 +963,7 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
 					    sizeof(destroy_resp));
 
 	if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
-		ena_trc_err("failed to destroy io sq error: %d\n", ret);
+		ena_trc_err(ena_dev, "Failed to destroy io sq error: %d\n", ret);
 
 	return ret;
 }
@@ -1003,7 +1019,7 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
 		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
 
 		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
-			ena_trc_err("Reg read timeout occurred\n");
+			ena_trc_err(ena_dev, "Reg read timeout occurred\n");
 			return ENA_COM_TIMER_EXPIRED;
 		}
@@ -1043,7 +1059,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
 	int ret;
 
 	if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
-		ena_trc_dbg("Feature %d isn't supported\n", feature_id);
+		ena_trc_dbg(ena_dev, "Feature %d isn't supported\n", feature_id);
 		return ENA_COM_UNSUPPORTED;
 	}
@@ -1062,7 +1078,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
 			&get_cmd.control_buffer.address,
 			control_buf_dma_addr);
 	if (unlikely(ret)) {
-		ena_trc_err("memory address set failed\n");
+		ena_trc_err(ena_dev, "Memory address set failed\n");
 		return ret;
 	}
@@ -1079,7 +1095,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
 					    sizeof(*get_resp));
 
 	if (unlikely(ret))
-		ena_trc_err("Failed to submit get_feature command %d error: %d\n",
+		ena_trc_err(ena_dev, "Failed to submit get_feature command %d error: %d\n",
 			    feature_id, ret);
 
 	return ret;
@@ -1110,13 +1126,9 @@ static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
 	ENA_RSS_FILL_KEY(&hash_key->key, sizeof(hash_key->key));
 
 	/* The key buffer is stored in the device in an array of
-	 * uint32 elements. Therefore the number of elements can be derived
-	 * by dividing the buffer length by the size of each array element.
-	 * In current implementation each element is sized at uint32_t
-	 * so it's actually a division by 4 but if the element size changes,
-	 * there is no need to rewrite this code.
+	 * uint32 elements.
 	 */
-	hash_key->keys_num = sizeof(hash_key->key) / sizeof(hash_key->key[0]);
+	hash_key->key_parts = ENA_ADMIN_RSS_KEY_PARTS;
 }
 
 static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
@@ -1189,13 +1201,13 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
 	int ret;
 
 	ret = ena_com_get_feature(ena_dev, &get_resp,
-				  ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
+				  ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG, 0);
 	if (unlikely(ret))
 		return ret;
 
 	if ((get_resp.u.ind_table.min_size > log_size) ||
 	    (get_resp.u.ind_table.max_size < log_size)) {
-		ena_trc_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
+		ena_trc_err(ena_dev, "Indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
 			    1 << log_size,
 			    1 << get_resp.u.ind_table.min_size,
 			    1 << get_resp.u.ind_table.max_size);
@@ -1299,7 +1311,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
 				&create_cmd.sq_ba,
 				io_sq->desc_addr.phys_addr);
 		if (unlikely(ret)) {
-			ena_trc_err("memory address set failed\n");
+			ena_trc_err(ena_dev, "Memory address set failed\n");
 			return ret;
 		}
 	}
@@ -1310,7 +1322,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
 					    (struct ena_admin_acq_entry *)&cmd_completion,
 					    sizeof(cmd_completion));
 	if (unlikely(ret)) {
-		ena_trc_err("Failed to create IO SQ. error: %d\n", ret);
+		ena_trc_err(ena_dev, "Failed to create IO SQ. error: %d\n", ret);
 		return ret;
 	}
@@ -1328,7 +1340,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
 			cmd_completion.llq_descriptors_offset);
 	}
 
-	ena_trc_dbg("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
+	ena_trc_dbg(ena_dev, "Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
 
 	return ret;
 }
@@ -1362,7 +1374,7 @@ static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
 	u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution;
 
 	if (unlikely(!intr_delay_resolution)) {
-		ena_trc_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
+		ena_trc_err(ena_dev, "Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
 		intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
 	}
@@ -1398,23 +1410,25 @@ int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
 					    comp, comp_size);
 	if (IS_ERR(comp_ctx)) {
 		if (comp_ctx == ERR_PTR(ENA_COM_NO_DEVICE))
-			ena_trc_dbg("Failed to submit command [%ld]\n",
+			ena_trc_dbg(admin_queue->ena_dev,
+				    "Failed to submit command [%ld]\n",
 				    PTR_ERR(comp_ctx));
 		else
-			ena_trc_err("Failed to submit command [%ld]\n",
+			ena_trc_err(admin_queue->ena_dev,
+				    "Failed to submit command [%ld]\n",
 				    PTR_ERR(comp_ctx));
 
-		return PTR_ERR(comp_ctx);
+		return (int)PTR_ERR(comp_ctx);
 	}
 
 	ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
 	if (unlikely(ret)) {
 		if (admin_queue->running_state)
-			ena_trc_err("Failed to process command. ret = %d\n",
-				    ret);
+			ena_trc_err(admin_queue->ena_dev,
+				    "Failed to process command. ret = %d\n", ret);
 		else
-			ena_trc_dbg("Failed to process command. ret = %d\n",
-				    ret);
+			ena_trc_dbg(admin_queue->ena_dev,
+				    "Failed to process command. ret = %d\n", ret);
 	}
 	return ret;
 }
@@ -1443,7 +1457,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
 			&create_cmd.cq_ba,
 			io_cq->cdesc_addr.phys_addr);
 	if (unlikely(ret)) {
-		ena_trc_err("memory address set failed\n");
+		ena_trc_err(ena_dev, "Memory address set failed\n");
 		return ret;
 	}
@@ -1453,7 +1467,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
 					    (struct ena_admin_acq_entry *)&cmd_completion,
 					    sizeof(cmd_completion));
 	if (unlikely(ret)) {
-		ena_trc_err("Failed to create IO CQ. error: %d\n", ret);
+		ena_trc_err(ena_dev, "Failed to create IO CQ. error: %d\n", ret);
 		return ret;
 	}
@@ -1472,7 +1486,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
 		(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
 		cmd_completion.numa_node_register_offset);
 
-	ena_trc_dbg("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
+	ena_trc_dbg(ena_dev, "Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
 
 	return ret;
 }
@@ -1482,7 +1496,7 @@ int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
 			    struct ena_com_io_cq **io_cq)
 {
 	if (qid >= ENA_TOTAL_NUM_QUEUES) {
-		ena_trc_err("Invalid queue number %d but the max is %d\n",
+		ena_trc_err(ena_dev, "Invalid queue number %d but the max is %d\n",
 			    qid, ENA_TOTAL_NUM_QUEUES);
 		return ENA_COM_INVAL;
 	}
@@ -1548,7 +1562,7 @@ int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
 					    sizeof(destroy_resp));
 
 	if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
-		ena_trc_err("Failed to destroy IO CQ. error: %d\n", ret);
+		ena_trc_err(ena_dev, "Failed to destroy IO CQ. error: %d\n", ret);
 
 	return ret;
 }
@@ -1572,7 +1586,7 @@ void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
 {
 	u16 depth = ena_dev->aenq.q_depth;
 
-	ENA_WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
+	ENA_WARN(ena_dev->aenq.head != depth, ena_dev, "Invalid AENQ state\n");
 
 	/* Init head_db to mark that all entries in the queue
 	 * are initially available
@@ -1590,12 +1604,12 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
 
 	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
 	if (ret) {
-		ena_trc_info("Can't get aenq configuration\n");
+		ena_trc_info(ena_dev, "Can't get aenq configuration\n");
 		return ret;
 	}
 
 	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
-		ena_trc_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
+		ena_trc_warn(ena_dev, "Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
 			     get_resp.u.aenq.supported_groups,
 			     groups_flag);
 		return ENA_COM_UNSUPPORTED;
@@ -1616,7 +1630,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
 					    sizeof(resp));
 
 	if (unlikely(ret))
-		ena_trc_err("Failed to config AENQ ret: %d\n", ret);
+		ena_trc_err(ena_dev, "Failed to config AENQ ret: %d\n", ret);
 
 	return ret;
 }
@@ -1624,20 +1638,20 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
 int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
 {
 	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
-	int width;
+	u32 width;
 
 	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
-		ena_trc_err("Reg read timeout occurred\n");
+		ena_trc_err(ena_dev, "Reg read timeout occurred\n");
 		return ENA_COM_TIMER_EXPIRED;
 	}
 
 	width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
 		ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
 
-	ena_trc_dbg("ENA dma width: %d\n", width);
+	ena_trc_dbg(ena_dev, "ENA dma width: %d\n", width);
 
 	if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
-		ena_trc_err("DMA width illegal value: %d\n", width);
+		ena_trc_err(ena_dev, "DMA width illegal value: %d\n", width);
 		return ENA_COM_INVAL;
 	}
@@ -1661,16 +1675,16 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
 
 	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
 		     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
-		ena_trc_err("Reg read timeout occurred\n");
+		ena_trc_err(ena_dev, "Reg read timeout occurred\n");
 		return ENA_COM_TIMER_EXPIRED;
 	}
 
-	ena_trc_info("ena device version: %d.%d\n",
+	ena_trc_info(ena_dev, "ENA device version: %d.%d\n",
 		     (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
 		     ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
 		     ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
 
-	ena_trc_info("ena controller version: %d.%d.%d implementation version %d\n",
+	ena_trc_info(ena_dev, "ENA controller version: %d.%d.%d implementation version %d\n",
 		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK)
 		     >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
 		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK)
@@ -1686,13 +1700,29 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
 
 	/* Validate the ctrl version without the implementation ID */
 	if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
-		ena_trc_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
+		ena_trc_err(ena_dev, "ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
 		return -1;
 	}
 
 	return 0;
 }
 
+static void
+ena_com_free_ena_admin_queue_comp_ctx(struct ena_com_dev *ena_dev,
+				      struct ena_com_admin_queue *admin_queue)
+{
+	if (!admin_queue->comp_ctx)
+		return;
+
+	ENA_WAIT_EVENTS_DESTROY(admin_queue);
+	ENA_MEM_FREE(ena_dev->dmadev,
+		     admin_queue->comp_ctx,
+		     (admin_queue->q_depth * sizeof(struct ena_comp_ctx)));
+
+	admin_queue->comp_ctx = NULL;
+}
+
 void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
 {
 	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
@@ -1701,12 +1731,8 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
 	struct ena_com_aenq *aenq = &ena_dev->aenq;
 	u16 size;
 
-	ENA_WAIT_EVENT_DESTROY(admin_queue->comp_ctx->wait_event);
-	if (admin_queue->comp_ctx)
-		ENA_MEM_FREE(ena_dev->dmadev,
-			     admin_queue->comp_ctx,
-			     (admin_queue->q_depth * sizeof(struct ena_comp_ctx)));
-	admin_queue->comp_ctx = NULL;
+	ena_com_free_ena_admin_queue_comp_ctx(ena_dev, admin_queue);
+
 	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
 	if (sq->entries)
 		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries,
@@ -1822,12 +1848,12 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
 
 	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
 	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
-		ena_trc_err("Reg read timeout occurred\n");
+		ena_trc_err(ena_dev, "Reg read timeout occurred\n");
 		return ENA_COM_TIMER_EXPIRED;
 	}
 
 	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
-		ena_trc_err("Device isn't ready, abort com init\n");
+		ena_trc_err(ena_dev, "Device isn't ready, abort com init\n");
 		return ENA_COM_NO_DEVICE;
 	}
@@ -1905,7 +1931,7 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
 	int ret;
 
 	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
-		ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
+		ena_trc_err(ena_dev, "Qid (%d) is bigger than max num of queues (%d)\n",
 			    ctx->qid, ENA_TOTAL_NUM_QUEUES);
 		return ENA_COM_INVAL;
 	}
@@ -1964,7 +1990,7 @@ void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
 	struct ena_com_io_cq *io_cq;
 
 	if (qid >= ENA_TOTAL_NUM_QUEUES) {
-		ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
+		ena_trc_err(ena_dev, "Qid (%d) is bigger than max num of queues (%d)\n",
 			    qid, ENA_TOTAL_NUM_QUEUES);
 		return;
 	}
@@ -1997,6 +2023,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
 
 	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
 	       sizeof(get_resp.u.dev_attr));
+
 	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
 
 	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
@@ -2063,17 +2090,6 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
 	else
 		return rc;
 
-	rc = ena_com_get_feature(ena_dev, &get_resp,
-				 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
-	if (!rc)
-		memcpy(&get_feat_ctx->ind_table, &get_resp.u.ind_table,
-		       sizeof(get_resp.u.ind_table));
-	else if (rc == ENA_COM_UNSUPPORTED)
-		memset(&get_feat_ctx->ind_table, 0x0,
-		       sizeof(get_feat_ctx->ind_table));
-	else
-		return rc;
-
 	return 0;
 }
@@ -2085,10 +2101,10 @@ void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
 /* ena_handle_specific_aenq_event:
  * return the handler that is relevant to the specific event group
  */
-static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
+static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *ena_dev,
 						     u16 group)
 {
-	struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;
+	struct ena_aenq_handlers *aenq_handlers = ena_dev->aenq.aenq_handlers;
 
 	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
 		return aenq_handlers->handlers[group];
@@ -2100,11 +2116,11 @@ static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
  * handles the aenq incoming events.
  * pop events from the queue and apply the specific handler
  */
-void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
+void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
 {
 	struct ena_admin_aenq_entry *aenq_e;
 	struct ena_admin_aenq_common_desc *aenq_common;
-	struct ena_com_aenq *aenq = &dev->aenq;
+	struct ena_com_aenq *aenq = &ena_dev->aenq;
 	u64 timestamp;
 	ena_aenq_handler handler_cb;
 	u16 masked_head, processed = 0;
@@ -2125,13 +2141,14 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
 		timestamp = (u64)aenq_common->timestamp_low |
 			((u64)aenq_common->timestamp_high << 32);
 
-		ena_trc_dbg("AENQ! Group[%x] Syndrom[%x] timestamp: [%" ENA_PRIu64 "s]\n",
+
+		ena_trc_dbg(ena_dev, "AENQ! Group[%x] Syndrome[%x] timestamp: [%" ENA_PRIu64 "s]\n",
 			    aenq_common->group,
-			    aenq_common->syndrom,
+			    aenq_common->syndrome,
 			    timestamp);
 
 		/* Handle specific event*/
-		handler_cb = ena_com_get_specific_aenq_cb(dev,
+		handler_cb = ena_com_get_specific_aenq_cb(ena_dev,
 							  aenq_common->group);
 		handler_cb(data, aenq_e); /* call the actual event handler*/
@@ -2156,8 +2173,8 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
 
 	/* write the aenq doorbell after all AENQ descriptors were read */
 	mb();
-	ENA_REG_WRITE32_RELAXED(dev->bus, (u32)aenq->head,
-				dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+	ENA_REG_WRITE32_RELAXED(ena_dev->bus, (u32)aenq->head,
+				ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
 	mmiowb();
 }
 #ifdef ENA_EXTENDED_STATS
@@ -2193,19 +2210,19 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
 
 	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
 		     (cap == ENA_MMIO_READ_TIMEOUT))) {
-		ena_trc_err("Reg read32 timeout occurred\n");
+		ena_trc_err(ena_dev, "Reg read32 timeout occurred\n");
 		return ENA_COM_TIMER_EXPIRED;
 	}
 
 	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
-		ena_trc_err("Device isn't ready, can't reset device\n");
+		ena_trc_err(ena_dev, "Device isn't ready, can't reset device\n");
 		return ENA_COM_INVAL;
 	}
 
 	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
 		  ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
 	if (timeout == 0) {
-		ena_trc_err("Invalid timeout value\n");
+		ena_trc_err(ena_dev, "Invalid timeout value\n");
 		return ENA_COM_INVAL;
 	}
@@ -2221,7 +2238,7 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
 	rc = wait_for_reset_state(ena_dev, timeout,
 				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
 	if (rc != 0) {
-		ena_trc_err("Reset indication didn't turn on\n");
+		ena_trc_err(ena_dev, "Reset indication didn't turn on\n");
 		return rc;
 	}
@@ -2229,7 +2246,7 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
 	ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
 	rc = wait_for_reset_state(ena_dev, timeout, 0);
 	if (rc != 0) {
-		ena_trc_err("Reset indication didn't turn off\n");
+		ena_trc_err(ena_dev, "Reset indication didn't turn off\n");
 		return rc;
 	}
@@ -2266,7 +2283,22 @@ static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
 					     sizeof(*get_resp));
 
 	if (unlikely(ret))
-		ena_trc_err("Failed to get stats. error: %d\n", ret);
+		ena_trc_err(ena_dev, "Failed to get stats. error: %d\n", ret);
+
+	return ret;
+}
+
+int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
+			  struct ena_admin_eni_stats *stats)
+{
+	struct ena_com_stats_ctx ctx;
+	int ret;
+
+	memset(&ctx, 0x0, sizeof(ctx));
+	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
+	if (likely(ret == 0))
+		memcpy(stats, &ctx.get_resp.u.eni_stats,
+		       sizeof(ctx.get_resp.u.eni_stats));
 
 	return ret;
 }
error: %d\n", + ena_trc_err(ena_dev, "Failed to set hash function %d. error: %d\n", rss->hash_func, ret); return ENA_COM_INVAL; } @@ -2466,7 +2498,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, return rc; if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) { - ena_trc_err("Flow hash function %d isn't supported\n", func); + ena_trc_err(ena_dev, "Flow hash function %d isn't supported\n", func); return ENA_COM_UNSUPPORTED; } @@ -2474,20 +2506,20 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, case ENA_ADMIN_TOEPLITZ: if (key) { if (key_len != sizeof(hash_key->key)) { - ena_trc_err("key len (%hu) doesn't equal the supported size (%zu)\n", + ena_trc_err(ena_dev, "key len (%hu) doesn't equal the supported size (%zu)\n", key_len, sizeof(hash_key->key)); return ENA_COM_INVAL; } memcpy(hash_key->key, key, key_len); rss->hash_init_val = init_val; - hash_key->keys_num = key_len / sizeof(hash_key->key[0]); + hash_key->key_parts = key_len / sizeof(hash_key->key[0]); } break; case ENA_ADMIN_CRC32: rss->hash_init_val = init_val; break; default: - ena_trc_err("Invalid hash function (%d)\n", func); + ena_trc_err(ena_dev, "Invalid hash function (%d)\n", func); return ENA_COM_INVAL; } @@ -2535,7 +2567,8 @@ int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key) ena_dev->rss.hash_key; if (key) - memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2); + memcpy(key, hash_key->key, + (size_t)(hash_key->key_parts) * sizeof(hash_key->key[0])); return 0; } @@ -2572,7 +2605,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev) if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_INPUT)) { - ena_trc_dbg("Feature %d isn't supported\n", + ena_trc_dbg(ena_dev, "Feature %d isn't supported\n", ENA_ADMIN_RSS_HASH_INPUT); return ENA_COM_UNSUPPORTED; } @@ -2591,7 +2624,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev) &cmd.control_buffer.address, rss->hash_ctrl_dma_addr); if (unlikely(ret)) { - ena_trc_err("memory address set failed\n"); + ena_trc_err(ena_dev, "Memory address set failed\n"); return ret; } cmd.control_buffer.length = sizeof(*hash_ctrl); @@ -2602,7 +2635,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev) (struct ena_admin_acq_entry *)&resp, sizeof(resp)); if (unlikely(ret)) - ena_trc_err("Failed to set hash input. error: %d\n", ret); + ena_trc_err(ena_dev, "Failed to set hash input. error: %d\n", ret); return ret; } @@ -2652,7 +2685,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev) available_fields = hash_ctrl->selected_fields[i].fields & hash_ctrl->supported_fields[i].fields; if (available_fields != hash_ctrl->selected_fields[i].fields) { - ena_trc_err("hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n", + ena_trc_err(ena_dev, "Hash control doesn't support all the desire configuration. 
proto %x supported %x selected %x\n", i, hash_ctrl->supported_fields[i].fields, hash_ctrl->selected_fields[i].fields); return ENA_COM_UNSUPPORTED; @@ -2678,7 +2711,7 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev, int rc; if (proto >= ENA_ADMIN_RSS_PROTO_NUM) { - ena_trc_err("Invalid proto num (%u)\n", proto); + ena_trc_err(ena_dev, "Invalid proto num (%u)\n", proto); return ENA_COM_INVAL; } @@ -2690,7 +2723,7 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev, /* Make sure all the fields are supported */ supported_fields = hash_ctrl->supported_fields[proto].fields; if ((hash_fields & supported_fields) != hash_fields) { - ena_trc_err("proto %d doesn't support the required fields %x. supports only: %x\n", + ena_trc_err(ena_dev, "Proto %d doesn't support the required fields %x. supports only: %x\n", proto, hash_fields, supported_fields); } @@ -2730,15 +2763,15 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev) int ret; if (!ena_com_check_supported_feature_id(ena_dev, - ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) { - ena_trc_dbg("Feature %d isn't supported\n", - ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG); + ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) { + ena_trc_dbg(ena_dev, "Feature %d isn't supported\n", + ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG); return ENA_COM_UNSUPPORTED; } ret = ena_com_ind_tbl_convert_to_device(ena_dev); if (ret) { - ena_trc_err("Failed to convert host indirection table to device table\n"); + ena_trc_err(ena_dev, "Failed to convert host indirection table to device table\n"); return ret; } @@ -2747,7 +2780,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev) cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; cmd.aq_common_descriptor.flags = ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; - cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG; + cmd.feat_common.feature_id = ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG; cmd.u.ind_table.size = rss->tbl_log_size; cmd.u.ind_table.inline_index = 0xFFFFFFFF; @@ -2755,11 +2788,11 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev) &cmd.control_buffer.address, rss->rss_ind_tbl_dma_addr); if (unlikely(ret)) { - ena_trc_err("memory address set failed\n"); + ena_trc_err(ena_dev, "Memory address set failed\n"); return ret; } - cmd.control_buffer.length = (1ULL << rss->tbl_log_size) * + cmd.control_buffer.length = (u32)(1ULL << rss->tbl_log_size) * sizeof(struct ena_admin_rss_ind_table_entry); ret = ena_com_execute_admin_command(admin_queue, @@ -2769,7 +2802,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev) sizeof(resp)); if (unlikely(ret)) - ena_trc_err("Failed to set indirect table. error: %d\n", ret); + ena_trc_err(ena_dev, "Failed to set indirect table. 
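Both indirection-table paths size their control buffer as (1 << tbl_log_size) entries, now with an explicit u32 cast. A quick standalone illustration of that arithmetic (values assumed; a bare uint32_t stands in for struct ena_admin_rss_ind_table_entry):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed log2 table size; real drivers read min/max from the device. */
	uint32_t tbl_log_size = 7;                /* 128 entries */
	size_t entry_size = sizeof(uint32_t);     /* stand-in entry type */

	/* Mirrors: length = (u32)(1ULL << tbl_log_size) * sizeof(entry) */
	uint32_t length = (uint32_t)((1ULL << tbl_log_size) * entry_size);

	printf("control buffer length: %u bytes\n", length); /* 512 */
	return 0;
}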
error: %d\n", ret); return ret; } @@ -2781,11 +2814,11 @@ int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl) u32 tbl_size; int i, rc; - tbl_size = (1ULL << rss->tbl_log_size) * + tbl_size = (u32)(1ULL << rss->tbl_log_size) * sizeof(struct ena_admin_rss_ind_table_entry); rc = ena_com_get_feature_ex(ena_dev, &get_resp, - ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, + ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG, rss->rss_ind_tbl_dma_addr, tbl_size, 0); if (unlikely(rc)) @@ -2934,7 +2967,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev) &cmd.u.host_attr.debug_ba, host_attr->debug_area_dma_addr); if (unlikely(ret)) { - ena_trc_err("memory address set failed\n"); + ena_trc_err(ena_dev, "Memory address set failed\n"); return ret; } @@ -2942,7 +2975,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev) &cmd.u.host_attr.os_info_ba, host_attr->host_info_dma_addr); if (unlikely(ret)) { - ena_trc_err("memory address set failed\n"); + ena_trc_err(ena_dev, "Memory address set failed\n"); return ret; } @@ -2955,7 +2988,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev) sizeof(resp)); if (unlikely(ret)) - ena_trc_err("Failed to set host attributes: %d\n", ret); + ena_trc_err(ena_dev, "Failed to set host attributes: %d\n", ret); return ret; } @@ -2967,12 +3000,13 @@ bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev) ENA_ADMIN_INTERRUPT_MODERATION); } -static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs, +static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *ena_dev, + u32 coalesce_usecs, u32 intr_delay_resolution, u32 *intr_moder_interval) { if (!intr_delay_resolution) { - ena_trc_err("Illegal interrupt delay granularity value\n"); + ena_trc_err(ena_dev, "Illegal interrupt delay granularity value\n"); return ENA_COM_FAULT; } @@ -2981,11 +3015,11 @@ static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs, return 0; } - int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev, u32 tx_coalesce_usecs) { - return ena_com_update_nonadaptive_moderation_interval(tx_coalesce_usecs, + return ena_com_update_nonadaptive_moderation_interval(ena_dev, + tx_coalesce_usecs, ena_dev->intr_delay_resolution, &ena_dev->intr_moder_tx_interval); } @@ -2993,7 +3027,8 @@ int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_de int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev, u32 rx_coalesce_usecs) { - return ena_com_update_nonadaptive_moderation_interval(rx_coalesce_usecs, + return ena_com_update_nonadaptive_moderation_interval(ena_dev, + rx_coalesce_usecs, ena_dev->intr_delay_resolution, &ena_dev->intr_moder_rx_interval); } @@ -3009,12 +3044,12 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev) if (rc) { if (rc == ENA_COM_UNSUPPORTED) { - ena_trc_dbg("Feature %d isn't supported\n", + ena_trc_dbg(ena_dev, "Feature %d isn't supported\n", ENA_ADMIN_INTERRUPT_MODERATION); rc = 0; } else { - ena_trc_err("Failed to get interrupt moderation admin cmd. rc: %d\n", - rc); + ena_trc_err(ena_dev, + "Failed to get interrupt moderation admin cmd. 
rc: %d\n", rc); } /* no moderation supported, disable adaptive support */ @@ -3062,7 +3097,7 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev, (llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc)); if (unlikely(ena_dev->tx_max_header_size == 0)) { - ena_trc_err("the size of the LLQ entry is smaller than needed\n"); + ena_trc_err(ena_dev, "The size of the LLQ entry is smaller than needed\n"); return -EINVAL; } diff --git a/sys/contrib/ena-com/ena_com.h b/sys/contrib/ena-com/ena_com.h index b94728310fc9..414301bdaf91 100644 --- a/sys/contrib/ena-com/ena_com.h +++ b/sys/contrib/ena-com/ena_com.h @@ -1,5 +1,5 @@ /*- - * BSD LICENSE + * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates. * All rights reserved. @@ -328,6 +328,7 @@ struct ena_com_dev { void __iomem *mem_bar; void *dmadev; void *bus; + ena_netdev *net_device; enum ena_admin_placement_policy_type tx_mem_queue_type; u32 tx_max_header_size; @@ -365,7 +366,6 @@ struct ena_com_dev_get_features_ctx { struct ena_admin_feature_offload_desc offload; struct ena_admin_ena_hw_hints hw_hints; struct ena_admin_feature_llq_desc llq; - struct ena_admin_feature_rss_ind_table ind_table; }; struct ena_com_create_io_ctx { @@ -550,7 +550,7 @@ void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev); * This method goes over the async event notification queue and calls the proper * aenq handler. */ -void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data); +void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data); /* ena_com_abort_admin_commands - Abort all the outstanding admin commands. * @ena_dev: ENA communication layer struct @@ -630,6 +630,15 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev, int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev, struct ena_admin_basic_stats *stats); +/* ena_com_get_eni_stats - Get extended network interface statistics + * @ena_dev: ENA communication layer struct + * @stats: stats return value + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_get_eni_stats(struct ena_com_dev *ena_dev, + struct ena_admin_eni_stats *stats); + /* ena_com_set_dev_mtu - Configure the device mtu. * @ena_dev: ENA communication layer struct * @mtu: mtu value @@ -963,6 +972,26 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev, struct ena_admin_feature_llq_desc *llq_features, struct ena_llq_configurations *llq_default_config); +/* ena_com_io_sq_to_ena_dev - Extract ena_com_dev using contained field io_sq. + * @io_sq: IO submit queue struct + * + * @return - ena_com_dev struct extracted from io_sq + */ +static inline struct ena_com_dev *ena_com_io_sq_to_ena_dev(struct ena_com_io_sq *io_sq) +{ + return container_of(io_sq, struct ena_com_dev, io_sq_queues[io_sq->qid]); +} + +/* ena_com_io_cq_to_ena_dev - Extract ena_com_dev using contained field io_cq. 
+ * @io_sq: IO submit queue struct + * + * @return - ena_com_dev struct extracted from io_sq + */ +static inline struct ena_com_dev *ena_com_io_cq_to_ena_dev(struct ena_com_io_cq *io_cq) +{ + return container_of(io_cq, struct ena_com_dev, io_cq_queues[io_cq->qid]); +} + static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev) { return ena_dev->adaptive_coalescing; diff --git a/sys/contrib/ena-com/ena_defs/ena_admin_defs.h b/sys/contrib/ena-com/ena_defs/ena_admin_defs.h index 52cdb9e5e394..edfdad3473d7 100644 --- a/sys/contrib/ena-com/ena_defs/ena_admin_defs.h +++ b/sys/contrib/ena-com/ena_defs/ena_admin_defs.h @@ -1,5 +1,5 @@ /*- - * BSD LICENSE + * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates. * All rights reserved. @@ -36,6 +36,8 @@ #define ENA_ADMIN_EXTRA_PROPERTIES_STRING_LEN 32 #define ENA_ADMIN_EXTRA_PROPERTIES_COUNT 32 +#define ENA_ADMIN_RSS_KEY_PARTS 10 + enum ena_admin_aq_opcode { ENA_ADMIN_CREATE_SQ = 1, ENA_ADMIN_DESTROY_SQ = 2, @@ -58,6 +60,7 @@ enum ena_admin_aq_completion_status { ENA_ADMIN_RESOURCE_BUSY = 7, }; +/* subcommands for the set/get feature admin commands */ enum ena_admin_aq_feature_id { ENA_ADMIN_DEVICE_ATTRIBUTES = 1, ENA_ADMIN_MAX_QUEUES_NUM = 2, @@ -68,7 +71,7 @@ enum ena_admin_aq_feature_id { ENA_ADMIN_MAX_QUEUES_EXT = 7, ENA_ADMIN_RSS_HASH_FUNCTION = 10, ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11, - ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12, + ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG = 12, ENA_ADMIN_MTU = 14, ENA_ADMIN_RSS_HASH_INPUT = 18, ENA_ADMIN_INTERRUPT_MODERATION = 20, @@ -122,6 +125,8 @@ enum ena_admin_completion_policy_type { enum ena_admin_get_stats_type { ENA_ADMIN_GET_STATS_TYPE_BASIC = 0, ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1, + /* extra HW stats for specific network interface */ + ENA_ADMIN_GET_STATS_TYPE_ENI = 2, }; enum ena_admin_get_stats_scope { @@ -198,7 +203,7 @@ struct ena_admin_acq_common_desc { uint16_t extended_status; /* indicates to the driver which AQ entry has been consumed by the - * device and could be reused + * device and could be reused */ uint16_t sq_head_indx; }; @@ -243,8 +248,8 @@ struct ena_admin_aq_create_sq_cmd { */ uint8_t sq_caps_3; - /* associated completion queue id. This CQ must be created prior to - * SQ creation + /* associated completion queue id. This CQ must be created prior to SQ + * creation */ uint16_t cq_idx; @@ -383,7 +388,7 @@ struct ena_admin_aq_get_stats_cmd { uint16_t queue_idx; /* device id, value 0xFFFF means mine. only privileged device can get - * stats of other device + * stats of other device */ uint16_t device_id; }; @@ -415,10 +420,43 @@ struct ena_admin_basic_stats { uint32_t tx_drops_high; }; +/* ENI Statistics Command. 
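The two new inline helpers recover the owning ena_com_dev from an embedded queue with container_of. A self-contained illustration of the pattern (generic structs, not the driver's; the ena helpers additionally index the member array with the queue's own qid, a GNU C pattern):

#include <stddef.h>
#include <stdio.h>

/* Classic container_of: map a pointer to a member back to its parent. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct io_queue { int qid; };

struct device {
	int id;
	struct io_queue queues[4]; /* embedded array, like io_sq_queues[] */
};

int main(void)
{
	struct device dev = { .id = 7 };
	struct io_queue *q = &dev.queues[2];

	/* Recover the parent from a pointer to queues[2]. */
	struct device *owner = container_of(q, struct device, queues[2]);
	printf("owner id: %d\n", owner->id); /* 7 */
	return 0;
}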
*/ +struct ena_admin_eni_stats { + /* The number of packets shaped due to inbound aggregate BW + * allowance being exceeded + */ + uint64_t bw_in_allowance_exceeded; + + /* The number of packets shaped due to outbound aggregate BW + * allowance being exceeded + */ + uint64_t bw_out_allowance_exceeded; + + /* The number of packets shaped due to PPS allowance being exceeded */ + uint64_t pps_allowance_exceeded; + + /* The number of packets shaped due to connection tracking + * allowance being exceeded and leading to failure in establishment + * of new connections + */ + uint64_t conntrack_allowance_exceeded; + + /* The number of packets shaped due to linklocal packet rate + * allowance being exceeded + */ + uint64_t linklocal_allowance_exceeded; +}; + struct ena_admin_acq_get_stats_resp { struct ena_admin_acq_common_desc acq_common_desc; - struct ena_admin_basic_stats basic_stats; + union { + uint64_t raw[7]; + + struct ena_admin_basic_stats basic_stats; + + struct ena_admin_eni_stats eni_stats; + } u; }; struct ena_admin_get_set_feature_common_desc { @@ -432,8 +470,8 @@ struct ena_admin_get_set_feature_common_desc { uint8_t feature_id; /* The driver specifies the max feature version it supports and the - * device responds with the currently supported feature version. The - * field is zero based + * device responds with the currently supported feature version. The + * field is zero based */ uint8_t feature_version; @@ -445,7 +483,9 @@ struct ena_admin_device_attr_feature_desc { uint32_t device_version; - /* bitmap of ena_admin_aq_feature_id */ + /* bitmap of ena_admin_aq_feature_id, which represents supported + * subcommands for the set/get feature admin commands. + */ uint32_t supported_features; uint32_t reserved3; @@ -531,32 +571,30 @@ struct ena_admin_feature_llq_desc { uint32_t max_llq_depth; - /* specify the header locations the device supports. bitfield of - * enum ena_admin_llq_header_location. + /* specify the header locations the device supports. bitfield of enum + * ena_admin_llq_header_location. */ uint16_t header_location_ctrl_supported; /* the header location the driver selected to use. */ uint16_t header_location_ctrl_enabled; - /* if inline header is specified - this is the size of descriptor - * list entry. If header in a separate ring is specified - this is - * the size of header ring entry. bitfield of enum - * ena_admin_llq_ring_entry_size. specify the entry sizes the device - * supports + /* if inline header is specified - this is the size of descriptor list + * entry. If header in a separate ring is specified - this is the size + * of header ring entry. bitfield of enum ena_admin_llq_ring_entry_size. + * specify the entry sizes the device supports */ uint16_t entry_size_ctrl_supported; /* the entry size the driver selected to use. */ uint16_t entry_size_ctrl_enabled; - /* valid only if inline header is specified. First entry associated - * with the packet includes descriptors and header. Rest of the - * entries occupied by descriptors. This parameter defines the max - * number of descriptors precedding the header in the first entry. - * The field is bitfield of enum - * ena_admin_llq_num_descs_before_header and specify the values the - * device supports + /* valid only if inline header is specified. First entry associated with + * the packet includes descriptors and header. Rest of the entries + * occupied by descriptors. This parameter defines the max number of + * descriptors precedding the header in the first entry. 
The field is + * bitfield of enum ena_admin_llq_num_descs_before_header and specify + * the values the device supports */ uint16_t desc_num_before_header_supported; @@ -564,7 +602,7 @@ struct ena_admin_feature_llq_desc { uint16_t desc_num_before_header_enabled; /* valid only if inline was chosen. bitfield of enum - * ena_admin_llq_stride_ctrl + * ena_admin_llq_stride_ctrl */ uint16_t descriptors_stride_ctrl_supported; @@ -574,8 +612,8 @@ struct ena_admin_feature_llq_desc { /* reserved */ uint32_t reserved1; - /* accelerated low latency queues requirment. driver needs to - * support those requirments in order to use accelerated llq + /* accelerated low latency queues requirement. driver needs to + * support those requirements in order to use accelerated llq */ struct ena_admin_accel_mode_req accel_mode; }; @@ -599,8 +637,8 @@ struct ena_admin_queue_ext_feature_fields { uint32_t max_tx_header_size; - /* Maximum Descriptors number, including meta descriptor, allowed for - * a single Tx packet + /* Maximum Descriptors number, including meta descriptor, allowed for a + * single Tx packet */ uint16_t max_per_packet_tx_descs; @@ -623,8 +661,8 @@ struct ena_admin_queue_feature_desc { uint32_t max_header_size; - /* Maximum Descriptors number, including meta descriptor, allowed for - * a single Tx packet + /* Maximum Descriptors number, including meta descriptor, allowed for a + * single Tx packet */ uint16_t max_packet_tx_descs; @@ -720,11 +758,11 @@ enum ena_admin_hash_functions { }; struct ena_admin_feature_rss_flow_hash_control { - uint32_t keys_num; + uint32_t key_parts; uint32_t reserved; - uint32_t key[10]; + uint32_t key[ENA_ADMIN_RSS_KEY_PARTS]; }; struct ena_admin_feature_rss_flow_hash_function { @@ -859,11 +897,12 @@ struct ena_admin_host_info { uint16_t reserved; - /* 0 : mutable_rss_table_size + /* 0 : reserved * 1 : rx_offset * 2 : interrupt_moderation - * 3 : map_rx_buf_bidirectional - * 31:4 : reserved + * 3 : rx_buf_mirroring + * 4 : rss_configurable_function_key + * 31:5 : reserved */ uint32_t driver_supported_features; }; @@ -945,7 +984,7 @@ struct ena_admin_queue_ext_feature_desc { struct ena_admin_queue_ext_feature_fields max_queue_ext; uint32_t raw[10]; - } ; + }; }; struct ena_admin_get_feat_resp { @@ -1028,7 +1067,7 @@ struct ena_admin_set_feat_resp { struct ena_admin_aenq_common_desc { uint16_t group; - uint16_t syndrom; + uint16_t syndrome; /* 0 : phase * 7:1 : reserved - MBZ @@ -1052,7 +1091,7 @@ enum ena_admin_aenq_group { ENA_ADMIN_AENQ_GROUPS_NUM = 5, }; -enum ena_admin_aenq_notification_syndrom { +enum ena_admin_aenq_notification_syndrome { ENA_ADMIN_SUSPEND = 0, ENA_ADMIN_RESUME = 1, ENA_ADMIN_UPDATE_HINTS = 2, @@ -1181,13 +1220,14 @@ struct ena_admin_ena_mmio_req_read_less_resp { #define ENA_ADMIN_HOST_INFO_DEVICE_MASK GENMASK(7, 3) #define ENA_ADMIN_HOST_INFO_BUS_SHIFT 8 #define ENA_ADMIN_HOST_INFO_BUS_MASK GENMASK(15, 8) -#define ENA_ADMIN_HOST_INFO_MUTABLE_RSS_TABLE_SIZE_MASK BIT(0) #define ENA_ADMIN_HOST_INFO_RX_OFFSET_SHIFT 1 #define ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK BIT(1) #define ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_SHIFT 2 #define ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK BIT(2) -#define ENA_ADMIN_HOST_INFO_MAP_RX_BUF_BIDIRECTIONAL_SHIFT 3 -#define ENA_ADMIN_HOST_INFO_MAP_RX_BUF_BIDIRECTIONAL_MASK BIT(3) +#define ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_SHIFT 3 +#define ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK BIT(3) +#define ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_SHIFT 4 +#define ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK BIT(4) 
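For context on the admin-defs changes above: the get-stats response now multiplexes basic and ENI statistics through a union, selected by the new ENA_ADMIN_GET_STATS_TYPE_ENI type. A minimal sketch of how a consumer would read the new member follows; only the type and field names come from this patch, the helper itself is illustrative.

static uint64_t
example_read_conntrack_drops(struct ena_admin_acq_get_stats_resp *resp)
{
	/* Valid only when the get-stats command was issued with
	 * ENA_ADMIN_GET_STATS_TYPE_ENI; otherwise u.basic_stats applies.
	 */
	return (resp->u.eni_stats.conntrack_allowance_exceeded);
}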
/* feature_rss_ind_table */ #define ENA_ADMIN_FEATURE_RSS_IND_TABLE_ONE_ENTRY_UPDATE_MASK BIT(0) @@ -1609,16 +1649,6 @@ static inline void set_ena_admin_host_info_bus(struct ena_admin_host_info *p, ui p->bdf |= (val << ENA_ADMIN_HOST_INFO_BUS_SHIFT) & ENA_ADMIN_HOST_INFO_BUS_MASK; } -static inline uint32_t get_ena_admin_host_info_mutable_rss_table_size(const struct ena_admin_host_info *p) -{ - return p->driver_supported_features & ENA_ADMIN_HOST_INFO_MUTABLE_RSS_TABLE_SIZE_MASK; -} - -static inline void set_ena_admin_host_info_mutable_rss_table_size(struct ena_admin_host_info *p, uint32_t val) -{ - p->driver_supported_features |= val & ENA_ADMIN_HOST_INFO_MUTABLE_RSS_TABLE_SIZE_MASK; -} - static inline uint32_t get_ena_admin_host_info_rx_offset(const struct ena_admin_host_info *p) { return (p->driver_supported_features & ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK) >> ENA_ADMIN_HOST_INFO_RX_OFFSET_SHIFT; @@ -1639,14 +1669,24 @@ static inline void set_ena_admin_host_info_interrupt_moderation(struct ena_admin p->driver_supported_features |= (val << ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_SHIFT) & ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK; } -static inline uint32_t get_ena_admin_host_info_map_rx_buf_bidirectional(const struct ena_admin_host_info *p) +static inline uint32_t get_ena_admin_host_info_rx_buf_mirroring(const struct ena_admin_host_info *p) { - return (p->driver_supported_features & ENA_ADMIN_HOST_INFO_MAP_RX_BUF_BIDIRECTIONAL_MASK) >> ENA_ADMIN_HOST_INFO_MAP_RX_BUF_BIDIRECTIONAL_SHIFT; + return (p->driver_supported_features & ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK) >> ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_SHIFT; } -static inline void set_ena_admin_host_info_map_rx_buf_bidirectional(struct ena_admin_host_info *p, uint32_t val) +static inline void set_ena_admin_host_info_rx_buf_mirroring(struct ena_admin_host_info *p, uint32_t val) { - p->driver_supported_features |= (val << ENA_ADMIN_HOST_INFO_MAP_RX_BUF_BIDIRECTIONAL_SHIFT) & ENA_ADMIN_HOST_INFO_MAP_RX_BUF_BIDIRECTIONAL_MASK; + p->driver_supported_features |= (val << ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_SHIFT) & ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK; +} + +static inline uint32_t get_ena_admin_host_info_rss_configurable_function_key(const struct ena_admin_host_info *p) +{ + return (p->driver_supported_features & ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK) >> ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_SHIFT; +} + +static inline void set_ena_admin_host_info_rss_configurable_function_key(struct ena_admin_host_info *p, uint32_t val) +{ + p->driver_supported_features |= (val << ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_SHIFT) & ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK; } static inline uint8_t get_ena_admin_feature_rss_ind_table_one_entry_update(const struct ena_admin_feature_rss_ind_table *p) diff --git a/sys/contrib/ena-com/ena_defs/ena_common_defs.h b/sys/contrib/ena-com/ena_defs/ena_common_defs.h index 88b90d44a79a..ee49ff6e2776 100644 --- a/sys/contrib/ena-com/ena_defs/ena_common_defs.h +++ b/sys/contrib/ena-com/ena_defs/ena_common_defs.h @@ -1,5 +1,5 @@ /*- - * BSD LICENSE + * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates. * All rights reserved. 
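Each accessor above follows the same shift/mask idiom over driver_supported_features. A short sketch (not patch code) of a driver advertising the two new capability bits; "hi" is assumed to point at the host info page set up via ena_com_allocate_host_info().

static void
example_advertise_features(struct ena_admin_host_info *hi)
{
	/* Both setters OR the bit in under its mask; they never clear it. */
	set_ena_admin_host_info_rx_buf_mirroring(hi, 1);
	set_ena_admin_host_info_rss_configurable_function_key(hi, 1);
}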
diff --git a/sys/contrib/ena-com/ena_defs/ena_eth_io_defs.h b/sys/contrib/ena-com/ena_defs/ena_eth_io_defs.h index 14f44d0d9a86..817375a947c5 100644 --- a/sys/contrib/ena-com/ena_defs/ena_eth_io_defs.h +++ b/sys/contrib/ena-com/ena_defs/ena_eth_io_defs.h @@ -1,5 +1,5 @@ /*- - * BSD LICENSE + * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates. * All rights reserved. diff --git a/sys/contrib/ena-com/ena_defs/ena_gen_info.h b/sys/contrib/ena-com/ena_defs/ena_gen_info.h index 83ed024ae4cc..726750a67d4e 100644 --- a/sys/contrib/ena-com/ena_defs/ena_gen_info.h +++ b/sys/contrib/ena-com/ena_defs/ena_gen_info.h @@ -1,5 +1,5 @@ /*- - * BSD LICENSE + * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates. * All rights reserved. @@ -30,5 +30,5 @@ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#define ENA_GEN_DATE "Mon Apr 20 15:41:59 DST 2020" -#define ENA_GEN_COMMIT "daa45ac" +#define ENA_GEN_DATE "Fri Sep 18 17:09:00 IDT 2020" +#define ENA_GEN_COMMIT "0f80d82" diff --git a/sys/contrib/ena-com/ena_defs/ena_regs_defs.h b/sys/contrib/ena-com/ena_defs/ena_regs_defs.h index 53ac662b6189..bdd91ef2e026 100644 --- a/sys/contrib/ena-com/ena_defs/ena_regs_defs.h +++ b/sys/contrib/ena-com/ena_defs/ena_regs_defs.h @@ -1,5 +1,5 @@ /*- - * BSD LICENSE + * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates. * All rights reserved. diff --git a/sys/contrib/ena-com/ena_eth_com.c b/sys/contrib/ena-com/ena_eth_com.c index 58ddb82246fd..47ca4e4afdb6 100644 --- a/sys/contrib/ena-com/ena_eth_com.c +++ b/sys/contrib/ena-com/ena_eth_com.c @@ -1,5 +1,5 @@ /*- - * BSD LICENSE + * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates. * All rights reserved. 
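The ena_eth_com.c hunks below apply one mechanical change throughout: every ena_trc_* call gains the owning ena_com_dev as a context argument, recovered from the queue through the container_of-based helpers. A minimal sketch of the convention, assuming only names that appear elsewhere in this diff:

static void
example_trace_sq(struct ena_com_io_sq *io_sq)
{
	/* I/O-path code has no ena_com_dev pointer at hand, so it derives
	 * one from the embedded queue and passes it as the new first
	 * argument of the trace macros.
	 */
	ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq),
	    "Queue %d tail is %d\n", io_sq->qid, io_sq->tail);
}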
@@ -85,12 +85,14 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq, if (is_llq_max_tx_burst_exists(io_sq)) { if (unlikely(!io_sq->entries_in_tx_burst_left)) { - ena_trc_err("Error: trying to send more packets than tx burst allows\n"); + ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq), + "Error: trying to send more packets than tx burst allows\n"); return ENA_COM_NO_SPACE; } io_sq->entries_in_tx_burst_left--; - ena_trc_dbg("decreasing entries_in_tx_burst_left of queue %d to %d\n", + ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq), + "Decreasing entries_in_tx_burst_left of queue %d to %d\n", io_sq->qid, io_sq->entries_in_tx_burst_left); } @@ -129,12 +131,14 @@ static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq, llq_info->descs_num_before_header * io_sq->desc_entry_size; if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) { - ena_trc_err("trying to write header larger than llq entry can accommodate\n"); + ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq), + "Trying to write header larger than llq entry can accommodate\n"); return ENA_COM_FAULT; } if (unlikely(!bounce_buffer)) { - ena_trc_err("bounce buffer is NULL\n"); + ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq), + "Bounce buffer is NULL\n"); return ENA_COM_FAULT; } @@ -152,7 +156,8 @@ static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq) bounce_buffer = pkt_ctrl->curr_bounce_buf; if (unlikely(!bounce_buffer)) { - ena_trc_err("bounce buffer is NULL\n"); + ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq), + "Bounce buffer is NULL\n"); return NULL; } @@ -177,7 +182,8 @@ static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq) rc = ena_com_write_bounce_buffer_to_dev(io_sq, pkt_ctrl->curr_bounce_buf); if (unlikely(rc)) { - ena_trc_err("failed to write bounce buffer to device\n"); + ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq), + "Failed to write bounce buffer to device\n"); return rc; } @@ -210,7 +216,8 @@ static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq) rc = ena_com_write_bounce_buffer_to_dev(io_sq, pkt_ctrl->curr_bounce_buf); if (unlikely(rc)) { - ena_trc_err("failed to write bounce buffer to device\n"); + ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq), + "Failed to write bounce buffer to device\n"); return rc; } @@ -280,7 +287,8 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq, io_cq->cur_rx_pkt_cdesc_count = 0; io_cq->cur_rx_pkt_cdesc_start_idx = head_masked; - ena_trc_dbg("ena q_id: %d packets were completed. first desc idx %u descs# %d\n", + ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq), + "ENA q_id: %d packets were completed. 
first desc idx %u descs# %d\n", io_cq->qid, *first_cdesc_idx, count); } else { io_cq->cur_rx_pkt_cdesc_count += count; @@ -296,6 +304,9 @@ static int ena_com_create_meta(struct ena_com_io_sq *io_sq, struct ena_eth_io_tx_meta_desc *meta_desc = NULL; meta_desc = get_sq_desc(io_sq); + if (unlikely(!meta_desc)) + return ENA_COM_FAULT; + memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc)); meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK; @@ -303,7 +314,7 @@ static int ena_com_create_meta(struct ena_com_io_sq *io_sq, meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK; /* bits 0-9 of the mss */ - meta_desc->word2 |= (ena_meta->mss << + meta_desc->word2 |= ((u32)ena_meta->mss << ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK; /* bits 10-13 of the mss */ @@ -313,7 +324,7 @@ static int ena_com_create_meta(struct ena_com_io_sq *io_sq, /* Extended meta desc */ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK; - meta_desc->len_ctrl |= (io_sq->phase << + meta_desc->len_ctrl |= ((u32)io_sq->phase << ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) & ENA_ETH_IO_TX_META_DESC_PHASE_MASK; @@ -326,7 +337,7 @@ static int ena_com_create_meta(struct ena_com_io_sq *io_sq, ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) & ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK; - meta_desc->word2 |= (ena_meta->l4_hdr_len << + meta_desc->word2 |= ((u32)ena_meta->l4_hdr_len << ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK; @@ -348,20 +359,23 @@ static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq, *have_meta = true; return ena_com_create_meta(io_sq, ena_meta); - } else if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) { + } + + if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) { *have_meta = true; /* Cache the meta desc */ memcpy(&io_sq->cached_tx_meta, ena_meta, sizeof(struct ena_com_tx_meta)); return ena_com_create_meta(io_sq, ena_meta); - } else { - *have_meta = false; - return ENA_COM_OK; } + + *have_meta = false; + return ENA_COM_OK; } -static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx, - struct ena_eth_io_rx_cdesc_base *cdesc) +static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq, + struct ena_com_rx_ctx *ena_rx_ctx, + struct ena_eth_io_rx_cdesc_base *cdesc) { ena_rx_ctx->l3_proto = cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK; @@ -382,7 +396,8 @@ static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx, (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT; - ena_trc_dbg("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash frag %d frag: %d cdesc_status: %x\n", + ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq), + "l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n", ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto, ena_rx_ctx->l3_csum_err, @@ -411,23 +426,26 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, u64 addr_hi; ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, - "wrong Q type"); + ena_com_io_sq_to_ena_dev(io_sq), "wrong Q type"); /* num_bufs +1 for potential meta desc */ if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) { - ena_trc_dbg("Not enough space in the tx queue\n"); + ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq), + "Not enough space in the tx queue\n"); return ENA_COM_NO_MEM; } if (unlikely(header_len > io_sq->tx_max_header_size)) { - ena_trc_err("header size 
is too large %d max header: %d\n", + ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq), + "Header size is too large %d max header: %d\n", header_len, io_sq->tx_max_header_size); return ENA_COM_INVAL; } if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && !buffer_to_push)) { - ena_trc_err("push header wasn't provided on LLQ mode\n"); + ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq), + "Push header wasn't provided on LLQ mode\n"); return ENA_COM_INVAL; } @@ -437,7 +455,8 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta); if (unlikely(rc)) { - ena_trc_err("failed to create and store tx meta desc\n"); + ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq), + "Failed to create and store tx meta desc\n"); return rc; } @@ -445,7 +464,8 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, if (unlikely(!num_bufs && !header_len)) { rc = ena_com_close_bounce_buffer(io_sq); if (rc) - ena_trc_err("failed to write buffers to LLQ\n"); + ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq), + "Failed to write buffers to LLQ\n"); *nb_hw_desc = io_sq->tail - start_tail; return rc; } @@ -459,16 +479,16 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, if (!have_meta) desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK; - desc->buff_addr_hi_hdr_sz |= (header_len << + desc->buff_addr_hi_hdr_sz |= ((u32)header_len << ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) & ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK; - desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) & + desc->len_ctrl |= ((u32)io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) & ENA_ETH_IO_TX_DESC_PHASE_MASK; desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK; /* Bits 0-9 */ - desc->meta_ctrl |= (ena_tx_ctx->req_id << + desc->meta_ctrl |= ((u32)ena_tx_ctx->req_id << ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK; @@ -506,7 +526,8 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, if (likely(i != 0)) { rc = ena_com_sq_update_tail(io_sq); if (unlikely(rc)) { - ena_trc_err("failed to update sq tail\n"); + ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq), + "Failed to update sq tail\n"); return rc; } @@ -516,7 +537,7 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc)); - desc->len_ctrl |= (io_sq->phase << + desc->len_ctrl |= ((u32)io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) & ENA_ETH_IO_TX_DESC_PHASE_MASK; } @@ -538,13 +559,15 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, rc = ena_com_sq_update_tail(io_sq); if (unlikely(rc)) { - ena_trc_err("failed to update sq tail of the last descriptor\n"); + ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq), + "Failed to update sq tail of the last descriptor\n"); return rc; } rc = ena_com_close_bounce_buffer(io_sq); if (rc) - ena_trc_err("failed when closing bounce buffer\n"); + ena_trc_err(ena_com_io_sq_to_ena_dev(io_sq), + "Failed when closing bounce buffer\n"); *nb_hw_desc = io_sq->tail - start_tail; return rc; @@ -556,12 +579,13 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq, { struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0]; struct ena_eth_io_rx_cdesc_base *cdesc = NULL; + u16 q_depth = io_cq->q_depth; u16 cdesc_idx = 0; u16 nb_hw_desc; u16 i = 0; ENA_WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, - "wrong Q type"); + ena_com_io_cq_to_ena_dev(io_cq), "wrong Q type"); nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx); if (nb_hw_desc == 0) { @@ -569,11 +593,13 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq, return 0; } 
- ena_trc_dbg("fetch rx packet: queue %d completed desc: %d\n", + ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq), + "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid, nb_hw_desc); if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) { - ena_trc_err("Too many RX cdescs (%d) > MAX(%d)\n", + ena_trc_err(ena_com_io_cq_to_ena_dev(io_cq), + "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc, ena_rx_ctx->max_bufs); return ENA_COM_NO_SPACE; } @@ -582,21 +608,30 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq, ena_rx_ctx->pkt_offset = cdesc->offset; do { - ena_buf->len = cdesc->length; - ena_buf->req_id = cdesc->req_id; - ena_buf++; - } while ((++i < nb_hw_desc) && (cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i))); + ena_buf[i].len = cdesc->length; + ena_buf[i].req_id = cdesc->req_id; + if (unlikely(ena_buf[i].req_id >= q_depth)) + return ENA_COM_EIO; + + if (++i >= nb_hw_desc) + break; + + cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i); + + } while (1); /* Update SQ head ptr */ io_sq->next_to_comp += nb_hw_desc; - ena_trc_dbg("[%s][QID#%d] Updating SQ head to: %d\n", __func__, + ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq), + "[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid, io_sq->next_to_comp); /* Get rx flags from the last pkt */ - ena_com_rx_set_flags(ena_rx_ctx, cdesc); + ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc); ena_rx_ctx->descs = nb_hw_desc; + return 0; } @@ -607,7 +642,7 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq, struct ena_eth_io_rx_desc *desc; ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, - "wrong Q type"); + ena_com_io_sq_to_ena_dev(io_sq), "wrong Q type"); if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1))) return ENA_COM_NO_SPACE; @@ -621,12 +656,16 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq, desc->length = ena_buf->len; desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK | - ENA_ETH_IO_RX_DESC_LAST_MASK | - (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK) | - ENA_ETH_IO_RX_DESC_COMP_REQ_MASK; + ENA_ETH_IO_RX_DESC_LAST_MASK | + ENA_ETH_IO_RX_DESC_COMP_REQ_MASK | + (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK); desc->req_id = req_id; + ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq), + "[%s] Adding single RX desc, Queue: %u, req_id: %u\n", + __func__, io_sq->qid, req_id); + desc->buff_addr_lo = (u32)ena_buf->paddr; desc->buff_addr_hi = ((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32); diff --git a/sys/contrib/ena-com/ena_eth_com.h b/sys/contrib/ena-com/ena_eth_com.h index 4b91221ea093..85675bb004b9 100644 --- a/sys/contrib/ena-com/ena_eth_com.h +++ b/sys/contrib/ena-com/ena_eth_com.h @@ -1,5 +1,5 @@ /*- - * BSD LICENSE + * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates. * All rights reserved. 
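The reworked descriptor loop above is where RX req_id validation now lives: each completed descriptor's req_id is bounds-checked against the queue depth before any driver state is indexed, and a bad id surfaces as the new ENA_COM_EIO error. A sketch of the resulting caller-side contract; handle_rx_error() is a hypothetical handler, not part of the patch.

static void
example_poll_rx(struct ena_com_io_cq *io_cq, struct ena_com_io_sq *io_sq,
    struct ena_com_rx_ctx *ctx)
{
	int rc;

	rc = ena_com_rx_pkt(io_cq, io_sq, ctx);
	if (unlikely(rc != 0)) {
		/* ENA_COM_NO_SPACE: too many descriptors; any other error
		 * now indicates an invalid req_id caught inside ena-com.
		 */
		handle_rx_error(rc);	/* hypothetical handler */
	}
}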
@@ -171,7 +171,8 @@ static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq, llq_info->descs_per_entry); } - ena_trc_dbg("queue: %d num_descs: %d num_entries_needed: %d\n", + ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq), + "Queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid, num_descs, num_entries_needed); return num_entries_needed > io_sq->entries_in_tx_burst_left; @@ -182,14 +183,16 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq) u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst; u16 tail = io_sq->tail; - ena_trc_dbg("write submission queue doorbell for queue: %d tail: %d\n", + ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq), + "Write submission queue doorbell for queue: %d tail: %d\n", io_sq->qid, tail); ENA_REG_WRITE32(io_sq->bus, tail, io_sq->db_addr); if (is_llq_max_tx_burst_exists(io_sq)) { - ena_trc_dbg("reset available entries in tx burst for queue %d to %d\n", - io_sq->qid, max_entries_in_tx_burst); + ena_trc_dbg(ena_com_io_sq_to_ena_dev(io_sq), + "Reset available entries in tx burst for queue %d to %d\n", + io_sq->qid, max_entries_in_tx_burst); io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst; } @@ -207,7 +210,8 @@ static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq) need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH); if (unlikely(need_update)) { - ena_trc_dbg("Write completion queue doorbell for queue %d: head: %d\n", + ena_trc_dbg(ena_com_io_cq_to_ena_dev(io_cq), + "Write completion queue doorbell for queue %d: head: %d\n", io_cq->qid, head); ENA_REG_WRITE32(io_cq->bus, head, io_cq->cq_head_db_reg); io_cq->last_head_update = head; @@ -271,7 +275,8 @@ static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, *req_id = READ_ONCE16(cdesc->req_id); if (unlikely(*req_id >= io_cq->q_depth)) { - ena_trc_err("Invalid req id %d\n", cdesc->req_id); + ena_trc_err(ena_com_io_cq_to_ena_dev(io_cq), + "Invalid req id %d\n", cdesc->req_id); return ENA_COM_INVAL; } diff --git a/sys/contrib/ena-com/ena_plat.h b/sys/contrib/ena-com/ena_plat.h index f096a5ff88d1..8fe1ec9aa731 100644 --- a/sys/contrib/ena-com/ena_plat.h +++ b/sys/contrib/ena-com/ena_plat.h @@ -1,5 +1,5 @@ /*- - * BSD LICENSE + * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates. * All rights reserved. @@ -110,22 +110,33 @@ extern struct ena_bus_space ebs; extern int ena_log_level; -#define ena_trace_raw(level, fmt, args...) \ +#define container_of(ptr, type, member) \ + ({ \ + const __typeof(((type *)0)->member) *__p = (ptr); \ + (type *)((uintptr_t)__p - offsetof(type, member)); \ + }) + +#define ena_trace_raw(ctx, level, fmt, args...) \ do { \ + ((void)(ctx)); \ if (((level) & ena_log_level) != (level)) \ break; \ printf(fmt, ##args); \ } while (0) -#define ena_trace(level, fmt, args...) \ - ena_trace_raw(level, "%s() [TID:%d]: " \ +#define ena_trace(ctx, level, fmt, args...) \ + ena_trace_raw(ctx, level, "%s() [TID:%d]: " \ fmt, __func__, curthread->td_tid, ##args) -#define ena_trc_dbg(format, arg...) ena_trace(ENA_DBG, format, ##arg) -#define ena_trc_info(format, arg...) ena_trace(ENA_INFO, format, ##arg) -#define ena_trc_warn(format, arg...) ena_trace(ENA_WARNING, format, ##arg) -#define ena_trc_err(format, arg...) ena_trace(ENA_ALERT, format, ##arg) +#define ena_trc_dbg(ctx, format, arg...) \ + ena_trace(ctx, ENA_DBG, format, ##arg) +#define ena_trc_info(ctx, format, arg...) 
\ + ena_trace(ctx, ENA_INFO, format, ##arg) +#define ena_trc_warn(ctx, format, arg...) \ + ena_trace(ctx, ENA_WARNING, format, ##arg) +#define ena_trc_err(ctx, format, arg...) \ + ena_trace(ctx, ENA_ALERT, format, ##arg) #define unlikely(x) __predict_false(!!(x)) #define likely(x) __predict_true(!!(x)) @@ -136,19 +147,10 @@ extern int ena_log_level; #define MAX_ERRNO 4095 #define IS_ERR_VALUE(x) unlikely((x) <= (unsigned long)MAX_ERRNO) -#define ENA_ASSERT(cond, format, arg...) \ - do { \ - if (unlikely(!(cond))) { \ - ena_trc_err( \ - "Assert failed on %s:%s:%d:" format, \ - __FILE__, __func__, __LINE__, ##arg); \ - } \ - } while (0) - -#define ENA_WARN(cond, format, arg...) \ +#define ENA_WARN(cond, ctx, format, arg...) \ do { \ if (unlikely((cond))) { \ - ena_trc_warn(format, ##arg); \ + ena_trc_warn(ctx, format, ##arg); \ } \ } while (0) @@ -187,6 +189,7 @@ static inline long PTR_ERR(const void *ptr) #define ENA_COM_NO_DEVICE ENODEV #define ENA_COM_PERMISSION EPERM #define ENA_COM_TIMER_EXPIRED ETIMEDOUT +#define ENA_COM_EIO EIO #define ENA_MSLEEP(x) pause_sbt("ena", SBT_1MS * (x), SBT_1MS, 0) #define ENA_USLEEP(x) pause_sbt("ena", SBT_1US * (x), SBT_1US, 0) @@ -235,10 +238,17 @@ static inline long PTR_ERR(const void *ptr) cv_init(&((waitqueue).wq), "cv"); \ mtx_init(&((waitqueue).mtx), "wq", NULL, MTX_DEF); \ } while (0) -#define ENA_WAIT_EVENT_DESTROY(waitqueue) \ +#define ENA_WAIT_EVENTS_DESTROY(admin_queue) \ do { \ - cv_destroy(&((waitqueue).wq)); \ - mtx_destroy(&((waitqueue).mtx)); \ + struct ena_comp_ctx *comp_ctx; \ + int i; \ + for (i = 0; i < admin_queue->q_depth; i++) { \ + comp_ctx = get_comp_ctxt(admin_queue, i, false); \ + if (comp_ctx != NULL) { \ + cv_destroy(&((comp_ctx->wait_event).wq)); \ + mtx_destroy(&((comp_ctx->wait_event).mtx)); \ + } \ + } \ } while (0) #define ENA_WAIT_EVENT_CLEAR(waitqueue) \ cv_init(&((waitqueue).wq), (waitqueue).wq.cv_description) @@ -283,6 +293,7 @@ typedef uint32_t ena_atomic32_t; #define ENA_PRIu64 PRIu64 typedef uint64_t ena_time_t; +typedef struct ifnet ena_netdev; void ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error); diff --git a/sys/dev/ena/ena.c b/sys/dev/ena/ena.c index b2310391c596..4d853d968351 100644 --- a/sys/dev/ena/ena.c +++ b/sys/dev/ena/ena.c @@ -226,14 +226,14 @@ ena_dma_alloc(device_t dmadev, bus_size_t size, NULL, /* lockarg */ &dma->tag); if (unlikely(error != 0)) { - ena_trace(ENA_ALERT, "bus_dma_tag_create failed: %d\n", error); + ena_trace(NULL, ENA_ALERT, "bus_dma_tag_create failed: %d\n", error); goto fail_tag; } error = bus_dmamem_alloc(dma->tag, (void**) &dma->vaddr, BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->map); if (unlikely(error != 0)) { - ena_trace(ENA_ALERT, "bus_dmamem_alloc(%ju) failed: %d\n", + ena_trace(NULL, ENA_ALERT, "bus_dmamem_alloc(%ju) failed: %d\n", (uintmax_t)size, error); goto fail_map_create; } @@ -242,7 +242,7 @@ ena_dma_alloc(device_t dmadev, bus_size_t size, error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, ena_dmamap_callback, &dma->paddr, mapflags); if (unlikely((error != 0) || (dma->paddr == 0))) { - ena_trace(ENA_ALERT, ": bus_dmamap_load failed: %d\n", error); + ena_trace(NULL, ENA_ALERT, ": bus_dmamap_load failed: %d\n", error); goto fail_map_load; } @@ -315,7 +315,7 @@ ena_probe(device_t dev) while (ent->vendor_id != 0) { if ((pci_vendor_id == ent->vendor_id) && (pci_device_id == ent->device_id)) { - ena_trace(ENA_DBG, "vendor=%x device=%x\n", + ena_trace(NULL, ENA_DBG, "vendor=%x device=%x\n", pci_vendor_id, pci_device_id); 
sprintf(adapter_name, DEVICE_DESC); @@ -345,7 +345,7 @@ ena_change_mtu(if_t ifp, int new_mtu) rc = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu); if (likely(rc == 0)) { - ena_trace(ENA_DBG, "set MTU to %d\n", new_mtu); + ena_trace(NULL, ENA_DBG, "set MTU to %d\n", new_mtu); if_setmtu(ifp, new_mtu); } else { device_printf(adapter->pdev, "Failed to set MTU to %d\n", @@ -666,7 +666,7 @@ ena_setup_tx_resources(struct ena_adapter *adapter, int qid) err = bus_dmamap_create(adapter->tx_buf_tag, 0, &tx_ring->tx_buffer_info[i].dmamap); if (unlikely(err != 0)) { - ena_trace(ENA_ALERT, + ena_trace(NULL, ENA_ALERT, "Unable to create Tx DMA map for buffer %d\n", i); goto err_map_release; @@ -679,7 +679,7 @@ ena_setup_tx_resources(struct ena_adapter *adapter, int qid) err = bus_dmamap_create(adapter->tx_buf_tag, 0, &map[j]); if (unlikely(err != 0)) { - ena_trace(ENA_ALERT, "Unable to create " + ena_trace(NULL, ENA_ALERT, "Unable to create " "Tx DMA for buffer %d %d\n", i, j); goto err_map_release; } @@ -693,7 +693,7 @@ ena_setup_tx_resources(struct ena_adapter *adapter, int qid) tx_ring->enqueue_tq = taskqueue_create_fast("ena_tx_enque", M_NOWAIT, taskqueue_thread_enqueue, &tx_ring->enqueue_tq); if (unlikely(tx_ring->enqueue_tq == NULL)) { - ena_trace(ENA_ALERT, + ena_trace(NULL, ENA_ALERT, "Unable to create taskqueue for enqueue task\n"); i = tx_ring->ring_size; goto err_map_release; @@ -878,7 +878,7 @@ ena_setup_rx_resources(struct ena_adapter *adapter, unsigned int qid) err = bus_dmamap_create(adapter->rx_buf_tag, 0, &(rx_ring->rx_buffer_info[i].map)); if (err != 0) { - ena_trace(ENA_ALERT, + ena_trace(NULL, ENA_ALERT, "Unable to create Rx DMA map for buffer %d\n", i); goto err_buf_info_unmap; } @@ -891,7 +891,7 @@ ena_setup_rx_resources(struct ena_adapter *adapter, unsigned int qid) device_printf(adapter->pdev, "LRO[%d] Initialization failed!\n", qid); } else { - ena_trace(ENA_INFO, + ena_trace(NULL, ENA_INFO, "RX Soft LRO[%d] Initialized\n", qid); rx_ring->lro.ifp = adapter->ifp; } @@ -1022,13 +1022,13 @@ ena_alloc_rx_mbuf(struct ena_adapter *adapter, rx_info->mbuf->m_pkthdr.len = rx_info->mbuf->m_len = mlen; /* Map packets for DMA */ - ena_trace(ENA_DBG | ENA_RSC | ENA_RXPTH, + ena_trace(NULL, ENA_DBG | ENA_RSC | ENA_RXPTH, "Using tag %p for buffers' DMA mapping, mbuf %p len: %d\n", adapter->rx_buf_tag,rx_info->mbuf, rx_info->mbuf->m_len); error = bus_dmamap_load_mbuf_sg(adapter->rx_buf_tag, rx_info->map, rx_info->mbuf, segs, &nsegs, BUS_DMA_NOWAIT); if (unlikely((error != 0) || (nsegs != 1))) { - ena_trace(ENA_WARNING, "failed to map mbuf, error: %d, " + ena_trace(NULL, ENA_WARNING, "failed to map mbuf, error: %d, " "nsegs: %d\n", error, nsegs); counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1); goto exit; @@ -1041,7 +1041,7 @@ ena_alloc_rx_mbuf(struct ena_adapter *adapter, ena_buf->paddr = segs[0].ds_addr; ena_buf->len = mlen; - ena_trace(ENA_DBG | ENA_RSC | ENA_RXPTH, + ena_trace(NULL, ENA_DBG | ENA_RSC | ENA_RXPTH, "ALLOC RX BUF: mbuf %p, rx_info %p, len %d, paddr %#jx\n", rx_info->mbuf, rx_info,ena_buf->len, (uintmax_t)ena_buf->paddr); @@ -1059,7 +1059,7 @@ ena_free_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring, { if (rx_info->mbuf == NULL) { - ena_trace(ENA_WARNING, "Trying to free unallocated buffer\n"); + ena_trace(NULL, ENA_WARNING, "Trying to free unallocated buffer\n"); return; } @@ -1084,7 +1084,7 @@ ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num) uint32_t i; int rc; - ena_trace(ENA_DBG | ENA_RXPTH | ENA_RSC, "refill qid: %d\n", + ena_trace(NULL, ENA_DBG 
| ENA_RXPTH | ENA_RSC, "refill qid: %d\n", rx_ring->qid); next_to_use = rx_ring->next_to_use; @@ -1092,7 +1092,7 @@ ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num) for (i = 0; i < num; i++) { struct ena_rx_buffer *rx_info; - ena_trace(ENA_DBG | ENA_RXPTH | ENA_RSC, + ena_trace(NULL, ENA_DBG | ENA_RXPTH | ENA_RSC, "RX buffer - next to use: %d\n", next_to_use); req_id = rx_ring->free_rx_ids[next_to_use]; @@ -1104,7 +1104,7 @@ ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num) #endif /* DEV_NETMAP */ rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info); if (unlikely(rc != 0)) { - ena_trace(ENA_WARNING, + ena_trace(NULL, ENA_WARNING, "failed to alloc buffer for rx queue %d\n", rx_ring->qid); break; @@ -1112,7 +1112,7 @@ ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num) rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq, &rx_info->ena_buf, req_id); if (unlikely(rc != 0)) { - ena_trace(ENA_WARNING, + ena_trace(NULL, ENA_WARNING, "failed to add buffer for rx queue %d\n", rx_ring->qid); break; @@ -1123,7 +1123,7 @@ ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num) if (unlikely(i < num)) { counter_u64_add(rx_ring->rx_stats.refil_partial, 1); - ena_trace(ENA_WARNING, + ena_trace(NULL, ENA_WARNING, "refilled rx qid %d with only %d mbufs (from %d)\n", rx_ring->qid, i, num); } @@ -1332,7 +1332,7 @@ ena_refill_all_rx_bufs(struct ena_adapter *adapter) bufs_num = rx_ring->ring_size - 1; rc = ena_refill_rx_bufs(rx_ring, bufs_num); if (unlikely(rc != bufs_num)) - ena_trace(ENA_WARNING, "refilling Queue %d failed. " + ena_trace(NULL, ENA_WARNING, "refilling Queue %d failed. " "Allocated %d buffers from: %d\n", i, rc, bufs_num); #ifdef DEV_NETMAP rx_ring->initialized = true; @@ -1373,7 +1373,7 @@ ena_free_tx_bufs(struct ena_adapter *adapter, unsigned int qid) qid, i); print_once = false; } else { - ena_trace(ENA_DBG, + ena_trace(NULL, ENA_DBG, "free uncompleted tx mbuf qid %d idx 0x%x\n", qid, i); } @@ -1589,7 +1589,7 @@ ena_enable_msix(struct ena_adapter *adapter) adapter->msix_entries = malloc(msix_vecs * sizeof(struct msix_entry), M_DEVBUF, M_WAITOK | M_ZERO); - ena_trace(ENA_DBG, "trying to enable MSI-X, vectors: %d\n", msix_vecs); + ena_trace(NULL, ENA_DBG, "trying to enable MSI-X, vectors: %d\n", msix_vecs); for (i = 0; i < msix_vecs; i++) { adapter->msix_entries[i].entry = i; @@ -1667,7 +1667,7 @@ ena_setup_io_intr(struct ena_adapter *adapter) adapter->irq_tbl[irq_idx].data = &adapter->que[i]; adapter->irq_tbl[irq_idx].vector = adapter->msix_entries[irq_idx].vector; - ena_trace(ENA_INFO | ENA_IOQ, "ena_setup_io_intr vector: %d\n", + ena_trace(NULL, ENA_INFO | ENA_IOQ, "ena_setup_io_intr vector: %d\n", adapter->msix_entries[irq_idx].vector); /* @@ -1717,7 +1717,7 @@ ena_request_mgmnt_irq(struct ena_adapter *adapter) return (rc); err_res_free: - ena_trace(ENA_INFO | ENA_ADMQ, "releasing resource for irq %d\n", + ena_trace(NULL, ENA_INFO | ENA_ADMQ, "releasing resource for irq %d\n", irq->vector); rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, irq->vector, irq->res); @@ -1770,7 +1770,7 @@ ena_request_io_irq(struct ena_adapter *adapter) } irq->requested = true; - ena_trace(ENA_INFO, "queue %d - cpu %d\n", + ena_trace(NULL, ENA_INFO, "queue %d - cpu %d\n", i - ENA_IO_IRQ_FIRST_IDX, irq->cpu); } @@ -1817,7 +1817,7 @@ ena_free_mgmnt_irq(struct ena_adapter *adapter) irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; if (irq->requested) { - ena_trace(ENA_INFO | ENA_ADMQ, "tear down irq: %d\n", + ena_trace(NULL, ENA_INFO | ENA_ADMQ, "tear down irq: %d\n", irq->vector); rc = 
bus_teardown_intr(adapter->pdev, irq->res, irq->cookie); if (unlikely(rc != 0)) @@ -1827,7 +1827,7 @@ ena_free_mgmnt_irq(struct ena_adapter *adapter) } if (irq->res != NULL) { - ena_trace(ENA_INFO | ENA_ADMQ, "release resource irq: %d\n", + ena_trace(NULL, ENA_INFO | ENA_ADMQ, "release resource irq: %d\n", irq->vector); rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, irq->vector, irq->res); @@ -1847,7 +1847,7 @@ ena_free_io_irq(struct ena_adapter *adapter) for (int i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { irq = &adapter->irq_tbl[i]; if (irq->requested) { - ena_trace(ENA_INFO | ENA_IOQ, "tear down irq: %d\n", + ena_trace(NULL, ENA_INFO | ENA_IOQ, "tear down irq: %d\n", irq->vector); rc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie); @@ -1859,7 +1859,7 @@ ena_free_io_irq(struct ena_adapter *adapter) } if (irq->res != NULL) { - ena_trace(ENA_INFO | ENA_IOQ, "release resource irq: %d\n", + ena_trace(NULL, ENA_INFO | ENA_IOQ, "release resource irq: %d\n", irq->vector); rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, irq->vector, irq->res); @@ -2006,21 +2006,21 @@ create_queues_with_size_backoff(struct ena_adapter *adapter) /* Allocate transmit descriptors */ rc = ena_setup_all_tx_resources(adapter); if (unlikely(rc != 0)) { - ena_trace(ENA_ALERT, "err_setup_tx\n"); + ena_trace(NULL, ENA_ALERT, "err_setup_tx\n"); goto err_setup_tx; } /* Allocate receive descriptors */ rc = ena_setup_all_rx_resources(adapter); if (unlikely(rc != 0)) { - ena_trace(ENA_ALERT, "err_setup_rx\n"); + ena_trace(NULL, ENA_ALERT, "err_setup_rx\n"); goto err_setup_rx; } /* Create IO queues for Rx & Tx */ rc = ena_create_io_queues(adapter); if (unlikely(rc != 0)) { - ena_trace(ENA_ALERT, + ena_trace(NULL, ENA_ALERT, "create IO queues failed\n"); goto err_io_que; } @@ -2037,7 +2037,7 @@ create_queues_with_size_backoff(struct ena_adapter *adapter) * error straightaway. 
*/ if (unlikely(rc != ENOMEM)) { - ena_trace(ENA_ALERT, + ena_trace(NULL, ENA_ALERT, "Queue creation failed with error code: %d\n", rc); return (rc); } @@ -2092,12 +2092,12 @@ ena_up(struct ena_adapter *adapter) /* setup interrupts for IO queues */ rc = ena_setup_io_intr(adapter); if (unlikely(rc != 0)) { - ena_trace(ENA_ALERT, "error setting up IO interrupt\n"); + ena_trace(NULL, ENA_ALERT, "error setting up IO interrupt\n"); goto error; } rc = ena_request_io_irq(adapter); if (unlikely(rc != 0)) { - ena_trace(ENA_ALERT, "err_req_irq\n"); + ena_trace(NULL, ENA_ALERT, "err_req_irq\n"); goto error; } @@ -2112,7 +2112,7 @@ ena_up(struct ena_adapter *adapter) rc = create_queues_with_size_backoff(adapter); if (unlikely(rc != 0)) { - ena_trace(ENA_ALERT, + ena_trace(NULL, ENA_ALERT, "error creating queues with size backoff\n"); goto err_create_queues_with_backoff; } @@ -2194,7 +2194,7 @@ static void ena_media_status(if_t ifp, struct ifmediareq *ifmr) { struct ena_adapter *adapter = if_getsoftc(ifp); - ena_trace(ENA_DBG, "enter\n"); + ena_trace(NULL, ENA_DBG, "enter\n"); ENA_LOCK_LOCK(adapter); @@ -2203,7 +2203,7 @@ ena_media_status(if_t ifp, struct ifmediareq *ifmr) if (!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) { ENA_LOCK_UNLOCK(adapter); - ena_trace(ENA_INFO, "Link is down\n"); + ena_trace(NULL, ENA_INFO, "Link is down\n"); return; } @@ -2397,7 +2397,7 @@ ena_setup_ifnet(device_t pdev, struct ena_adapter *adapter, ifp = adapter->ifp = if_gethandle(IFT_ETHER); if (unlikely(ifp == NULL)) { - ena_trace(ENA_ALERT, "can not allocate ifnet structure\n"); + ena_trace(NULL, ENA_ALERT, "can not allocate ifnet structure\n"); return (ENXIO); } if_initname(ifp, device_get_name(pdev), device_get_unit(pdev)); @@ -2534,7 +2534,7 @@ ena_enable_wc(struct resource *res) /* Enable write combining */ rc = pmap_change_attr(va, len, VM_MEMATTR_WRITE_COMBINING); if (unlikely(rc != 0)) { - ena_trace(ENA_ALERT, "pmap_change_attr failed, %d\n", rc); + ena_trace(NULL, ENA_ALERT, "pmap_change_attr failed, %d\n", rc); return (rc); } @@ -2745,7 +2745,7 @@ ena_rss_init_default_deferred(void *arg) dc = devclass_find("ena"); if (unlikely(dc == NULL)) { - ena_trace(ENA_ALERT, "No devclass ena\n"); + ena_trace(NULL, ENA_ALERT, "No devclass ena\n"); return; } @@ -2776,7 +2776,7 @@ ena_config_host_info(struct ena_com_dev *ena_dev, device_t dev) /* Allocate only the host info */ rc = ena_com_allocate_host_info(ena_dev); if (unlikely(rc != 0)) { - ena_trace(ENA_ALERT, "Cannot allocate host info\n"); + ena_trace(NULL, ENA_ALERT, "Cannot allocate host info\n"); return; } @@ -2801,9 +2801,9 @@ ena_config_host_info(struct ena_com_dev *ena_dev, device_t dev) rc = ena_com_set_host_attributes(ena_dev); if (unlikely(rc != 0)) { if (rc == EOPNOTSUPP) - ena_trace(ENA_WARNING, "Cannot set host attributes\n"); + ena_trace(NULL, ENA_WARNING, "Cannot set host attributes\n"); else - ena_trace(ENA_ALERT, "Cannot set host attributes\n"); + ena_trace(NULL, ENA_ALERT, "Cannot set host attributes\n"); goto err; } @@ -3057,7 +3057,7 @@ check_missing_comp_in_tx_queue(struct ena_adapter *adapter, if (unlikely(time_offset > adapter->missing_tx_timeout)) { if (!tx_buf->print_once) - ena_trace(ENA_WARNING, "Found a Tx that wasn't " + ena_trace(NULL, ENA_WARNING, "Found a Tx that wasn't " "completed on time, qid %d, index %d.\n", tx_ring->qid, i); @@ -3786,11 +3786,11 @@ static void ena_notification(void *adapter_data, struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; struct ena_admin_ena_hw_hints *hints; - 
ENA_WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION, + ENA_WARN(NULL, aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION, "Invalid group(%x) expected %x\n", aenq_e->aenq_common_desc.group, ENA_ADMIN_NOTIFICATION); - switch (aenq_e->aenq_common_desc.syndrom) { + switch (aenq_e->aenq_common_desc.syndrome) { case ENA_ADMIN_UPDATE_HINTS: hints = (struct ena_admin_ena_hw_hints *)(&aenq_e->inline_data_w4); @@ -3799,7 +3799,7 @@ static void ena_notification(void *adapter_data, default: device_printf(adapter->pdev, "Invalid aenq notification link state %d\n", - aenq_e->aenq_common_desc.syndrom); + aenq_e->aenq_common_desc.syndrome); } } diff --git a/sys/dev/ena/ena.h b/sys/dev/ena/ena.h index ee57f53f365b..a40d89457d8c 100644 --- a/sys/dev/ena/ena.h +++ b/sys/dev/ena/ena.h @@ -514,20 +514,4 @@ ena_trigger_reset(struct ena_adapter *adapter, } } -static inline int -validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id) -{ - if (likely(req_id < rx_ring->ring_size)) - return (0); - - device_printf(rx_ring->adapter->pdev, "Invalid rx req_id: %hu\n", - req_id); - counter_u64_add(rx_ring->rx_stats.bad_req_id, 1); - - /* Trigger device reset */ - ena_trigger_reset(rx_ring->adapter, ENA_REGS_RESET_INV_RX_REQ_ID); - - return (EFAULT); -} - #endif /* !(ENA_H) */ diff --git a/sys/dev/ena/ena_datapath.c b/sys/dev/ena/ena_datapath.c index f95bab8dc1a5..f2783efd98cc 100644 --- a/sys/dev/ena/ena_datapath.c +++ b/sys/dev/ena/ena_datapath.c @@ -76,7 +76,7 @@ ena_cleanup(void *arg, int pending) if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)) return; - ena_trace(ENA_DBG, "MSI-X TX/RX routine\n"); + ena_trace(NULL, ENA_DBG, "MSI-X TX/RX routine\n"); tx_ring = que->tx_ring; rx_ring = que->rx_ring; @@ -267,7 +267,7 @@ ena_tx_cleanup(struct ena_ring *tx_ring) bus_dmamap_unload(adapter->tx_buf_tag, tx_info->dmamap); - ena_trace(ENA_DBG | ENA_TXPTH, "tx: q %d mbuf %p completed\n", + ena_trace(NULL, ENA_DBG | ENA_TXPTH, "tx: q %d mbuf %p completed\n", tx_ring->qid, mbuf); m_freem(mbuf); @@ -292,7 +292,7 @@ ena_tx_cleanup(struct ena_ring *tx_ring) work_done = TX_BUDGET - budget; - ena_trace(ENA_DBG | ENA_TXPTH, "tx: q %d done. total pkts: %d\n", + ena_trace(NULL, ENA_DBG | ENA_TXPTH, "tx: q %d done. 
total pkts: %d\n", tx_ring->qid, work_done); /* If there is still something to commit update ring state */ @@ -409,7 +409,6 @@ ena_rx_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_buf_info *ena_bufs, struct ena_rx_buffer *rx_info; struct ena_adapter *adapter; unsigned int descs = ena_rx_ctx->descs; - int rc; uint16_t ntc, len, req_id, buf = 0; ntc = *next_to_clean; @@ -417,17 +416,13 @@ ena_rx_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_buf_info *ena_bufs, len = ena_bufs[buf].len; req_id = ena_bufs[buf].req_id; - rc = validate_rx_req_id(rx_ring, req_id); - if (unlikely(rc != 0)) - return (NULL); - rx_info = &rx_ring->rx_buffer_info[req_id]; if (unlikely(rx_info->mbuf == NULL)) { device_printf(adapter->pdev, "NULL mbuf in rx_info"); return (NULL); } - ena_trace(ENA_DBG | ENA_RXPTH, "rx_info %p, mbuf %p, paddr %jx\n", + ena_trace(NULL, ENA_DBG | ENA_RXPTH, "rx_info %p, mbuf %p, paddr %jx\n", rx_info, rx_info->mbuf, (uintmax_t)rx_info->ena_buf.paddr); bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, @@ -441,7 +436,7 @@ ena_rx_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_buf_info *ena_bufs, /* Fill mbuf with hash key and it's interpretation for optimization */ ena_rx_hash_mbuf(rx_ring, ena_rx_ctx, mbuf); - ena_trace(ENA_DBG | ENA_RXPTH, "rx mbuf 0x%p, flags=0x%x, len: %d\n", + ena_trace(NULL, ENA_DBG | ENA_RXPTH, "rx mbuf 0x%p, flags=0x%x, len: %d\n", mbuf, mbuf->m_flags, mbuf->m_pkthdr.len); /* DMA address is not needed anymore, unmap it */ @@ -459,16 +454,6 @@ ena_rx_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_buf_info *ena_bufs, ++buf; len = ena_bufs[buf].len; req_id = ena_bufs[buf].req_id; - rc = validate_rx_req_id(rx_ring, req_id); - if (unlikely(rc != 0)) { - /* - * If the req_id is invalid, then the device will be - * reset. In that case we must free all mbufs that - * were already gathered. - */ - m_freem(mbuf); - return (NULL); - } rx_info = &rx_ring->rx_buffer_info[req_id]; if (unlikely(rx_info->mbuf == NULL)) { @@ -491,11 +476,11 @@ ena_rx_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_buf_info *ena_bufs, BUS_DMASYNC_POSTREAD); if (unlikely(m_append(mbuf, len, rx_info->mbuf->m_data) == 0)) { counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1); - ena_trace(ENA_WARNING, "Failed to append Rx mbuf %p\n", + ena_trace(NULL, ENA_WARNING, "Failed to append Rx mbuf %p\n", mbuf); } - ena_trace(ENA_DBG | ENA_RXPTH, + ena_trace(NULL, ENA_DBG | ENA_RXPTH, "rx mbuf updated. 
len %d\n", mbuf->m_pkthdr.len); /* Free already appended mbuf, it won't be useful anymore */ @@ -526,7 +511,7 @@ ena_rx_checksum(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx, /* ipv4 checksum error */ mbuf->m_pkthdr.csum_flags = 0; counter_u64_add(rx_ring->rx_stats.bad_csum, 1); - ena_trace(ENA_DBG, "RX IPv4 header checksum error\n"); + ena_trace(NULL, ENA_DBG, "RX IPv4 header checksum error\n"); return; } @@ -537,7 +522,7 @@ ena_rx_checksum(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx, /* TCP/UDP checksum error */ mbuf->m_pkthdr.csum_flags = 0; counter_u64_add(rx_ring->rx_stats.bad_csum, 1); - ena_trace(ENA_DBG, "RX L4 checksum error\n"); + ena_trace(NULL, ENA_DBG, "RX L4 checksum error\n"); } else { mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED; mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID; @@ -557,6 +542,7 @@ ena_rx_cleanup(struct ena_ring *rx_ring) struct ena_com_rx_ctx ena_rx_ctx; struct ena_com_io_cq* io_cq; struct ena_com_io_sq* io_sq; + enum ena_regs_reset_reason_types reset_reason; if_t ifp; uint16_t ena_qid; uint16_t next_to_clean; @@ -583,7 +569,7 @@ ena_rx_cleanup(struct ena_ring *rx_ring) return (0); #endif /* DEV_NETMAP */ - ena_trace(ENA_DBG, "rx: qid %d\n", qid); + ena_trace(NULL, ENA_DBG, "rx: qid %d\n", qid); do { ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; @@ -592,14 +578,24 @@ ena_rx_cleanup(struct ena_ring *rx_ring) bus_dmamap_sync(io_cq->cdesc_addr.mem_handle.tag, io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_POSTREAD); rc = ena_com_rx_pkt(io_cq, io_sq, &ena_rx_ctx); - - if (unlikely(rc != 0)) - goto error; + if (unlikely(rc != 0)) { + if (rc == ENA_COM_NO_SPACE) { + counter_u64_add(rx_ring->rx_stats.bad_desc_num, + 1); + reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS; + } else { + counter_u64_add(rx_ring->rx_stats.bad_req_id, + 1); + reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID; + } + ena_trigger_reset(adapter, reset_reason); + return (0); + } if (unlikely(ena_rx_ctx.descs == 0)) break; - ena_trace(ENA_DBG | ENA_RXPTH, "rx: q %d got packet from ena. " + ena_trace(NULL, ENA_DBG | ENA_RXPTH, "rx: q %d got packet from ena. " "descs #: %d l3 proto %d l4 proto %d hash: %x\n", rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto, ena_rx_ctx.l4_proto, ena_rx_ctx.hash); @@ -652,7 +648,7 @@ ena_rx_cleanup(struct ena_ring *rx_ring) do_if_input = 0; } if (do_if_input != 0) { - ena_trace(ENA_DBG | ENA_RXPTH, + ena_trace(NULL, ENA_DBG | ENA_RXPTH, "calling if_input() with mbuf %p\n", mbuf); (*ifp->if_input)(ifp, mbuf); } @@ -678,14 +674,6 @@ ena_rx_cleanup(struct ena_ring *rx_ring) tcp_lro_flush_all(&rx_ring->lro); return (RX_BUDGET - budget); - -error: - counter_u64_add(rx_ring->rx_stats.bad_desc_num, 1); - - /* Too many desc from the device. Trigger reset */ - ena_trigger_reset(adapter, ENA_REGS_RESET_TOO_MANY_RX_DESCS); - - return (0); } static void @@ -841,7 +829,7 @@ ena_tx_map_mbuf(struct ena_ring *tx_ring, struct ena_tx_buffer *tx_info, rc = bus_dmamap_load_mbuf_sg(adapter->tx_buf_tag, tx_info->dmamap, mbuf, segs, &nsegs, BUS_DMA_NOWAIT); if (unlikely((rc != 0) || (nsegs == 0))) { - ena_trace(ENA_WARNING, + ena_trace(NULL, ENA_WARNING, "dmamap load failed! 
err: %d nsegs: %d\n", rc, nsegs); goto dma_error; } @@ -873,7 +861,7 @@ ena_tx_map_mbuf(struct ena_ring *tx_ring, struct ena_tx_buffer *tx_info, counter_u64_add(tx_ring->tx_stats.llq_buffer_copy, 1); } - ena_trace(ENA_DBG | ENA_TXPTH, + ena_trace(NULL, ENA_DBG | ENA_TXPTH, "mbuf: %p header_buf->vaddr: %p push_len: %d\n", mbuf, *push_hdr, *header_len); @@ -951,12 +939,12 @@ ena_xmit_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf) rc = ena_check_and_collapse_mbuf(tx_ring, mbuf); if (unlikely(rc != 0)) { - ena_trace(ENA_WARNING, + ena_trace(NULL, ENA_WARNING, "Failed to collapse mbuf! err: %d\n", rc); return (rc); } - ena_trace(ENA_DBG | ENA_TXPTH, "Tx: %d bytes\n", (*mbuf)->m_pkthdr.len); + ena_trace(NULL, ENA_DBG | ENA_TXPTH, "Tx: %d bytes\n", (*mbuf)->m_pkthdr.len); next_to_use = tx_ring->next_to_use; req_id = tx_ring->free_tx_ids[next_to_use]; @@ -965,7 +953,7 @@ ena_xmit_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf) rc = ena_tx_map_mbuf(tx_ring, tx_info, *mbuf, &push_hdr, &header_len); if (unlikely(rc != 0)) { - ena_trace(ENA_WARNING, "Failed to map TX mbuf\n"); + ena_trace(NULL, ENA_WARNING, "Failed to map TX mbuf\n"); return (rc); } memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx)); @@ -980,7 +968,7 @@ ena_xmit_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf) if (tx_ring->acum_pkts == DB_THRESHOLD || ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, &ena_tx_ctx)) { - ena_trace(ENA_DBG | ENA_TXPTH, + ena_trace(NULL, ENA_DBG | ENA_TXPTH, "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n", tx_ring->que->id); ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); @@ -992,7 +980,7 @@ ena_xmit_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf) rc = ena_com_prepare_tx(io_sq, &ena_tx_ctx, &nb_hw_desc); if (unlikely(rc != 0)) { if (likely(rc == ENA_COM_NO_MEM)) { - ena_trace(ENA_DBG | ENA_TXPTH, + ena_trace(NULL, ENA_DBG | ENA_TXPTH, "tx ring[%d] if out of space\n", tx_ring->que->id); } else { device_printf(adapter->pdev, @@ -1025,7 +1013,7 @@ ena_xmit_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf) */ if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, adapter->max_tx_sgl_size + 2))) { - ena_trace(ENA_DBG | ENA_TXPTH, "Stop queue %d\n", + ena_trace(NULL, ENA_DBG | ENA_TXPTH, "Stop queue %d\n", tx_ring->que->id); tx_ring->running = false; @@ -1078,7 +1066,7 @@ ena_start_xmit(struct ena_ring *tx_ring) io_sq = &adapter->ena_dev->io_sq_queues[ena_qid]; while ((mbuf = drbr_peek(adapter->ifp, tx_ring->br)) != NULL) { - ena_trace(ENA_DBG | ENA_TXPTH, "\ndequeued mbuf %p with flags %#x and" + ena_trace(NULL, ENA_DBG | ENA_TXPTH, "\ndequeued mbuf %p with flags %#x and" " header csum flags %#jx\n", mbuf, mbuf->m_flags, (uint64_t)mbuf->m_pkthdr.csum_flags); diff --git a/sys/dev/ena/ena_netmap.c b/sys/dev/ena/ena_netmap.c index 20a341173c8c..1f8e712358e9 100644 --- a/sys/dev/ena/ena_netmap.c +++ b/sys/dev/ena/ena_netmap.c @@ -88,7 +88,7 @@ ena_netmap_attach(struct ena_adapter *adapter) { struct netmap_adapter na; - ena_trace(ENA_NETMAP, "netmap attach\n"); + ena_trace(NULL, ENA_NETMAP, "netmap attach\n"); bzero(&na, sizeof(na)); na.na_flags = NAF_MOREFRAG; @@ -126,12 +126,12 @@ ena_netmap_alloc_rx_slot(struct ena_adapter *adapter, nm_i = kring->nr_hwcur; head = kring->rhead; - ena_trace(ENA_NETMAP | ENA_DBG, "nr_hwcur: %d, nr_hwtail: %d, " + ena_trace(NULL, ENA_NETMAP | ENA_DBG, "nr_hwcur: %d, nr_hwtail: %d, " "rhead: %d, rcur: %d, rtail: %d\n", kring->nr_hwcur, kring->nr_hwtail, kring->rhead, kring->rcur, kring->rtail); if ((nm_i == head) && 
rx_ring->initialized) { - ena_trace(ENA_NETMAP, "No free slots in netmap ring\n"); + ena_trace(NULL, ENA_NETMAP, "No free slots in netmap ring\n"); return (ENOMEM); } @@ -150,7 +150,7 @@ ena_netmap_alloc_rx_slot(struct ena_adapter *adapter, rc = netmap_load_map(na, adapter->rx_buf_tag, rx_info->map, addr); if (rc != 0) { - ena_trace(ENA_WARNING, "DMA mapping error\n"); + ena_trace(NULL, ENA_WARNING, "DMA mapping error\n"); return (rc); } bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD); @@ -210,7 +210,7 @@ ena_netmap_free_rx_slot(struct ena_adapter *adapter, slot = &kring->ring->slot[nm_i]; - ENA_ASSERT(slot->buf_idx == 0, "Overwrite slot buf\n"); + ENA_WARN(slot->buf_idx != 0, NULL, "Overwrite slot buf\n"); slot->buf_idx = rx_info->netmap_buf_idx; slot->flags = NS_BUF_CHANGED; @@ -252,7 +252,7 @@ ena_netmap_reset_ring(struct ena_adapter *adapter, int qid, enum txrx x) return; netmap_reset(NA(adapter->ifp), x, qid, 0); - ena_trace(ENA_NETMAP, "%s ring %d is in netmap mode\n", + ena_trace(NULL, ENA_NETMAP, "%s ring %d is in netmap mode\n", (x == NR_TX) ? "Tx" : "Rx", qid); } @@ -282,7 +282,7 @@ ena_netmap_reg(struct netmap_adapter *na, int onoff) ena_down(adapter); if (onoff) { - ena_trace(ENA_NETMAP, "netmap on\n"); + ena_trace(NULL, ENA_NETMAP, "netmap on\n"); for_rx_tx(t) { for (i = 0; i <= nma_get_nrings(na, t); i++) { kring = NMR(na, t)[i]; @@ -293,7 +293,7 @@ ena_netmap_reg(struct netmap_adapter *na, int onoff) } nm_set_native_flags(na); } else { - ena_trace(ENA_NETMAP, "netmap off\n"); + ena_trace(NULL, ENA_NETMAP, "netmap off\n"); nm_clear_native_flags(na); for_rx_tx(t) { for (i = 0; i <= nma_get_nrings(na, t); i++) { @@ -307,7 +307,7 @@ ena_netmap_reg(struct netmap_adapter *na, int onoff) rc = ena_up(adapter); if (rc != 0) { - ena_trace(ENA_WARNING, "ena_up failed with rc=%d\n", rc); + ena_trace(NULL, ENA_WARNING, "ena_up failed with rc=%d\n", rc); adapter->reset_reason = ENA_REGS_RESET_DRIVER_INVALID_STATE; nm_clear_native_flags(na); ena_destroy_device(adapter, false); @@ -401,7 +401,7 @@ ena_netmap_tx_frame(struct ena_netmap_ctx *ctx) adapter = ctx->adapter; if (ena_netmap_count_slots(ctx) > adapter->max_tx_sgl_size) { - ena_trace(ENA_WARNING, "Too many slots per packet\n"); + ena_trace(NULL, ENA_WARNING, "Too many slots per packet\n"); return (EINVAL); } @@ -438,7 +438,7 @@ ena_netmap_tx_frame(struct ena_netmap_ctx *ctx) rc = ena_com_prepare_tx(ctx->io_sq, &ena_tx_ctx, &nb_hw_desc); if (unlikely(rc != 0)) { if (likely(rc == ENA_COM_NO_MEM)) { - ena_trace(ENA_NETMAP | ENA_DBG | ENA_TXPTH, + ena_trace(NULL, ENA_NETMAP | ENA_DBG | ENA_TXPTH, "Tx ring[%d] is out of space\n", tx_ring->que->id); } else { device_printf(adapter->pdev, @@ -532,13 +532,13 @@ ena_netmap_map_single_slot(struct netmap_adapter *na, struct netmap_slot *slot, *vaddr = PNMB(na, slot, paddr); if (unlikely(vaddr == NULL)) { - ena_trace(ENA_ALERT, "Slot address is NULL\n"); + ena_trace(NULL, ENA_ALERT, "Slot address is NULL\n"); return (EINVAL); } rc = netmap_load_map(na, dmatag, dmamap, *vaddr); if (unlikely(rc != 0)) { - ena_trace(ENA_ALERT, "Failed to map slot %d for DMA\n", + ena_trace(NULL, ENA_ALERT, "Failed to map slot %d for DMA\n", slot->buf_idx); return (EINVAL); } @@ -626,7 +626,7 @@ ena_netmap_tx_map_slots(struct ena_netmap_ctx *ctx, delta = push_len - slot_head_len; } - ena_trace(ENA_NETMAP | ENA_DBG | ENA_TXPTH, + ena_trace(NULL, ENA_NETMAP | ENA_DBG | ENA_TXPTH, "slot: %d header_buf->vaddr: %p push_len: %d\n", slot->buf_idx, *push_hdr, push_len); @@ -860,7 +860,7 @@ 
ena_netmap_tx_clean_one(struct ena_netmap_ctx *ctx, uint16_t req_id) /* Next, retain the sockets back to the userspace */ for (n = 0; n < nm_info->sockets_used; n++) { ctx->nm_i = nm_next(ctx->nm_i, ctx->lim); - ENA_ASSERT(ctx->slots[ctx->nm_i].buf_idx == 0, + ENA_WARN(ctx->slots[ctx->nm_i].buf_idx != 0, NULL, "Tx idx is not 0.\n"); ctx->slots[ctx->nm_i].buf_idx = nm_info->socket_buf_idx[n]; ctx->slots[ctx->nm_i].flags = NS_BUF_CHANGED; @@ -882,7 +882,7 @@ validate_tx_req_id(struct ena_ring *tx_ring, uint16_t req_id) if (likely(req_id < tx_ring->ring_size)) return (0); - ena_trace(ENA_WARNING, "Invalid req_id: %hu\n", req_id); + ena_trace(NULL, ENA_WARNING, "Invalid req_id: %hu\n", req_id); counter_u64_add(tx_ring->tx_stats.bad_req_id, 1); ena_trigger_reset(adapter, ENA_REGS_RESET_INV_TX_REQ_ID); @@ -949,6 +949,7 @@ static inline int ena_netmap_rx_frame(struct ena_netmap_ctx *ctx) { struct ena_com_rx_ctx ena_rx_ctx; + enum ena_regs_reset_reason_types reset_reason; int rc, len = 0; uint16_t buf, nm; @@ -959,16 +960,22 @@ ena_netmap_rx_frame(struct ena_netmap_ctx *ctx) rc = ena_com_rx_pkt(ctx->io_cq, ctx->io_sq, &ena_rx_ctx); if (unlikely(rc != 0)) { - ena_trace(ENA_ALERT, "Too many desc from the device.\n"); - counter_u64_add(ctx->ring->rx_stats.bad_desc_num, 1); - ena_trigger_reset(ctx->adapter, - ENA_REGS_RESET_TOO_MANY_RX_DESCS); + ena_trace(NULL, ENA_ALERT, + "Failed to read pkt from the device with error: %d\n", rc); + if (rc == ENA_COM_NO_SPACE) { + counter_u64_add(ctx->ring->rx_stats.bad_desc_num, 1); + reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS; + } else { + counter_u64_add(ctx->ring->rx_stats.bad_req_id, 1); + reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID; + } + ena_trigger_reset(ctx->adapter, reset_reason); return (rc); } if (unlikely(ena_rx_ctx.descs == 0)) return (ENA_NETMAP_NO_MORE_FRAMES); - ena_trace(ENA_NETMAP | ENA_DBG, "Rx: q %d got packet from ena. descs #:" + ena_trace(NULL, ENA_NETMAP | ENA_DBG, "Rx: q %d got packet from ena. descs #:" " %d l3 proto %d l4 proto %d hash: %x\n", ctx->ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto, ena_rx_ctx.l4_proto, ena_rx_ctx.hash); @@ -1017,19 +1024,15 @@ ena_netmap_rx_load_desc(struct ena_netmap_ctx *ctx, uint16_t buf, int *len) { struct ena_rx_buffer *rx_info; uint16_t req_id; - int rc; req_id = ctx->ring->ena_bufs[buf].req_id; - rc = validate_rx_req_id(ctx->ring, req_id); - if (unlikely(rc != 0)) - return (rc); - rx_info = &ctx->ring->rx_buffer_info[req_id]; bus_dmamap_sync(ctx->adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_POSTREAD); netmap_unload_map(ctx->na, ctx->adapter->rx_buf_tag, rx_info->map); - ENA_ASSERT(ctx->slots[ctx->nm_i].buf_idx == 0, "Rx idx is not 0.\n"); + ENA_WARN(ctx->slots[ctx->nm_i].buf_idx != 0, NULL, + "Rx idx is not 0.\n"); ctx->slots[ctx->nm_i].buf_idx = rx_info->netmap_buf_idx; rx_info->netmap_buf_idx = 0; @@ -1041,7 +1044,7 @@ ena_netmap_rx_load_desc(struct ena_netmap_ctx *ctx, uint16_t buf, int *len) ctx->slots[ctx->nm_i].len = ctx->ring->ena_bufs[buf].len; *len += ctx->slots[ctx->nm_i].len; ctx->ring->free_rx_ids[ctx->nt] = req_id; - ena_trace(ENA_DBG, "rx_info %p, buf_idx %d, paddr %jx, nm: %d\n", + ena_trace(NULL, ENA_DBG, "rx_info %p, buf_idx %d, paddr %jx, nm: %d\n", rx_info, ctx->slots[ctx->nm_i].buf_idx, (uintmax_t)rx_info->ena_buf.paddr, ctx->nm_i);
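Taken together, the datapath and netmap hunks above converge on a single error-dispatch idiom for ena_com_rx_pkt() failures, condensed here for reference (every name below is taken from this diff):

	/* Choose the reset reason from the error code instead of assuming
	 * every failure means too many descriptors, then reset the device.
	 */
	if (rc == ENA_COM_NO_SPACE) {
		counter_u64_add(rx_ring->rx_stats.bad_desc_num, 1);
		reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
	} else {
		counter_u64_add(rx_ring->rx_stats.bad_req_id, 1);
		reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
	}
	ena_trigger_reset(adapter, reset_reason);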