net/ena: support large LLQ headers
The default LLQ (Low-latency queue) maximum header size is 96 bytes, which can be too small for some types of packets, such as IPv6 packets with multiple extension headers. This can be fixed by using large LLQ headers. If the device supports larger LLQ headers, the user can activate them with the device argument 'large_llq_hdr' set to '1'. If the device does not support this feature, the default header size (96B) will be used.

Signed-off-by: Michal Krawczyk <mk@semihalf.com>
Reviewed-by: Igor Chauskin <igorch@amazon.com>
Reviewed-by: Guy Tzalik <gtzalik@amazon.com>
commit 8a7a73f26c
parent 5920d93083
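Usage sketch (illustrative, not part of the patch): the devarg travels through the standard EAL device arguments. A minimal example, assuming the DPDK 20.05-era '-w' allow-list option and a hypothetical ENA device at PCI address 0000:00:06.0:

#include <rte_eal.h>

int main(void)
{
	/* Hypothetical EAL arguments; "large_llq_hdr=1" asks the PMD to use
	 * 256B LLQ entries when the device supports them. */
	char *eal_argv[] = { "app", "-w", "0000:00:06.0,large_llq_hdr=1", NULL };
	int eal_argc = 3;

	if (rte_eal_init(eal_argc, eal_argv) < 0)
		return -1;

	/* ... regular port configuration follows ... */
	return 0;
}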
@@ -1,5 +1,5 @@
 .. SPDX-License-Identifier: BSD-3-Clause
-    Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates.
+    Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
     All rights reserved.

 ENA Poll Mode Driver
@@ -95,6 +95,14 @@ Configuration information
 * **CONFIG_RTE_LIBRTE_ENA_COM_DEBUG** (default n): Enables or disables debug
   logging of low level tx/rx logic in ena_com(base) within the ENA PMD driver.

+**Runtime Configuration Parameters**
+
+   * **large_llq_hdr** (default 0)
+
+     Enables or disables usage of large LLQ headers. This option will have
+     effect only if the device also supports large LLQ headers. Otherwise, the
+     default value will be used.
+
 **ENA Configuration Parameters**

 * **Number of Queues**
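A hedged illustration of how the option surfaces to an application (port id 0 and the halving of the reported Tx descriptor limit are assumptions drawn from the driver change below):

#include <stdio.h>
#include <rte_ethdev.h>

static void print_tx_ring_limit(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;

	/* When large_llq_hdr=1 is accepted by the device, the PMD halves the
	 * maximum Tx ring size, which is visible through dev_info. */
	if (rte_eth_dev_info_get(port_id, &dev_info) == 0)
		printf("port %u: max Tx descriptors = %u\n",
		       port_id, dev_info.tx_desc_lim.nb_max);
}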
@@ -82,6 +82,12 @@ New Features
   (enqueue/dequeue start; enqueue/dequeue finish). That allows user to inspect
   objects in the ring without removing them from it (aka MT safe peek).

+* **Updated Amazon ena driver.**
+
+  Updated ena PMD with new features and improvements, including:
+
+  * Added support for large LLQ (Low-latency queue) headers.
+
 * **Updated Intel i40e driver.**

   Updated i40e PMD with new features and improvements, including:
@@ -13,6 +13,7 @@
 #include <rte_errno.h>
 #include <rte_version.h>
 #include <rte_net.h>
+#include <rte_kvargs.h>

 #include "ena_ethdev.h"
 #include "ena_logs.h"
@@ -82,6 +83,9 @@ struct ena_stats {
 #define ENA_STAT_GLOBAL_ENTRY(stat) \
 	ENA_STAT_ENTRY(stat, dev)

+/* Device arguments */
+#define ENA_DEVARG_LARGE_LLQ_HDR "large_llq_hdr"
+
 /*
  * Each rte_memzone should have unique name.
  * To satisfy it, count number of allocation and add it to name.
@@ -231,6 +235,11 @@ static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
 				const uint64_t *ids,
 				uint64_t *values,
 				unsigned int n);
+static int ena_process_bool_devarg(const char *key,
+				   const char *value,
+				   void *opaque);
+static int ena_parse_devargs(struct ena_adapter *adapter,
+			     struct rte_devargs *devargs);

 static const struct eth_dev_ops ena_dev_ops = {
 	.dev_configure = ena_dev_configure,
@@ -842,7 +851,8 @@ static int ena_check_valid_conf(struct ena_adapter *adapter)
 }

 static int
-ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
+ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx,
+		       bool use_large_llq_hdr)
 {
 	struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
 	struct ena_com_dev *ena_dev = ctx->ena_dev;
@@ -895,6 +905,21 @@ ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
 	max_rx_queue_size = rte_align32prevpow2(max_rx_queue_size);
 	max_tx_queue_size = rte_align32prevpow2(max_tx_queue_size);

+	if (use_large_llq_hdr) {
+		if ((llq->entry_size_ctrl_supported &
+		     ENA_ADMIN_LIST_ENTRY_SIZE_256B) &&
+		    (ena_dev->tx_mem_queue_type ==
+		     ENA_ADMIN_PLACEMENT_POLICY_DEV)) {
+			max_tx_queue_size /= 2;
+			PMD_INIT_LOG(INFO,
+				"Forcing large headers and decreasing maximum TX queue size to %d\n",
+				max_tx_queue_size);
+		} else {
+			PMD_INIT_LOG(ERR,
+				"Forcing large headers failed: LLQ is disabled or device does not support large headers\n");
+		}
+	}
+
 	if (unlikely(max_rx_queue_size == 0 || max_tx_queue_size == 0)) {
 		PMD_INIT_LOG(ERR, "Invalid queue size");
 		return -EFAULT;
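The block above halves the maximum Tx ring size when 256B entries are used, since each entry then occupies twice as much of the device-side LLQ memory. A standalone sketch of that rule (the function and parameter names are illustrative, not PMD API):

#include <stdbool.h>
#include <stdint.h>

static uint32_t adjust_max_tx_queue_size(uint32_t max_tx_queue_size,
					 bool llq_enabled,
					 bool dev_supports_256b_entries,
					 bool use_large_llq_hdr)
{
	/* Mirror of the decision above: shrink the ring only when the user
	 * asked for large headers and the device can actually honor it. */
	if (use_large_llq_hdr && llq_enabled && dev_supports_256b_entries)
		return max_tx_queue_size / 2;
	return max_tx_queue_size;
}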
@@ -1594,14 +1619,25 @@ static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer,
 }

 static inline void
-set_default_llq_configurations(struct ena_llq_configurations *llq_config)
+set_default_llq_configurations(struct ena_llq_configurations *llq_config,
+			       struct ena_admin_feature_llq_desc *llq,
+			       bool use_large_llq_hdr)
 {
 	llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
-	llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
 	llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
 	llq_config->llq_num_decs_before_header =
 		ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
-	llq_config->llq_ring_entry_size_value = 128;
+
+	if (use_large_llq_hdr &&
+	    (llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B)) {
+		llq_config->llq_ring_entry_size =
+			ENA_ADMIN_LIST_ENTRY_SIZE_256B;
+		llq_config->llq_ring_entry_size_value = 256;
+	} else {
+		llq_config->llq_ring_entry_size =
+			ENA_ADMIN_LIST_ENTRY_SIZE_128B;
+		llq_config->llq_ring_entry_size_value = 128;
+	}
 }

 static int
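For intuition on the header sizes quoted in the commit message: the inline header room per LLQ entry is what remains after the descriptors placed in front of it. Assuming 16-byte Tx descriptors and the two-descriptors-before-header layout configured above, the arithmetic gives 96B for 128B entries and 224B for 256B entries:

#include <stdio.h>

int main(void)
{
	const int desc_size = 16;          /* assumed ENA Tx descriptor size */
	const int descs_before_header = 2; /* ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2 */

	printf("128B entry -> %d B of inline header room\n",
	       128 - descs_before_header * desc_size);  /* 96 */
	printf("256B entry -> %d B of inline header room\n",
	       256 - descs_before_header * desc_size);  /* 224 */
	return 0;
}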
@@ -1740,6 +1776,12 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 	snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d",
 		 adapter->id_number);

+	rc = ena_parse_devargs(adapter, pci_dev->device.devargs);
+	if (rc != 0) {
+		PMD_INIT_LOG(CRIT, "Failed to parse devargs\n");
+		goto err;
+	}
+
 	/* device specific initialization routine */
 	rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state);
 	if (rc) {
@@ -1748,7 +1790,8 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 	}
 	adapter->wd_state = wd_state;

-	set_default_llq_configurations(&llq_config);
+	set_default_llq_configurations(&llq_config, &get_feat_ctx.llq,
+		adapter->use_large_llq_hdr);
 	rc = ena_set_queues_placement_policy(adapter, ena_dev,
 		&get_feat_ctx.llq, &llq_config);
 	if (unlikely(rc)) {
@@ -1766,7 +1809,8 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 	calc_queue_ctx.get_feat_ctx = &get_feat_ctx;

 	max_num_io_queues = ena_calc_max_io_queue_num(ena_dev, &get_feat_ctx);
-	rc = ena_calc_io_queue_size(&calc_queue_ctx);
+	rc = ena_calc_io_queue_size(&calc_queue_ctx,
+		adapter->use_large_llq_hdr);
 	if (unlikely((rc != 0) || (max_num_io_queues == 0))) {
 		rc = -EFAULT;
 		goto err_device_destroy;
@@ -2582,6 +2626,59 @@ static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
 	return valid;
 }

+static int ena_process_bool_devarg(const char *key,
+				   const char *value,
+				   void *opaque)
+{
+	struct ena_adapter *adapter = opaque;
+	bool bool_value;
+
+	/* Parse the value. */
+	if (strcmp(value, "1") == 0) {
+		bool_value = true;
+	} else if (strcmp(value, "0") == 0) {
+		bool_value = false;
+	} else {
+		PMD_INIT_LOG(ERR,
+			"Invalid value: '%s' for key '%s'. Accepted: '0' or '1'\n",
+			value, key);
+		return -EINVAL;
+	}
+
+	/* Now, assign it to the proper adapter field. */
+	if (strcmp(key, ENA_DEVARG_LARGE_LLQ_HDR) == 0)
+		adapter->use_large_llq_hdr = bool_value;
+
+	return 0;
+}
+
+static int ena_parse_devargs(struct ena_adapter *adapter,
+			     struct rte_devargs *devargs)
+{
+	static const char * const allowed_args[] = {
+		ENA_DEVARG_LARGE_LLQ_HDR, NULL,
+	};
+	struct rte_kvargs *kvlist;
+	int rc;
+
+	if (devargs == NULL)
+		return 0;
+
+	kvlist = rte_kvargs_parse(devargs->args, allowed_args);
+	if (kvlist == NULL) {
+		PMD_INIT_LOG(ERR, "Invalid device arguments: %s\n",
+			devargs->args);
+		return -EINVAL;
+	}
+
+	rc = rte_kvargs_process(kvlist, ENA_DEVARG_LARGE_LLQ_HDR,
+		ena_process_bool_devarg, adapter);
+
+	rte_kvargs_free(kvlist);
+
+	return rc;
+}
+
 /*********************************************************************
  * PMD configuration
  *********************************************************************/
@@ -2608,6 +2705,7 @@ static struct rte_pci_driver rte_ena_pmd = {
 RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(net_ena, ENA_DEVARG_LARGE_LLQ_HDR "=<0|1>");

 RTE_INIT(ena_init_log)
 {
@@ -200,6 +200,8 @@ struct ena_adapter {
 	bool trigger_reset;

 	bool wd_state;
+
+	bool use_large_llq_hdr;
 };

 #endif /* _ENA_ETHDEV_H_ */