ethdev: add namespace

Add the 'RTE_ETH' namespace to all enums & macros in a backward-compatible
way. The macros kept for backward compatibility can be removed in the next
LTS release. Also update some struct names to carry the 'rte_eth' prefix.

All internal components are switched to the new names.

Syntax is fixed on the lines this patch touches.
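
Backward compatibility is provided by alias macros that map each old name
onto its new namespaced counterpart, along the lines of this illustrative
sketch (the actual defines live in the ethdev headers):

    /* old names kept as plain aliases of the new ones */
    #define ETH_MQ_RX_RSS          RTE_ETH_MQ_RX_RSS
    #define ETH_RSS_IP             RTE_ETH_RSS_IP
    #define DEV_TX_OFFLOAD_TCP_TSO RTE_ETH_TX_OFFLOAD_TCP_TSO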

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Acked-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
Acked-by: Wisam Jaddo <wisamm@nvidia.com>
Acked-by: Rosen Xu <rosen.xu@intel.com>
Acked-by: Chenbo Xia <chenbo.xia@intel.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Acked-by: Somnath Kotur <somnath.kotur@broadcom.com>
Author: Ferruh Yigit <ferruh.yigit@intel.com>
Date:   2021-10-22 12:03:12 +01:00
Commit: 295968d174 (parent: a136b08c10)
339 changed files with 6583 additions and 6367 deletions


@ -757,11 +757,11 @@ show_port(void)
}
ret = rte_eth_dev_flow_ctrl_get(i, &fc_conf);
if (ret == 0 && fc_conf.mode != RTE_FC_NONE) {
if (ret == 0 && fc_conf.mode != RTE_ETH_FC_NONE) {
printf("\t -- flow control mode %s%s high %u low %u pause %u%s%s\n",
fc_conf.mode == RTE_FC_RX_PAUSE ? "rx " :
fc_conf.mode == RTE_FC_TX_PAUSE ? "tx " :
fc_conf.mode == RTE_FC_FULL ? "full" : "???",
fc_conf.mode == RTE_ETH_FC_RX_PAUSE ? "rx " :
fc_conf.mode == RTE_ETH_FC_TX_PAUSE ? "tx " :
fc_conf.mode == RTE_ETH_FC_FULL ? "full" : "???",
fc_conf.autoneg ? " auto" : "",
fc_conf.high_water,
fc_conf.low_water,


@ -756,13 +756,13 @@ perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
struct test_perf *t = evt_test_priv(test);
struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
.mq_mode = RTE_ETH_MQ_RX_RSS,
.split_hdr_size = 0,
},
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
.rss_hf = ETH_RSS_IP,
.rss_hf = RTE_ETH_RSS_IP,
},
},
};
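
A minimal usage sketch of the same configuration with the renamed
constants (port id and queue counts assumed):

    struct rte_eth_conf conf = {
        .rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
        .rx_adv_conf = {
            .rss_conf = {
                .rss_key = NULL,          /* keep the default key */
                .rss_hf = RTE_ETH_RSS_IP, /* hash on IP fields */
            },
        },
    };
    ret = rte_eth_dev_configure(port_id, 1 /* rxq */, 1 /* txq */, &conf);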


@ -176,12 +176,12 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
struct rte_eth_rxconf rx_conf;
struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
.mq_mode = RTE_ETH_MQ_RX_RSS,
},
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
.rss_hf = ETH_RSS_IP,
.rss_hf = RTE_ETH_RSS_IP,
},
},
};
@ -223,7 +223,7 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
local_port_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_RSS_HASH;
RTE_ETH_RX_OFFLOAD_RSS_HASH;
ret = rte_eth_dev_info_get(i, &dev_info);
if (ret != 0) {
@ -233,9 +233,9 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
}
/* Enable mbuf fast free if PMD has the capability. */
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MBUF_FAST_FREE;
RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
rx_conf = dev_info.default_rxconf;
rx_conf.offloads = port_conf.rxmode.offloads;


@ -5,7 +5,7 @@
#define FLOW_ITEM_MASK(_x) (UINT64_C(1) << _x)
#define FLOW_ACTION_MASK(_x) (UINT64_C(1) << _x)
#define FLOW_ATTR_MASK(_x) (UINT64_C(1) << _x)
#define GET_RSS_HF() (ETH_RSS_IP)
#define GET_RSS_HF() (RTE_ETH_RSS_IP)
/* Configuration */
#define RXQ_NUM 4


@ -70,16 +70,16 @@ struct app_params app = {
static struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
.offloads = DEV_RX_OFFLOAD_CHECKSUM,
.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
},
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
.rss_hf = ETH_RSS_IP,
.rss_hf = RTE_ETH_RSS_IP,
},
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
.mq_mode = RTE_ETH_MQ_TX_NONE,
},
};
@ -178,7 +178,7 @@ app_ports_check_link(void)
RTE_LOG(INFO, USER1, "Port %u %s\n",
port,
link_status_text);
if (link.link_status == ETH_LINK_DOWN)
if (link.link_status == RTE_ETH_LINK_DOWN)
all_ports_up = 0;
}


@ -1478,51 +1478,51 @@ parse_and_check_speed_duplex(char *speedstr, char *duplexstr, uint32_t *speed)
int duplex;
if (!strcmp(duplexstr, "half")) {
duplex = ETH_LINK_HALF_DUPLEX;
duplex = RTE_ETH_LINK_HALF_DUPLEX;
} else if (!strcmp(duplexstr, "full")) {
duplex = ETH_LINK_FULL_DUPLEX;
duplex = RTE_ETH_LINK_FULL_DUPLEX;
} else if (!strcmp(duplexstr, "auto")) {
duplex = ETH_LINK_FULL_DUPLEX;
duplex = RTE_ETH_LINK_FULL_DUPLEX;
} else {
fprintf(stderr, "Unknown duplex parameter\n");
return -1;
}
if (!strcmp(speedstr, "10")) {
*speed = (duplex == ETH_LINK_HALF_DUPLEX) ?
ETH_LINK_SPEED_10M_HD : ETH_LINK_SPEED_10M;
*speed = (duplex == RTE_ETH_LINK_HALF_DUPLEX) ?
RTE_ETH_LINK_SPEED_10M_HD : RTE_ETH_LINK_SPEED_10M;
} else if (!strcmp(speedstr, "100")) {
*speed = (duplex == ETH_LINK_HALF_DUPLEX) ?
ETH_LINK_SPEED_100M_HD : ETH_LINK_SPEED_100M;
*speed = (duplex == RTE_ETH_LINK_HALF_DUPLEX) ?
RTE_ETH_LINK_SPEED_100M_HD : RTE_ETH_LINK_SPEED_100M;
} else {
if (duplex != ETH_LINK_FULL_DUPLEX) {
if (duplex != RTE_ETH_LINK_FULL_DUPLEX) {
fprintf(stderr, "Invalid speed/duplex parameters\n");
return -1;
}
if (!strcmp(speedstr, "1000")) {
*speed = ETH_LINK_SPEED_1G;
*speed = RTE_ETH_LINK_SPEED_1G;
} else if (!strcmp(speedstr, "10000")) {
*speed = ETH_LINK_SPEED_10G;
*speed = RTE_ETH_LINK_SPEED_10G;
} else if (!strcmp(speedstr, "25000")) {
*speed = ETH_LINK_SPEED_25G;
*speed = RTE_ETH_LINK_SPEED_25G;
} else if (!strcmp(speedstr, "40000")) {
*speed = ETH_LINK_SPEED_40G;
*speed = RTE_ETH_LINK_SPEED_40G;
} else if (!strcmp(speedstr, "50000")) {
*speed = ETH_LINK_SPEED_50G;
*speed = RTE_ETH_LINK_SPEED_50G;
} else if (!strcmp(speedstr, "100000")) {
*speed = ETH_LINK_SPEED_100G;
*speed = RTE_ETH_LINK_SPEED_100G;
} else if (!strcmp(speedstr, "200000")) {
*speed = ETH_LINK_SPEED_200G;
*speed = RTE_ETH_LINK_SPEED_200G;
} else if (!strcmp(speedstr, "auto")) {
*speed = ETH_LINK_SPEED_AUTONEG;
*speed = RTE_ETH_LINK_SPEED_AUTONEG;
} else {
fprintf(stderr, "Unknown speed parameter\n");
return -1;
}
}
if (*speed != ETH_LINK_SPEED_AUTONEG)
*speed |= ETH_LINK_SPEED_FIXED;
if (*speed != RTE_ETH_LINK_SPEED_AUTONEG)
*speed |= RTE_ETH_LINK_SPEED_FIXED;
return 0;
}
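
A worked example of the encoding above: fixed 100 Mbps half-duplex ends up
with both the speed bit and the FIXED bit set, whereas "auto" leaves the
value at RTE_ETH_LINK_SPEED_AUTONEG (0):

    uint32_t speed = RTE_ETH_LINK_SPEED_100M_HD; /* "100" + "half" */
    speed |= RTE_ETH_LINK_SPEED_FIXED;           /* autoneg disabled */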
@ -2166,33 +2166,33 @@ cmd_config_rss_parsed(void *parsed_result,
int ret;
if (!strcmp(res->value, "all"))
rss_conf.rss_hf = ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP |
ETH_RSS_TCP | ETH_RSS_UDP | ETH_RSS_SCTP |
ETH_RSS_L2_PAYLOAD | ETH_RSS_L2TPV3 | ETH_RSS_ESP |
ETH_RSS_AH | ETH_RSS_PFCP | ETH_RSS_GTPU |
ETH_RSS_ECPRI;
rss_conf.rss_hf = RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP |
RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP |
RTE_ETH_RSS_L2_PAYLOAD | RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP |
RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP | RTE_ETH_RSS_GTPU |
RTE_ETH_RSS_ECPRI;
else if (!strcmp(res->value, "eth"))
rss_conf.rss_hf = ETH_RSS_ETH;
rss_conf.rss_hf = RTE_ETH_RSS_ETH;
else if (!strcmp(res->value, "vlan"))
rss_conf.rss_hf = ETH_RSS_VLAN;
rss_conf.rss_hf = RTE_ETH_RSS_VLAN;
else if (!strcmp(res->value, "ip"))
rss_conf.rss_hf = ETH_RSS_IP;
rss_conf.rss_hf = RTE_ETH_RSS_IP;
else if (!strcmp(res->value, "udp"))
rss_conf.rss_hf = ETH_RSS_UDP;
rss_conf.rss_hf = RTE_ETH_RSS_UDP;
else if (!strcmp(res->value, "tcp"))
rss_conf.rss_hf = ETH_RSS_TCP;
rss_conf.rss_hf = RTE_ETH_RSS_TCP;
else if (!strcmp(res->value, "sctp"))
rss_conf.rss_hf = ETH_RSS_SCTP;
rss_conf.rss_hf = RTE_ETH_RSS_SCTP;
else if (!strcmp(res->value, "ether"))
rss_conf.rss_hf = ETH_RSS_L2_PAYLOAD;
rss_conf.rss_hf = RTE_ETH_RSS_L2_PAYLOAD;
else if (!strcmp(res->value, "port"))
rss_conf.rss_hf = ETH_RSS_PORT;
rss_conf.rss_hf = RTE_ETH_RSS_PORT;
else if (!strcmp(res->value, "vxlan"))
rss_conf.rss_hf = ETH_RSS_VXLAN;
rss_conf.rss_hf = RTE_ETH_RSS_VXLAN;
else if (!strcmp(res->value, "geneve"))
rss_conf.rss_hf = ETH_RSS_GENEVE;
rss_conf.rss_hf = RTE_ETH_RSS_GENEVE;
else if (!strcmp(res->value, "nvgre"))
rss_conf.rss_hf = ETH_RSS_NVGRE;
rss_conf.rss_hf = RTE_ETH_RSS_NVGRE;
else if (!strcmp(res->value, "l3-pre32"))
rss_conf.rss_hf = RTE_ETH_RSS_L3_PRE32;
else if (!strcmp(res->value, "l3-pre40"))
@ -2206,46 +2206,46 @@ cmd_config_rss_parsed(void *parsed_result,
else if (!strcmp(res->value, "l3-pre96"))
rss_conf.rss_hf = RTE_ETH_RSS_L3_PRE96;
else if (!strcmp(res->value, "l3-src-only"))
rss_conf.rss_hf = ETH_RSS_L3_SRC_ONLY;
rss_conf.rss_hf = RTE_ETH_RSS_L3_SRC_ONLY;
else if (!strcmp(res->value, "l3-dst-only"))
rss_conf.rss_hf = ETH_RSS_L3_DST_ONLY;
rss_conf.rss_hf = RTE_ETH_RSS_L3_DST_ONLY;
else if (!strcmp(res->value, "l4-src-only"))
rss_conf.rss_hf = ETH_RSS_L4_SRC_ONLY;
rss_conf.rss_hf = RTE_ETH_RSS_L4_SRC_ONLY;
else if (!strcmp(res->value, "l4-dst-only"))
rss_conf.rss_hf = ETH_RSS_L4_DST_ONLY;
rss_conf.rss_hf = RTE_ETH_RSS_L4_DST_ONLY;
else if (!strcmp(res->value, "l2-src-only"))
rss_conf.rss_hf = ETH_RSS_L2_SRC_ONLY;
rss_conf.rss_hf = RTE_ETH_RSS_L2_SRC_ONLY;
else if (!strcmp(res->value, "l2-dst-only"))
rss_conf.rss_hf = ETH_RSS_L2_DST_ONLY;
rss_conf.rss_hf = RTE_ETH_RSS_L2_DST_ONLY;
else if (!strcmp(res->value, "l2tpv3"))
rss_conf.rss_hf = ETH_RSS_L2TPV3;
rss_conf.rss_hf = RTE_ETH_RSS_L2TPV3;
else if (!strcmp(res->value, "esp"))
rss_conf.rss_hf = ETH_RSS_ESP;
rss_conf.rss_hf = RTE_ETH_RSS_ESP;
else if (!strcmp(res->value, "ah"))
rss_conf.rss_hf = ETH_RSS_AH;
rss_conf.rss_hf = RTE_ETH_RSS_AH;
else if (!strcmp(res->value, "pfcp"))
rss_conf.rss_hf = ETH_RSS_PFCP;
rss_conf.rss_hf = RTE_ETH_RSS_PFCP;
else if (!strcmp(res->value, "pppoe"))
rss_conf.rss_hf = ETH_RSS_PPPOE;
rss_conf.rss_hf = RTE_ETH_RSS_PPPOE;
else if (!strcmp(res->value, "gtpu"))
rss_conf.rss_hf = ETH_RSS_GTPU;
rss_conf.rss_hf = RTE_ETH_RSS_GTPU;
else if (!strcmp(res->value, "ecpri"))
rss_conf.rss_hf = ETH_RSS_ECPRI;
rss_conf.rss_hf = RTE_ETH_RSS_ECPRI;
else if (!strcmp(res->value, "mpls"))
rss_conf.rss_hf = ETH_RSS_MPLS;
rss_conf.rss_hf = RTE_ETH_RSS_MPLS;
else if (!strcmp(res->value, "ipv4-chksum"))
rss_conf.rss_hf = ETH_RSS_IPV4_CHKSUM;
rss_conf.rss_hf = RTE_ETH_RSS_IPV4_CHKSUM;
else if (!strcmp(res->value, "none"))
rss_conf.rss_hf = 0;
else if (!strcmp(res->value, "level-default")) {
rss_hf &= (~ETH_RSS_LEVEL_MASK);
rss_conf.rss_hf = (rss_hf | ETH_RSS_LEVEL_PMD_DEFAULT);
rss_hf &= (~RTE_ETH_RSS_LEVEL_MASK);
rss_conf.rss_hf = (rss_hf | RTE_ETH_RSS_LEVEL_PMD_DEFAULT);
} else if (!strcmp(res->value, "level-outer")) {
rss_hf &= (~ETH_RSS_LEVEL_MASK);
rss_conf.rss_hf = (rss_hf | ETH_RSS_LEVEL_OUTERMOST);
rss_hf &= (~RTE_ETH_RSS_LEVEL_MASK);
rss_conf.rss_hf = (rss_hf | RTE_ETH_RSS_LEVEL_OUTERMOST);
} else if (!strcmp(res->value, "level-inner")) {
rss_hf &= (~ETH_RSS_LEVEL_MASK);
rss_conf.rss_hf = (rss_hf | ETH_RSS_LEVEL_INNERMOST);
rss_hf &= (~RTE_ETH_RSS_LEVEL_MASK);
rss_conf.rss_hf = (rss_hf | RTE_ETH_RSS_LEVEL_INNERMOST);
} else if (!strcmp(res->value, "default"))
use_default = 1;
else if (isdigit(res->value[0]) && atoi(res->value) > 0 &&
@ -2982,8 +2982,8 @@ parse_reta_config(const char *str,
return -1;
}
idx = hash_index / RTE_RETA_GROUP_SIZE;
shift = hash_index % RTE_RETA_GROUP_SIZE;
idx = hash_index / RTE_ETH_RETA_GROUP_SIZE;
shift = hash_index % RTE_ETH_RETA_GROUP_SIZE;
reta_conf[idx].mask |= (1ULL << shift);
reta_conf[idx].reta[shift] = nb_queue;
}
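
To make the group arithmetic above concrete: RTE_ETH_RETA_GROUP_SIZE is 64,
so, e.g., hash index 130 lands in group 2, slot 2 (values assumed for
illustration):

    idx   = 130 / RTE_ETH_RETA_GROUP_SIZE;  /* = 2 */
    shift = 130 % RTE_ETH_RETA_GROUP_SIZE;  /* = 2 */
    reta_conf[idx].mask |= (1ULL << shift); /* mark the entry valid */
    reta_conf[idx].reta[shift] = nb_queue;  /* steer it to the queue */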
@ -3012,10 +3012,10 @@ cmd_set_rss_reta_parsed(void *parsed_result,
} else
printf("The reta size of port %d is %u\n",
res->port_id, dev_info.reta_size);
if (dev_info.reta_size > ETH_RSS_RETA_SIZE_512) {
if (dev_info.reta_size > RTE_ETH_RSS_RETA_SIZE_512) {
fprintf(stderr,
"Currently do not support more than %u entries of redirection table\n",
ETH_RSS_RETA_SIZE_512);
RTE_ETH_RSS_RETA_SIZE_512);
return;
}
@ -3086,8 +3086,8 @@ showport_parse_reta_config(struct rte_eth_rss_reta_entry64 *conf,
char *end;
char *str_fld[8];
uint16_t i;
uint16_t num = (nb_entries + RTE_RETA_GROUP_SIZE - 1) /
RTE_RETA_GROUP_SIZE;
uint16_t num = (nb_entries + RTE_ETH_RETA_GROUP_SIZE - 1) /
RTE_ETH_RETA_GROUP_SIZE;
int ret;
p = strchr(p0, '(');
@ -3132,7 +3132,7 @@ cmd_showport_reta_parsed(void *parsed_result,
if (ret != 0)
return;
max_reta_size = RTE_MIN(dev_info.reta_size, ETH_RSS_RETA_SIZE_512);
max_reta_size = RTE_MIN(dev_info.reta_size, RTE_ETH_RSS_RETA_SIZE_512);
if (res->size == 0 || res->size > max_reta_size) {
fprintf(stderr, "Invalid redirection table size: %u (1-%u)\n",
res->size, max_reta_size);
@ -3272,7 +3272,7 @@ cmd_config_dcb_parsed(void *parsed_result,
return;
}
if ((res->num_tcs != ETH_4_TCS) && (res->num_tcs != ETH_8_TCS)) {
if ((res->num_tcs != RTE_ETH_4_TCS) && (res->num_tcs != RTE_ETH_8_TCS)) {
fprintf(stderr,
"The invalid number of traffic class, only 4 or 8 allowed.\n");
return;
@ -4276,9 +4276,9 @@ cmd_vlan_tpid_parsed(void *parsed_result,
enum rte_vlan_type vlan_type;
if (!strcmp(res->vlan_type, "inner"))
vlan_type = ETH_VLAN_TYPE_INNER;
vlan_type = RTE_ETH_VLAN_TYPE_INNER;
else if (!strcmp(res->vlan_type, "outer"))
vlan_type = ETH_VLAN_TYPE_OUTER;
vlan_type = RTE_ETH_VLAN_TYPE_OUTER;
else {
fprintf(stderr, "Unknown vlan type\n");
return;
@ -4615,55 +4615,55 @@ csum_show(int port_id)
printf("Parse tunnel is %s\n",
(ports[port_id].parse_tunnel) ? "on" : "off");
printf("IP checksum offload is %s\n",
(tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) ? "hw" : "sw");
(tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) ? "hw" : "sw");
printf("UDP checksum offload is %s\n",
(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ? "hw" : "sw");
(tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) ? "hw" : "sw");
printf("TCP checksum offload is %s\n",
(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ? "hw" : "sw");
(tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ? "hw" : "sw");
printf("SCTP checksum offload is %s\n",
(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) ? "hw" : "sw");
(tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) ? "hw" : "sw");
printf("Outer-Ip checksum offload is %s\n",
(tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ? "hw" : "sw");
(tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ? "hw" : "sw");
printf("Outer-Udp checksum offload is %s\n",
(tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) ? "hw" : "sw");
(tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) ? "hw" : "sw");
/* display warnings if configuration is not supported by the NIC */
ret = eth_dev_info_get_print_err(port_id, &dev_info);
if (ret != 0)
return;
if ((tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) &&
(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) == 0) {
if ((tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) &&
(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) == 0) {
fprintf(stderr,
"Warning: hardware IP checksum enabled but not supported by port %d\n",
port_id);
}
if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) &&
(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) == 0) {
if ((tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) &&
(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) == 0) {
fprintf(stderr,
"Warning: hardware UDP checksum enabled but not supported by port %d\n",
port_id);
}
if ((tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) &&
(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) {
if ((tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) &&
(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) == 0) {
fprintf(stderr,
"Warning: hardware TCP checksum enabled but not supported by port %d\n",
port_id);
}
if ((tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) == 0) {
if ((tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) == 0) {
fprintf(stderr,
"Warning: hardware SCTP checksum enabled but not supported by port %d\n",
port_id);
}
if ((tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &&
(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) == 0) {
if ((tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) &&
(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) == 0) {
fprintf(stderr,
"Warning: hardware outer IP checksum enabled but not supported by port %d\n",
port_id);
}
if ((tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) &&
(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
if ((tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) &&
(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
== 0) {
fprintf(stderr,
"Warning: hardware outer UDP checksum enabled but not supported by port %d\n",
@ -4713,8 +4713,8 @@ cmd_csum_parsed(void *parsed_result,
if (!strcmp(res->proto, "ip")) {
if (hw == 0 || (dev_info.tx_offload_capa &
DEV_TX_OFFLOAD_IPV4_CKSUM)) {
csum_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) {
csum_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
} else {
fprintf(stderr,
"IP checksum offload is not supported by port %u\n",
@ -4722,8 +4722,8 @@ cmd_csum_parsed(void *parsed_result,
}
} else if (!strcmp(res->proto, "udp")) {
if (hw == 0 || (dev_info.tx_offload_capa &
DEV_TX_OFFLOAD_UDP_CKSUM)) {
csum_offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
csum_offloads |= RTE_ETH_TX_OFFLOAD_UDP_CKSUM;
} else {
fprintf(stderr,
"UDP checksum offload is not supported by port %u\n",
@ -4731,8 +4731,8 @@ cmd_csum_parsed(void *parsed_result,
}
} else if (!strcmp(res->proto, "tcp")) {
if (hw == 0 || (dev_info.tx_offload_capa &
DEV_TX_OFFLOAD_TCP_CKSUM)) {
csum_offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) {
csum_offloads |= RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
} else {
fprintf(stderr,
"TCP checksum offload is not supported by port %u\n",
@ -4740,8 +4740,8 @@ cmd_csum_parsed(void *parsed_result,
}
} else if (!strcmp(res->proto, "sctp")) {
if (hw == 0 || (dev_info.tx_offload_capa &
DEV_TX_OFFLOAD_SCTP_CKSUM)) {
csum_offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)) {
csum_offloads |= RTE_ETH_TX_OFFLOAD_SCTP_CKSUM;
} else {
fprintf(stderr,
"SCTP checksum offload is not supported by port %u\n",
@ -4749,9 +4749,9 @@ cmd_csum_parsed(void *parsed_result,
}
} else if (!strcmp(res->proto, "outer-ip")) {
if (hw == 0 || (dev_info.tx_offload_capa &
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
csum_offloads |=
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
} else {
fprintf(stderr,
"Outer IP checksum offload is not supported by port %u\n",
@ -4759,9 +4759,9 @@ cmd_csum_parsed(void *parsed_result,
}
} else if (!strcmp(res->proto, "outer-udp")) {
if (hw == 0 || (dev_info.tx_offload_capa &
DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
csum_offloads |=
DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
} else {
fprintf(stderr,
"Outer UDP checksum offload is not supported by port %u\n",
@ -4916,7 +4916,7 @@ cmd_tso_set_parsed(void *parsed_result,
return;
if ((ports[res->port_id].tso_segsz != 0) &&
(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0) {
(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) {
fprintf(stderr, "Error: TSO is not supported by port %d\n",
res->port_id);
return;
@ -4924,11 +4924,11 @@ cmd_tso_set_parsed(void *parsed_result,
if (ports[res->port_id].tso_segsz == 0) {
ports[res->port_id].dev_conf.txmode.offloads &=
~DEV_TX_OFFLOAD_TCP_TSO;
~RTE_ETH_TX_OFFLOAD_TCP_TSO;
printf("TSO for non-tunneled packets is disabled\n");
} else {
ports[res->port_id].dev_conf.txmode.offloads |=
DEV_TX_OFFLOAD_TCP_TSO;
RTE_ETH_TX_OFFLOAD_TCP_TSO;
printf("TSO segment size for non-tunneled packets is %d\n",
ports[res->port_id].tso_segsz);
}
@ -4940,7 +4940,7 @@ cmd_tso_set_parsed(void *parsed_result,
return;
if ((ports[res->port_id].tso_segsz != 0) &&
(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0) {
(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) {
fprintf(stderr,
"Warning: TSO enabled but not supported by port %d\n",
res->port_id);
@ -5011,27 +5011,27 @@ check_tunnel_tso_nic_support(portid_t port_id)
if (eth_dev_info_get_print_err(port_id, &dev_info) != 0)
return dev_info;
if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO))
if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO))
fprintf(stderr,
"Warning: VXLAN TUNNEL TSO not supported therefore not enabled for port %d\n",
port_id);
if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO))
if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
fprintf(stderr,
"Warning: GRE TUNNEL TSO not supported therefore not enabled for port %d\n",
port_id);
if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO))
if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO))
fprintf(stderr,
"Warning: IPIP TUNNEL TSO not supported therefore not enabled for port %d\n",
port_id);
if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO))
if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO))
fprintf(stderr,
"Warning: GENEVE TUNNEL TSO not supported therefore not enabled for port %d\n",
port_id);
if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO))
if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IP_TNL_TSO))
fprintf(stderr,
"Warning: IP TUNNEL TSO not supported therefore not enabled for port %d\n",
port_id);
if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO))
if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO))
fprintf(stderr,
"Warning: UDP TUNNEL TSO not supported therefore not enabled for port %d\n",
port_id);
@ -5059,20 +5059,20 @@ cmd_tunnel_tso_set_parsed(void *parsed_result,
dev_info = check_tunnel_tso_nic_support(res->port_id);
if (ports[res->port_id].tunnel_tso_segsz == 0) {
ports[res->port_id].dev_conf.txmode.offloads &=
~(DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GRE_TNL_TSO |
DEV_TX_OFFLOAD_IPIP_TNL_TSO |
DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
DEV_TX_OFFLOAD_IP_TNL_TSO |
DEV_TX_OFFLOAD_UDP_TNL_TSO);
~(RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
printf("TSO for tunneled packets is disabled\n");
} else {
uint64_t tso_offloads = (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GRE_TNL_TSO |
DEV_TX_OFFLOAD_IPIP_TNL_TSO |
DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
DEV_TX_OFFLOAD_IP_TNL_TSO |
DEV_TX_OFFLOAD_UDP_TNL_TSO);
uint64_t tso_offloads = (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
ports[res->port_id].dev_conf.txmode.offloads |=
(tso_offloads & dev_info.tx_offload_capa);
@ -5095,7 +5095,7 @@ cmd_tunnel_tso_set_parsed(void *parsed_result,
fprintf(stderr,
"Warning: csum parse_tunnel must be set so that tunneled packets are recognized\n");
if (!(ports[res->port_id].dev_conf.txmode.offloads &
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM))
fprintf(stderr,
"Warning: csum set outer-ip must be set to hw if outer L3 is IPv4; not necessary for IPv6\n");
}
@ -7227,9 +7227,9 @@ cmd_link_flow_ctrl_show_parsed(void *parsed_result,
return;
}
if (fc_conf.mode == RTE_FC_RX_PAUSE || fc_conf.mode == RTE_FC_FULL)
if (fc_conf.mode == RTE_ETH_FC_RX_PAUSE || fc_conf.mode == RTE_ETH_FC_FULL)
rx_fc_en = true;
if (fc_conf.mode == RTE_FC_TX_PAUSE || fc_conf.mode == RTE_FC_FULL)
if (fc_conf.mode == RTE_ETH_FC_TX_PAUSE || fc_conf.mode == RTE_ETH_FC_FULL)
tx_fc_en = true;
printf("\n%s Flow control infos for port %-2d %s\n",
@ -7507,12 +7507,12 @@ cmd_link_flow_ctrl_set_parsed(void *parsed_result,
/*
* Rx on/off, flow control is enabled/disabled on RX side. This can indicate
* the RTE_FC_TX_PAUSE, Transmit pause frame at the Rx side.
* the RTE_ETH_FC_TX_PAUSE, Transmit pause frame at the Rx side.
* Tx on/off, flow control is enabled/disabled on TX side. This can indicate
* the RTE_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
* the RTE_ETH_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
*/
static enum rte_eth_fc_mode rx_tx_onoff_2_lfc_mode[2][2] = {
{RTE_FC_NONE, RTE_FC_TX_PAUSE}, {RTE_FC_RX_PAUSE, RTE_FC_FULL}
{RTE_ETH_FC_NONE, RTE_ETH_FC_TX_PAUSE}, {RTE_ETH_FC_RX_PAUSE, RTE_ETH_FC_FULL}
};
/* Partial command line, retrieve current configuration */
@ -7525,11 +7525,11 @@ cmd_link_flow_ctrl_set_parsed(void *parsed_result,
return;
}
if ((fc_conf.mode == RTE_FC_RX_PAUSE) ||
(fc_conf.mode == RTE_FC_FULL))
if ((fc_conf.mode == RTE_ETH_FC_RX_PAUSE) ||
(fc_conf.mode == RTE_ETH_FC_FULL))
rx_fc_en = 1;
if ((fc_conf.mode == RTE_FC_TX_PAUSE) ||
(fc_conf.mode == RTE_FC_FULL))
if ((fc_conf.mode == RTE_ETH_FC_TX_PAUSE) ||
(fc_conf.mode == RTE_ETH_FC_FULL))
tx_fc_en = 1;
}
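
The 2x2 table above is indexed by the parsed on/off flags (a sketch; the
index variables follow the surrounding function):

    fc_conf.mode = rx_tx_onoff_2_lfc_mode[rx_fc_en][tx_fc_en];
    /* both off -> RTE_ETH_FC_NONE, both on -> RTE_ETH_FC_FULL */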
@ -7597,12 +7597,12 @@ cmd_priority_flow_ctrl_set_parsed(void *parsed_result,
/*
* Rx on/off, flow control is enabled/disabled on RX side. This can indicate
* the RTE_FC_TX_PAUSE, Transmit pause frame at the Rx side.
* the RTE_ETH_FC_TX_PAUSE, Transmit pause frame at the Rx side.
* Tx on/off, flow control is enabled/disabled on TX side. This can indicate
* the RTE_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
* the RTE_ETH_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
*/
static enum rte_eth_fc_mode rx_tx_onoff_2_pfc_mode[2][2] = {
{RTE_FC_NONE, RTE_FC_TX_PAUSE}, {RTE_FC_RX_PAUSE, RTE_FC_FULL}
{RTE_ETH_FC_NONE, RTE_ETH_FC_TX_PAUSE}, {RTE_ETH_FC_RX_PAUSE, RTE_ETH_FC_FULL}
};
memset(&pfc_conf, 0, sizeof(struct rte_eth_pfc_conf));
@ -9250,13 +9250,13 @@ cmd_set_vf_rxmode_parsed(void *parsed_result,
int is_on = (strcmp(res->on, "on") == 0) ? 1 : 0;
if (!strcmp(res->what,"rxmode")) {
if (!strcmp(res->mode, "AUPE"))
vf_rxmode |= ETH_VMDQ_ACCEPT_UNTAG;
vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_UNTAG;
else if (!strcmp(res->mode, "ROPE"))
vf_rxmode |= ETH_VMDQ_ACCEPT_HASH_UC;
vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_HASH_UC;
else if (!strcmp(res->mode, "BAM"))
vf_rxmode |= ETH_VMDQ_ACCEPT_BROADCAST;
vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_BROADCAST;
else if (!strncmp(res->mode, "MPE",3))
vf_rxmode |= ETH_VMDQ_ACCEPT_MULTICAST;
vf_rxmode |= RTE_ETH_VMDQ_ACCEPT_MULTICAST;
}
RTE_SET_USED(is_on);
@ -9656,7 +9656,7 @@ cmd_tunnel_udp_config_parsed(void *parsed_result,
int ret;
tunnel_udp.udp_port = res->udp_port;
tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN;
if (!strcmp(res->what, "add"))
ret = rte_eth_dev_udp_tunnel_port_add(res->port_id,
@ -9722,13 +9722,13 @@ cmd_cfg_tunnel_udp_port_parsed(void *parsed_result,
tunnel_udp.udp_port = res->udp_port;
if (!strcmp(res->tunnel_type, "vxlan")) {
tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN;
} else if (!strcmp(res->tunnel_type, "geneve")) {
tunnel_udp.prot_type = RTE_TUNNEL_TYPE_GENEVE;
tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_GENEVE;
} else if (!strcmp(res->tunnel_type, "vxlan-gpe")) {
tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN_GPE;
tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN_GPE;
} else if (!strcmp(res->tunnel_type, "ecpri")) {
tunnel_udp.prot_type = RTE_TUNNEL_TYPE_ECPRI;
tunnel_udp.prot_type = RTE_ETH_TUNNEL_TYPE_ECPRI;
} else {
fprintf(stderr, "Invalid tunnel type\n");
return;
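
Usage sketch for the renamed tunnel-type enumerators (port id assumed;
4789 is the IANA-assigned VXLAN port):

    struct rte_eth_udp_tunnel tunnel_udp = {
        .udp_port = 4789,
        .prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
    };
    ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel_udp);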
@ -11859,7 +11859,7 @@ cmd_set_macsec_offload_on_parsed(
if (ret != 0)
return;
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT) {
if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT) {
#ifdef RTE_NET_IXGBE
ret = rte_pmd_ixgbe_macsec_enable(port_id, en, rp);
#endif
@ -11870,7 +11870,7 @@ cmd_set_macsec_offload_on_parsed(
switch (ret) {
case 0:
ports[port_id].dev_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MACSEC_INSERT;
RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
cmd_reconfig_device_queue(port_id, 1, 1);
break;
case -ENODEV:
@ -11956,7 +11956,7 @@ cmd_set_macsec_offload_off_parsed(
if (ret != 0)
return;
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT) {
if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT) {
#ifdef RTE_NET_IXGBE
ret = rte_pmd_ixgbe_macsec_disable(port_id);
#endif
@ -11964,7 +11964,7 @@ cmd_set_macsec_offload_off_parsed(
switch (ret) {
case 0:
ports[port_id].dev_conf.txmode.offloads &=
~DEV_TX_OFFLOAD_MACSEC_INSERT;
~RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
cmd_reconfig_device_queue(port_id, 1, 1);
break;
case -ENODEV:


@ -86,62 +86,62 @@ static const struct {
};
const struct rss_type_info rss_type_table[] = {
{ "all", ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP | ETH_RSS_TCP |
ETH_RSS_UDP | ETH_RSS_SCTP | ETH_RSS_L2_PAYLOAD |
ETH_RSS_L2TPV3 | ETH_RSS_ESP | ETH_RSS_AH | ETH_RSS_PFCP |
ETH_RSS_GTPU | ETH_RSS_ECPRI | ETH_RSS_MPLS},
{ "all", RTE_ETH_RSS_ETH | RTE_ETH_RSS_VLAN | RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP |
RTE_ETH_RSS_UDP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_L2_PAYLOAD |
RTE_ETH_RSS_L2TPV3 | RTE_ETH_RSS_ESP | RTE_ETH_RSS_AH | RTE_ETH_RSS_PFCP |
RTE_ETH_RSS_GTPU | RTE_ETH_RSS_ECPRI | RTE_ETH_RSS_MPLS},
{ "none", 0 },
{ "eth", ETH_RSS_ETH },
{ "l2-src-only", ETH_RSS_L2_SRC_ONLY },
{ "l2-dst-only", ETH_RSS_L2_DST_ONLY },
{ "vlan", ETH_RSS_VLAN },
{ "s-vlan", ETH_RSS_S_VLAN },
{ "c-vlan", ETH_RSS_C_VLAN },
{ "ipv4", ETH_RSS_IPV4 },
{ "ipv4-frag", ETH_RSS_FRAG_IPV4 },
{ "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
{ "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
{ "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
{ "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
{ "ipv6", ETH_RSS_IPV6 },
{ "ipv6-frag", ETH_RSS_FRAG_IPV6 },
{ "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
{ "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
{ "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
{ "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
{ "l2-payload", ETH_RSS_L2_PAYLOAD },
{ "ipv6-ex", ETH_RSS_IPV6_EX },
{ "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
{ "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
{ "port", ETH_RSS_PORT },
{ "vxlan", ETH_RSS_VXLAN },
{ "geneve", ETH_RSS_GENEVE },
{ "nvgre", ETH_RSS_NVGRE },
{ "ip", ETH_RSS_IP },
{ "udp", ETH_RSS_UDP },
{ "tcp", ETH_RSS_TCP },
{ "sctp", ETH_RSS_SCTP },
{ "tunnel", ETH_RSS_TUNNEL },
{ "eth", RTE_ETH_RSS_ETH },
{ "l2-src-only", RTE_ETH_RSS_L2_SRC_ONLY },
{ "l2-dst-only", RTE_ETH_RSS_L2_DST_ONLY },
{ "vlan", RTE_ETH_RSS_VLAN },
{ "s-vlan", RTE_ETH_RSS_S_VLAN },
{ "c-vlan", RTE_ETH_RSS_C_VLAN },
{ "ipv4", RTE_ETH_RSS_IPV4 },
{ "ipv4-frag", RTE_ETH_RSS_FRAG_IPV4 },
{ "ipv4-tcp", RTE_ETH_RSS_NONFRAG_IPV4_TCP },
{ "ipv4-udp", RTE_ETH_RSS_NONFRAG_IPV4_UDP },
{ "ipv4-sctp", RTE_ETH_RSS_NONFRAG_IPV4_SCTP },
{ "ipv4-other", RTE_ETH_RSS_NONFRAG_IPV4_OTHER },
{ "ipv6", RTE_ETH_RSS_IPV6 },
{ "ipv6-frag", RTE_ETH_RSS_FRAG_IPV6 },
{ "ipv6-tcp", RTE_ETH_RSS_NONFRAG_IPV6_TCP },
{ "ipv6-udp", RTE_ETH_RSS_NONFRAG_IPV6_UDP },
{ "ipv6-sctp", RTE_ETH_RSS_NONFRAG_IPV6_SCTP },
{ "ipv6-other", RTE_ETH_RSS_NONFRAG_IPV6_OTHER },
{ "l2-payload", RTE_ETH_RSS_L2_PAYLOAD },
{ "ipv6-ex", RTE_ETH_RSS_IPV6_EX },
{ "ipv6-tcp-ex", RTE_ETH_RSS_IPV6_TCP_EX },
{ "ipv6-udp-ex", RTE_ETH_RSS_IPV6_UDP_EX },
{ "port", RTE_ETH_RSS_PORT },
{ "vxlan", RTE_ETH_RSS_VXLAN },
{ "geneve", RTE_ETH_RSS_GENEVE },
{ "nvgre", RTE_ETH_RSS_NVGRE },
{ "ip", RTE_ETH_RSS_IP },
{ "udp", RTE_ETH_RSS_UDP },
{ "tcp", RTE_ETH_RSS_TCP },
{ "sctp", RTE_ETH_RSS_SCTP },
{ "tunnel", RTE_ETH_RSS_TUNNEL },
{ "l3-pre32", RTE_ETH_RSS_L3_PRE32 },
{ "l3-pre40", RTE_ETH_RSS_L3_PRE40 },
{ "l3-pre48", RTE_ETH_RSS_L3_PRE48 },
{ "l3-pre56", RTE_ETH_RSS_L3_PRE56 },
{ "l3-pre64", RTE_ETH_RSS_L3_PRE64 },
{ "l3-pre96", RTE_ETH_RSS_L3_PRE96 },
{ "l3-src-only", ETH_RSS_L3_SRC_ONLY },
{ "l3-dst-only", ETH_RSS_L3_DST_ONLY },
{ "l4-src-only", ETH_RSS_L4_SRC_ONLY },
{ "l4-dst-only", ETH_RSS_L4_DST_ONLY },
{ "esp", ETH_RSS_ESP },
{ "ah", ETH_RSS_AH },
{ "l2tpv3", ETH_RSS_L2TPV3 },
{ "pfcp", ETH_RSS_PFCP },
{ "pppoe", ETH_RSS_PPPOE },
{ "gtpu", ETH_RSS_GTPU },
{ "ecpri", ETH_RSS_ECPRI },
{ "mpls", ETH_RSS_MPLS },
{ "ipv4-chksum", ETH_RSS_IPV4_CHKSUM },
{ "l4-chksum", ETH_RSS_L4_CHKSUM },
{ "l3-src-only", RTE_ETH_RSS_L3_SRC_ONLY },
{ "l3-dst-only", RTE_ETH_RSS_L3_DST_ONLY },
{ "l4-src-only", RTE_ETH_RSS_L4_SRC_ONLY },
{ "l4-dst-only", RTE_ETH_RSS_L4_DST_ONLY },
{ "esp", RTE_ETH_RSS_ESP },
{ "ah", RTE_ETH_RSS_AH },
{ "l2tpv3", RTE_ETH_RSS_L2TPV3 },
{ "pfcp", RTE_ETH_RSS_PFCP },
{ "pppoe", RTE_ETH_RSS_PPPOE },
{ "gtpu", RTE_ETH_RSS_GTPU },
{ "ecpri", RTE_ETH_RSS_ECPRI },
{ "mpls", RTE_ETH_RSS_MPLS },
{ "ipv4-chksum", RTE_ETH_RSS_IPV4_CHKSUM },
{ "l4-chksum", RTE_ETH_RSS_L4_CHKSUM },
{ NULL, 0 },
};
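
A lookup sketch against the table above (the str/rss_type field names are
assumed from the surrounding testpmd code):

    uint64_t rss_hf = 0;
    for (i = 0; rss_type_table[i].str != NULL; i++)
        if (strcmp(rss_type_table[i].str, "ipv4-udp") == 0)
            rss_hf = rss_type_table[i].rss_type;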
@ -538,39 +538,39 @@ static void
device_infos_display_speeds(uint32_t speed_capa)
{
printf("\n\tDevice speed capability:");
if (speed_capa == ETH_LINK_SPEED_AUTONEG)
if (speed_capa == RTE_ETH_LINK_SPEED_AUTONEG)
printf(" Autonegotiate (all speeds)");
if (speed_capa & ETH_LINK_SPEED_FIXED)
if (speed_capa & RTE_ETH_LINK_SPEED_FIXED)
printf(" Disable autonegotiate (fixed speed) ");
if (speed_capa & ETH_LINK_SPEED_10M_HD)
if (speed_capa & RTE_ETH_LINK_SPEED_10M_HD)
printf(" 10 Mbps half-duplex ");
if (speed_capa & ETH_LINK_SPEED_10M)
if (speed_capa & RTE_ETH_LINK_SPEED_10M)
printf(" 10 Mbps full-duplex ");
if (speed_capa & ETH_LINK_SPEED_100M_HD)
if (speed_capa & RTE_ETH_LINK_SPEED_100M_HD)
printf(" 100 Mbps half-duplex ");
if (speed_capa & ETH_LINK_SPEED_100M)
if (speed_capa & RTE_ETH_LINK_SPEED_100M)
printf(" 100 Mbps full-duplex ");
if (speed_capa & ETH_LINK_SPEED_1G)
if (speed_capa & RTE_ETH_LINK_SPEED_1G)
printf(" 1 Gbps ");
if (speed_capa & ETH_LINK_SPEED_2_5G)
if (speed_capa & RTE_ETH_LINK_SPEED_2_5G)
printf(" 2.5 Gbps ");
if (speed_capa & ETH_LINK_SPEED_5G)
if (speed_capa & RTE_ETH_LINK_SPEED_5G)
printf(" 5 Gbps ");
if (speed_capa & ETH_LINK_SPEED_10G)
if (speed_capa & RTE_ETH_LINK_SPEED_10G)
printf(" 10 Gbps ");
if (speed_capa & ETH_LINK_SPEED_20G)
if (speed_capa & RTE_ETH_LINK_SPEED_20G)
printf(" 20 Gbps ");
if (speed_capa & ETH_LINK_SPEED_25G)
if (speed_capa & RTE_ETH_LINK_SPEED_25G)
printf(" 25 Gbps ");
if (speed_capa & ETH_LINK_SPEED_40G)
if (speed_capa & RTE_ETH_LINK_SPEED_40G)
printf(" 40 Gbps ");
if (speed_capa & ETH_LINK_SPEED_50G)
if (speed_capa & RTE_ETH_LINK_SPEED_50G)
printf(" 50 Gbps ");
if (speed_capa & ETH_LINK_SPEED_56G)
if (speed_capa & RTE_ETH_LINK_SPEED_56G)
printf(" 56 Gbps ");
if (speed_capa & ETH_LINK_SPEED_100G)
if (speed_capa & RTE_ETH_LINK_SPEED_100G)
printf(" 100 Gbps ");
if (speed_capa & ETH_LINK_SPEED_200G)
if (speed_capa & RTE_ETH_LINK_SPEED_200G)
printf(" 200 Gbps ");
}
@ -723,9 +723,9 @@ port_infos_display(portid_t port_id)
printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
printf("Link speed: %s\n", rte_eth_link_speed_to_str(link.link_speed));
printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
printf("Link duplex: %s\n", (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex"));
printf("Autoneg status: %s\n", (link.link_autoneg == ETH_LINK_AUTONEG) ?
printf("Autoneg status: %s\n", (link.link_autoneg == RTE_ETH_LINK_AUTONEG) ?
("On") : ("Off"));
if (!rte_eth_dev_get_mtu(port_id, &mtu))
@ -743,22 +743,22 @@ port_infos_display(portid_t port_id)
vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
if (vlan_offload >= 0){
printf("VLAN offload: \n");
if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
if (vlan_offload & RTE_ETH_VLAN_STRIP_OFFLOAD)
printf(" strip on, ");
else
printf(" strip off, ");
if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
if (vlan_offload & RTE_ETH_VLAN_FILTER_OFFLOAD)
printf("filter on, ");
else
printf("filter off, ");
if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
if (vlan_offload & RTE_ETH_VLAN_EXTEND_OFFLOAD)
printf("extend on, ");
else
printf("extend off, ");
if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD)
if (vlan_offload & RTE_ETH_QINQ_STRIP_OFFLOAD)
printf("qinq strip on\n");
else
printf("qinq strip off\n");
@ -2953,8 +2953,8 @@ port_rss_reta_info(portid_t port_id,
}
for (i = 0; i < nb_entries; i++) {
idx = i / RTE_RETA_GROUP_SIZE;
shift = i % RTE_RETA_GROUP_SIZE;
idx = i / RTE_ETH_RETA_GROUP_SIZE;
shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (!(reta_conf[idx].mask & (1ULL << shift)))
continue;
printf("RSS RETA configuration: hash index=%u, queue=%u\n",
@ -3427,7 +3427,7 @@ dcb_fwd_config_setup(void)
for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
fwd_lcores[lc_id]->stream_nb = 0;
fwd_lcores[lc_id]->stream_idx = sm_id;
for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
for (i = 0; i < RTE_ETH_MAX_VMDQ_POOL; i++) {
/* if the nb_queue is zero, means this tc is
* not enabled on the POOL
*/
@ -4490,11 +4490,11 @@ vlan_extend_set(portid_t port_id, int on)
vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
if (on) {
vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
vlan_offload |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
} else {
vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
vlan_offload &= ~RTE_ETH_VLAN_EXTEND_OFFLOAD;
port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
}
diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
@ -4520,11 +4520,11 @@ rx_vlan_strip_set(portid_t port_id, int on)
vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
if (on) {
vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD;
port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
} else {
vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
vlan_offload &= ~RTE_ETH_VLAN_STRIP_OFFLOAD;
port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
}
diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
@ -4565,11 +4565,11 @@ rx_vlan_filter_set(portid_t port_id, int on)
vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
if (on) {
vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
vlan_offload |= RTE_ETH_VLAN_FILTER_OFFLOAD;
port_rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
} else {
vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
vlan_offload &= ~RTE_ETH_VLAN_FILTER_OFFLOAD;
port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
}
diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
@ -4595,11 +4595,11 @@ rx_vlan_qinq_strip_set(portid_t port_id, int on)
vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
if (on) {
vlan_offload |= ETH_QINQ_STRIP_OFFLOAD;
port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
vlan_offload |= RTE_ETH_QINQ_STRIP_OFFLOAD;
port_rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
} else {
vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD;
port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
vlan_offload &= ~RTE_ETH_QINQ_STRIP_OFFLOAD;
port_rx_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
}
diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
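
All four helpers above follow the same get/modify/set pattern; condensed,
for enabling VLAN stripping:

    int vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
    vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD;
    rte_eth_dev_set_vlan_offload(port_id, vlan_offload);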
@ -4669,7 +4669,7 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id)
return;
if (ports[port_id].dev_conf.txmode.offloads &
DEV_TX_OFFLOAD_QINQ_INSERT) {
RTE_ETH_TX_OFFLOAD_QINQ_INSERT) {
fprintf(stderr, "Error, as QinQ has been enabled.\n");
return;
}
@ -4678,7 +4678,7 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id)
if (ret != 0)
return;
if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) {
if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) == 0) {
fprintf(stderr,
"Error: vlan insert is not supported by port %d\n",
port_id);
@ -4686,7 +4686,7 @@ tx_vlan_set(portid_t port_id, uint16_t vlan_id)
}
tx_vlan_reset(port_id);
ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
ports[port_id].dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
ports[port_id].tx_vlan_id = vlan_id;
}
@ -4705,7 +4705,7 @@ tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
if (ret != 0)
return;
if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) {
if ((dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) == 0) {
fprintf(stderr,
"Error: qinq insert not supported by port %d\n",
port_id);
@ -4713,8 +4713,8 @@ tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
}
tx_vlan_reset(port_id);
ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT);
ports[port_id].dev_conf.txmode.offloads |= (RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
ports[port_id].tx_vlan_id = vlan_id;
ports[port_id].tx_vlan_id_outer = vlan_id_outer;
}
@ -4723,8 +4723,8 @@ void
tx_vlan_reset(portid_t port_id)
{
ports[port_id].dev_conf.txmode.offloads &=
~(DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT);
~(RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
RTE_ETH_TX_OFFLOAD_QINQ_INSERT);
ports[port_id].tx_vlan_id = 0;
ports[port_id].tx_vlan_id_outer = 0;
}
@ -5130,7 +5130,7 @@ set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
ret = eth_link_get_nowait_print_err(port_id, &link);
if (ret < 0)
return 1;
if (link.link_speed != ETH_SPEED_NUM_UNKNOWN &&
if (link.link_speed != RTE_ETH_SPEED_NUM_UNKNOWN &&
rate > link.link_speed) {
fprintf(stderr,
"Invalid rate value:%u bigger than link speed: %u\n",


@ -485,7 +485,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
if (info->l4_proto == IPPROTO_TCP && tso_segsz) {
ol_flags |= PKT_TX_IP_CKSUM;
} else {
if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) {
if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) {
ol_flags |= PKT_TX_IP_CKSUM;
} else {
ipv4_hdr->hdr_checksum = 0;
@ -502,7 +502,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
udp_hdr = (struct rte_udp_hdr *)((char *)l3_hdr + info->l3_len);
/* do not recalculate udp cksum if it was 0 */
if (udp_hdr->dgram_cksum != 0) {
if (tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) {
if (tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) {
ol_flags |= PKT_TX_UDP_CKSUM;
} else {
udp_hdr->dgram_cksum = 0;
@ -517,7 +517,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
tcp_hdr = (struct rte_tcp_hdr *)((char *)l3_hdr + info->l3_len);
if (tso_segsz)
ol_flags |= PKT_TX_TCP_SEG;
else if (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) {
else if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) {
ol_flags |= PKT_TX_TCP_CKSUM;
} else {
tcp_hdr->cksum = 0;
@ -532,7 +532,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
((char *)l3_hdr + info->l3_len);
/* sctp payload must be a multiple of 4 to be
* offloaded */
if ((tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
if ((tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
((ipv4_hdr->total_length & 0x3) == 0)) {
ol_flags |= PKT_TX_SCTP_CKSUM;
} else {
@ -559,7 +559,7 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
ipv4_hdr->hdr_checksum = 0;
ol_flags |= PKT_TX_OUTER_IPV4;
if (tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
if (tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
ol_flags |= PKT_TX_OUTER_IP_CKSUM;
else
ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
@ -576,7 +576,7 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
ol_flags |= PKT_TX_TCP_SEG;
/* Skip SW outer UDP checksum generation if HW supports it */
if (tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) {
if (tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) {
if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPV4))
udp_hdr->dgram_cksum
= rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
@ -959,9 +959,9 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
if (info.is_tunnel == 1) {
if (info.tunnel_tso_segsz ||
(tx_offloads &
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
(tx_offloads &
DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
m->outer_l2_len = info.outer_l2_len;
m->outer_l3_len = info.outer_l3_len;
m->l2_len = info.l2_len;
@ -1022,19 +1022,19 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
rte_be_to_cpu_16(info.outer_ethertype),
info.outer_l3_len);
/* dump tx packet info */
if ((tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM |
DEV_TX_OFFLOAD_SCTP_CKSUM)) ||
if ((tx_offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)) ||
info.tso_segsz != 0)
printf("tx: m->l2_len=%d m->l3_len=%d "
"m->l4_len=%d\n",
m->l2_len, m->l3_len, m->l4_len);
if (info.is_tunnel == 1) {
if ((tx_offloads &
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
(tx_offloads &
DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) ||
RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) ||
(tx_ol_flags & PKT_TX_OUTER_IPV6))
printf("tx: m->outer_l2_len=%d "
"m->outer_l3_len=%d\n",


@ -99,11 +99,11 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
vlan_tci_outer = ports[fs->tx_port].tx_vlan_id_outer;
tx_offloads = ports[fs->tx_port].dev_conf.txmode.offloads;
if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
if (tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
ol_flags |= PKT_TX_VLAN_PKT;
if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
ol_flags |= PKT_TX_QINQ_PKT;
if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
if (tx_offloads & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
ol_flags |= PKT_TX_MACSEC;
for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {


@ -72,11 +72,11 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
fs->rx_packets += nb_rx;
txp = &ports[fs->tx_port];
tx_offloads = txp->dev_conf.txmode.offloads;
if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
if (tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
ol_flags = PKT_TX_VLAN_PKT;
if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
ol_flags |= PKT_TX_QINQ_PKT;
if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
if (tx_offloads & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
ol_flags |= PKT_TX_MACSEC;
for (i = 0; i < nb_rx; i++) {
if (likely(i < nb_rx - 1))


@ -10,11 +10,11 @@ ol_flags_init(uint64_t tx_offload)
{
uint64_t ol_flags = 0;
ol_flags |= (tx_offload & DEV_TX_OFFLOAD_VLAN_INSERT) ?
ol_flags |= (tx_offload & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) ?
PKT_TX_VLAN : 0;
ol_flags |= (tx_offload & DEV_TX_OFFLOAD_QINQ_INSERT) ?
ol_flags |= (tx_offload & RTE_ETH_TX_OFFLOAD_QINQ_INSERT) ?
PKT_TX_QINQ : 0;
ol_flags |= (tx_offload & DEV_TX_OFFLOAD_MACSEC_INSERT) ?
ol_flags |= (tx_offload & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT) ?
PKT_TX_MACSEC : 0;
return ol_flags;


@ -547,29 +547,29 @@ parse_xstats_list(const char *in_str, struct rte_eth_xstat_name **xstats,
static int
parse_link_speed(int n)
{
uint32_t speed = ETH_LINK_SPEED_FIXED;
uint32_t speed = RTE_ETH_LINK_SPEED_FIXED;
switch (n) {
case 1000:
speed |= ETH_LINK_SPEED_1G;
speed |= RTE_ETH_LINK_SPEED_1G;
break;
case 10000:
speed |= ETH_LINK_SPEED_10G;
speed |= RTE_ETH_LINK_SPEED_10G;
break;
case 25000:
speed |= ETH_LINK_SPEED_25G;
speed |= RTE_ETH_LINK_SPEED_25G;
break;
case 40000:
speed |= ETH_LINK_SPEED_40G;
speed |= RTE_ETH_LINK_SPEED_40G;
break;
case 50000:
speed |= ETH_LINK_SPEED_50G;
speed |= RTE_ETH_LINK_SPEED_50G;
break;
case 100000:
speed |= ETH_LINK_SPEED_100G;
speed |= RTE_ETH_LINK_SPEED_100G;
break;
case 200000:
speed |= ETH_LINK_SPEED_200G;
speed |= RTE_ETH_LINK_SPEED_200G;
break;
case 100:
case 10:
@ -1002,13 +1002,13 @@ launch_args_parse(int argc, char** argv)
if (!strcmp(lgopts[opt_idx].name, "pkt-filter-size")) {
if (!strcmp(optarg, "64K"))
fdir_conf.pballoc =
RTE_FDIR_PBALLOC_64K;
RTE_ETH_FDIR_PBALLOC_64K;
else if (!strcmp(optarg, "128K"))
fdir_conf.pballoc =
RTE_FDIR_PBALLOC_128K;
RTE_ETH_FDIR_PBALLOC_128K;
else if (!strcmp(optarg, "256K"))
fdir_conf.pballoc =
RTE_FDIR_PBALLOC_256K;
RTE_ETH_FDIR_PBALLOC_256K;
else
rte_exit(EXIT_FAILURE, "pkt-filter-size %s invalid -"
" must be: 64K or 128K or 256K\n",
@ -1050,34 +1050,34 @@ launch_args_parse(int argc, char** argv)
}
#endif
if (!strcmp(lgopts[opt_idx].name, "disable-crc-strip"))
rx_offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
rx_offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
if (!strcmp(lgopts[opt_idx].name, "enable-lro"))
rx_offloads |= DEV_RX_OFFLOAD_TCP_LRO;
rx_offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
if (!strcmp(lgopts[opt_idx].name, "enable-scatter"))
rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
if (!strcmp(lgopts[opt_idx].name, "enable-rx-cksum"))
rx_offloads |= DEV_RX_OFFLOAD_CHECKSUM;
rx_offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
if (!strcmp(lgopts[opt_idx].name,
"enable-rx-timestamp"))
rx_offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
rx_offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
if (!strcmp(lgopts[opt_idx].name, "enable-hw-vlan"))
rx_offloads |= DEV_RX_OFFLOAD_VLAN;
rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN;
if (!strcmp(lgopts[opt_idx].name,
"enable-hw-vlan-filter"))
rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
if (!strcmp(lgopts[opt_idx].name,
"enable-hw-vlan-strip"))
rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
if (!strcmp(lgopts[opt_idx].name,
"enable-hw-vlan-extend"))
rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
rx_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
if (!strcmp(lgopts[opt_idx].name,
"enable-hw-qinq-strip"))
rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
rx_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
if (!strcmp(lgopts[opt_idx].name, "enable-drop-en"))
rx_drop_en = 1;
@ -1099,13 +1099,13 @@ launch_args_parse(int argc, char** argv)
if (!strcmp(lgopts[opt_idx].name, "forward-mode"))
set_pkt_forwarding_mode(optarg);
if (!strcmp(lgopts[opt_idx].name, "rss-ip"))
rss_hf = ETH_RSS_IP;
rss_hf = RTE_ETH_RSS_IP;
if (!strcmp(lgopts[opt_idx].name, "rss-udp"))
rss_hf = ETH_RSS_UDP;
rss_hf = RTE_ETH_RSS_UDP;
if (!strcmp(lgopts[opt_idx].name, "rss-level-inner"))
rss_hf |= ETH_RSS_LEVEL_INNERMOST;
rss_hf |= RTE_ETH_RSS_LEVEL_INNERMOST;
if (!strcmp(lgopts[opt_idx].name, "rss-level-outer"))
rss_hf |= ETH_RSS_LEVEL_OUTERMOST;
rss_hf |= RTE_ETH_RSS_LEVEL_OUTERMOST;
if (!strcmp(lgopts[opt_idx].name, "rxq")) {
n = atoi(optarg);
if (n >= 0 && check_nb_rxq((queueid_t)n) == 0)
@ -1495,12 +1495,12 @@ launch_args_parse(int argc, char** argv)
if (!strcmp(lgopts[opt_idx].name, "rx-mq-mode")) {
char *end = NULL;
n = strtoul(optarg, &end, 16);
if (n >= 0 && n <= ETH_MQ_RX_VMDQ_DCB_RSS)
if (n >= 0 && n <= RTE_ETH_MQ_RX_VMDQ_DCB_RSS)
rx_mq_mode = (enum rte_eth_rx_mq_mode)n;
else
rte_exit(EXIT_FAILURE,
"rx-mq-mode must be >= 0 and <= %d\n",
ETH_MQ_RX_VMDQ_DCB_RSS);
RTE_ETH_MQ_RX_VMDQ_DCB_RSS);
}
if (!strcmp(lgopts[opt_idx].name, "record-core-cycles"))
record_core_cycles = 1;


@ -349,7 +349,7 @@ uint64_t noisy_lkup_num_reads_writes;
/*
* Receive Side Scaling (RSS) configuration.
*/
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
uint64_t rss_hf = RTE_ETH_RSS_IP; /* RSS IP by default. */
/*
* Port topology configuration
@ -460,12 +460,12 @@ lcoreid_t latencystats_lcore_id = -1;
struct rte_eth_rxmode rx_mode;
struct rte_eth_txmode tx_mode = {
.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
.offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
};
struct rte_fdir_conf fdir_conf = {
struct rte_eth_fdir_conf fdir_conf = {
.mode = RTE_FDIR_MODE_NONE,
.pballoc = RTE_FDIR_PBALLOC_64K,
.pballoc = RTE_ETH_FDIR_PBALLOC_64K,
.status = RTE_FDIR_REPORT_STATUS,
.mask = {
.vlan_tci_mask = 0xFFEF,
@ -524,7 +524,7 @@ uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
/*
* hexadecimal bitmask of RX mq mode can be enabled.
*/
enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
enum rte_eth_rx_mq_mode rx_mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS;
/*
* Used to set forced link speed
@ -1578,9 +1578,9 @@ init_config_port_offloads(portid_t pid, uint32_t socket_id)
if (ret != 0)
rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");
if (!(port->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
if (!(port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
port->dev_conf.txmode.offloads &=
~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
/* Apply Rx offloads configuration */
for (i = 0; i < port->dev_info.max_rx_queues; i++)
@ -1717,8 +1717,8 @@ init_config(void)
init_port_config();
gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_UDP_TSO;
/*
* Records which Mbuf pool to use by each logical core, if needed.
*/
@ -3466,7 +3466,7 @@ check_all_ports_link_status(uint32_t port_mask)
continue;
}
/* clear all_ports_up flag if any link down */
if (link.link_status == ETH_LINK_DOWN) {
if (link.link_status == RTE_ETH_LINK_DOWN) {
all_ports_up = 0;
break;
}
@ -3769,17 +3769,17 @@ init_port_config(void)
if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) {
port->dev_conf.rxmode.mq_mode =
(enum rte_eth_rx_mq_mode)
(rx_mq_mode & ETH_MQ_RX_RSS);
(rx_mq_mode & RTE_ETH_MQ_RX_RSS);
} else {
port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
port->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
port->dev_conf.rxmode.offloads &=
~DEV_RX_OFFLOAD_RSS_HASH;
~RTE_ETH_RX_OFFLOAD_RSS_HASH;
for (i = 0;
i < port->dev_info.nb_rx_queues;
i++)
port->rx_conf[i].offloads &=
~DEV_RX_OFFLOAD_RSS_HASH;
~RTE_ETH_RX_OFFLOAD_RSS_HASH;
}
}
@ -3867,9 +3867,9 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
vmdq_rx_conf->enable_default_pool = 0;
vmdq_rx_conf->default_pool = 0;
vmdq_rx_conf->nb_queue_pools =
(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
(num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
vmdq_tx_conf->nb_queue_pools =
(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
(num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
@ -3877,7 +3877,7 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
vmdq_rx_conf->pool_map[i].pools =
1 << (i % vmdq_rx_conf->nb_queue_pools);
}
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
}
@ -3885,8 +3885,8 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
/* set DCB mode of RX and TX of multiple queues */
eth_conf->rxmode.mq_mode =
(enum rte_eth_rx_mq_mode)
(rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
(rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB);
eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
} else {
struct rte_eth_dcb_rx_conf *rx_conf =
&eth_conf->rx_adv_conf.dcb_rx_conf;
@ -3902,23 +3902,23 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
rx_conf->nb_tcs = num_tcs;
tx_conf->nb_tcs = num_tcs;
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
rx_conf->dcb_tc[i] = i % num_tcs;
tx_conf->dcb_tc[i] = i % num_tcs;
}
eth_conf->rxmode.mq_mode =
(enum rte_eth_rx_mq_mode)
(rx_mq_mode & ETH_MQ_RX_DCB_RSS);
(rx_mq_mode & RTE_ETH_MQ_RX_DCB_RSS);
eth_conf->rx_adv_conf.rss_conf = rss_conf;
eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_DCB;
}
if (pfc_en)
eth_conf->dcb_capability_en =
ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
RTE_ETH_DCB_PG_SUPPORT | RTE_ETH_DCB_PFC_SUPPORT;
else
eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
eth_conf->dcb_capability_en = RTE_ETH_DCB_PG_SUPPORT;
return 0;
}
@ -3947,7 +3947,7 @@ init_port_dcb_config(portid_t pid,
retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
if (retval < 0)
return retval;
port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
/* re-configure the device . */
retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
@ -3997,7 +3997,7 @@ init_port_dcb_config(portid_t pid,
rxtx_port_config(pid);
/* VLAN filter */
rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
rte_port->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
for (i = 0; i < RTE_DIM(vlan_tags); i++)
rx_vft_set(pid, vlan_tags[i], 1);


@ -493,7 +493,7 @@ extern lcoreid_t bitrate_lcore_id;
extern uint8_t bitrate_enabled;
#endif
extern struct rte_fdir_conf fdir_conf;
extern struct rte_eth_fdir_conf fdir_conf;
extern uint32_t max_rx_pkt_len;


@ -354,11 +354,11 @@ pkt_burst_transmit(struct fwd_stream *fs)
tx_offloads = txp->dev_conf.txmode.offloads;
vlan_tci = txp->tx_vlan_id;
vlan_tci_outer = txp->tx_vlan_id_outer;
if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
if (tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
ol_flags = PKT_TX_VLAN_PKT;
if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
ol_flags |= PKT_TX_QINQ_PKT;
if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
if (tx_offloads & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
ol_flags |= PKT_TX_MACSEC;
/*


@ -14,10 +14,10 @@ test_link_status_up_default(void)
{
int ret = 0;
struct rte_eth_link link_status = {
.link_speed = ETH_SPEED_NUM_2_5G,
.link_status = ETH_LINK_UP,
.link_autoneg = ETH_LINK_AUTONEG,
.link_duplex = ETH_LINK_FULL_DUPLEX
.link_speed = RTE_ETH_SPEED_NUM_2_5G,
.link_status = RTE_ETH_LINK_UP,
.link_autoneg = RTE_ETH_LINK_AUTONEG,
.link_duplex = RTE_ETH_LINK_FULL_DUPLEX
};
char text[RTE_ETH_LINK_MAX_STR_LEN];
@ -27,9 +27,9 @@ test_link_status_up_default(void)
TEST_ASSERT_BUFFERS_ARE_EQUAL("Link up at 2.5 Gbps FDX Autoneg",
text, strlen(text), "Invalid default link status string");
link_status.link_duplex = ETH_LINK_HALF_DUPLEX;
link_status.link_autoneg = ETH_LINK_FIXED;
link_status.link_speed = ETH_SPEED_NUM_10M,
link_status.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
link_status.link_autoneg = RTE_ETH_LINK_FIXED;
link_status.link_speed = RTE_ETH_SPEED_NUM_10M;
ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
printf("Default link up #2: %s\n", text);
RTE_TEST_ASSERT(ret > 0, "Failed to format default string\n");
@ -37,7 +37,7 @@ test_link_status_up_default(void)
text, strlen(text), "Invalid default link status "
"string with HDX");
link_status.link_speed = ETH_SPEED_NUM_UNKNOWN;
link_status.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
printf("Default link up #3: %s\n", text);
RTE_TEST_ASSERT(ret > 0, "Failed to format default string\n");
@ -45,7 +45,7 @@ test_link_status_up_default(void)
text, strlen(text), "Invalid default link status "
"string with HDX");
link_status.link_speed = ETH_SPEED_NUM_NONE;
link_status.link_speed = RTE_ETH_SPEED_NUM_NONE;
ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
printf("Default link up #3: %s\n", text);
RTE_TEST_ASSERT(ret > 0, "Failed to format default string\n");
@ -54,9 +54,9 @@ test_link_status_up_default(void)
"string with HDX");
/* test max str len */
link_status.link_speed = ETH_SPEED_NUM_200G;
link_status.link_duplex = ETH_LINK_HALF_DUPLEX;
link_status.link_autoneg = ETH_LINK_AUTONEG;
link_status.link_speed = RTE_ETH_SPEED_NUM_200G;
link_status.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
link_status.link_autoneg = RTE_ETH_LINK_AUTONEG;
ret = rte_eth_link_to_str(text, sizeof(text), &link_status);
printf("Default link up #4:len = %d, %s\n", ret, text);
RTE_TEST_ASSERT(ret < RTE_ETH_LINK_MAX_STR_LEN,
@ -69,10 +69,10 @@ test_link_status_down_default(void)
{
int ret = 0;
struct rte_eth_link link_status = {
.link_speed = ETH_SPEED_NUM_2_5G,
.link_status = ETH_LINK_DOWN,
.link_autoneg = ETH_LINK_AUTONEG,
.link_duplex = ETH_LINK_FULL_DUPLEX
.link_speed = RTE_ETH_SPEED_NUM_2_5G,
.link_status = RTE_ETH_LINK_DOWN,
.link_autoneg = RTE_ETH_LINK_AUTONEG,
.link_duplex = RTE_ETH_LINK_FULL_DUPLEX
};
char text[RTE_ETH_LINK_MAX_STR_LEN];
@ -90,9 +90,9 @@ test_link_status_invalid(void)
int ret = 0;
struct rte_eth_link link_status = {
.link_speed = 55555,
.link_status = ETH_LINK_UP,
.link_autoneg = ETH_LINK_AUTONEG,
.link_duplex = ETH_LINK_FULL_DUPLEX
.link_status = RTE_ETH_LINK_UP,
.link_autoneg = RTE_ETH_LINK_AUTONEG,
.link_duplex = RTE_ETH_LINK_FULL_DUPLEX
};
char text[RTE_ETH_LINK_MAX_STR_LEN];
@ -116,21 +116,21 @@ test_link_speed_all_values(void)
const char *value;
uint32_t link_speed;
} speed_str_map[] = {
{ "None", ETH_SPEED_NUM_NONE },
{ "10 Mbps", ETH_SPEED_NUM_10M },
{ "100 Mbps", ETH_SPEED_NUM_100M },
{ "1 Gbps", ETH_SPEED_NUM_1G },
{ "2.5 Gbps", ETH_SPEED_NUM_2_5G },
{ "5 Gbps", ETH_SPEED_NUM_5G },
{ "10 Gbps", ETH_SPEED_NUM_10G },
{ "20 Gbps", ETH_SPEED_NUM_20G },
{ "25 Gbps", ETH_SPEED_NUM_25G },
{ "40 Gbps", ETH_SPEED_NUM_40G },
{ "50 Gbps", ETH_SPEED_NUM_50G },
{ "56 Gbps", ETH_SPEED_NUM_56G },
{ "100 Gbps", ETH_SPEED_NUM_100G },
{ "200 Gbps", ETH_SPEED_NUM_200G },
{ "Unknown", ETH_SPEED_NUM_UNKNOWN },
{ "None", RTE_ETH_SPEED_NUM_NONE },
{ "10 Mbps", RTE_ETH_SPEED_NUM_10M },
{ "100 Mbps", RTE_ETH_SPEED_NUM_100M },
{ "1 Gbps", RTE_ETH_SPEED_NUM_1G },
{ "2.5 Gbps", RTE_ETH_SPEED_NUM_2_5G },
{ "5 Gbps", RTE_ETH_SPEED_NUM_5G },
{ "10 Gbps", RTE_ETH_SPEED_NUM_10G },
{ "20 Gbps", RTE_ETH_SPEED_NUM_20G },
{ "25 Gbps", RTE_ETH_SPEED_NUM_25G },
{ "40 Gbps", RTE_ETH_SPEED_NUM_40G },
{ "50 Gbps", RTE_ETH_SPEED_NUM_50G },
{ "56 Gbps", RTE_ETH_SPEED_NUM_56G },
{ "100 Gbps", RTE_ETH_SPEED_NUM_100G },
{ "200 Gbps", RTE_ETH_SPEED_NUM_200G },
{ "Unknown", RTE_ETH_SPEED_NUM_UNKNOWN },
{ "Invalid", 50505 }
};

View File

@ -103,7 +103,7 @@ port_init_rx_intr(uint16_t port, struct rte_mempool *mp)
{
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
.mq_mode = ETH_MQ_RX_NONE,
.mq_mode = RTE_ETH_MQ_RX_NONE,
},
.intr_conf = {
.rxq = 1,
@ -118,7 +118,7 @@ port_init(uint16_t port, struct rte_mempool *mp)
{
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
.mq_mode = ETH_MQ_RX_NONE,
.mq_mode = RTE_ETH_MQ_RX_NONE,
},
};

View File

@ -74,7 +74,7 @@ static const struct rte_eth_txconf tx_conf = {
static const struct rte_eth_conf port_conf = {
.txmode = {
.mq_mode = ETH_DCB_NONE,
.mq_mode = RTE_ETH_MQ_TX_NONE,
},
};

View File

@ -134,11 +134,11 @@ static uint16_t vlan_id = 0x100;
static struct rte_eth_conf default_pmd_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_NONE,
.mq_mode = RTE_ETH_MQ_RX_NONE,
.split_hdr_size = 0,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
.mq_mode = RTE_ETH_MQ_TX_NONE,
},
.lpbk_mode = 0,
};

View File

@ -107,11 +107,11 @@ static struct link_bonding_unittest_params test_params = {
static struct rte_eth_conf default_pmd_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_NONE,
.mq_mode = RTE_ETH_MQ_RX_NONE,
.split_hdr_size = 0,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
.mq_mode = RTE_ETH_MQ_TX_NONE,
},
.lpbk_mode = 0,
};

View File

@ -52,7 +52,7 @@ struct slave_conf {
struct rte_eth_rss_conf rss_conf;
uint8_t rss_key[40];
struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_RETA_GROUP_SIZE];
struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_ETH_RETA_GROUP_SIZE];
uint8_t is_slave;
struct rte_ring *rxtx_queue[RXTX_QUEUE_COUNT];
@ -61,7 +61,7 @@ struct slave_conf {
struct link_bonding_rssconf_unittest_params {
uint8_t bond_port_id;
struct rte_eth_dev_info bond_dev_info;
struct rte_eth_rss_reta_entry64 bond_reta_conf[512 / RTE_RETA_GROUP_SIZE];
struct rte_eth_rss_reta_entry64 bond_reta_conf[512 / RTE_ETH_RETA_GROUP_SIZE];
struct slave_conf slave_ports[SLAVE_COUNT];
struct rte_mempool *mbuf_pool;
@ -80,27 +80,27 @@ static struct link_bonding_rssconf_unittest_params test_params = {
*/
static struct rte_eth_conf default_pmd_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_NONE,
.mq_mode = RTE_ETH_MQ_RX_NONE,
.split_hdr_size = 0,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
.mq_mode = RTE_ETH_MQ_TX_NONE,
},
.lpbk_mode = 0,
};
static struct rte_eth_conf rss_pmd_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
.mq_mode = RTE_ETH_MQ_RX_RSS,
.split_hdr_size = 0,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
.mq_mode = RTE_ETH_MQ_TX_NONE,
},
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
.rss_hf = ETH_RSS_IPV6,
.rss_hf = RTE_ETH_RSS_IPV6,
},
},
.lpbk_mode = 0,
@ -207,13 +207,13 @@ bond_slaves(void)
static int
reta_set(uint16_t port_id, uint8_t value, int reta_size)
{
struct rte_eth_rss_reta_entry64 reta_conf[512/RTE_RETA_GROUP_SIZE];
struct rte_eth_rss_reta_entry64 reta_conf[512/RTE_ETH_RETA_GROUP_SIZE];
int i, j;
for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++) {
for (i = 0; i < reta_size / RTE_ETH_RETA_GROUP_SIZE; i++) {
/* select all fields to set */
reta_conf[i].mask = ~0LL;
for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
reta_conf[i].reta[j] = value;
}
@ -232,8 +232,8 @@ reta_check_synced(struct slave_conf *port)
for (i = 0; i < test_params.bond_dev_info.reta_size;
i++) {
int index = i / RTE_RETA_GROUP_SIZE;
int shift = i % RTE_RETA_GROUP_SIZE;
int index = i / RTE_ETH_RETA_GROUP_SIZE;
int shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (port->reta_conf[index].reta[shift] !=
test_params.bond_reta_conf[index].reta[shift])
@ -251,7 +251,7 @@ static int
bond_reta_fetch(void) {
unsigned j;
for (j = 0; j < test_params.bond_dev_info.reta_size / RTE_RETA_GROUP_SIZE;
for (j = 0; j < test_params.bond_dev_info.reta_size / RTE_ETH_RETA_GROUP_SIZE;
j++)
test_params.bond_reta_conf[j].mask = ~0LL;
@ -268,7 +268,7 @@ static int
slave_reta_fetch(struct slave_conf *port) {
unsigned j;
for (j = 0; j < port->dev_info.reta_size / RTE_RETA_GROUP_SIZE; j++)
for (j = 0; j < port->dev_info.reta_size / RTE_ETH_RETA_GROUP_SIZE; j++)
port->reta_conf[j].mask = ~0LL;
TEST_ASSERT_SUCCESS(rte_eth_dev_rss_reta_query(port->port_id,

View File

@ -62,11 +62,11 @@ static struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
static struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_NONE,
.mq_mode = RTE_ETH_MQ_RX_NONE,
.split_hdr_size = 0,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
.mq_mode = RTE_ETH_MQ_TX_NONE,
},
.lpbk_mode = 1, /* enable loopback */
};
@ -155,7 +155,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
continue;
}
/* clear all_ports_up flag if any link down */
if (link.link_status == ETH_LINK_DOWN) {
if (link.link_status == RTE_ETH_LINK_DOWN) {
all_ports_up = 0;
break;
}
@ -822,7 +822,7 @@ test_set_rxtx_conf(cmdline_fixed_string_t mode)
/* bulk alloc rx, full-featured tx */
tx_conf.tx_rs_thresh = 32;
tx_conf.tx_free_thresh = 32;
port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
return 0;
} else if (!strcmp(mode, "hybrid")) {
/* bulk alloc rx, vector tx
@ -831,13 +831,13 @@ test_set_rxtx_conf(cmdline_fixed_string_t mode)
*/
tx_conf.tx_rs_thresh = 32;
tx_conf.tx_free_thresh = 32;
port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
return 0;
} else if (!strcmp(mode, "full")) {
/* full feature rx,tx pair */
tx_conf.tx_rs_thresh = 32;
tx_conf.tx_free_thresh = 32;
port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
return 0;
}

View File

@ -53,7 +53,7 @@ static int virtual_ethdev_stop(struct rte_eth_dev *eth_dev __rte_unused)
void *pkt = NULL;
struct virtual_ethdev_private *prv = eth_dev->data->dev_private;
eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
eth_dev->data->dev_started = 0;
while (rte_ring_dequeue(prv->rx_queue, &pkt) != -ENOENT)
rte_pktmbuf_free(pkt);
@ -168,7 +168,7 @@ virtual_ethdev_link_update_success(struct rte_eth_dev *bonded_eth_dev,
int wait_to_complete __rte_unused)
{
if (!bonded_eth_dev->data->dev_started)
bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
bonded_eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
return 0;
}
@ -567,9 +567,9 @@ virtual_ethdev_create(const char *name, struct rte_ether_addr *mac_addr,
eth_dev->data->nb_rx_queues = (uint16_t)1;
eth_dev->data->nb_tx_queues = (uint16_t)1;
eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
eth_dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10G;
eth_dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
eth_dev->data->mac_addrs = rte_zmalloc(name, RTE_ETHER_ADDR_LEN, 0);
if (eth_dev->data->mac_addrs == NULL)

View File

@ -42,7 +42,7 @@ Features of the OCTEON cnxk SSO PMD are:
- HW managed packets enqueued from ethdev to eventdev exposed through event eth
RX adapter.
- N:1 ethernet device Rx queue to Event queue mapping.
- Lockfree Tx from event eth Tx adapter using ``DEV_TX_OFFLOAD_MT_LOCKFREE``
- Lockfree Tx from event eth Tx adapter using ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``
capability while maintaining receive packet order.
- Full Rx/Tx offload support defined through ethdev queue configuration.
- HW managed event vectorization on CN10K for packets enqueued from ethdev to

View File

@ -35,7 +35,7 @@ Features of the OCTEON TX2 SSO PMD are:
- HW managed packets enqueued from ethdev to eventdev exposed through event eth
RX adapter.
- N:1 ethernet device Rx queue to Event queue mapping.
- Lockfree Tx from event eth Tx adapter using ``DEV_TX_OFFLOAD_MT_LOCKFREE``
- Lockfree Tx from event eth Tx adapter using ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``
capability while maintaining receive packet order.
- Full Rx/Tx offload support defined through ethdev queue config.

View File

@ -70,5 +70,5 @@ Features and Limitations
------------------------
The PMD will re-insert the VLAN tag transparently to the packet if the kernel
strips it, as long as the ``DEV_RX_OFFLOAD_VLAN_STRIP`` is not enabled by the
strips it, as long as ``RTE_ETH_RX_OFFLOAD_VLAN_STRIP`` is not enabled by the
application.

View File

@ -877,21 +877,21 @@ processing. This improved performance is derived from a number of optimizations:
 * TX: only the following reduced set of transmit offloads is supported in
vector mode::
  DEV_TX_OFFLOAD_MBUF_FAST_FREE
  RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
 * RX: only the following reduced set of receive offloads is supported in
vector mode (note that jumbo MTU is allowed only when the MTU setting
does not require `DEV_RX_OFFLOAD_SCATTER` to be enabled)::
does not require `RTE_ETH_RX_OFFLOAD_SCATTER` to be enabled)::
  DEV_RX_OFFLOAD_VLAN_STRIP
  DEV_RX_OFFLOAD_KEEP_CRC
  DEV_RX_OFFLOAD_IPV4_CKSUM
  DEV_RX_OFFLOAD_UDP_CKSUM
  DEV_RX_OFFLOAD_TCP_CKSUM
  DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM
  DEV_RX_OFFLOAD_OUTER_UDP_CKSUM
  DEV_RX_OFFLOAD_RSS_HASH
  DEV_RX_OFFLOAD_VLAN_FILTER
  RTE_ETH_RX_OFFLOAD_VLAN_STRIP
  RTE_ETH_RX_OFFLOAD_KEEP_CRC
  RTE_ETH_RX_OFFLOAD_IPV4_CKSUM
  RTE_ETH_RX_OFFLOAD_UDP_CKSUM
  RTE_ETH_RX_OFFLOAD_TCP_CKSUM
  RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM
  RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM
  RTE_ETH_RX_OFFLOAD_RSS_HASH
  RTE_ETH_RX_OFFLOAD_VLAN_FILTER
The BNXT Vector PMD is enabled in DPDK builds by default. The decision to enable
vector processing is made at run-time when the port is started; if no transmit

View File

@ -432,7 +432,7 @@ Limitations
.. code-block:: console
vlan_offload = rte_eth_dev_get_vlan_offload(port);
vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
vlan_offload |= RTE_ETH_VLAN_STRIP_OFFLOAD;
rte_eth_dev_set_vlan_offload(port, vlan_offload);
Another alternative is to modify the adapter's ingress VLAN rewrite mode so that

View File

@ -30,7 +30,7 @@ Speed capabilities
Supports getting the speed capabilities that the current device is capable of.
* **[provides] rte_eth_dev_info**: ``speed_capa:ETH_LINK_SPEED_*``.
* **[provides] rte_eth_dev_info**: ``speed_capa:RTE_ETH_LINK_SPEED_*``.
* **[related] API**: ``rte_eth_dev_info_get()``.
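For instance, a hedged probe of the renamed speed flags (``port_id`` hypothetical):

.. code-block:: c

    struct rte_eth_dev_info info;

    if (rte_eth_dev_info_get(port_id, &info) == 0 &&
        (info.speed_capa & RTE_ETH_LINK_SPEED_25G) != 0) {
        /* the device can run at 25 Gbps */
    }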
@ -101,11 +101,11 @@ Supports Rx interrupts.
Lock-free Tx queue
------------------
If a PMD advertises DEV_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
If a PMD advertises RTE_ETH_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
invoke rte_eth_tx_burst() concurrently on the same Tx queue without SW lock.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_MT_LOCKFREE``.
* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_MT_LOCKFREE``.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``.
* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_MT_LOCKFREE``.
* **[related] API**: ``rte_eth_tx_burst()``.
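A hedged sketch of the probing side (``port_id`` hypothetical; ``conf`` is the configuration later passed to ``rte_eth_dev_configure()``):

.. code-block:: c

    struct rte_eth_dev_info info;
    struct rte_eth_conf conf = { 0 };

    rte_eth_dev_info_get(port_id, &info);
    /* Request lock-free Tx only when the PMD advertises it. */
    if (info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE)
        conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MT_LOCKFREE;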
@ -117,8 +117,8 @@ Fast mbuf free
Supports optimization for fast release of mbufs following successful Tx.
Requires that per queue, all mbufs come from the same mempool and have refcnt = 1.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_MBUF_FAST_FREE``.
* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_MBUF_FAST_FREE``.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE``.
* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE``.
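A hedged per-queue sketch (``port_id`` and ``nb_txd`` hypothetical; ``info`` assumed filled by ``rte_eth_dev_info_get()``), since the flag can also be set in ``rte_eth_txconf``:

.. code-block:: c

    struct rte_eth_txconf txconf = info.default_txconf;

    /* Safe only if every mbuf on this queue comes from one mempool
     * and has refcnt == 1. */
    if (info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
        txconf.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
    rte_eth_tx_queue_setup(port_id, 0, nb_txd,
                           rte_eth_dev_socket_id(port_id), &txconf);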
.. _nic_features_free_tx_mbuf_on_demand:
@ -165,7 +165,7 @@ Scattered Rx
Supports receiving segmented mbufs.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_SCATTER``.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_SCATTER``.
* **[implements] datapath**: ``Scattered Rx function``.
* **[implements] rte_eth_dev_data**: ``scattered_rx``.
* **[provides] eth_dev_ops**: ``rxq_info_get:scattered_rx``.
@ -193,12 +193,12 @@ LRO
Supports Large Receive Offload.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_TCP_LRO``.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_TCP_LRO``.
``dev_conf.rxmode.max_lro_pkt_size``.
* **[implements] datapath**: ``LRO functionality``.
* **[implements] rte_eth_dev_data**: ``lro``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_LRO``, ``mbuf.tso_segsz``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_TCP_LRO``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_TCP_LRO``.
* **[provides] rte_eth_dev_info**: ``max_lro_pkt_size``.
@ -209,12 +209,12 @@ TSO
Supports TCP Segmentation Offloading.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_TCP_TSO``.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_TCP_TSO``.
* **[uses] rte_eth_desc_lim**: ``nb_seg_max``, ``nb_mtu_seg_max``.
* **[uses] mbuf**: ``mbuf.ol_flags:`` ``PKT_TX_TCP_SEG``, ``PKT_TX_IPV4``, ``PKT_TX_IPV6``, ``PKT_TX_IP_CKSUM``.
* **[uses] mbuf**: ``mbuf.tso_segsz``, ``mbuf.l2_len``, ``mbuf.l3_len``, ``mbuf.l4_len``.
* **[implements] datapath**: ``TSO functionality``.
* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_TCP_TSO,DEV_TX_OFFLOAD_UDP_TSO``.
* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_TCP_TSO,RTE_ETH_TX_OFFLOAD_UDP_TSO``.
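A hedged mbuf-side sketch (``m`` hypothetical; the fixed header lengths are illustrative for untagged IPv4/TCP):

.. code-block:: c

    /* Request TSO: hardware splits the payload into 1400-byte segments. */
    m->ol_flags |= PKT_TX_TCP_SEG | PKT_TX_IPV4 | PKT_TX_IP_CKSUM;
    m->l2_len = 14;
    m->l3_len = 20;
    m->l4_len = 20;
    m->tso_segsz = 1400;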
.. _nic_features_promiscuous_mode:
@ -275,9 +275,9 @@ RSS hash
Supports RSS hashing on RX.
* **[uses] user config**: ``dev_conf.rxmode.mq_mode`` = ``ETH_MQ_RX_RSS_FLAG``.
* **[uses] user config**: ``dev_conf.rxmode.mq_mode`` = ``RTE_ETH_MQ_RX_RSS_FLAG``.
* **[uses] user config**: ``dev_conf.rx_adv_conf.rss_conf``.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_RSS_HASH``.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_RSS_HASH``.
* **[provides] rte_eth_dev_info**: ``flow_type_rss_offloads``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_RSS_HASH``, ``mbuf.rss``.
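Expressed as a hedged configuration sketch (hash types illustrative), an application typically requests this as:

.. code-block:: c

    struct rte_eth_conf conf = {
        .rxmode = {
            .mq_mode = RTE_ETH_MQ_RX_RSS,
            .offloads = RTE_ETH_RX_OFFLOAD_RSS_HASH,
        },
        .rx_adv_conf = {
            .rss_conf = {
                .rss_key = NULL, /* let the PMD pick its default key */
                .rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP,
            },
        },
    };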
@ -290,7 +290,7 @@ Inner RSS
Supports RX RSS hashing on Inner headers.
* **[uses] rte_flow_action_rss**: ``level``.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_RSS_HASH``.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_RSS_HASH``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_RSS_HASH``, ``mbuf.rss``.
@ -327,7 +327,7 @@ VMDq
Supports Virtual Machine Device Queues (VMDq).
* **[uses] user config**: ``dev_conf.rxmode.mq_mode`` = ``ETH_MQ_RX_VMDQ_FLAG``.
* **[uses] user config**: ``dev_conf.rxmode.mq_mode`` = ``RTE_ETH_MQ_RX_VMDQ_FLAG``.
* **[uses] user config**: ``dev_conf.rx_adv_conf.vmdq_dcb_conf``.
* **[uses] user config**: ``dev_conf.rx_adv_conf.vmdq_rx_conf``.
* **[uses] user config**: ``dev_conf.tx_adv_conf.vmdq_dcb_tx_conf``.
@ -350,7 +350,7 @@ DCB
Supports Data Center Bridging (DCB).
* **[uses] user config**: ``dev_conf.rxmode.mq_mode`` = ``ETH_MQ_RX_DCB_FLAG``.
* **[uses] user config**: ``dev_conf.rxmode.mq_mode`` = ``RTE_ETH_MQ_RX_DCB_FLAG``.
* **[uses] user config**: ``dev_conf.rx_adv_conf.vmdq_dcb_conf``.
* **[uses] user config**: ``dev_conf.rx_adv_conf.dcb_rx_conf``.
* **[uses] user config**: ``dev_conf.tx_adv_conf.vmdq_dcb_tx_conf``.
@ -366,7 +366,7 @@ VLAN filter
Supports filtering of a VLAN Tag identifier.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_VLAN_FILTER``.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_VLAN_FILTER``.
* **[implements] eth_dev_ops**: ``vlan_filter_set``.
* **[related] API**: ``rte_eth_dev_vlan_filter()``.
@ -404,13 +404,13 @@ Supports inline crypto processing defined by rte_security library to perform cry
operations of the security protocol while the packet is received in the NIC. The NIC is not aware
of protocol operations. See Security library and PMD documentation for more details.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_SECURITY``,
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_SECURITY``.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_SECURITY``,
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_SECURITY``.
* **[uses] mbuf**: ``mbuf.l2_len``.
* **[implements] rte_security_ops**: ``session_create``, ``session_update``,
``session_stats_get``, ``session_destroy``, ``set_pkt_metadata``, ``capabilities_get``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_SECURITY``,
``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_SECURITY``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_SECURITY``,
``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_SECURITY``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD``,
``mbuf.ol_flags:PKT_TX_SEC_OFFLOAD``, ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD_FAILED``.
* **[provides] rte_security_ops, capabilities_get**: ``action: RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO``
@ -426,14 +426,14 @@ protocol processing for the security protocol (e.g. IPsec, MACSEC) while the
packet is received at the NIC. The NIC is capable of understanding the security
protocol operations. See security library and PMD documentation for more details.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_SECURITY``,
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_SECURITY``.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_SECURITY``,
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_SECURITY``.
* **[uses] mbuf**: ``mbuf.l2_len``.
* **[implements] rte_security_ops**: ``session_create``, ``session_update``,
``session_stats_get``, ``session_destroy``, ``set_pkt_metadata``, ``get_userdata``,
``capabilities_get``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_SECURITY``,
``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_SECURITY``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_SECURITY``,
``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_SECURITY``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD``,
``mbuf.ol_flags:PKT_TX_SEC_OFFLOAD``, ``mbuf.ol_flags:PKT_RX_SEC_OFFLOAD_FAILED``.
* **[provides] rte_security_ops, capabilities_get**: ``action: RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL``
@ -447,7 +447,7 @@ CRC offload
Supports CRC stripping by hardware.
A PMD is assumed to support CRC stripping by default. A PMD should advertise whether it supports keeping the CRC.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_KEEP_CRC``.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_KEEP_CRC``.
.. _nic_features_vlan_offload:
@ -457,13 +457,13 @@ VLAN offload
Supports VLAN offload to hardware.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_VLAN_STRIP,DEV_RX_OFFLOAD_VLAN_FILTER,DEV_RX_OFFLOAD_VLAN_EXTEND``.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_VLAN_INSERT``.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_VLAN_STRIP,RTE_ETH_RX_OFFLOAD_VLAN_FILTER,RTE_ETH_RX_OFFLOAD_VLAN_EXTEND``.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_VLAN_INSERT``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_VLAN``, ``mbuf.vlan_tci``.
* **[implements] eth_dev_ops**: ``vlan_offload_set``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_VLAN_STRIPPED``, ``mbuf.ol_flags:PKT_RX_VLAN`` ``mbuf.vlan_tci``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_VLAN_STRIP``,
``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_VLAN_INSERT``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_VLAN_STRIP``,
``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_VLAN_INSERT``.
* **[related] API**: ``rte_eth_dev_set_vlan_offload()``,
``rte_eth_dev_get_vlan_offload()``.
@ -475,14 +475,14 @@ QinQ offload
Supports QinQ (queue in queue) offload.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_QINQ_STRIP``.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_QINQ_INSERT``.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_QINQ_STRIP``.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_QINQ_INSERT``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_QINQ``, ``mbuf.vlan_tci_outer``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_QINQ_STRIPPED``, ``mbuf.ol_flags:PKT_RX_QINQ``,
``mbuf.ol_flags:PKT_RX_VLAN_STRIPPED``, ``mbuf.ol_flags:PKT_RX_VLAN``
``mbuf.vlan_tci``, ``mbuf.vlan_tci_outer``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_QINQ_STRIP``,
``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_QINQ_INSERT``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_QINQ_STRIP``,
``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_QINQ_INSERT``.
.. _nic_features_fec:
@ -496,7 +496,7 @@ information to correct the bit errors generated during data packet transmission
improves signal quality but also brings a delay to signals. This function can be enabled or disabled as required.
* **[implements] eth_dev_ops**: ``fec_get_capability``, ``fec_get``, ``fec_set``.
* **[provides] rte_eth_fec_capa**: ``speed:ETH_SPEED_NUM_*``, ``capa:RTE_ETH_FEC_MODE_TO_CAPA()``.
* **[provides] rte_eth_fec_capa**: ``speed:RTE_ETH_SPEED_NUM_*``, ``capa:RTE_ETH_FEC_MODE_TO_CAPA()``.
* **[related] API**: ``rte_eth_fec_get_capability()``, ``rte_eth_fec_get()``, ``rte_eth_fec_set()``.
@ -507,16 +507,16 @@ L3 checksum offload
Supports L3 checksum offload.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_IPV4_CKSUM``.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_IPV4_CKSUM``.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_IPV4_CKSUM``.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_IPV4_CKSUM``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_IP_CKSUM``,
``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``.
* **[uses] mbuf**: ``mbuf.l2_len``, ``mbuf.l3_len``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_IP_CKSUM_UNKNOWN`` |
``PKT_RX_IP_CKSUM_BAD`` | ``PKT_RX_IP_CKSUM_GOOD`` |
``PKT_RX_IP_CKSUM_NONE``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_IPV4_CKSUM``,
``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_IPV4_CKSUM``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_IPV4_CKSUM``,
``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_IPV4_CKSUM``.
.. _nic_features_l4_checksum_offload:
@ -526,8 +526,8 @@ L4 checksum offload
Supports L4 checksum offload.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_UDP_CKSUM,DEV_RX_OFFLOAD_TCP_CKSUM,DEV_RX_OFFLOAD_SCTP_CKSUM``.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_UDP_CKSUM,DEV_TX_OFFLOAD_TCP_CKSUM,DEV_TX_OFFLOAD_SCTP_CKSUM``.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_UDP_CKSUM,RTE_ETH_RX_OFFLOAD_TCP_CKSUM,RTE_ETH_RX_OFFLOAD_SCTP_CKSUM``.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_UDP_CKSUM,RTE_ETH_TX_OFFLOAD_TCP_CKSUM,RTE_ETH_TX_OFFLOAD_SCTP_CKSUM``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``,
``mbuf.ol_flags:PKT_TX_L4_NO_CKSUM`` | ``PKT_TX_TCP_CKSUM`` |
``PKT_TX_SCTP_CKSUM`` | ``PKT_TX_UDP_CKSUM``.
@ -535,8 +535,8 @@ Supports L4 checksum offload.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_L4_CKSUM_UNKNOWN`` |
``PKT_RX_L4_CKSUM_BAD`` | ``PKT_RX_L4_CKSUM_GOOD`` |
``PKT_RX_L4_CKSUM_NONE``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_UDP_CKSUM,DEV_RX_OFFLOAD_TCP_CKSUM,DEV_RX_OFFLOAD_SCTP_CKSUM``,
``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_UDP_CKSUM,DEV_TX_OFFLOAD_TCP_CKSUM,DEV_TX_OFFLOAD_SCTP_CKSUM``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_UDP_CKSUM,RTE_ETH_RX_OFFLOAD_TCP_CKSUM,RTE_ETH_RX_OFFLOAD_SCTP_CKSUM``,
``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_UDP_CKSUM,RTE_ETH_TX_OFFLOAD_TCP_CKSUM,RTE_ETH_TX_OFFLOAD_SCTP_CKSUM``.
.. _nic_features_hw_timestamp:
@ -545,10 +545,10 @@ Timestamp offload
Supports Timestamp.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_TIMESTAMP``.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_TIMESTAMP``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_TIMESTAMP``.
* **[provides] mbuf**: ``mbuf.timestamp``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa: DEV_RX_OFFLOAD_TIMESTAMP``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa: RTE_ETH_RX_OFFLOAD_TIMESTAMP``.
* **[related] eth_dev_ops**: ``read_clock``.
.. _nic_features_macsec_offload:
@ -558,11 +558,11 @@ MACsec offload
Supports MACsec.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_MACSEC_STRIP``.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_MACSEC_INSERT``.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_MACSEC_STRIP``.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_MACSEC_INSERT``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_MACSEC``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_MACSEC_STRIP``,
``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_MACSEC_INSERT``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_MACSEC_STRIP``,
``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_MACSEC_INSERT``.
.. _nic_features_inner_l3_checksum:
@ -572,16 +572,16 @@ Inner L3 checksum
Supports inner packet L3 checksum.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM``.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM``.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_IP_CKSUM``,
``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``,
``mbuf.ol_flags:PKT_TX_OUTER_IP_CKSUM``,
``mbuf.ol_flags:PKT_TX_OUTER_IPV4`` | ``PKT_TX_OUTER_IPV6``.
* **[uses] mbuf**: ``mbuf.outer_l2_len``, ``mbuf.outer_l3_len``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_OUTER_IP_CKSUM_BAD``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM``,
``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM``,
``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM``.
.. _nic_features_inner_l4_checksum:
@ -591,15 +591,15 @@ Inner L4 checksum
Supports inner packet L4 checksum.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_OUTER_UDP_CKSUM``.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_OUTER_L4_CKSUM_UNKNOWN`` |
``PKT_RX_OUTER_L4_CKSUM_BAD`` | ``PKT_RX_OUTER_L4_CKSUM_GOOD`` | ``PKT_RX_OUTER_L4_CKSUM_INVALID``.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_OUTER_UDP_CKSUM``.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_OUTER_IPV4`` | ``PKT_TX_OUTER_IPV6``.
``mbuf.ol_flags:PKT_TX_OUTER_UDP_CKSUM``.
* **[uses] mbuf**: ``mbuf.outer_l2_len``, ``mbuf.outer_l3_len``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:DEV_RX_OFFLOAD_OUTER_UDP_CKSUM``,
``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_OUTER_UDP_CKSUM``.
* **[provides] rte_eth_dev_info**: ``rx_offload_capa,rx_queue_offload_capa:RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM``,
``tx_offload_capa,tx_queue_offload_capa:RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM``.
.. _nic_features_shared_rx_queue:

View File

@ -78,11 +78,11 @@ To enable via ``RX_OLFLAGS`` use ``RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE=y``.
To guarantee the constraint, the following capabilities in ``dev_conf.rxmode.offloads``
will be checked:
* ``DEV_RX_OFFLOAD_VLAN_EXTEND``
* ``RTE_ETH_RX_OFFLOAD_VLAN_EXTEND``
* ``DEV_RX_OFFLOAD_CHECKSUM``
* ``RTE_ETH_RX_OFFLOAD_CHECKSUM``
* ``DEV_RX_OFFLOAD_HEADER_SPLIT``
* ``RTE_ETH_RX_OFFLOAD_HEADER_SPLIT``
* ``fdir_conf->mode``
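Expressed as a hedged sketch (``conf`` standing for the application's ``struct rte_eth_conf``), the constraint amounts to:

.. code-block:: c

    /* Illustrative guard: the vector Rx path requires these offloads to
     * stay disabled (and the flow director to be left off). */
    int vec_rx_allowed = (conf.rxmode.offloads &
            (RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
             RTE_ETH_RX_OFFLOAD_CHECKSUM |
             RTE_ETH_RX_OFFLOAD_HEADER_SPLIT)) == 0;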

View File

@ -216,21 +216,21 @@ For example,
* If the max number of VFs (max_vfs) is set in the range of 1 to 32:
If the number of Rx queues is specified as 4 (``--rxq=4`` in testpmd), then there are a total of 32
pools (ETH_32_POOLS), and each VF could have 4 Rx queues;
pools (RTE_ETH_32_POOLS), and each VF could have 4 Rx queues;
If the number of Rx queues is specified as 2 (``--rxq=2`` in testpmd), then there are a total of 32
pools (ETH_32_POOLS), and each VF could have 2 Rx queues;
pools (RTE_ETH_32_POOLS), and each VF could have 2 Rx queues;
* If the max number of VFs (max_vfs) is in the range of 33 to 64:
If the number of Rx queues is specified as 4 (``--rxq=4`` in testpmd), then an error message is expected,
as ``rxq`` is not correct in this case;
If the number of rxq is 2 (``--rxq=2`` in testpmd), then there is totally 64 pools (ETH_64_POOLS),
If the number of rxq is 2 (``--rxq=2`` in testpmd), then there are a total of 64 pools (RTE_ETH_64_POOLS),
and each VF has 2 Rx queues;
On host, to enable VF RSS functionality, rx mq mode should be set as ETH_MQ_RX_VMDQ_RSS
or ETH_MQ_RX_RSS mode, and SRIOV mode should be activated (max_vfs >= 1).
On the host, to enable VF RSS functionality, the Rx mq mode should be set to RTE_ETH_MQ_RX_VMDQ_RSS
or RTE_ETH_MQ_RX_RSS mode, and SRIOV mode should be activated (max_vfs >= 1).
It is also necessary to configure VF RSS information such as the hash function, RSS key and RSS key length; a minimal host-side sketch is shown below.
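The names below are illustrative; any ``RTE_ETH_RSS_*`` combination supported by the device may be used:

.. code-block:: c

    struct rte_eth_conf conf = { 0 };

    conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
    conf.rx_adv_conf.rss_conf.rss_key = NULL; /* or a device-sized key */
    conf.rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP;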
.. note::

View File

@ -89,13 +89,13 @@ Other features are supported using optional MACRO configuration. They include:
To guarantee the constraint, capabilities in dev_conf.rxmode.offloads will be checked:
* DEV_RX_OFFLOAD_VLAN_STRIP
* RTE_ETH_RX_OFFLOAD_VLAN_STRIP
* DEV_RX_OFFLOAD_VLAN_EXTEND
* RTE_ETH_RX_OFFLOAD_VLAN_EXTEND
* DEV_RX_OFFLOAD_CHECKSUM
* RTE_ETH_RX_OFFLOAD_CHECKSUM
* DEV_RX_OFFLOAD_HEADER_SPLIT
* RTE_ETH_RX_OFFLOAD_HEADER_SPLIT
* dev_conf
@ -163,13 +163,13 @@ l3fwd
~~~~~
When running l3fwd with vPMD, there is one thing to note.
In the configuration, ensure that DEV_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads is NOT set.
In the configuration, ensure that RTE_ETH_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads is NOT set.
Otherwise, by default, RX vPMD is disabled.
load_balancer
~~~~~~~~~~~~~
As in the case of l3fwd, to enable vPMD, do NOT set DEV_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads.
As in the case of l3fwd, to enable vPMD, do NOT set RTE_ETH_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads.
In addition, for improved performance, use -bsz "(32,32),(64,64),(32,32)" in load_balancer to avoid using the default burst size of 144.

View File

@ -371,7 +371,7 @@ Limitations
- CRC:
- ``DEV_RX_OFFLOAD_KEEP_CRC`` cannot be supported with decapsulation
- ``RTE_ETH_RX_OFFLOAD_KEEP_CRC`` cannot be supported with decapsulation
for some NICs (such as ConnectX-6 Dx, ConnectX-6 Lx, and BlueField-2).
The capability bit ``scatter_fcs_w_decap_disable`` shows NIC support.
@ -611,7 +611,7 @@ Driver options
small-packet traffic.
When MPRQ is enabled, MTU can be larger than the size of
user-provided mbuf even if DEV_RX_OFFLOAD_SCATTER isn't enabled. PMD will
user-provided mbuf even if RTE_ETH_RX_OFFLOAD_SCATTER isn't enabled. The PMD will
configure a stride size large enough to accommodate the MTU as long as the
device allows. Note that this can waste system memory compared to enabling Rx
scatter and multi-segment packet.

View File

@ -275,7 +275,7 @@ An example utility for eBPF instruction generation in the format of C arrays wil
be added in future releases.
TAP reports supported RSS functions as part of the dev_infos_get callback:
``ETH_RSS_IP``, ``ETH_RSS_UDP`` and ``ETH_RSS_TCP``.
``RTE_ETH_RSS_IP``, ``RTE_ETH_RSS_UDP`` and ``RTE_ETH_RSS_TCP``.
**Known limitation:** TAP supports all of the above hash functions together
and not in partial combinations.

View File

@ -194,11 +194,11 @@ To segment an outgoing packet, an application must:
- the bit mask of required GSO types. The GSO library uses the same macros as
those that describe a physical device's TX offloading capabilities (i.e.
``DEV_TX_OFFLOAD_*_TSO``) for gso_types. For example, if an application
``RTE_ETH_TX_OFFLOAD_*_TSO``) for gso_types. For example, if an application
wants to segment TCP/IPv4 packets, it should set gso_types to
``DEV_TX_OFFLOAD_TCP_TSO``. The only other supported values currently
supported for gso_types are ``DEV_TX_OFFLOAD_VXLAN_TNL_TSO``, and
``DEV_TX_OFFLOAD_GRE_TNL_TSO``; a combination of these macros is also
``RTE_ETH_TX_OFFLOAD_TCP_TSO``. The only other values currently
supported for gso_types are ``RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO`` and
``RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO``; a combination of these macros is also
allowed.
- a flag, that indicates whether the IPv4 headers of output segments should

View File

@ -137,7 +137,7 @@ a vxlan-encapsulated tcp packet:
mb->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM
set out_ip checksum to 0 in the packet
This is supported on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM.
This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM.
- calculate checksum of out_ip and out_udp::
@ -147,8 +147,8 @@ a vxlan-encapsulated tcp packet:
set out_ip checksum to 0 in the packet
set out_udp checksum to pseudo header using rte_ipv4_phdr_cksum()
This is supported on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM
and DEV_TX_OFFLOAD_UDP_CKSUM.
This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM
and RTE_ETH_TX_OFFLOAD_UDP_CKSUM.
- calculate checksum of in_ip::
@ -158,7 +158,7 @@ a vxlan-encapsulated tcp packet:
set in_ip checksum to 0 in the packet
This is similar to case 1), but l2_len is different. It is supported
on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM.
on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM.
Note that it can only work if outer L4 checksum is 0.
- calculate checksum of in_ip and in_tcp::
@ -170,8 +170,8 @@ a vxlan-encapsulated tcp packet:
set in_tcp checksum to pseudo header using rte_ipv4_phdr_cksum()
This is similar to case 2), but l2_len is different. It is supported
on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM and
DEV_TX_OFFLOAD_TCP_CKSUM.
on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM and
RTE_ETH_TX_OFFLOAD_TCP_CKSUM.
Note that it can only work if outer L4 checksum is 0.
- segment inner TCP::
@ -185,7 +185,7 @@ a vxlan-encapsulated tcp packet:
set in_tcp checksum to pseudo header without including the IP
payload length using rte_ipv4_phdr_cksum()
This is supported on hardware advertising DEV_TX_OFFLOAD_TCP_TSO.
This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_TCP_TSO.
Note that it can only work if outer L4 checksum is 0.
- calculate checksum of out_ip, in_ip, in_tcp::
@ -200,8 +200,8 @@ a vxlan-encapsulated tcp packet:
set in_ip checksum to 0 in the packet
set in_tcp checksum to pseudo header using rte_ipv4_phdr_cksum()
This is supported on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM,
DEV_TX_OFFLOAD_UDP_CKSUM and DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM.
This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM,
RTE_ETH_TX_OFFLOAD_UDP_CKSUM and RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM.
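Restated as a hedged sketch of the first case above (``mb`` and ``ipv4_hdr`` hypothetical, pointing at the mbuf and its outer IPv4 header):

.. code-block:: c

    /* Hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM fills in the
     * outer IPv4 checksum; the driver expects the field zeroed. */
    mb->l2_len = sizeof(struct rte_ether_hdr);
    mb->l3_len = sizeof(struct rte_ipv4_hdr);
    mb->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM;
    ipv4_hdr->hdr_checksum = 0;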
The list of flags and their precise meaning is described in the mbuf API
documentation (rte_mbuf.h). Also refer to the testpmd source code

View File

@ -57,7 +57,7 @@ Whenever needed and appropriate, asynchronous communication should be introduced
Avoiding lock contention is a key issue in a multi-core environment.
To address this issue, PMDs are designed to work with per-core private resources as much as possible.
For example, a PMD maintains a separate transmit queue per-core, per-port, if the PMD is not ``DEV_TX_OFFLOAD_MT_LOCKFREE`` capable.
For example, a PMD maintains a separate transmit queue per core, per port, if the PMD is not ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE`` capable.
In the same way, every receive queue of a port is assigned to and polled by a single logical core (lcore).
To comply with Non-Uniform Memory Access (NUMA), memory management is designed to assign to each logical core
@ -119,7 +119,7 @@ This is also true for the pipe-line model provided all logical cores used are lo
Multiple logical cores should never share receive or transmit queues for interfaces since this would require global locks and hinder performance.
If the PMD is ``DEV_TX_OFFLOAD_MT_LOCKFREE`` capable, multiple threads can invoke ``rte_eth_tx_burst()``
If the PMD is ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE`` capable, multiple threads can invoke ``rte_eth_tx_burst()``
concurrently on the same Tx queue without a SW lock. This PMD feature is found in some NICs and is useful in the following use cases:
* Remove explicit spinlock in some applications where lcores are not mapped to Tx queues with 1:1 relation.
@ -127,7 +127,7 @@ concurrently on the same tx queue without SW lock. This PMD feature found in som
* In the eventdev use case, it avoids dedicating a separate Tx core for transmitting and thus
enables more scaling as all workers can send packets.
See `Hardware Offload`_ for ``DEV_TX_OFFLOAD_MT_LOCKFREE`` capability probing details.
See `Hardware Offload`_ for ``RTE_ETH_TX_OFFLOAD_MT_LOCKFREE`` capability probing details.
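Under that capability, a hedged sketch of what each worker may then do (``port_id``, ``pkts`` and ``nb`` hypothetical):

.. code-block:: c

    /* Several lcores may execute this concurrently on the same queue,
     * with no software lock, when RTE_ETH_TX_OFFLOAD_MT_LOCKFREE is set. */
    uint16_t sent = rte_eth_tx_burst(port_id, 0 /* shared Tx queue */,
                                     pkts, nb);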
Device Identification, Ownership and Configuration
--------------------------------------------------
@ -311,7 +311,7 @@ The ``dev_info->[rt]x_queue_offload_capa`` returned from ``rte_eth_dev_info_get(
The ``dev_info->[rt]x_offload_capa`` returned from ``rte_eth_dev_info_get()`` includes all pure per-port and per-queue offloading capabilities.
Supported offloads can be either per-port or per-queue.
Offloads are enabled using the existing ``DEV_TX_OFFLOAD_*`` or ``DEV_RX_OFFLOAD_*`` flags.
Offloads are enabled using the existing ``RTE_ETH_TX_OFFLOAD_*`` or ``RTE_ETH_RX_OFFLOAD_*`` flags.
Any requested offloading by an application must be within the device capabilities.
Any offloading is disabled by default if it is not set in the parameter
``dev_conf->[rt]xmode.offloads`` to ``rte_eth_dev_configure()`` and

View File

@ -1993,23 +1993,23 @@ only matching traffic goes through.
.. table:: RSS
+---------------+---------------------------------------------+
| Field | Value |
+===============+=============================================+
| ``func`` | RSS hash function to apply |
+---------------+---------------------------------------------+
| ``level`` | encapsulation level for ``types`` |
+---------------+---------------------------------------------+
| ``types`` | specific RSS hash types (see ``ETH_RSS_*``) |
+---------------+---------------------------------------------+
| ``key_len`` | hash key length in bytes |
+---------------+---------------------------------------------+
| ``queue_num`` | number of entries in ``queue`` |
+---------------+---------------------------------------------+
| ``key`` | hash key |
+---------------+---------------------------------------------+
| ``queue`` | queue indices to use |
+---------------+---------------------------------------------+
+---------------+-------------------------------------------------+
| Field | Value |
+===============+=================================================+
| ``func`` | RSS hash function to apply |
+---------------+-------------------------------------------------+
| ``level`` | encapsulation level for ``types`` |
+---------------+-------------------------------------------------+
| ``types`` | specific RSS hash types (see ``RTE_ETH_RSS_*``) |
+---------------+-------------------------------------------------+
| ``key_len`` | hash key length in bytes |
+---------------+-------------------------------------------------+
| ``queue_num`` | number of entries in ``queue`` |
+---------------+-------------------------------------------------+
| ``key`` | hash key |
+---------------+-------------------------------------------------+
| ``queue`` | queue indices to use |
+---------------+-------------------------------------------------+
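A hedged C sketch of filling this action (queue indices and hash types illustrative):

.. code-block:: c

    uint16_t queues[2] = { 0, 1 };
    struct rte_flow_action_rss rss = {
        .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
        .level = 0,            /* hash on the outermost headers */
        .types = RTE_ETH_RSS_IP,
        .key_len = 0,          /* keep the default key */
        .queue_num = 2,
        .queue = queues,
    };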
Action: ``PF``
^^^^^^^^^^^^^^

View File

@ -569,7 +569,7 @@ created by the application is attached to the security session by the API
For Inline Crypto and Inline protocol offload, device specific defined metadata is
updated in the mbuf using ``rte_security_set_pkt_metadata()`` if
``DEV_TX_OFFLOAD_SEC_NEED_MDATA`` is set.
``RTE_ETH_TX_OFFLOAD_SEC_NEED_MDATA`` is set.
For inline protocol offloaded ingress traffic, the application can register a
pointer, ``userdata`` , in the security session. When the packet is received,

View File

@ -69,22 +69,16 @@ Deprecation Notices
``RTE_ETH_FLOW_MAX`` is one example of the mentioned case: adding a new flow
type will break the ABI because of ``flex_mask[RTE_ETH_FLOW_MAX]`` array
usage in following public struct hierarchy:
``rte_eth_fdir_flex_conf -> rte_fdir_conf -> rte_eth_conf (in the middle)``.
``rte_eth_fdir_flex_conf -> rte_eth_fdir_conf -> rte_eth_conf (in the middle)``.
This kind of usage needs to be identified and fixed in 20.11, otherwise it blocks
us from extending existing enums/defines.
One solution can be using a fixed size array instead of ``.*MAX.*`` value.
* ethdev: Will add ``RTE_ETH_`` prefix to all ethdev macros/enums in v21.11.
Macros will be added for backward compatibility.
Backward compatibility macros will be removed in v22.11.
A few old backward compatibility macros from 2013 that do not have a
proper prefix will be removed in v21.11.
* ethdev: The flow director API, including ``rte_eth_conf.fdir_conf`` field,
and the related structures (``rte_fdir_*`` and ``rte_eth_fdir_*``),
will be removed in DPDK 20.11.
* ethdev: New offload flags ``DEV_RX_OFFLOAD_FLOW_MARK`` will be added in 19.11.
* ethdev: New offload flags ``RTE_ETH_RX_OFFLOAD_FLOW_MARK`` will be added in 19.11.
This will allow application to enable or disable PMDs from updating
``rte_mbuf::hash::fdir``.
This scheme will allow PMDs to avoid writes to ``rte_mbuf`` fields on Rx and

View File

@ -428,6 +428,9 @@ ABI Changes
Also, make sure to start the actual text at the margin.
=======================================================
* ethdev: All enums & macros updated to have ``RTE_ETH`` prefix and structures
updated to have ``rte_eth`` prefix. DPDK components updated to use new names.
* ethdev: Input parameters for ``eth_rx_queue_count_t`` was changed.
Instead of pointer to ``rte_eth_dev`` and queue index, now it accepts pointer
to internal queue data as input parameter. While this change is transparent

View File

@ -209,12 +209,12 @@ Where:
device will ensure the ordering. Ordering will be lost when tried in PARALLEL.
* ``--rxoffload MASK``: RX HW offload capabilities to enable/use on this port
(bitmask of DEV_RX_OFFLOAD_* values). It is an optional parameter and
(bitmask of RTE_ETH_RX_OFFLOAD_* values). It is an optional parameter and
allows the user to disable some of the RX HW offload capabilities.
By default all HW RX offloads are enabled.
* ``--txoffload MASK``: TX HW offload capabilities to enable/use on this port
(bitmask of DEV_TX_OFFLOAD_* values). It is an optional parameter and
(bitmask of RTE_ETH_TX_OFFLOAD_* values). It is an optional parameter and
allows the user to disable some of the TX HW offload capabilities.
By default all HW TX offloads are enabled.
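For illustration only, assuming the conventional flag values (``RTE_ETH_RX_OFFLOAD_IPV4_CKSUM`` = 0x2, ``RTE_ETH_RX_OFFLOAD_UDP_CKSUM`` = 0x4, ``RTE_ETH_RX_OFFLOAD_TCP_CKSUM`` = 0x8), a mask that keeps only the Rx checksum offloads would look like:

.. code-block:: console

   ./ipsec-secgw <EAL options> -- --rxoffload 0xe <other options>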

View File

@ -546,7 +546,7 @@ The command line options are:
Set the hexadecimal bitmask of RX multi queue mode which can be enabled.
The default value is 0x7::
ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG | ETH_MQ_RX_VMDQ_FLAG
RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_DCB_FLAG | RTE_ETH_MQ_RX_VMDQ_FLAG
* ``--record-core-cycles``

View File

@ -90,20 +90,20 @@ int dpaa_intr_disable(char *if_name);
struct usdpaa_ioctl_link_status_args_old {
/* network device node name */
char if_name[IF_NAME_MAX_LEN];
/* link status(ETH_LINK_UP/DOWN) */
/* link status(RTE_ETH_LINK_UP/DOWN) */
int link_status;
};
struct usdpaa_ioctl_link_status_args {
/* network device node name */
char if_name[IF_NAME_MAX_LEN];
/* link status(ETH_LINK_UP/DOWN) */
/* link status(RTE_ETH_LINK_UP/DOWN) */
int link_status;
/* link speed (ETH_SPEED_NUM_)*/
/* link speed (RTE_ETH_SPEED_NUM_)*/
int link_speed;
/* link duplex (ETH_LINK_[HALF/FULL]_DUPLEX)*/
/* link duplex (RTE_ETH_LINK_[HALF/FULL]_DUPLEX)*/
int link_duplex;
/* link autoneg (ETH_LINK_AUTONEG/FIXED)*/
/* link autoneg (RTE_ETH_LINK_AUTONEG/FIXED)*/
int link_autoneg;
};
@ -111,16 +111,16 @@ struct usdpaa_ioctl_link_status_args {
struct usdpaa_ioctl_update_link_status_args {
/* network device node name */
char if_name[IF_NAME_MAX_LEN];
/* link status(ETH_LINK_UP/DOWN) */
/* link status(RTE_ETH_LINK_UP/DOWN) */
int link_status;
};
struct usdpaa_ioctl_update_link_speed {
/* network device node name*/
char if_name[IF_NAME_MAX_LEN];
/* link speed (ETH_SPEED_NUM_)*/
/* link speed (RTE_ETH_SPEED_NUM_)*/
int link_speed;
/* link duplex (ETH_LINK_[HALF/FULL]_DUPLEX)*/
/* link duplex (RTE_ETH_LINK_[HALF/FULL]_DUPLEX)*/
int link_duplex;
};

View File

@ -167,7 +167,7 @@ enum roc_npc_rss_hash_function {
struct roc_npc_action_rss {
enum roc_npc_rss_hash_function func;
uint32_t level;
uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
uint64_t types; /**< Specific RSS hash types (see RTE_ETH_RSS_*). */
uint32_t key_len; /**< Hash key length in bytes. */
uint32_t queue_num; /**< Number of entries in @p queue. */
const uint8_t *key; /**< Hash key. */

View File

@ -93,10 +93,10 @@ static const char *valid_arguments[] = {
};
static struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
.link_status = ETH_LINK_DOWN,
.link_autoneg = ETH_LINK_FIXED,
.link_speed = RTE_ETH_SPEED_NUM_10G,
.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
.link_status = RTE_ETH_LINK_DOWN,
.link_autoneg = RTE_ETH_LINK_FIXED,
};
RTE_LOG_REGISTER_DEFAULT(af_packet_logtype, NOTICE);
@ -290,7 +290,7 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
static int
eth_dev_start(struct rte_eth_dev *dev)
{
dev->data->dev_link.link_status = ETH_LINK_UP;
dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
return 0;
}
@ -320,7 +320,7 @@ eth_dev_stop(struct rte_eth_dev *dev)
internals->tx_queue[i].sockfd = -1;
}
dev->data->dev_link.link_status = ETH_LINK_DOWN;
dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
return 0;
}
@ -331,7 +331,7 @@ eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
const struct rte_eth_rxmode *rxmode = &dev_conf->rxmode;
struct pmd_internals *internals = dev->data->dev_private;
internals->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
internals->vlan_strip = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
return 0;
}
@ -346,9 +346,9 @@ eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_queues = (uint16_t)internals->nb_queues;
dev_info->max_tx_queues = (uint16_t)internals->nb_queues;
dev_info->min_rx_bufsize = 0;
dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
DEV_TX_OFFLOAD_VLAN_INSERT;
dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
return 0;
}

View File

@ -164,10 +164,10 @@ static const char * const valid_arguments[] = {
};
static const struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
.link_status = ETH_LINK_DOWN,
.link_autoneg = ETH_LINK_AUTONEG
.link_speed = RTE_ETH_SPEED_NUM_10G,
.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
.link_status = RTE_ETH_LINK_DOWN,
.link_autoneg = RTE_ETH_LINK_AUTONEG
};
/* List which tracks PMDs to facilitate sharing UMEMs across them. */
@ -653,7 +653,7 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
static int
eth_dev_start(struct rte_eth_dev *dev)
{
dev->data->dev_link.link_status = ETH_LINK_UP;
dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
return 0;
}
@ -662,7 +662,7 @@ eth_dev_start(struct rte_eth_dev *dev)
static int
eth_dev_stop(struct rte_eth_dev *dev)
{
dev->data->dev_link.link_status = ETH_LINK_DOWN;
dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
return 0;
}

View File

@ -736,14 +736,14 @@ eth_ark_dev_info_get(struct rte_eth_dev *dev,
.nb_align = ARK_TX_MIN_QUEUE}; /* power of 2 */
/* ARK PMD supports all line rates, how do we indicate that here ?? */
dev_info->speed_capa = (ETH_LINK_SPEED_1G |
ETH_LINK_SPEED_10G |
ETH_LINK_SPEED_25G |
ETH_LINK_SPEED_40G |
ETH_LINK_SPEED_50G |
ETH_LINK_SPEED_100G);
dev_info->speed_capa = (RTE_ETH_LINK_SPEED_1G |
RTE_ETH_LINK_SPEED_10G |
RTE_ETH_LINK_SPEED_25G |
RTE_ETH_LINK_SPEED_40G |
RTE_ETH_LINK_SPEED_50G |
RTE_ETH_LINK_SPEED_100G);
dev_info->rx_offload_capa = DEV_RX_OFFLOAD_TIMESTAMP;
dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_TIMESTAMP;
return 0;
}

View File

@ -154,20 +154,20 @@ static struct rte_pci_driver rte_atl_pmd = {
.remove = eth_atl_pci_remove,
};
#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
| DEV_RX_OFFLOAD_IPV4_CKSUM \
| DEV_RX_OFFLOAD_UDP_CKSUM \
| DEV_RX_OFFLOAD_TCP_CKSUM \
| DEV_RX_OFFLOAD_MACSEC_STRIP \
| DEV_RX_OFFLOAD_VLAN_FILTER)
#define ATL_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_VLAN_STRIP \
| RTE_ETH_RX_OFFLOAD_IPV4_CKSUM \
| RTE_ETH_RX_OFFLOAD_UDP_CKSUM \
| RTE_ETH_RX_OFFLOAD_TCP_CKSUM \
| RTE_ETH_RX_OFFLOAD_MACSEC_STRIP \
| RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
| DEV_TX_OFFLOAD_IPV4_CKSUM \
| DEV_TX_OFFLOAD_UDP_CKSUM \
| DEV_TX_OFFLOAD_TCP_CKSUM \
| DEV_TX_OFFLOAD_TCP_TSO \
| DEV_TX_OFFLOAD_MACSEC_INSERT \
| DEV_TX_OFFLOAD_MULTI_SEGS)
#define ATL_TX_OFFLOADS (RTE_ETH_TX_OFFLOAD_VLAN_INSERT \
| RTE_ETH_TX_OFFLOAD_IPV4_CKSUM \
| RTE_ETH_TX_OFFLOAD_UDP_CKSUM \
| RTE_ETH_TX_OFFLOAD_TCP_CKSUM \
| RTE_ETH_TX_OFFLOAD_TCP_TSO \
| RTE_ETH_TX_OFFLOAD_MACSEC_INSERT \
| RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
#define SFP_EEPROM_SIZE 0x100
@ -488,7 +488,7 @@ atl_dev_start(struct rte_eth_dev *dev)
/* set adapter started */
hw->adapter_stopped = 0;
if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
if (dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
PMD_INIT_LOG(ERR,
"Invalid link_speeds for port %u, fix speed not supported",
dev->data->port_id);
@ -655,18 +655,18 @@ atl_dev_set_link_up(struct rte_eth_dev *dev)
uint32_t link_speeds = dev->data->dev_conf.link_speeds;
uint32_t speed_mask = 0;
if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
speed_mask = hw->aq_nic_cfg->link_speed_msk;
} else {
if (link_speeds & ETH_LINK_SPEED_10G)
if (link_speeds & RTE_ETH_LINK_SPEED_10G)
speed_mask |= AQ_NIC_RATE_10G;
if (link_speeds & ETH_LINK_SPEED_5G)
if (link_speeds & RTE_ETH_LINK_SPEED_5G)
speed_mask |= AQ_NIC_RATE_5G;
if (link_speeds & ETH_LINK_SPEED_1G)
if (link_speeds & RTE_ETH_LINK_SPEED_1G)
speed_mask |= AQ_NIC_RATE_1G;
if (link_speeds & ETH_LINK_SPEED_2_5G)
if (link_speeds & RTE_ETH_LINK_SPEED_2_5G)
speed_mask |= AQ_NIC_RATE_2G5;
if (link_speeds & ETH_LINK_SPEED_100M)
if (link_speeds & RTE_ETH_LINK_SPEED_100M)
speed_mask |= AQ_NIC_RATE_100M;
}
@ -1127,10 +1127,10 @@ atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
dev_info->speed_capa |= ETH_LINK_SPEED_100M;
dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
dev_info->speed_capa |= ETH_LINK_SPEED_5G;
dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
dev_info->speed_capa |= RTE_ETH_LINK_SPEED_2_5G;
dev_info->speed_capa |= RTE_ETH_LINK_SPEED_5G;
return 0;
}
@ -1175,10 +1175,10 @@ atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
u32 fc = AQ_NIC_FC_OFF;
int err = 0;
link.link_status = ETH_LINK_DOWN;
link.link_status = RTE_ETH_LINK_DOWN;
link.link_speed = 0;
link.link_duplex = ETH_LINK_FULL_DUPLEX;
link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
link.link_autoneg = hw->is_autoneg ? RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
memset(&old, 0, sizeof(old));
/* load old link status */
@ -1198,8 +1198,8 @@ atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
return 0;
}
link.link_status = ETH_LINK_UP;
link.link_duplex = ETH_LINK_FULL_DUPLEX;
link.link_status = RTE_ETH_LINK_UP;
link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
link.link_speed = hw->aq_link_status.mbps;
rte_eth_linkstatus_set(dev, &link);
@ -1333,7 +1333,7 @@ atl_dev_link_status_print(struct rte_eth_dev *dev)
PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
(int)(dev->data->port_id),
(unsigned int)link.link_speed,
link.link_duplex == ETH_LINK_FULL_DUPLEX ?
link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
"full-duplex" : "half-duplex");
} else {
PMD_DRV_LOG(INFO, " Port %d: Link Down",
@ -1532,13 +1532,13 @@ atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
hw->aq_fw_ops->get_flow_control(hw, &fc);
if (fc == AQ_NIC_FC_OFF)
fc_conf->mode = RTE_FC_NONE;
fc_conf->mode = RTE_ETH_FC_NONE;
else if ((fc & AQ_NIC_FC_RX) && (fc & AQ_NIC_FC_TX))
fc_conf->mode = RTE_FC_FULL;
fc_conf->mode = RTE_ETH_FC_FULL;
else if (fc & AQ_NIC_FC_RX)
fc_conf->mode = RTE_FC_RX_PAUSE;
fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
else if (fc & AQ_NIC_FC_TX)
fc_conf->mode = RTE_FC_TX_PAUSE;
fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
return 0;
}
@ -1553,13 +1553,13 @@ atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
if (hw->aq_fw_ops->set_flow_control == NULL)
return -ENOTSUP;
if (fc_conf->mode == RTE_FC_NONE)
if (fc_conf->mode == RTE_ETH_FC_NONE)
hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
else if (fc_conf->mode == RTE_FC_RX_PAUSE)
else if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE)
hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
else if (fc_conf->mode == RTE_FC_TX_PAUSE)
else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE)
hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
else if (fc_conf->mode == RTE_FC_FULL)
else if (fc_conf->mode == RTE_ETH_FC_FULL)
hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
if (old_flow_control != hw->aq_nic_cfg->flow_control)
@ -1727,14 +1727,14 @@ atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
PMD_INIT_FUNC_TRACE();
ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
ret = atl_enable_vlan_filter(dev, mask & RTE_ETH_VLAN_FILTER_MASK);
cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
cfg->vlan_strip = !!(mask & RTE_ETH_VLAN_STRIP_MASK);
for (i = 0; i < dev->data->nb_rx_queues; i++)
hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
if (mask & ETH_VLAN_EXTEND_MASK)
if (mask & RTE_ETH_VLAN_EXTEND_MASK)
ret = -ENOTSUP;
return ret;
@ -1750,10 +1750,10 @@ atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
PMD_INIT_FUNC_TRACE();
switch (vlan_type) {
case ETH_VLAN_TYPE_INNER:
case RTE_ETH_VLAN_TYPE_INNER:
hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
break;
case ETH_VLAN_TYPE_OUTER:
case RTE_ETH_VLAN_TYPE_OUTER:
hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
break;
default:
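
The atlantic hunks above are mechanical renames: RTE_FC_* becomes RTE_ETH_FC_*, ETH_LINK_* becomes RTE_ETH_LINK_*, and ETH_VLAN_* becomes RTE_ETH_VLAN_*. For orientation, a minimal application-side sketch using the renamed flow-control values; this is illustrative only, not part of the patch, and assumes port_id is an already-started port:

#include <rte_ethdev.h>

/* Sketch: force full (Rx+Tx) pause-frame flow control on a port,
 * using the renamed RTE_ETH_FC_* enum values. */
static int
set_full_flow_ctrl(uint16_t port_id)
{
    struct rte_eth_fc_conf fc_conf;
    int ret;

    ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
    if (ret != 0)
        return ret;

    fc_conf.mode = RTE_ETH_FC_FULL; /* was RTE_FC_FULL */
    return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}

Drivers such as atl_flow_ctrl_set() above then map the enum onto their hardware-specific flags (AQ_NIC_FC_RX/AQ_NIC_FC_TX here).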

@ -11,15 +11,15 @@
#include "hw_atl/hw_atl_utils.h"
#define ATL_RSS_OFFLOAD_ALL ( \
ETH_RSS_IPV4 | \
ETH_RSS_NONFRAG_IPV4_TCP | \
ETH_RSS_NONFRAG_IPV4_UDP | \
ETH_RSS_IPV6 | \
ETH_RSS_NONFRAG_IPV6_TCP | \
ETH_RSS_NONFRAG_IPV6_UDP | \
ETH_RSS_IPV6_EX | \
ETH_RSS_IPV6_TCP_EX | \
ETH_RSS_IPV6_UDP_EX)
RTE_ETH_RSS_IPV4 | \
RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
RTE_ETH_RSS_IPV6 | \
RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
RTE_ETH_RSS_IPV6_EX | \
RTE_ETH_RSS_IPV6_TCP_EX | \
RTE_ETH_RSS_IPV6_UDP_EX)
#define ATL_DEV_PRIVATE_TO_HW(adapter) \
(&((struct atl_adapter *)adapter)->hw)
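
ATL_RSS_OFFLOAD_ALL is what the driver reports in dev_info->flow_type_rss_offloads (see the atl_dev_info_get hunk above), and an application must keep its requested rss_hf within that set. A hedged application-side sketch with the renamed flags:

#include <rte_ethdev.h>

/* Sketch: restrict RSS hashing to TCP over IPv4/IPv6 at runtime.
 * rss_key = NULL keeps the PMD's current hash key. */
static int
set_tcp_rss(uint16_t port_id)
{
    struct rte_eth_rss_conf rss_conf = {
        .rss_key = NULL,
        .rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP |
                  RTE_ETH_RSS_NONFRAG_IPV6_TCP,
    };

    return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}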

@ -145,10 +145,10 @@ atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
rxq->l3_csum_enabled = dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_IPV4_CKSUM;
RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
rxq->l4_csum_enabled = dev->data->dev_conf.rxmode.offloads &
(DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM);
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
(RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM);
if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
PMD_DRV_LOG(ERR, "PMD does not support KEEP_CRC offload");
/* allocate memory for the software ring */
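
The atl_rx_queue_setup() hunk latches per-queue L3/L4 checksum enables from the configure-time Rx offload bits. On the application side those bits are requested roughly like this (sketch, not part of the patch; RTE_ETH_RX_OFFLOAD_CHECKSUM is shorthand for the IPv4/UDP/TCP trio):

/* Sketch: request Rx L3/L4 checksum validation at configure time. */
struct rte_eth_conf conf = {
    .rxmode = {
        .offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
                    RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
                    RTE_ETH_RX_OFFLOAD_TCP_CKSUM,
    },
};
/* then: rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf); */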

@ -1998,9 +1998,9 @@ avp_dev_configure(struct rte_eth_dev *eth_dev)
/* Setup required number of queues */
_avp_set_queue_counts(eth_dev);
mask = (ETH_VLAN_STRIP_MASK |
ETH_VLAN_FILTER_MASK |
ETH_VLAN_EXTEND_MASK);
mask = (RTE_ETH_VLAN_STRIP_MASK |
RTE_ETH_VLAN_FILTER_MASK |
RTE_ETH_VLAN_EXTEND_MASK);
ret = avp_vlan_offload_set(eth_dev, mask);
if (ret < 0) {
PMD_DRV_LOG(ERR, "VLAN offload set failed by host, ret=%d\n",
@ -2140,8 +2140,8 @@ avp_dev_link_update(struct rte_eth_dev *eth_dev,
struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
struct rte_eth_link *link = &eth_dev->data->dev_link;
link->link_speed = ETH_SPEED_NUM_10G;
link->link_duplex = ETH_LINK_FULL_DUPLEX;
link->link_speed = RTE_ETH_SPEED_NUM_10G;
link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
link->link_status = !!(avp->flags & AVP_F_LINKUP);
return -1;
@ -2191,8 +2191,8 @@ avp_dev_info_get(struct rte_eth_dev *eth_dev,
dev_info->max_rx_pktlen = avp->max_rx_pkt_len;
dev_info->max_mac_addrs = AVP_MAX_MAC_ADDRS;
if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
}
return 0;
@ -2205,9 +2205,9 @@ avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
uint64_t offloads = dev_conf->rxmode.offloads;
if (mask & ETH_VLAN_STRIP_MASK) {
if (mask & RTE_ETH_VLAN_STRIP_MASK) {
if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD;
else
avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD;
@ -2216,13 +2216,13 @@ avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
}
}
if (mask & ETH_VLAN_FILTER_MASK) {
if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
if (mask & RTE_ETH_VLAN_FILTER_MASK) {
if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n");
}
if (mask & ETH_VLAN_EXTEND_MASK) {
if (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n");
}
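
avp_vlan_offload_set() is the PMD half of rte_eth_dev_set_vlan_offload(): ethdev works out which VLAN settings changed and hands the PMD a mask built from the RTE_ETH_VLAN_*_MASK bits. The application half looks roughly like this (illustrative sketch):

#include <rte_ethdev.h>

/* Sketch: request VLAN stripping on, filter/extend off. ethdev
 * derives the RTE_ETH_VLAN_*_MASK delta for the PMD callback. */
static int
enable_vlan_strip_only(uint16_t port_id)
{
    return rte_eth_dev_set_vlan_offload(port_id,
            RTE_ETH_VLAN_STRIP_OFFLOAD);
}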

@ -840,11 +840,11 @@ static void axgbe_rss_options(struct axgbe_port *pdata)
pdata->rss_hf = rss_conf->rss_hf;
rss_hf = rss_conf->rss_hf;
if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
if (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6))
AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
if (rss_hf & (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP))
AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
if (rss_hf & (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP))
AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
}

@ -326,7 +326,7 @@ axgbe_dev_configure(struct rte_eth_dev *dev)
struct axgbe_port *pdata = dev->data->dev_private;
/* Checksum offload to hardware */
pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_CHECKSUM;
RTE_ETH_RX_OFFLOAD_CHECKSUM;
return 0;
}
@ -335,9 +335,9 @@ axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
{
struct axgbe_port *pdata = dev->data->dev_private;
if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
pdata->rss_enable = 1;
else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
else if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_NONE)
pdata->rss_enable = 0;
else
return -1;
@ -385,7 +385,7 @@ axgbe_dev_start(struct rte_eth_dev *dev)
rte_bit_relaxed_clear32(AXGBE_DOWN, &pdata->dev_state);
max_pkt_len = dev_data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
max_pkt_len > pdata->rx_buf_size)
dev_data->scattered_rx = 1;
@ -521,8 +521,8 @@ axgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
}
for (i = 0; i < reta_size; i++) {
idx = i / RTE_RETA_GROUP_SIZE;
shift = i % RTE_RETA_GROUP_SIZE;
idx = i / RTE_ETH_RETA_GROUP_SIZE;
shift = i % RTE_ETH_RETA_GROUP_SIZE;
if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
continue;
pdata->rss_table[i] = reta_conf[idx].reta[shift];
@ -552,8 +552,8 @@ axgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
}
for (i = 0; i < reta_size; i++) {
idx = i / RTE_RETA_GROUP_SIZE;
shift = i % RTE_RETA_GROUP_SIZE;
idx = i / RTE_ETH_RETA_GROUP_SIZE;
shift = i % RTE_ETH_RETA_GROUP_SIZE;
if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
continue;
reta_conf[idx].reta[shift] = pdata->rss_table[i];
@ -590,13 +590,13 @@ axgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
pdata->rss_hf = rss_conf->rss_hf & AXGBE_RSS_OFFLOAD;
if (pdata->rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
if (pdata->rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6))
AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
if (pdata->rss_hf &
(ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
(RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP))
AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
if (pdata->rss_hf &
(ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
(RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP))
AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
/* Set the RSS options */
@ -765,7 +765,7 @@ axgbe_dev_link_update(struct rte_eth_dev *dev,
link.link_status = pdata->phy_link;
link.link_speed = pdata->phy_speed;
link.link_autoneg = !(dev->data->dev_conf.link_speeds &
ETH_LINK_SPEED_FIXED);
RTE_ETH_LINK_SPEED_FIXED);
ret = rte_eth_linkstatus_set(dev, &link);
if (ret == -1)
PMD_DRV_LOG(ERR, "No change in link status\n");
@ -1208,24 +1208,24 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
dev_info->max_mac_addrs = pdata->hw_feat.addn_mac + 1;
dev_info->max_hash_mac_addrs = pdata->hw_feat.hash_table_size;
dev_info->speed_capa = ETH_LINK_SPEED_10G;
dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_VLAN_EXTEND |
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_KEEP_CRC;
RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
RTE_ETH_RX_OFFLOAD_SCATTER |
RTE_ETH_RX_OFFLOAD_KEEP_CRC;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM;
RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
if (pdata->hw_feat.rss) {
dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
@ -1262,13 +1262,13 @@ axgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
fc.autoneg = pdata->pause_autoneg;
if (pdata->rx_pause && pdata->tx_pause)
fc.mode = RTE_FC_FULL;
fc.mode = RTE_ETH_FC_FULL;
else if (pdata->rx_pause)
fc.mode = RTE_FC_RX_PAUSE;
fc.mode = RTE_ETH_FC_RX_PAUSE;
else if (pdata->tx_pause)
fc.mode = RTE_FC_TX_PAUSE;
fc.mode = RTE_ETH_FC_TX_PAUSE;
else
fc.mode = RTE_FC_NONE;
fc.mode = RTE_ETH_FC_NONE;
fc_conf->high_water = (1024 + (fc.low_water[0] << 9)) / 1024;
fc_conf->low_water = (1024 + (fc.high_water[0] << 9)) / 1024;
@ -1298,13 +1298,13 @@ axgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
AXGMAC_IOWRITE(pdata, reg, reg_val);
fc.mode = fc_conf->mode;
if (fc.mode == RTE_FC_FULL) {
if (fc.mode == RTE_ETH_FC_FULL) {
pdata->tx_pause = 1;
pdata->rx_pause = 1;
} else if (fc.mode == RTE_FC_RX_PAUSE) {
} else if (fc.mode == RTE_ETH_FC_RX_PAUSE) {
pdata->tx_pause = 0;
pdata->rx_pause = 1;
} else if (fc.mode == RTE_FC_TX_PAUSE) {
} else if (fc.mode == RTE_ETH_FC_TX_PAUSE) {
pdata->tx_pause = 1;
pdata->rx_pause = 0;
} else {
@ -1386,15 +1386,15 @@ axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
fc.mode = pfc_conf->fc.mode;
if (fc.mode == RTE_FC_FULL) {
if (fc.mode == RTE_ETH_FC_FULL) {
pdata->tx_pause = 1;
pdata->rx_pause = 1;
AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
} else if (fc.mode == RTE_FC_RX_PAUSE) {
} else if (fc.mode == RTE_ETH_FC_RX_PAUSE) {
pdata->tx_pause = 0;
pdata->rx_pause = 1;
AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
} else if (fc.mode == RTE_FC_TX_PAUSE) {
} else if (fc.mode == RTE_ETH_FC_TX_PAUSE) {
pdata->tx_pause = 1;
pdata->rx_pause = 0;
AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
@ -1830,8 +1830,8 @@ axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
PMD_DRV_LOG(DEBUG, "EDVLP: qinq = 0x%x\n", qinq);
switch (vlan_type) {
case ETH_VLAN_TYPE_INNER:
PMD_DRV_LOG(DEBUG, "ETH_VLAN_TYPE_INNER\n");
case RTE_ETH_VLAN_TYPE_INNER:
PMD_DRV_LOG(DEBUG, "RTE_ETH_VLAN_TYPE_INNER\n");
if (qinq) {
if (tpid != 0x8100 && tpid != 0x88a8)
PMD_DRV_LOG(ERR,
@ -1848,8 +1848,8 @@ axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
"Inner type not supported in single tag\n");
}
break;
case ETH_VLAN_TYPE_OUTER:
PMD_DRV_LOG(DEBUG, "ETH_VLAN_TYPE_OUTER\n");
case RTE_ETH_VLAN_TYPE_OUTER:
PMD_DRV_LOG(DEBUG, "RTE_ETH_VLAN_TYPE_OUTER\n");
if (qinq) {
PMD_DRV_LOG(DEBUG, "double tagging is enabled\n");
/*Enable outer VLAN tag*/
@ -1866,11 +1866,11 @@ axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
"tag supported 0x8100/0x88A8\n");
}
break;
case ETH_VLAN_TYPE_MAX:
PMD_DRV_LOG(ERR, "ETH_VLAN_TYPE_MAX\n");
case RTE_ETH_VLAN_TYPE_MAX:
PMD_DRV_LOG(ERR, "RTE_ETH_VLAN_TYPE_MAX\n");
break;
case ETH_VLAN_TYPE_UNKNOWN:
PMD_DRV_LOG(ERR, "ETH_VLAN_TYPE_UNKNOWN\n");
case RTE_ETH_VLAN_TYPE_UNKNOWN:
PMD_DRV_LOG(ERR, "RTE_ETH_VLAN_TYPE_UNKNOWN\n");
break;
}
return 0;
@ -1904,8 +1904,8 @@ axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);
if (mask & ETH_VLAN_STRIP_MASK) {
if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
if (mask & RTE_ETH_VLAN_STRIP_MASK) {
if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
PMD_DRV_LOG(DEBUG, "Strip ON for device = %s\n",
pdata->eth_dev->device->name);
pdata->hw_if.enable_rx_vlan_stripping(pdata);
@ -1915,8 +1915,8 @@ axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
pdata->hw_if.disable_rx_vlan_stripping(pdata);
}
}
if (mask & ETH_VLAN_FILTER_MASK) {
if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
if (mask & RTE_ETH_VLAN_FILTER_MASK) {
if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
PMD_DRV_LOG(DEBUG, "Filter ON for device = %s\n",
pdata->eth_dev->device->name);
pdata->hw_if.enable_rx_vlan_filtering(pdata);
@ -1926,14 +1926,14 @@ axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
pdata->hw_if.disable_rx_vlan_filtering(pdata);
}
}
if (mask & ETH_VLAN_EXTEND_MASK) {
if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) {
PMD_DRV_LOG(DEBUG, "enabling vlan extended mode\n");
axgbe_vlan_extend_enable(pdata);
/* Set global registers with default ethertype*/
axgbe_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
axgbe_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
RTE_ETHER_TYPE_VLAN);
axgbe_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
axgbe_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_INNER,
RTE_ETHER_TYPE_VLAN);
} else {
PMD_DRV_LOG(DEBUG, "disabling vlan extended mode\n");
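
The RETA hunks above only rename the group-size constant: a redirection table travels as an array of rte_eth_rss_reta_entry64 groups of RTE_ETH_RETA_GROUP_SIZE (64) entries each, addressed by the idx/shift pair. A self-contained application sketch (illustrative; assumes reta_size was read from dev_info, is at most 512, and nb_queues is nonzero):

#include <string.h>
#include <rte_ethdev.h>

/* Sketch: spread a RETA of reta_size entries round-robin over
 * nb_queues Rx queues, then program it into the port. */
static int
reta_round_robin(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
    struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_512 /
                                              RTE_ETH_RETA_GROUP_SIZE];
    uint16_t i, idx, shift;

    memset(reta_conf, 0, sizeof(reta_conf));
    for (i = 0; i < reta_size; i++) {
        idx = i / RTE_ETH_RETA_GROUP_SIZE;   /* which 64-entry group */
        shift = i % RTE_ETH_RETA_GROUP_SIZE; /* entry within the group */
        reta_conf[idx].mask |= 1ULL << shift;
        reta_conf[idx].reta[shift] = i % nb_queues;
    }
    return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}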

@ -97,12 +97,12 @@
/* Receive Side Scaling */
#define AXGBE_RSS_OFFLOAD ( \
ETH_RSS_IPV4 | \
ETH_RSS_NONFRAG_IPV4_TCP | \
ETH_RSS_NONFRAG_IPV4_UDP | \
ETH_RSS_IPV6 | \
ETH_RSS_NONFRAG_IPV6_TCP | \
ETH_RSS_NONFRAG_IPV6_UDP)
RTE_ETH_RSS_IPV4 | \
RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
RTE_ETH_RSS_IPV6 | \
RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
RTE_ETH_RSS_NONFRAG_IPV6_UDP)
#define AXGBE_RSS_HASH_KEY_SIZE 40
#define AXGBE_RSS_MAX_TABLE_SIZE 256
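
The old names remain available through backward-compatibility aliases in the ethdev header (outside this excerpt), so renamed driver headers like this one do not break existing applications. The aliases take roughly this one-line shape (illustrative, not the exact ethdev hunk):

/* Backward-compatibility aliases; removable in the next LTS. */
#define ETH_RSS_IPV4                  RTE_ETH_RSS_IPV4
#define ETH_VLAN_STRIP_MASK           RTE_ETH_VLAN_STRIP_MASK
#define DEV_RX_OFFLOAD_VLAN_STRIP     RTE_ETH_RX_OFFLOAD_VLAN_STRIP
#define DEV_TX_OFFLOAD_TCP_TSO        RTE_ETH_TX_OFFLOAD_TCP_TSO
#define RTE_FC_FULL                   RTE_ETH_FC_FULL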

@ -597,7 +597,7 @@ static void axgbe_an73_state_machine(struct axgbe_port *pdata)
pdata->an_int = 0;
axgbe_an73_clear_interrupts(pdata);
pdata->eth_dev->data->dev_link.link_status =
ETH_LINK_DOWN;
RTE_ETH_LINK_DOWN;
} else if (pdata->an_state == AXGBE_AN_ERROR) {
PMD_DRV_LOG(ERR, "error during auto-negotiation, state=%u\n",
cur_state);

@ -75,7 +75,7 @@ int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
(DMA_CH_INC * rxq->queue_id));
rxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs +
DMA_CH_RDTR_LO);
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
@ -286,7 +286,7 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
mbuf->vlan_tci =
AXGMAC_GET_BITS_LE(desc->write.desc0,
RX_NORMAL_DESC0, OVT);
if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
else
mbuf->ol_flags &= ~PKT_RX_VLAN_STRIPPED;
@ -430,7 +430,7 @@ uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
mbuf->vlan_tci =
AXGMAC_GET_BITS_LE(desc->write.desc0,
RX_NORMAL_DESC0, OVT);
if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
else
mbuf->ol_flags &= ~PKT_RX_VLAN_STRIPPED;

@ -94,14 +94,14 @@ bnx2x_link_update(struct rte_eth_dev *dev)
link.link_speed = sc->link_vars.line_speed;
switch (sc->link_vars.duplex) {
case DUPLEX_FULL:
link.link_duplex = ETH_LINK_FULL_DUPLEX;
link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
case DUPLEX_HALF:
link.link_duplex = ETH_LINK_HALF_DUPLEX;
link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
break;
}
link.link_autoneg = !(dev->data->dev_conf.link_speeds &
ETH_LINK_SPEED_FIXED);
RTE_ETH_LINK_SPEED_FIXED);
link.link_status = sc->link_vars.link_up;
return rte_eth_linkstatus_set(dev, &link);
@ -408,7 +408,7 @@ bnx2xvf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_comple
if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
PMD_DRV_LOG(ERR, sc, "PF indicated channel is down."
"VF device is no longer operational");
dev->data->dev_link.link_status = ETH_LINK_DOWN;
dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
}
return ret;
@ -534,7 +534,7 @@ bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE;
dev_info->max_rx_pktlen = BNX2X_MAX_RX_PKT_LEN;
dev_info->max_mac_addrs = BNX2X_MAX_MAC_ADDRS;
dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G;
dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_20G;
dev_info->rx_desc_lim.nb_max = MAX_RX_AVAIL;
dev_info->rx_desc_lim.nb_min = MIN_RX_SIZE_NONTPA;
@ -669,7 +669,7 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
bnx2x_load_firmware(sc);
assert(sc->firmware);
if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
sc->udp_rss = 1;
sc->rx_budget = BNX2X_RX_BUDGET;
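
The bnx2x hunks rename the link bookkeeping that rte_eth_linkstatus_set() publishes. Reading it back from an application uses the same renamed constants (sketch, not part of the patch):

#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: print the current link state of a port. */
static void
print_link(uint16_t port_id)
{
    struct rte_eth_link link;

    if (rte_eth_link_get_nowait(port_id, &link) != 0)
        return;
    printf("port %u: %s, %u Mbps, %s-duplex\n", port_id,
           link.link_status == RTE_ETH_LINK_UP ? "up" : "down",
           link.link_speed,
           link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
           "full" : "half");
}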

@ -569,37 +569,37 @@ struct bnxt_rep_info {
#define BNXT_FW_STATUS_SHUTDOWN 0x100000
#define BNXT_ETH_RSS_SUPPORT ( \
ETH_RSS_IPV4 | \
ETH_RSS_NONFRAG_IPV4_TCP | \
ETH_RSS_NONFRAG_IPV4_UDP | \
ETH_RSS_IPV6 | \
ETH_RSS_NONFRAG_IPV6_TCP | \
ETH_RSS_NONFRAG_IPV6_UDP | \
ETH_RSS_LEVEL_MASK)
RTE_ETH_RSS_IPV4 | \
RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
RTE_ETH_RSS_IPV6 | \
RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
RTE_ETH_RSS_LEVEL_MASK)
#define BNXT_DEV_TX_OFFLOAD_SUPPORT (DEV_TX_OFFLOAD_IPV4_CKSUM | \
DEV_TX_OFFLOAD_TCP_CKSUM | \
DEV_TX_OFFLOAD_UDP_CKSUM | \
DEV_TX_OFFLOAD_TCP_TSO | \
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
DEV_TX_OFFLOAD_GRE_TNL_TSO | \
DEV_TX_OFFLOAD_IPIP_TNL_TSO | \
DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
DEV_TX_OFFLOAD_QINQ_INSERT | \
DEV_TX_OFFLOAD_MULTI_SEGS)
#define BNXT_DEV_TX_OFFLOAD_SUPPORT (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
RTE_ETH_TX_OFFLOAD_TCP_TSO | \
RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \
RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \
RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | \
RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | \
RTE_ETH_TX_OFFLOAD_QINQ_INSERT | \
RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
#define BNXT_DEV_RX_OFFLOAD_SUPPORT (DEV_RX_OFFLOAD_VLAN_FILTER | \
DEV_RX_OFFLOAD_IPV4_CKSUM | \
DEV_RX_OFFLOAD_UDP_CKSUM | \
DEV_RX_OFFLOAD_TCP_CKSUM | \
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | \
DEV_RX_OFFLOAD_KEEP_CRC | \
DEV_RX_OFFLOAD_VLAN_EXTEND | \
DEV_RX_OFFLOAD_TCP_LRO | \
DEV_RX_OFFLOAD_SCATTER | \
DEV_RX_OFFLOAD_RSS_HASH)
#define BNXT_DEV_RX_OFFLOAD_SUPPORT (RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
RTE_ETH_RX_OFFLOAD_TCP_CKSUM | \
RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | \
RTE_ETH_RX_OFFLOAD_KEEP_CRC | \
RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
RTE_ETH_RX_OFFLOAD_TCP_LRO | \
RTE_ETH_RX_OFFLOAD_SCATTER | \
RTE_ETH_RX_OFFLOAD_RSS_HASH)
#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)

@ -426,7 +426,7 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
goto err_out;
/* Alloc RSS context only if RSS mode is enabled */
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
int j, nr_ctxs = bnxt_rss_ctxts(bp);
/* RSS table size in Thor is 512.
@ -458,7 +458,7 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
* setting is not available at this time, it will not be
* configured correctly in the CFA.
*/
if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
vnic->vlan_strip = true;
else
vnic->vlan_strip = false;
@ -493,7 +493,7 @@ static int bnxt_setup_one_vnic(struct bnxt *bp, uint16_t vnic_id)
bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
rc = bnxt_hwrm_vnic_tpa_cfg(bp, vnic,
(rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) ?
(rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) ?
true : false);
if (rc)
goto err_out;
@ -923,35 +923,35 @@ uint32_t bnxt_get_speed_capabilities(struct bnxt *bp)
link_speed = bp->link_info->support_pam4_speeds;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB)
speed_capa |= ETH_LINK_SPEED_100M;
speed_capa |= RTE_ETH_LINK_SPEED_100M;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD)
speed_capa |= ETH_LINK_SPEED_100M_HD;
speed_capa |= RTE_ETH_LINK_SPEED_100M_HD;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB)
speed_capa |= ETH_LINK_SPEED_1G;
speed_capa |= RTE_ETH_LINK_SPEED_1G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB)
speed_capa |= ETH_LINK_SPEED_2_5G;
speed_capa |= RTE_ETH_LINK_SPEED_2_5G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB)
speed_capa |= ETH_LINK_SPEED_10G;
speed_capa |= RTE_ETH_LINK_SPEED_10G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB)
speed_capa |= ETH_LINK_SPEED_20G;
speed_capa |= RTE_ETH_LINK_SPEED_20G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB)
speed_capa |= ETH_LINK_SPEED_25G;
speed_capa |= RTE_ETH_LINK_SPEED_25G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB)
speed_capa |= ETH_LINK_SPEED_40G;
speed_capa |= RTE_ETH_LINK_SPEED_40G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB)
speed_capa |= ETH_LINK_SPEED_50G;
speed_capa |= RTE_ETH_LINK_SPEED_50G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB)
speed_capa |= ETH_LINK_SPEED_100G;
speed_capa |= RTE_ETH_LINK_SPEED_100G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G)
speed_capa |= ETH_LINK_SPEED_50G;
speed_capa |= RTE_ETH_LINK_SPEED_50G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G)
speed_capa |= ETH_LINK_SPEED_100G;
speed_capa |= RTE_ETH_LINK_SPEED_100G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_200G)
speed_capa |= ETH_LINK_SPEED_200G;
speed_capa |= RTE_ETH_LINK_SPEED_200G;
if (bp->link_info->auto_mode ==
HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE)
speed_capa |= ETH_LINK_SPEED_FIXED;
speed_capa |= RTE_ETH_LINK_SPEED_FIXED;
return speed_capa;
}
@ -995,14 +995,14 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
if (bp->vnic_cap_flags & BNXT_VNIC_CAP_VLAN_RX_STRIP)
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_STRIP;
dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT |
dev_info->tx_queue_offload_capa;
if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_VLAN_INSERT;
dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
dev_info->speed_capa = bnxt_get_speed_capabilities(bp);
@ -1049,8 +1049,8 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
*/
/* VMDq resources */
vpool = 64; /* ETH_64_POOLS */
vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
vpool = 64; /* RTE_ETH_64_POOLS */
vrxq = 128; /* RTE_ETH_VMDQ_DCB_NUM_QUEUES */
for (i = 0; i < 4; vpool >>= 1, i++) {
if (max_vnics > vpool) {
for (j = 0; j < 5; vrxq >>= 1, j++) {
@ -1145,15 +1145,15 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
(uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps)
goto resource_error;
if (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
if (!(eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) &&
bp->max_vnics < eth_dev->data->nb_rx_queues)
goto resource_error;
bp->rx_cp_nr_rings = bp->rx_nr_rings;
bp->tx_cp_nr_rings = bp->tx_nr_rings;
if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
rx_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
rx_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
eth_dev->data->dev_conf.rxmode.offloads = rx_offloads;
bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
@ -1182,7 +1182,7 @@ void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
eth_dev->data->port_id,
(uint32_t)link->link_speed,
(link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
(link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
PMD_DRV_LOG(INFO, "Port %d Link Down\n",
@ -1199,10 +1199,10 @@ static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
uint16_t buf_size;
int i;
if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER)
if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
return 1;
if (eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO)
if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
return 1;
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
@ -1247,15 +1247,15 @@ bnxt_receive_function(struct rte_eth_dev *eth_dev)
* a limited subset have been enabled.
*/
if (eth_dev->data->dev_conf.rxmode.offloads &
~(DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
DEV_RX_OFFLOAD_RSS_HASH |
DEV_RX_OFFLOAD_VLAN_FILTER))
~(RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
RTE_ETH_RX_OFFLOAD_KEEP_CRC |
RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
RTE_ETH_RX_OFFLOAD_RSS_HASH |
RTE_ETH_RX_OFFLOAD_VLAN_FILTER))
goto use_scalar_rx;
#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
@ -1307,7 +1307,7 @@ bnxt_transmit_function(struct rte_eth_dev *eth_dev)
* or tx offloads.
*/
if (eth_dev->data->scattered_rx ||
(offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) ||
(offloads & ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) ||
BNXT_TRUFLOW_EN(bp))
goto use_scalar_tx;
@ -1608,10 +1608,10 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
bnxt_link_update_op(eth_dev, 1);
if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
vlan_mask |= ETH_VLAN_FILTER_MASK;
if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
vlan_mask |= ETH_VLAN_STRIP_MASK;
if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
vlan_mask |= RTE_ETH_VLAN_FILTER_MASK;
if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
vlan_mask |= RTE_ETH_VLAN_STRIP_MASK;
rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
if (rc)
goto error;
@ -1833,8 +1833,8 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
/* Retrieve link info from hardware */
rc = bnxt_get_hwrm_link_config(bp, &new);
if (rc) {
new.link_speed = ETH_LINK_SPEED_100M;
new.link_duplex = ETH_LINK_FULL_DUPLEX;
new.link_speed = RTE_ETH_LINK_SPEED_100M;
new.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
PMD_DRV_LOG(ERR,
"Failed to retrieve link rc = 0x%x!\n", rc);
goto out;
@ -2028,7 +2028,7 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
if (!vnic->rss_table)
return -EINVAL;
if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
if (!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG))
return -EINVAL;
if (reta_size != tbl_size) {
@ -2041,8 +2041,8 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
for (i = 0; i < reta_size; i++) {
struct bnxt_rx_queue *rxq;
idx = i / RTE_RETA_GROUP_SIZE;
sft = i % RTE_RETA_GROUP_SIZE;
idx = i / RTE_ETH_RETA_GROUP_SIZE;
sft = i % RTE_ETH_RETA_GROUP_SIZE;
if (!(reta_conf[idx].mask & (1ULL << sft)))
continue;
@ -2095,8 +2095,8 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
}
for (idx = 0, i = 0; i < reta_size; i++) {
idx = i / RTE_RETA_GROUP_SIZE;
sft = i % RTE_RETA_GROUP_SIZE;
idx = i / RTE_ETH_RETA_GROUP_SIZE;
sft = i % RTE_ETH_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << sft)) {
uint16_t qid;
@ -2134,7 +2134,7 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
* If RSS enablement were different than dev_configure,
* then return -EINVAL
*/
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
if (!rss_conf->rss_hf)
PMD_DRV_LOG(ERR, "Hash type NONE\n");
} else {
@ -2152,7 +2152,7 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf);
vnic->hash_mode =
bnxt_rte_to_hwrm_hash_level(bp, rss_conf->rss_hf,
ETH_RSS_LEVEL(rss_conf->rss_hf));
RTE_ETH_RSS_LEVEL(rss_conf->rss_hf));
/*
* If hashkey is not specified, use the previously configured
@ -2197,30 +2197,30 @@ static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
hash_types = vnic->hash_type;
rss_conf->rss_hf = 0;
if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
rss_conf->rss_hf |= ETH_RSS_IPV4;
rss_conf->rss_hf |= RTE_ETH_RSS_IPV4;
hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
}
if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
hash_types &=
~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
}
if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
hash_types &=
~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
}
if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
rss_conf->rss_hf |= ETH_RSS_IPV6;
rss_conf->rss_hf |= RTE_ETH_RSS_IPV6;
hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
}
if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
hash_types &=
~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
}
if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
hash_types &=
~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
}
@ -2260,17 +2260,17 @@ static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
fc_conf->autoneg = 1;
switch (bp->link_info->pause) {
case 0:
fc_conf->mode = RTE_FC_NONE;
fc_conf->mode = RTE_ETH_FC_NONE;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
fc_conf->mode = RTE_FC_TX_PAUSE;
fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
fc_conf->mode = RTE_FC_RX_PAUSE;
fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
break;
case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
fc_conf->mode = RTE_FC_FULL;
fc_conf->mode = RTE_ETH_FC_FULL;
break;
}
return 0;
@ -2293,11 +2293,11 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
}
switch (fc_conf->mode) {
case RTE_FC_NONE:
case RTE_ETH_FC_NONE:
bp->link_info->auto_pause = 0;
bp->link_info->force_pause = 0;
break;
case RTE_FC_RX_PAUSE:
case RTE_ETH_FC_RX_PAUSE:
if (fc_conf->autoneg) {
bp->link_info->auto_pause =
HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
@ -2308,7 +2308,7 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
}
break;
case RTE_FC_TX_PAUSE:
case RTE_ETH_FC_TX_PAUSE:
if (fc_conf->autoneg) {
bp->link_info->auto_pause =
HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
@ -2319,7 +2319,7 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
}
break;
case RTE_FC_FULL:
case RTE_ETH_FC_FULL:
if (fc_conf->autoneg) {
bp->link_info->auto_pause =
HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
@ -2350,7 +2350,7 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
return rc;
switch (udp_tunnel->prot_type) {
case RTE_TUNNEL_TYPE_VXLAN:
case RTE_ETH_TUNNEL_TYPE_VXLAN:
if (bp->vxlan_port_cnt) {
PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
udp_tunnel->udp_port);
@ -2364,7 +2364,7 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
tunnel_type =
HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
break;
case RTE_TUNNEL_TYPE_GENEVE:
case RTE_ETH_TUNNEL_TYPE_GENEVE:
if (bp->geneve_port_cnt) {
PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
udp_tunnel->udp_port);
@ -2413,7 +2413,7 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
return rc;
switch (udp_tunnel->prot_type) {
case RTE_TUNNEL_TYPE_VXLAN:
case RTE_ETH_TUNNEL_TYPE_VXLAN:
if (!bp->vxlan_port_cnt) {
PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
return -EINVAL;
@ -2430,7 +2430,7 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
port = bp->vxlan_fw_dst_port_id;
break;
case RTE_TUNNEL_TYPE_GENEVE:
case RTE_ETH_TUNNEL_TYPE_GENEVE:
if (!bp->geneve_port_cnt) {
PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
return -EINVAL;
@ -2608,7 +2608,7 @@ bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads)
int rc;
vnic = BNXT_GET_DEFAULT_VNIC(bp);
if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
if (!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
/* Remove any VLAN filters programmed */
for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
bnxt_del_vlan_filter(bp, i);
@ -2628,7 +2628,7 @@ bnxt_config_vlan_hw_filter(struct bnxt *bp, uint64_t rx_offloads)
bnxt_add_vlan_filter(bp, 0);
}
PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
!!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER));
!!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER));
return 0;
}
@ -2641,7 +2641,7 @@ static int bnxt_free_one_vnic(struct bnxt *bp, uint16_t vnic_id)
/* Destroy vnic filters and vnic */
if (bp->eth_dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_VLAN_FILTER) {
RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
for (i = 0; i < RTE_ETHER_MAX_VLAN_ID; i++)
bnxt_del_vlan_filter(bp, i);
}
@ -2680,7 +2680,7 @@ bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads)
return rc;
if (bp->eth_dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_VLAN_FILTER) {
RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
rc = bnxt_add_vlan_filter(bp, 0);
if (rc)
return rc;
@ -2698,7 +2698,7 @@ bnxt_config_vlan_hw_stripping(struct bnxt *bp, uint64_t rx_offloads)
return rc;
PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
!!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP));
!!(rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP));
return rc;
}
@ -2718,22 +2718,22 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
if (!dev->data->dev_started)
return 0;
if (mask & ETH_VLAN_FILTER_MASK) {
if (mask & RTE_ETH_VLAN_FILTER_MASK) {
/* Enable or disable VLAN filtering */
rc = bnxt_config_vlan_hw_filter(bp, rx_offloads);
if (rc)
return rc;
}
if (mask & ETH_VLAN_STRIP_MASK) {
if (mask & RTE_ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
rc = bnxt_config_vlan_hw_stripping(bp, rx_offloads);
if (rc)
return rc;
}
if (mask & ETH_VLAN_EXTEND_MASK) {
if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
PMD_DRV_LOG(DEBUG, "Extend VLAN supported\n");
else
PMD_DRV_LOG(INFO, "Extend VLAN unsupported\n");
@ -2748,10 +2748,10 @@ bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
{
struct bnxt *bp = dev->data->dev_private;
int qinq = dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_VLAN_EXTEND;
RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
if (vlan_type != ETH_VLAN_TYPE_INNER &&
vlan_type != ETH_VLAN_TYPE_OUTER) {
if (vlan_type != RTE_ETH_VLAN_TYPE_INNER &&
vlan_type != RTE_ETH_VLAN_TYPE_OUTER) {
PMD_DRV_LOG(ERR,
"Unsupported vlan type.");
return -EINVAL;
@ -2763,7 +2763,7 @@ bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
return -EINVAL;
}
if (vlan_type == ETH_VLAN_TYPE_OUTER) {
if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
switch (tpid) {
case RTE_ETHER_TYPE_QINQ:
bp->outer_tpid_bd =
@ -2791,7 +2791,7 @@ bnxt_vlan_tpid_set_op(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
}
bp->outer_tpid_bd |= tpid;
PMD_DRV_LOG(INFO, "outer_tpid_bd = %x\n", bp->outer_tpid_bd);
} else if (vlan_type == ETH_VLAN_TYPE_INNER) {
} else if (vlan_type == RTE_ETH_VLAN_TYPE_INNER) {
PMD_DRV_LOG(ERR,
"Can accelerate only outer vlan in QinQ\n");
return -EINVAL;
@ -2831,7 +2831,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev,
bnxt_del_dflt_mac_filter(bp, vnic);
memcpy(bp->mac_addr, addr, RTE_ETHER_ADDR_LEN);
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
/* This filter will allow only untagged packets */
rc = bnxt_add_vlan_filter(bp, 0);
} else {
@ -6556,4 +6556,4 @@ bool is_bnxt_supported(struct rte_eth_dev *dev)
RTE_LOG_REGISTER_SUFFIX(bnxt_logtype_driver, driver, NOTICE);
RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");
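
bnxt_get_speed_capabilities() above assembles the dev_info->speed_capa word from firmware-reported speeds. Consuming it from an application is a plain bit test against the renamed constants (illustrative sketch):

#include <stdbool.h>
#include <rte_ethdev.h>

/* Sketch: does this port advertise 100G capability? */
static bool
supports_100g(uint16_t port_id)
{
    struct rte_eth_dev_info dev_info;

    if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
        return false;
    return (dev_info.speed_capa & RTE_ETH_LINK_SPEED_100G) != 0;
}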

@ -978,7 +978,7 @@ static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic,
}
}
if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
vnic->vlan_strip = true;
else
vnic->vlan_strip = false;
@ -1177,7 +1177,7 @@ bnxt_vnic_rss_cfg_update(struct bnxt *bp,
}
/* If RSS types is 0, use a best effort configuration */
types = rss->types ? rss->types : ETH_RSS_IPV4;
types = rss->types ? rss->types : RTE_ETH_RSS_IPV4;
hash_type = bnxt_rte_to_hwrm_hash_types(types);
@ -1322,7 +1322,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
rxq = bp->rx_queues[act_q->index];
if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) && rxq &&
if (!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) && rxq &&
vnic->fw_vnic_id != INVALID_HW_RING_ID)
goto use_vnic;

@ -628,7 +628,7 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
uint16_t j = dst_id - 1;
//TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
if ((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_FLAG) &&
conf->pool_map[j].pools & (1UL << j)) {
PMD_DRV_LOG(DEBUG,
"Add vlan %u to vmdq pool %u\n",
@ -2979,12 +2979,12 @@ static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
{
uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
if ((conf_link_speed & RTE_ETH_LINK_SPEED_FIXED) == RTE_ETH_LINK_SPEED_AUTONEG)
return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
switch (conf_link_speed) {
case ETH_LINK_SPEED_10M_HD:
case ETH_LINK_SPEED_100M_HD:
case RTE_ETH_LINK_SPEED_10M_HD:
case RTE_ETH_LINK_SPEED_100M_HD:
/* FALLTHROUGH */
return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
}
@ -3001,51 +3001,51 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed,
{
uint16_t eth_link_speed = 0;
if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
return ETH_LINK_SPEED_AUTONEG;
if (conf_link_speed == RTE_ETH_LINK_SPEED_AUTONEG)
return RTE_ETH_LINK_SPEED_AUTONEG;
switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
case ETH_LINK_SPEED_100M:
case ETH_LINK_SPEED_100M_HD:
switch (conf_link_speed & ~RTE_ETH_LINK_SPEED_FIXED) {
case RTE_ETH_LINK_SPEED_100M:
case RTE_ETH_LINK_SPEED_100M_HD:
/* FALLTHROUGH */
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
break;
case ETH_LINK_SPEED_1G:
case RTE_ETH_LINK_SPEED_1G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
break;
case ETH_LINK_SPEED_2_5G:
case RTE_ETH_LINK_SPEED_2_5G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
break;
case ETH_LINK_SPEED_10G:
case RTE_ETH_LINK_SPEED_10G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
break;
case ETH_LINK_SPEED_20G:
case RTE_ETH_LINK_SPEED_20G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
break;
case ETH_LINK_SPEED_25G:
case RTE_ETH_LINK_SPEED_25G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
break;
case ETH_LINK_SPEED_40G:
case RTE_ETH_LINK_SPEED_40G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
break;
case ETH_LINK_SPEED_50G:
case RTE_ETH_LINK_SPEED_50G:
eth_link_speed = pam4_link ?
HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB :
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
break;
case ETH_LINK_SPEED_100G:
case RTE_ETH_LINK_SPEED_100G:
eth_link_speed = pam4_link ?
HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB :
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
break;
case ETH_LINK_SPEED_200G:
case RTE_ETH_LINK_SPEED_200G:
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
break;
@ -3058,11 +3058,11 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed,
return eth_link_speed;
}
#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | \
ETH_LINK_SPEED_100G | ETH_LINK_SPEED_200G)
#define BNXT_SUPPORTED_SPEEDS (RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_100M_HD | \
RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G | \
RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_20G | RTE_ETH_LINK_SPEED_25G | \
RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_50G | \
RTE_ETH_LINK_SPEED_100G | RTE_ETH_LINK_SPEED_200G)
static int bnxt_validate_link_speed(struct bnxt *bp)
{
@ -3071,13 +3071,13 @@ static int bnxt_validate_link_speed(struct bnxt *bp)
uint32_t link_speed_capa;
uint32_t one_speed;
if (link_speed == ETH_LINK_SPEED_AUTONEG)
if (link_speed == RTE_ETH_LINK_SPEED_AUTONEG)
return 0;
link_speed_capa = bnxt_get_speed_capabilities(bp);
if (link_speed & ETH_LINK_SPEED_FIXED) {
one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
if (link_speed & RTE_ETH_LINK_SPEED_FIXED) {
one_speed = link_speed & ~RTE_ETH_LINK_SPEED_FIXED;
if (one_speed & (one_speed - 1)) {
PMD_DRV_LOG(ERR,
@ -3107,71 +3107,71 @@ bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
{
uint16_t ret = 0;
if (link_speed == ETH_LINK_SPEED_AUTONEG) {
if (link_speed == RTE_ETH_LINK_SPEED_AUTONEG) {
if (bp->link_info->support_speeds)
return bp->link_info->support_speeds;
link_speed = BNXT_SUPPORTED_SPEEDS;
}
if (link_speed & ETH_LINK_SPEED_100M)
if (link_speed & RTE_ETH_LINK_SPEED_100M)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
if (link_speed & ETH_LINK_SPEED_100M_HD)
if (link_speed & RTE_ETH_LINK_SPEED_100M_HD)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
if (link_speed & ETH_LINK_SPEED_1G)
if (link_speed & RTE_ETH_LINK_SPEED_1G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
if (link_speed & ETH_LINK_SPEED_2_5G)
if (link_speed & RTE_ETH_LINK_SPEED_2_5G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
if (link_speed & ETH_LINK_SPEED_10G)
if (link_speed & RTE_ETH_LINK_SPEED_10G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
if (link_speed & ETH_LINK_SPEED_20G)
if (link_speed & RTE_ETH_LINK_SPEED_20G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
if (link_speed & ETH_LINK_SPEED_25G)
if (link_speed & RTE_ETH_LINK_SPEED_25G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
if (link_speed & ETH_LINK_SPEED_40G)
if (link_speed & RTE_ETH_LINK_SPEED_40G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
if (link_speed & ETH_LINK_SPEED_50G)
if (link_speed & RTE_ETH_LINK_SPEED_50G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
if (link_speed & ETH_LINK_SPEED_100G)
if (link_speed & RTE_ETH_LINK_SPEED_100G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
if (link_speed & ETH_LINK_SPEED_200G)
if (link_speed & RTE_ETH_LINK_SPEED_200G)
ret |= HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
return ret;
}
static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
{
uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
uint32_t eth_link_speed = RTE_ETH_SPEED_NUM_NONE;
switch (hw_link_speed) {
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
eth_link_speed = ETH_SPEED_NUM_100M;
eth_link_speed = RTE_ETH_SPEED_NUM_100M;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
eth_link_speed = ETH_SPEED_NUM_1G;
eth_link_speed = RTE_ETH_SPEED_NUM_1G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
eth_link_speed = ETH_SPEED_NUM_2_5G;
eth_link_speed = RTE_ETH_SPEED_NUM_2_5G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
eth_link_speed = ETH_SPEED_NUM_10G;
eth_link_speed = RTE_ETH_SPEED_NUM_10G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
eth_link_speed = ETH_SPEED_NUM_20G;
eth_link_speed = RTE_ETH_SPEED_NUM_20G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
eth_link_speed = ETH_SPEED_NUM_25G;
eth_link_speed = RTE_ETH_SPEED_NUM_25G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
eth_link_speed = ETH_SPEED_NUM_40G;
eth_link_speed = RTE_ETH_SPEED_NUM_40G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
eth_link_speed = ETH_SPEED_NUM_50G;
eth_link_speed = RTE_ETH_SPEED_NUM_50G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
eth_link_speed = ETH_SPEED_NUM_100G;
eth_link_speed = RTE_ETH_SPEED_NUM_100G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
eth_link_speed = ETH_SPEED_NUM_200G;
eth_link_speed = RTE_ETH_SPEED_NUM_200G;
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
default:
@ -3184,16 +3184,16 @@ static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
{
uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
uint16_t eth_link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
switch (hw_link_duplex) {
case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
/* FALLTHROUGH */
eth_link_duplex = ETH_LINK_FULL_DUPLEX;
eth_link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
eth_link_duplex = ETH_LINK_HALF_DUPLEX;
eth_link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
break;
default:
PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
@ -3222,12 +3222,12 @@ int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
link->link_speed =
bnxt_parse_hw_link_speed(link_info->link_speed);
else
link->link_speed = ETH_SPEED_NUM_NONE;
link->link_speed = RTE_ETH_SPEED_NUM_NONE;
link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
link->link_status = link_info->link_up;
link->link_autoneg = link_info->auto_mode ==
HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
ETH_LINK_FIXED : ETH_LINK_AUTONEG;
RTE_ETH_LINK_FIXED : RTE_ETH_LINK_AUTONEG;
exit:
return rc;
}
@ -3253,7 +3253,7 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
if (BNXT_CHIP_P5(bp) &&
dev_conf->link_speeds == ETH_LINK_SPEED_40G) {
dev_conf->link_speeds == RTE_ETH_LINK_SPEED_40G) {
/* 40G is not supported as part of media auto detect.
* The speed should be forced and autoneg disabled
* to configure 40G speed.
@ -3344,7 +3344,7 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
HWRM_CHECK_RESULT();
bp->vlan = rte_le_to_cpu_16(resp->vlan) & ETH_VLAN_ID_MAX;
bp->vlan = rte_le_to_cpu_16(resp->vlan) & RTE_ETH_VLAN_ID_MAX;
svif_info = rte_le_to_cpu_16(resp->svif_info);
if (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID)
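
bnxt_parse_eth_link_speed() and bnxt_parse_eth_link_speed_mask() above split dev_conf->link_speeds into the RTE_ETH_LINK_SPEED_FIXED flag plus either one forced speed or an autonegotiation mask. On the application side that field is set roughly like this (sketch; which fixed speeds are accepted is device-dependent):

/* Sketch: force a fixed 25G link instead of autonegotiation. */
struct rte_eth_conf conf = {
    .link_speeds = RTE_ETH_LINK_SPEED_FIXED | RTE_ETH_LINK_SPEED_25G,
};
/* RTE_ETH_LINK_SPEED_AUTONEG (0) requests autonegotiation instead. */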

@ -537,7 +537,7 @@ int bnxt_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
if (parent_bp->flags & BNXT_FLAG_PTP_SUPPORTED)
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;

@ -187,7 +187,7 @@ int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
rx_ring_info->rx_ring_struct->ring_size *
AGG_RING_SIZE_FACTOR)) : 0;
if (rx_ring_info && (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
if (rx_ring_info && (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
int tpa_max = BNXT_TPA_MAX_AGGS(bp);
tpa_info_len = tpa_max * sizeof(struct bnxt_tpa_info);
@ -283,7 +283,7 @@ int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
ag_bitmap_start, ag_bitmap_len);
/* TPA info */
if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
rx_ring_info->tpa_info =
((struct bnxt_tpa_info *)
((char *)mz->addr + tpa_info_start));

@ -52,13 +52,13 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
bp->nr_vnics = 0;
/* Multi-queue mode */
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB_RSS) {
if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
/* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */
switch (dev_conf->rxmode.mq_mode) {
case ETH_MQ_RX_VMDQ_RSS:
case ETH_MQ_RX_VMDQ_ONLY:
case ETH_MQ_RX_VMDQ_DCB_RSS:
case RTE_ETH_MQ_RX_VMDQ_RSS:
case RTE_ETH_MQ_RX_VMDQ_ONLY:
case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
/* FALLTHROUGH */
/* ETH_8/64_POOLs */
pools = conf->nb_queue_pools;
@ -66,14 +66,14 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
max_pools = RTE_MIN(bp->max_vnics,
RTE_MIN(bp->max_l2_ctx,
RTE_MIN(bp->max_rsscos_ctx,
ETH_64_POOLS)));
RTE_ETH_64_POOLS)));
PMD_DRV_LOG(DEBUG,
"pools = %u max_pools = %u\n",
pools, max_pools);
if (pools > max_pools)
pools = max_pools;
break;
case ETH_MQ_RX_RSS:
case RTE_ETH_MQ_RX_RSS:
pools = bp->rx_cosq_cnt ? bp->rx_cosq_cnt : 1;
break;
default:
@ -111,7 +111,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
ring_idx, rxq, i, vnic);
}
if (i == 0) {
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB) {
if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB) {
bp->eth_dev->data->promiscuous = 1;
vnic->flags |= BNXT_VNIC_INFO_PROMISC;
}
@ -121,8 +121,8 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
vnic->end_grp_id = end_grp_id;
if (i) {
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB ||
!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS))
if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB ||
!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS))
vnic->rss_dflt_cr = true;
goto skip_filter_allocation;
}
@ -147,14 +147,14 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
bp->rx_num_qs_per_vnic = nb_q_per_grp;
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
struct rte_eth_rss_conf *rss = &dev_conf->rx_adv_conf.rss_conf;
if (bp->flags & BNXT_FLAG_UPDATE_HASH)
bp->flags &= ~BNXT_FLAG_UPDATE_HASH;
for (i = 0; i < bp->nr_vnics; i++) {
uint32_t lvl = ETH_RSS_LEVEL(rss->rss_hf);
uint32_t lvl = RTE_ETH_RSS_LEVEL(rss->rss_hf);
vnic = &bp->vnic_info[i];
vnic->hash_type =
@ -363,7 +363,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_size);
rxq->queue_id = queue_idx;
rxq->port_id = eth_dev->data->port_id;
if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC)
if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
@ -478,7 +478,7 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
}
PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
vnic = rxq->vnic;
if (BNXT_HAS_RING_GRPS(bp)) {
@ -549,7 +549,7 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
rxq->rx_started = false;
PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
if (BNXT_HAS_RING_GRPS(bp))
vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;

@ -566,8 +566,8 @@ bnxt_init_ol_flags_tables(struct bnxt_rx_queue *rxq)
dev_conf = &rxq->bp->eth_dev->data->dev_conf;
offloads = dev_conf->rxmode.offloads;
outer_cksum_enabled = !!(offloads & (DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_RX_OFFLOAD_OUTER_UDP_CKSUM));
outer_cksum_enabled = !!(offloads & (RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM));
/* Initialize ol_flags table. */
pt = rxr->ol_flags_table;

@ -416,7 +416,7 @@ bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
} while (nb_tx_pkts < ring_mask);
if (nb_tx_pkts) {
if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
else
bnxt_tx_cmp_vec(txq, nb_tx_pkts);

@ -96,7 +96,7 @@ bnxt_rxq_rearm(struct bnxt_rx_queue *rxq, struct bnxt_rx_ring_info *rxr)
}
/*
* Transmit completion function for use when DEV_TX_OFFLOAD_MBUF_FAST_FREE
* Transmit completion function for use when RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
* is enabled.
*/
static inline void

@ -352,7 +352,7 @@ bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
} while (nb_tx_pkts < ring_mask);
if (nb_tx_pkts) {
if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
else
bnxt_tx_cmp_vec(txq, nb_tx_pkts);

@ -333,7 +333,7 @@ bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
} while (nb_tx_pkts < ring_mask);
if (nb_tx_pkts) {
if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
else
bnxt_tx_cmp_vec(txq, nb_tx_pkts);

@ -353,7 +353,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
}
/*
* Transmit completion function for use when DEV_TX_OFFLOAD_MBUF_FAST_FREE
* Transmit completion function for use when RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
* is enabled.
*/
static void bnxt_tx_cmp_fast(struct bnxt_tx_queue *txq, int nr_pkts)
@ -479,7 +479,7 @@ static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq)
} while (nb_tx_pkts < ring_mask);
if (nb_tx_pkts) {
if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
bnxt_tx_cmp_fast(txq, nb_tx_pkts);
else
bnxt_tx_cmp(txq, nb_tx_pkts);

@ -239,17 +239,17 @@ uint16_t bnxt_rte_to_hwrm_hash_types(uint64_t rte_type)
{
uint16_t hwrm_type = 0;
if (rte_type & ETH_RSS_IPV4)
if (rte_type & RTE_ETH_RSS_IPV4)
hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
if (rte_type & ETH_RSS_NONFRAG_IPV4_TCP)
if (rte_type & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
if (rte_type & ETH_RSS_NONFRAG_IPV4_UDP)
if (rte_type & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
if (rte_type & ETH_RSS_IPV6)
if (rte_type & RTE_ETH_RSS_IPV6)
hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
if (rte_type & ETH_RSS_NONFRAG_IPV6_TCP)
if (rte_type & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
if (rte_type & ETH_RSS_NONFRAG_IPV6_UDP)
if (rte_type & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
return hwrm_type;
@ -258,11 +258,11 @@ uint16_t bnxt_rte_to_hwrm_hash_types(uint64_t rte_type)
int bnxt_rte_to_hwrm_hash_level(struct bnxt *bp, uint64_t hash_f, uint32_t lvl)
{
uint32_t mode = HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT;
bool l3 = (hash_f & (ETH_RSS_IPV4 | ETH_RSS_IPV6));
bool l4 = (hash_f & (ETH_RSS_NONFRAG_IPV4_UDP |
ETH_RSS_NONFRAG_IPV6_UDP |
ETH_RSS_NONFRAG_IPV4_TCP |
ETH_RSS_NONFRAG_IPV6_TCP));
bool l3 = (hash_f & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6));
bool l4 = (hash_f & (RTE_ETH_RSS_NONFRAG_IPV4_UDP |
RTE_ETH_RSS_NONFRAG_IPV6_UDP |
RTE_ETH_RSS_NONFRAG_IPV4_TCP |
RTE_ETH_RSS_NONFRAG_IPV6_TCP));
bool l3_only = l3 && !l4;
bool l3_and_l4 = l3 && l4;
@ -307,16 +307,16 @@ uint64_t bnxt_hwrm_to_rte_rss_level(struct bnxt *bp, uint32_t mode)
* return default hash mode.
*/
if (!(bp->vnic_cap_flags & BNXT_VNIC_CAP_OUTER_RSS))
return ETH_RSS_LEVEL_PMD_DEFAULT;
return RTE_ETH_RSS_LEVEL_PMD_DEFAULT;
if (mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_2 ||
mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_4)
rss_level |= ETH_RSS_LEVEL_OUTERMOST;
rss_level |= RTE_ETH_RSS_LEVEL_OUTERMOST;
else if (mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_2 ||
mode == HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_4)
rss_level |= ETH_RSS_LEVEL_INNERMOST;
rss_level |= RTE_ETH_RSS_LEVEL_INNERMOST;
else
rss_level |= ETH_RSS_LEVEL_PMD_DEFAULT;
rss_level |= RTE_ETH_RSS_LEVEL_PMD_DEFAULT;
return rss_level;
}

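The two helpers above translate the renamed application-facing RSS bits into HWRM hash types and hash levels. A minimal sketch of the application side that produces such bits, assuming an already-configured port (port_id and the chosen hash fields are illustrative):

#include <rte_ethdev.h>

static int
request_inner_rss(uint16_t port_id)
{
	/* The renamed flags below are what the helpers above map to
	 * HWRM hash types and hash levels. */
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = NULL,	/* keep the PMD default key */
		.rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP |
			  RTE_ETH_RSS_NONFRAG_IPV6_TCP |
			  RTE_ETH_RSS_LEVEL_INNERMOST,
	};

	return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}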

@ -421,18 +421,18 @@ int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf,
if (vf >= bp->pdev->max_vfs)
return -EINVAL;
if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG) {
if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG) {
PMD_DRV_LOG(ERR, "Currently cannot toggle this setting\n");
return -ENOTSUP;
}
/* Is this really the correct mapping? VFd seems to think it is. */
if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
flag |= BNXT_VNIC_INFO_PROMISC;
if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
flag |= BNXT_VNIC_INFO_BCAST;
if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
flag |= BNXT_VNIC_INFO_ALLMULTI | BNXT_VNIC_INFO_MCAST;
if (on)


@ -167,8 +167,8 @@ struct bond_dev_private {
struct rte_eth_desc_lim tx_desc_lim; /**< Tx descriptor limits */
uint16_t reta_size;
struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 /
RTE_RETA_GROUP_SIZE];
struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_512 /
RTE_ETH_RETA_GROUP_SIZE];
uint8_t rss_key[52]; /**< 52-byte hash key buffer. */
uint8_t rss_key_len; /**< hash key length in bytes. */

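The array sizing above follows from the renamed constants: a 512-entry redirection table split into 64-entry groups needs eight rte_eth_rss_reta_entry64 elements. A compile-time restatement of that arithmetic (a sketch, not part of the bonding driver):

#include <rte_ethdev.h>

/* 512 / 64 == 8 groups of 64 redirection-table entries. */
_Static_assert(RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE == 8,
	       "512-entry RETA occupies eight 64-entry groups");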

@ -770,25 +770,25 @@ link_speed_key(uint16_t speed) {
uint16_t key_speed;
switch (speed) {
case ETH_SPEED_NUM_NONE:
case RTE_ETH_SPEED_NUM_NONE:
key_speed = 0x00;
break;
case ETH_SPEED_NUM_10M:
case RTE_ETH_SPEED_NUM_10M:
key_speed = BOND_LINK_SPEED_KEY_10M;
break;
case ETH_SPEED_NUM_100M:
case RTE_ETH_SPEED_NUM_100M:
key_speed = BOND_LINK_SPEED_KEY_100M;
break;
case ETH_SPEED_NUM_1G:
case RTE_ETH_SPEED_NUM_1G:
key_speed = BOND_LINK_SPEED_KEY_1000M;
break;
case ETH_SPEED_NUM_10G:
case RTE_ETH_SPEED_NUM_10G:
key_speed = BOND_LINK_SPEED_KEY_10G;
break;
case ETH_SPEED_NUM_20G:
case RTE_ETH_SPEED_NUM_20G:
key_speed = BOND_LINK_SPEED_KEY_20G;
break;
case ETH_SPEED_NUM_40G:
case RTE_ETH_SPEED_NUM_40G:
key_speed = BOND_LINK_SPEED_KEY_40G;
break;
default:
@ -887,7 +887,7 @@ bond_mode_8023ad_periodic_cb(void *arg)
if (ret >= 0 && link_info.link_status != 0) {
key = link_speed_key(link_info.link_speed) << 1;
if (link_info.link_duplex == ETH_LINK_FULL_DUPLEX)
if (link_info.link_duplex == RTE_ETH_LINK_FULL_DUPLEX)
key |= BOND_LINK_FULL_DUPLEX_KEY;
} else {
key = 0;

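The callback above folds the negotiated speed and duplex into the 802.3ad actor key. The same renamed constants are what an application sees when it reads a link back; a minimal sketch, assuming a hypothetical port_id:

#include <stdio.h>
#include <rte_ethdev.h>

static void
print_link(uint16_t port_id)
{
	struct rte_eth_link link;

	if (rte_eth_link_get_nowait(port_id, &link) != 0)
		return;
	if (link.link_status == RTE_ETH_LINK_UP)
		printf("port %u: %u Mbps, %s\n", port_id, link.link_speed,
		       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
		       "full-duplex" : "half-duplex");
}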

@ -204,7 +204,7 @@ slave_vlan_filter_set(uint16_t bonded_port_id, uint16_t slave_port_id)
bonded_eth_dev = &rte_eth_devices[bonded_port_id];
if ((bonded_eth_dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_VLAN_FILTER) == 0)
RTE_ETH_RX_OFFLOAD_VLAN_FILTER) == 0)
return 0;
internals = bonded_eth_dev->data->dev_private;
@ -592,7 +592,7 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
return -1;
}
if (link_props.link_status == ETH_LINK_UP) {
if (link_props.link_status == RTE_ETH_LINK_UP) {
if (internals->active_slave_count == 0 &&
!internals->user_defined_primary_port)
bond_ethdev_primary_set(internals,
@ -727,7 +727,7 @@ __eth_bond_slave_remove_lock_free(uint16_t bonded_port_id,
internals->tx_offload_capa = 0;
internals->rx_queue_offload_capa = 0;
internals->tx_queue_offload_capa = 0;
internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
internals->flow_type_rss_offloads = RTE_ETH_RSS_PROTO_MASK;
internals->reta_size = 0;
internals->candidate_max_rx_pktlen = 0;
internals->max_rx_pktlen = 0;


@ -1369,8 +1369,8 @@ link_properties_set(struct rte_eth_dev *ethdev, struct rte_eth_link *slave_link)
* In any other mode the link properties are set to default
* values of AUTONEG/DUPLEX
*/
ethdev->data->dev_link.link_autoneg = ETH_LINK_AUTONEG;
ethdev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
ethdev->data->dev_link.link_autoneg = RTE_ETH_LINK_AUTONEG;
ethdev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
}
}
@ -1700,7 +1700,7 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;
/* If RSS is enabled for bonding, try to enable it for slaves */
if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
/* rss_key won't be empty if RSS is configured in bonded dev */
slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
internals->rss_key_len;
@ -1714,12 +1714,12 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
}
if (bonded_eth_dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_VLAN_FILTER)
RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
slave_eth_dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_VLAN_FILTER;
RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
else
slave_eth_dev->data->dev_conf.rxmode.offloads &=
~DEV_RX_OFFLOAD_VLAN_FILTER;
~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
slave_eth_dev->data->dev_conf.rxmode.mtu =
bonded_eth_dev->data->dev_conf.rxmode.mtu;
@ -1823,7 +1823,7 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
}
/* If RSS is enabled for bonding, synchronize RETA */
if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
int i;
struct bond_dev_private *internals;
@ -1946,7 +1946,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
return -1;
}
eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
eth_dev->data->dev_started = 1;
internals = eth_dev->data->dev_private;
@ -2086,7 +2086,7 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev)
tlb_last_obytets[internals->active_slaves[i]] = 0;
}
eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
eth_dev->data->dev_started = 0;
internals->link_status_polling_enabled = 0;
@ -2416,15 +2416,15 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
bond_ctx = ethdev->data->dev_private;
ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
ethdev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
if (ethdev->data->dev_started == 0 ||
bond_ctx->active_slave_count == 0) {
ethdev->data->dev_link.link_status = ETH_LINK_DOWN;
ethdev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
return 0;
}
ethdev->data->dev_link.link_status = ETH_LINK_UP;
ethdev->data->dev_link.link_status = RTE_ETH_LINK_UP;
if (wait_to_complete)
link_update = rte_eth_link_get;
@ -2449,7 +2449,7 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
&slave_link);
if (ret < 0) {
ethdev->data->dev_link.link_speed =
ETH_SPEED_NUM_NONE;
RTE_ETH_SPEED_NUM_NONE;
RTE_BOND_LOG(ERR,
"Slave (port %u) link get failed: %s",
bond_ctx->active_slaves[idx],
@ -2491,7 +2491,7 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
* In these modes the maximum theoretical link speed is the sum
* of all the slaves
*/
ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
ethdev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
one_link_update_succeeded = false;
for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
@ -2865,7 +2865,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
goto link_update;
/* check link state properties if bonded link is up*/
if (bonded_eth_dev->data->dev_link.link_status == ETH_LINK_UP) {
if (bonded_eth_dev->data->dev_link.link_status == RTE_ETH_LINK_UP) {
if (link_properties_valid(bonded_eth_dev, &link) != 0)
RTE_BOND_LOG(ERR, "Invalid link properties "
"for slave %d in bonding mode %d",
@ -2881,7 +2881,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
if (internals->active_slave_count < 1) {
/* If first active slave, then change link status */
bonded_eth_dev->data->dev_link.link_status =
ETH_LINK_UP;
RTE_ETH_LINK_UP;
internals->current_primary_port = port_id;
lsc_flag = 1;
@ -2973,12 +2973,12 @@ bond_ethdev_rss_reta_update(struct rte_eth_dev *dev,
return -EINVAL;
/* Copy RETA table */
reta_count = (reta_size + RTE_RETA_GROUP_SIZE - 1) /
RTE_RETA_GROUP_SIZE;
reta_count = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) /
RTE_ETH_RETA_GROUP_SIZE;
for (i = 0; i < reta_count; i++) {
internals->reta_conf[i].mask = reta_conf[i].mask;
for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
if ((reta_conf[i].mask >> j) & 0x01)
internals->reta_conf[i].reta[j] = reta_conf[i].reta[j];
}
@ -3011,8 +3011,8 @@ bond_ethdev_rss_reta_query(struct rte_eth_dev *dev,
return -EINVAL;
/* Copy RETA table */
for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++)
for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
for (i = 0; i < reta_size / RTE_ETH_RETA_GROUP_SIZE; i++)
for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
if ((reta_conf[i].mask >> j) & 0x01)
reta_conf[i].reta[j] = internals->reta_conf[i].reta[j];
@ -3274,7 +3274,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
internals->max_rx_pktlen = 0;
/* Initially allow to choose any offload type */
internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
internals->flow_type_rss_offloads = RTE_ETH_RSS_PROTO_MASK;
memset(&internals->default_rxconf, 0,
sizeof(internals->default_rxconf));
@ -3501,7 +3501,7 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
* set key to the value specified in the port RSS configuration.
* Fall back to default RSS key if the key is not specified
*/
if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
struct rte_eth_rss_conf *rss_conf =
&dev->data->dev_conf.rx_adv_conf.rss_conf;
if (rss_conf->rss_key != NULL) {
@ -3526,9 +3526,9 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
internals->reta_conf[i].mask = ~0LL;
for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
internals->reta_conf[i].reta[j] =
(i * RTE_RETA_GROUP_SIZE + j) %
(i * RTE_ETH_RETA_GROUP_SIZE + j) %
dev->data->nb_rx_queues;
}
}

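The RETA update and query paths above walk the table in RTE_ETH_RETA_GROUP_SIZE chunks. For context, a minimal sketch of a caller filling the same structure, assuming a hypothetical port_id and four Rx queues:

#include <stdint.h>
#include <string.h>
#include <rte_ethdev.h>

static int
spread_reta(uint16_t port_id)
{
	struct rte_eth_rss_reta_entry64
		reta_conf[RTE_ETH_RSS_RETA_SIZE_128 / RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++) {
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

		reta_conf[idx].mask |= UINT64_C(1) << shift;
		reta_conf[idx].reta[shift] = i % 4; /* round-robin, 4 queues */
	}
	return rte_eth_dev_rss_reta_update(port_id, reta_conf,
					   RTE_ETH_RSS_RETA_SIZE_128);
}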

@ -15,28 +15,28 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
struct rte_eth_rxmode *rxmode = &conf->rxmode;
uint16_t flags = 0;
if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
(dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
flags |= NIX_RX_OFFLOAD_RSS_F;
if (dev->rx_offloads &
(DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
(RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
if (dev->rx_offloads &
(DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
(RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
flags |= NIX_RX_MULTI_SEG_F;
if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
flags |= NIX_RX_OFFLOAD_TSTAMP_F;
if (!dev->ptype_disable)
flags |= NIX_RX_OFFLOAD_PTYPE_F;
if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
flags |= NIX_RX_OFFLOAD_SECURITY_F;
return flags;
@ -72,39 +72,39 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
conf & DEV_TX_OFFLOAD_QINQ_INSERT)
if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
conf & DEV_TX_OFFLOAD_UDP_CKSUM || conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM || conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
flags |= NIX_TX_MULTI_SEG_F;
/* Enable Inner checksum for TSO */
if (conf & DEV_TX_OFFLOAD_TCP_TSO)
if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F);
/* Enable Inner and Outer checksum for Tunnel TSO */
if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO))
if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
NIX_TX_OFFLOAD_L3_L4_CSUM_F);
if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
flags |= NIX_TX_OFFLOAD_TSTAMP_F;
if (conf & DEV_TX_OFFLOAD_SECURITY)
if (conf & RTE_ETH_TX_OFFLOAD_SECURITY)
flags |= NIX_TX_OFFLOAD_SECURITY_F;
return flags;

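nix_tx_offload_flags() and nix_rx_offload_flags() fold the renamed ethdev offload bits into the PMD's internal NIX_* flags. A minimal sketch of the port configuration that feeds them, with illustrative queue counts:

#include <rte_ethdev.h>

static int
configure_csum_tso(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		.txmode = {
			.offloads = RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
				    RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
				    RTE_ETH_TX_OFFLOAD_TCP_TSO,
		},
	};

	/* 1 Rx and 1 Tx queue, illustrative only. */
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}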

@ -98,7 +98,7 @@ cn10k_rss_action_validate(struct rte_eth_dev *eth_dev,
return -EINVAL;
}
if (eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
if (eth_dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
plt_err("multi-queue mode is disabled");
return -ENOTSUP;
}


@ -77,12 +77,12 @@ cn10k_eth_set_rx_function(struct rte_eth_dev *eth_dev)
nix_eth_rx_burst_mseg[0][0][0][0][0][0][0];
if (dev->scalar_ena) {
if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
return pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);
return pick_rx_func(eth_dev, nix_eth_rx_burst);
}
if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
return pick_rx_func(eth_dev, nix_eth_rx_vec_burst_mseg);
return pick_rx_func(eth_dev, nix_eth_rx_vec_burst);
}


@ -78,11 +78,11 @@ cn10k_eth_set_tx_function(struct rte_eth_dev *eth_dev)
if (dev->scalar_ena) {
pick_tx_func(eth_dev, nix_eth_tx_burst);
if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
} else {
pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
pick_tx_func(eth_dev, nix_eth_tx_vec_burst_mseg);
}

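The selectors above pick multi-segment burst functions based on RTE_ETH_TX_OFFLOAD_MULTI_SEGS and RTE_ETH_RX_OFFLOAD_SCATTER. A minimal sketch of the pairing an application typically requests for jumbo frames (MTU and queue counts are illustrative):

#include <rte_ethdev.h>

static int
configure_jumbo(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			.mtu = 9000,	/* illustrative jumbo MTU */
			.offloads = RTE_ETH_RX_OFFLOAD_SCATTER,
		},
		.txmode = {
			.offloads = RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
		},
	};

	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}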

@ -15,28 +15,28 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
struct rte_eth_rxmode *rxmode = &conf->rxmode;
uint16_t flags = 0;
if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
(dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS &&
(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
flags |= NIX_RX_OFFLOAD_RSS_F;
if (dev->rx_offloads &
(DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
(RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
if (dev->rx_offloads &
(DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
(RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM))
flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
flags |= NIX_RX_MULTI_SEG_F;
if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
flags |= NIX_RX_OFFLOAD_TSTAMP_F;
if (!dev->ptype_disable)
flags |= NIX_RX_OFFLOAD_PTYPE_F;
if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
flags |= NIX_RX_OFFLOAD_SECURITY_F;
return flags;
@ -72,39 +72,39 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
conf & DEV_TX_OFFLOAD_QINQ_INSERT)
if (conf & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
conf & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
if (conf & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
conf & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
conf & DEV_TX_OFFLOAD_UDP_CKSUM || conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
if (conf & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM ||
conf & RTE_ETH_TX_OFFLOAD_TCP_CKSUM ||
conf & RTE_ETH_TX_OFFLOAD_UDP_CKSUM || conf & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
if (!(conf & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
if (conf & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
flags |= NIX_TX_MULTI_SEG_F;
/* Enable Inner checksum for TSO */
if (conf & DEV_TX_OFFLOAD_TCP_TSO)
if (conf & RTE_ETH_TX_OFFLOAD_TCP_TSO)
flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_L3_L4_CSUM_F);
/* Enable Inner and Outer checksum for Tunnel TSO */
if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO))
if (conf & (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO))
flags |= (NIX_TX_OFFLOAD_TSO_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
NIX_TX_OFFLOAD_L3_L4_CSUM_F);
if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP))
flags |= NIX_TX_OFFLOAD_TSTAMP_F;
if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY)
if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
flags |= NIX_TX_OFFLOAD_SECURITY_F;
return flags;
@ -298,9 +298,9 @@ cn9k_nix_configure(struct rte_eth_dev *eth_dev)
/* Platform specific checks */
if ((roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) &&
(txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
(txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
(txmode->offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
((txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
(txmode->offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
plt_err("Outer IP and SCTP checksum unsupported");
return -EINVAL;
}
@ -553,17 +553,17 @@ cn9k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
* TSO not supported for earlier chip revisions
*/
if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0())
dev->tx_offload_capa &= ~(DEV_TX_OFFLOAD_TCP_TSO |
DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
DEV_TX_OFFLOAD_GRE_TNL_TSO);
dev->tx_offload_capa &= ~(RTE_ETH_TX_OFFLOAD_TCP_TSO |
RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO);
/* 50G and 100G to be supported for board version C0
* and above of CN9K.
*/
if (roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) {
dev->speed_capa &= ~(uint64_t)ETH_LINK_SPEED_50G;
dev->speed_capa &= ~(uint64_t)ETH_LINK_SPEED_100G;
dev->speed_capa &= ~(uint64_t)RTE_ETH_LINK_SPEED_50G;
dev->speed_capa &= ~(uint64_t)RTE_ETH_LINK_SPEED_100G;
}
dev->hwcap = 0;


@ -77,12 +77,12 @@ cn9k_eth_set_rx_function(struct rte_eth_dev *eth_dev)
nix_eth_rx_burst_mseg[0][0][0][0][0][0][0];
if (dev->scalar_ena) {
if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
return pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);
return pick_rx_func(eth_dev, nix_eth_rx_burst);
}
if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
return pick_rx_func(eth_dev, nix_eth_rx_vec_burst_mseg);
return pick_rx_func(eth_dev, nix_eth_rx_vec_burst);
}


@ -77,11 +77,11 @@ cn9k_eth_set_tx_function(struct rte_eth_dev *eth_dev)
if (dev->scalar_ena) {
pick_tx_func(eth_dev, nix_eth_tx_burst);
if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
} else {
pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
pick_tx_func(eth_dev, nix_eth_tx_vec_burst_mseg);
}


@ -10,7 +10,7 @@ nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
if (roc_nix_is_vf_or_sdp(&dev->nix) ||
dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG)
capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
capa &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
return capa;
}
@ -28,11 +28,11 @@ nix_get_speed_capa(struct cnxk_eth_dev *dev)
uint32_t speed_capa;
/* Auto negotiation disabled */
speed_capa = ETH_LINK_SPEED_FIXED;
speed_capa = RTE_ETH_LINK_SPEED_FIXED;
if (!roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix)) {
speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
speed_capa |= RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G |
RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G;
}
return speed_capa;
@ -65,7 +65,7 @@ nix_security_setup(struct cnxk_eth_dev *dev)
struct roc_nix *nix = &dev->nix;
int i, rc = 0;
if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
/* Setup Inline Inbound */
rc = roc_nix_inl_inb_init(nix);
if (rc) {
@ -80,8 +80,8 @@ nix_security_setup(struct cnxk_eth_dev *dev)
cnxk_nix_inb_mode_set(dev, true);
}
if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY ||
dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
struct plt_bitmap *bmap;
size_t bmap_sz;
void *mem;
@ -100,8 +100,8 @@ nix_security_setup(struct cnxk_eth_dev *dev)
dev->outb.lf_base = roc_nix_inl_outb_lf_base_get(nix);
/* Skip the rest if DEV_TX_OFFLOAD_SECURITY is not enabled */
if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY))
/* Skip the rest if RTE_ETH_TX_OFFLOAD_SECURITY is not enabled */
if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY))
goto done;
rc = -ENOMEM;
@ -136,7 +136,7 @@ nix_security_setup(struct cnxk_eth_dev *dev)
done:
return 0;
cleanup:
if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
rc |= roc_nix_inl_inb_fini(nix);
return rc;
}
@ -182,7 +182,7 @@ nix_security_release(struct cnxk_eth_dev *dev)
int rc, ret = 0;
/* Cleanup Inline inbound */
if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
/* Destroy inbound sessions */
tvar = NULL;
RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->inb.list, entry, tvar)
@ -199,8 +199,8 @@ nix_security_release(struct cnxk_eth_dev *dev)
}
/* Cleanup Inline outbound */
if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY ||
dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
/* Destroy outbound sessions */
tvar = NULL;
RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->outb.list, entry, tvar)
@ -242,8 +242,8 @@ nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
if (eth_dev->data->mtu + (uint32_t)CNXK_NIX_L2_OVERHEAD > buffsz) {
dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
dev->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
}
}
@ -273,7 +273,7 @@ nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
struct rte_eth_fc_conf fc_conf = {0};
int rc;
/* Both Rx & Tx flow ctrl get enabled(RTE_FC_FULL) in HW
/* Both Rx & Tx flow ctrl get enabled(RTE_ETH_FC_FULL) in HW
* by AF driver, update those info in PMD structure.
*/
rc = cnxk_nix_flow_ctrl_get(eth_dev, &fc_conf);
@ -281,10 +281,10 @@ nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
goto exit;
fc->mode = fc_conf.mode;
fc->rx_pause = (fc_conf.mode == RTE_FC_FULL) ||
(fc_conf.mode == RTE_FC_RX_PAUSE);
fc->tx_pause = (fc_conf.mode == RTE_FC_FULL) ||
(fc_conf.mode == RTE_FC_TX_PAUSE);
fc->rx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
(fc_conf.mode == RTE_ETH_FC_RX_PAUSE);
fc->tx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
(fc_conf.mode == RTE_ETH_FC_TX_PAUSE);
exit:
return rc;
@ -305,11 +305,11 @@ nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev)
/* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
if (roc_model_is_cn96_ax() &&
dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG &&
(fc_cfg.mode == RTE_FC_FULL || fc_cfg.mode == RTE_FC_RX_PAUSE)) {
(fc_cfg.mode == RTE_ETH_FC_FULL || fc_cfg.mode == RTE_ETH_FC_RX_PAUSE)) {
fc_cfg.mode =
(fc_cfg.mode == RTE_FC_FULL ||
fc_cfg.mode == RTE_FC_TX_PAUSE) ?
RTE_FC_TX_PAUSE : RTE_FC_NONE;
(fc_cfg.mode == RTE_ETH_FC_FULL ||
fc_cfg.mode == RTE_ETH_FC_TX_PAUSE) ?
RTE_ETH_FC_TX_PAUSE : RTE_ETH_FC_NONE;
}
return cnxk_nix_flow_ctrl_set(eth_dev, &fc_cfg);
@ -352,7 +352,7 @@ nix_sq_max_sqe_sz(struct cnxk_eth_dev *dev)
* Maximum three segments can be supported with W8; choose
* NIX_MAXSQESZ_W16 for multi-segment offload.
*/
if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
return NIX_MAXSQESZ_W16;
else
return NIX_MAXSQESZ_W8;
@ -380,7 +380,7 @@ cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
/* When Tx Security offload is enabled, increase tx desc count by
* max possible outbound desc count.
*/
if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY)
if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
nb_desc += dev->outb.nb_desc;
/* Setup ROC SQ */
@ -499,7 +499,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
* to avoid meta packet drop as LBK does not currently support
* backpressure.
*/
if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) {
if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) {
uint64_t pkt_pool_limit = roc_nix_inl_dev_rq_limit_get();
/* Use current RQ's aura limit if inl rq is not available */
@ -561,7 +561,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
rxq_sp->qconf.nb_desc = nb_desc;
rxq_sp->qconf.mp = mp;
if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
/* Setup rq reference for inline dev if present */
rc = roc_nix_inl_dev_rq_get(rq);
if (rc)
@ -579,7 +579,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
* These are needed in deriving raw clock value from tsc counter.
* read_clock eth op returns raw clock value.
*/
if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
rc = cnxk_nix_tsc_convert(dev);
if (rc) {
plt_err("Failed to calculate delta and freq mult");
@ -618,7 +618,7 @@ cnxk_nix_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
plt_nix_dbg("Releasing rxq %u", qid);
/* Release rq reference for inline dev if present */
if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
roc_nix_inl_dev_rq_put(rq);
/* Cleanup ROC RQ */
@ -657,24 +657,24 @@ cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
dev->ethdev_rss_hf = ethdev_rss;
if (ethdev_rss & ETH_RSS_L2_PAYLOAD &&
if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD &&
dev->npc.switch_header_type == ROC_PRIV_FLAGS_LEN_90B) {
flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
}
if (ethdev_rss & ETH_RSS_C_VLAN)
if (ethdev_rss & RTE_ETH_RSS_C_VLAN)
flowkey_cfg |= FLOW_KEY_TYPE_VLAN;
if (ethdev_rss & ETH_RSS_L3_SRC_ONLY)
if (ethdev_rss & RTE_ETH_RSS_L3_SRC_ONLY)
flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;
if (ethdev_rss & ETH_RSS_L3_DST_ONLY)
if (ethdev_rss & RTE_ETH_RSS_L3_DST_ONLY)
flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;
if (ethdev_rss & ETH_RSS_L4_SRC_ONLY)
if (ethdev_rss & RTE_ETH_RSS_L4_SRC_ONLY)
flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;
if (ethdev_rss & ETH_RSS_L4_DST_ONLY)
if (ethdev_rss & RTE_ETH_RSS_L4_DST_ONLY)
flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;
if (ethdev_rss & RSS_IPV4_ENABLE)
@ -683,34 +683,34 @@ cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
if (ethdev_rss & RSS_IPV6_ENABLE)
flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];
if (ethdev_rss & ETH_RSS_TCP)
if (ethdev_rss & RTE_ETH_RSS_TCP)
flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];
if (ethdev_rss & ETH_RSS_UDP)
if (ethdev_rss & RTE_ETH_RSS_UDP)
flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];
if (ethdev_rss & ETH_RSS_SCTP)
if (ethdev_rss & RTE_ETH_RSS_SCTP)
flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];
if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD)
flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];
if (ethdev_rss & RSS_IPV6_EX_ENABLE)
flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;
if (ethdev_rss & ETH_RSS_PORT)
if (ethdev_rss & RTE_ETH_RSS_PORT)
flowkey_cfg |= FLOW_KEY_TYPE_PORT;
if (ethdev_rss & ETH_RSS_NVGRE)
if (ethdev_rss & RTE_ETH_RSS_NVGRE)
flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;
if (ethdev_rss & ETH_RSS_VXLAN)
if (ethdev_rss & RTE_ETH_RSS_VXLAN)
flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;
if (ethdev_rss & ETH_RSS_GENEVE)
if (ethdev_rss & RTE_ETH_RSS_GENEVE)
flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;
if (ethdev_rss & ETH_RSS_GTPU)
if (ethdev_rss & RTE_ETH_RSS_GTPU)
flowkey_cfg |= FLOW_KEY_TYPE_GTPU;
return flowkey_cfg;
@ -746,7 +746,7 @@ nix_rss_default_setup(struct cnxk_eth_dev *dev)
uint64_t rss_hf;
rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
rss_hash_level = ETH_RSS_LEVEL(rss_hf);
rss_hash_level = RTE_ETH_RSS_LEVEL(rss_hf);
if (rss_hash_level)
rss_hash_level -= 1;
@ -958,8 +958,8 @@ nix_lso_fmt_setup(struct cnxk_eth_dev *dev)
/* Nothing much to do if offload is not enabled */
if (!(dev->tx_offloads &
(DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO)))
(RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)))
return 0;
/* Setup LSO formats in AF. It's a no-op if another ethdev has
@ -1007,13 +1007,13 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
goto fail_configure;
}
if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
rxmode->mq_mode != ETH_MQ_RX_RSS) {
if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
plt_err("Unsupported mq rx mode %d", rxmode->mq_mode);
goto fail_configure;
}
if (txmode->mq_mode != ETH_MQ_TX_NONE) {
if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
plt_err("Unsupported mq tx mode %d", txmode->mq_mode);
goto fail_configure;
}
@ -1054,7 +1054,7 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
/* Prepare rx cfg */
rx_cfg = ROC_NIX_LF_RX_CFG_DIS_APAD;
if (dev->rx_offloads &
(DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) {
(RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_OL4;
rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_IL4;
}
@ -1062,7 +1062,7 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |
ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3);
if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
rx_cfg |= ROC_NIX_LF_RX_CFG_IP6_UDP_OPT;
/* Disable drop re if rx offload security is enabled and
* platform does not support it.
@ -1454,12 +1454,12 @@ cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
* enabled on PF owning this VF
*/
memset(&dev->tstamp, 0, sizeof(struct cnxk_timesync_info));
if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
cnxk_eth_dev_ops.timesync_enable(eth_dev);
else
cnxk_eth_dev_ops.timesync_disable(eth_dev);
if (dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
rc = rte_mbuf_dyn_rx_timestamp_register
(&dev->tstamp.tstamp_dynfield_offset,
&dev->tstamp.rx_tstamp_dynflag);

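nix_init_flow_ctrl_config() and nix_update_flow_ctrl_config() above mirror the RTE_ETH_FC_* modes into PMD state. On the application side the same constants drive the flow-control ops; a minimal sketch, assuming a hypothetical port_id:

#include <rte_ethdev.h>

static int
enable_tx_pause(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;
	int ret;

	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
	if (ret != 0)
		return ret;
	fc_conf.mode = RTE_ETH_FC_TX_PAUSE;	/* send pause frames only */
	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}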

@ -58,41 +58,44 @@
CNXK_NIX_TX_NB_SEG_MAX)
#define CNXK_NIX_RSS_L3_L4_SRC_DST \
(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY | \
ETH_RSS_L4_DST_ONLY)
(RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY | \
RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)
#define CNXK_NIX_RSS_OFFLOAD \
(ETH_RSS_PORT | ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP | \
ETH_RSS_SCTP | ETH_RSS_TUNNEL | ETH_RSS_L2_PAYLOAD | \
CNXK_NIX_RSS_L3_L4_SRC_DST | ETH_RSS_LEVEL_MASK | ETH_RSS_C_VLAN)
(RTE_ETH_RSS_PORT | RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | \
RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_TUNNEL | \
RTE_ETH_RSS_L2_PAYLOAD | CNXK_NIX_RSS_L3_L4_SRC_DST | \
RTE_ETH_RSS_LEVEL_MASK | RTE_ETH_RSS_C_VLAN)
#define CNXK_NIX_TX_OFFLOAD_CAPA \
(DEV_TX_OFFLOAD_MBUF_FAST_FREE | DEV_TX_OFFLOAD_MT_LOCKFREE | \
DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_QINQ_INSERT | \
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_TX_OFFLOAD_OUTER_UDP_CKSUM | \
DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM | \
DEV_TX_OFFLOAD_SCTP_CKSUM | DEV_TX_OFFLOAD_TCP_TSO | \
DEV_TX_OFFLOAD_VXLAN_TNL_TSO | DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_MULTI_SEGS | \
DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_SECURITY)
(RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | RTE_ETH_TX_OFFLOAD_MT_LOCKFREE | \
RTE_ETH_TX_OFFLOAD_VLAN_INSERT | RTE_ETH_TX_OFFLOAD_QINQ_INSERT | \
RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | \
RTE_ETH_TX_OFFLOAD_TCP_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_TSO | \
RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | \
RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_SECURITY)
#define CNXK_NIX_RX_OFFLOAD_CAPA \
(DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_SCTP_CKSUM | \
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_RX_OFFLOAD_SCATTER | \
DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | DEV_RX_OFFLOAD_RSS_HASH | \
DEV_RX_OFFLOAD_TIMESTAMP | DEV_RX_OFFLOAD_VLAN_STRIP | \
DEV_RX_OFFLOAD_SECURITY)
(RTE_ETH_RX_OFFLOAD_CHECKSUM | RTE_ETH_RX_OFFLOAD_SCTP_CKSUM | \
RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_SCATTER | \
RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_RSS_HASH | \
RTE_ETH_RX_OFFLOAD_TIMESTAMP | RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
RTE_ETH_RX_OFFLOAD_SECURITY)
#define RSS_IPV4_ENABLE \
(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP | \
ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_SCTP)
(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
#define RSS_IPV6_ENABLE \
(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_UDP | \
ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_SCTP)
(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | \
RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
#define RSS_IPV6_EX_ENABLE \
(ETH_RSS_IPV6_EX | ETH_RSS_IPV6_TCP_EX | ETH_RSS_IPV6_UDP_EX)
(RTE_ETH_RSS_IPV6_EX | RTE_ETH_RSS_IPV6_TCP_EX | RTE_ETH_RSS_IPV6_UDP_EX)
#define RSS_MAX_LEVELS 3

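The driver reads the RSS level carried inside rss_hf with RTE_ETH_RSS_LEVEL(), as in nix_rss_default_setup() above. A minimal sketch of the encoding (the helper name is illustrative):

#include <stdint.h>
#include <rte_ethdev.h>

static uint32_t
rss_level_of(uint64_t rss_hf)
{
	/* Two-bit level field inside the 64-bit hash mask:
	 * 0 = PMD default, 1 = outermost headers, 2 = innermost headers. */
	return RTE_ETH_RSS_LEVEL(rss_hf);
}

/* rss_level_of(RTE_ETH_RSS_IP | RTE_ETH_RSS_LEVEL_OUTERMOST) == 1 */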

@ -104,11 +104,11 @@ parse_reta_size(const char *key, const char *value, void *extra_args)
val = atoi(value);
if (val <= ETH_RSS_RETA_SIZE_64)
if (val <= RTE_ETH_RSS_RETA_SIZE_64)
val = ROC_NIX_RSS_RETA_SZ_64;
else if (val > ETH_RSS_RETA_SIZE_64 && val <= ETH_RSS_RETA_SIZE_128)
else if (val > RTE_ETH_RSS_RETA_SIZE_64 && val <= RTE_ETH_RSS_RETA_SIZE_128)
val = ROC_NIX_RSS_RETA_SZ_128;
else if (val > ETH_RSS_RETA_SIZE_128 && val <= ETH_RSS_RETA_SIZE_256)
else if (val > RTE_ETH_RSS_RETA_SIZE_128 && val <= RTE_ETH_RSS_RETA_SIZE_256)
val = ROC_NIX_RSS_RETA_SZ_256;
else
val = ROC_NIX_RSS_RETA_SZ_64;


@ -81,24 +81,24 @@ cnxk_nix_rx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
uint64_t flags;
const char *output;
} rx_offload_map[] = {
{DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN Strip,"},
{DEV_RX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
{DEV_RX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
{DEV_RX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
{DEV_RX_OFFLOAD_TCP_LRO, " TCP LRO,"},
{DEV_RX_OFFLOAD_QINQ_STRIP, " QinQ VLAN Strip,"},
{DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
{DEV_RX_OFFLOAD_MACSEC_STRIP, " MACsec Strip,"},
{DEV_RX_OFFLOAD_HEADER_SPLIT, " Header Split,"},
{DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN Filter,"},
{DEV_RX_OFFLOAD_VLAN_EXTEND, " VLAN Extend,"},
{DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
{DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
{DEV_RX_OFFLOAD_SECURITY, " Security,"},
{DEV_RX_OFFLOAD_KEEP_CRC, " Keep CRC,"},
{DEV_RX_OFFLOAD_SCTP_CKSUM, " SCTP,"},
{DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
{DEV_RX_OFFLOAD_RSS_HASH, " RSS,"}
{RTE_ETH_RX_OFFLOAD_VLAN_STRIP, " VLAN Strip,"},
{RTE_ETH_RX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
{RTE_ETH_RX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
{RTE_ETH_RX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
{RTE_ETH_RX_OFFLOAD_TCP_LRO, " TCP LRO,"},
{RTE_ETH_RX_OFFLOAD_QINQ_STRIP, " QinQ VLAN Strip,"},
{RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
{RTE_ETH_RX_OFFLOAD_MACSEC_STRIP, " MACsec Strip,"},
{RTE_ETH_RX_OFFLOAD_HEADER_SPLIT, " Header Split,"},
{RTE_ETH_RX_OFFLOAD_VLAN_FILTER, " VLAN Filter,"},
{RTE_ETH_RX_OFFLOAD_VLAN_EXTEND, " VLAN Extend,"},
{RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"},
{RTE_ETH_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
{RTE_ETH_RX_OFFLOAD_SECURITY, " Security,"},
{RTE_ETH_RX_OFFLOAD_KEEP_CRC, " Keep CRC,"},
{RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, " SCTP,"},
{RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
{RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"}
};
static const char *const burst_mode[] = {"Vector Neon, Rx Offloads:",
"Scalar, Rx Offloads:"
@ -142,28 +142,28 @@ cnxk_nix_tx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
uint64_t flags;
const char *output;
} tx_offload_map[] = {
{DEV_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
{DEV_TX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
{DEV_TX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
{DEV_TX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
{DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP Checksum,"},
{DEV_TX_OFFLOAD_TCP_TSO, " TCP TSO,"},
{DEV_TX_OFFLOAD_UDP_TSO, " UDP TSO,"},
{DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
{DEV_TX_OFFLOAD_QINQ_INSERT, " QinQ VLAN Insert,"},
{DEV_TX_OFFLOAD_VXLAN_TNL_TSO, " VXLAN Tunnel TSO,"},
{DEV_TX_OFFLOAD_GRE_TNL_TSO, " GRE Tunnel TSO,"},
{DEV_TX_OFFLOAD_IPIP_TNL_TSO, " IP-in-IP Tunnel TSO,"},
{DEV_TX_OFFLOAD_GENEVE_TNL_TSO, " Geneve Tunnel TSO,"},
{DEV_TX_OFFLOAD_MACSEC_INSERT, " MACsec Insert,"},
{DEV_TX_OFFLOAD_MT_LOCKFREE, " Multi Thread Lockless Tx,"},
{DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"},
{DEV_TX_OFFLOAD_MBUF_FAST_FREE, " H/W MBUF Free,"},
{DEV_TX_OFFLOAD_SECURITY, " Security,"},
{DEV_TX_OFFLOAD_UDP_TNL_TSO, " UDP Tunnel TSO,"},
{DEV_TX_OFFLOAD_IP_TNL_TSO, " IP Tunnel TSO,"},
{DEV_TX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
{DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP, " Timestamp,"}
{RTE_ETH_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
{RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
{RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
{RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
{RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP Checksum,"},
{RTE_ETH_TX_OFFLOAD_TCP_TSO, " TCP TSO,"},
{RTE_ETH_TX_OFFLOAD_UDP_TSO, " UDP TSO,"},
{RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
{RTE_ETH_TX_OFFLOAD_QINQ_INSERT, " QinQ VLAN Insert,"},
{RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO, " VXLAN Tunnel TSO,"},
{RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO, " GRE Tunnel TSO,"},
{RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO, " IP-in-IP Tunnel TSO,"},
{RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO, " Geneve Tunnel TSO,"},
{RTE_ETH_TX_OFFLOAD_MACSEC_INSERT, " MACsec Insert,"},
{RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " Multi Thread Lockless Tx,"},
{RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"},
{RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " H/W MBUF Free,"},
{RTE_ETH_TX_OFFLOAD_SECURITY, " Security,"},
{RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO, " UDP Tunnel TSO,"},
{RTE_ETH_TX_OFFLOAD_IP_TNL_TSO, " IP Tunnel TSO,"},
{RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
{RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP, " Timestamp,"}
};
static const char *const burst_mode[] = {"Vector Neon, Tx Offloads:",
"Scalar, Tx Offloads:"
@ -203,8 +203,8 @@ cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
enum rte_eth_fc_mode mode_map[] = {
RTE_FC_NONE, RTE_FC_RX_PAUSE,
RTE_FC_TX_PAUSE, RTE_FC_FULL
RTE_ETH_FC_NONE, RTE_ETH_FC_RX_PAUSE,
RTE_ETH_FC_TX_PAUSE, RTE_ETH_FC_FULL
};
struct roc_nix *nix = &dev->nix;
int mode;
@ -264,10 +264,10 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
if (fc_conf->mode == fc->mode)
return 0;
rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
(fc_conf->mode == RTE_FC_RX_PAUSE);
tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
(fc_conf->mode == RTE_FC_TX_PAUSE);
rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
(fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
(fc_conf->mode == RTE_ETH_FC_TX_PAUSE);
/* Check if TX pause frame is already enabled or not */
if (fc->tx_pause ^ tx_pause) {
@ -408,13 +408,13 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
* when this feature has not been enabled before.
*/
if (data->dev_started && frame_size > buffsz &&
!(dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
!(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
plt_err("Scatter offload is not enabled for mtu");
goto exit;
}
/* Check <seg size> * <max_seg> >= max_frame */
if ((dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER) &&
if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) &&
frame_size > (buffsz * CNXK_NIX_RX_NB_SEG_MAX)) {
plt_err("Greater than maximum supported packet length");
goto exit;
@ -734,8 +734,8 @@ cnxk_nix_reta_update(struct rte_eth_dev *eth_dev,
}
/* Copy RETA table */
for (i = 0; i < (int)(dev->nix.reta_sz / RTE_RETA_GROUP_SIZE); i++) {
for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
for (i = 0; i < (int)(dev->nix.reta_sz / RTE_ETH_RETA_GROUP_SIZE); i++) {
for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
if ((reta_conf[i].mask >> j) & 0x01)
reta[idx] = reta_conf[i].reta[j];
idx++;
@ -770,8 +770,8 @@ cnxk_nix_reta_query(struct rte_eth_dev *eth_dev,
goto fail;
/* Copy RETA table */
for (i = 0; i < (int)(dev->nix.reta_sz / RTE_RETA_GROUP_SIZE); i++) {
for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
for (i = 0; i < (int)(dev->nix.reta_sz / RTE_ETH_RETA_GROUP_SIZE); i++) {
for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
if ((reta_conf[i].mask >> j) & 0x01)
reta_conf[i].reta[j] = reta[idx];
idx++;
@ -804,7 +804,7 @@ cnxk_nix_rss_hash_update(struct rte_eth_dev *eth_dev,
if (rss_conf->rss_key)
roc_nix_rss_key_set(nix, rss_conf->rss_key);
rss_hash_level = ETH_RSS_LEVEL(rss_conf->rss_hf);
rss_hash_level = RTE_ETH_RSS_LEVEL(rss_conf->rss_hf);
if (rss_hash_level)
rss_hash_level -= 1;
flowkey_cfg =

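The offload-name tables above only switch to the RTE_ETH_* macros; the burst-mode reporting API itself is unchanged. A minimal sketch of reading the strings back, assuming a hypothetical port_id and queue 0:

#include <stdio.h>
#include <rte_ethdev.h>

static void
show_rx_burst_mode(uint16_t port_id)
{
	struct rte_eth_burst_mode mode;

	if (rte_eth_rx_burst_mode_get(port_id, 0, &mode) == 0)
		printf("rxq 0: %s\n", mode.info);
}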

@ -38,7 +38,7 @@ nix_link_status_print(struct rte_eth_dev *eth_dev, struct rte_eth_link *link)
plt_info("Port %d: Link Up - speed %u Mbps - %s",
(int)(eth_dev->data->port_id),
(uint32_t)link->link_speed,
link->link_duplex == ETH_LINK_FULL_DUPLEX
link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX
? "full-duplex"
: "half-duplex");
else
@ -89,7 +89,7 @@ cnxk_eth_dev_link_status_cb(struct roc_nix *nix, struct roc_nix_link_info *link)
eth_link.link_status = link->status;
eth_link.link_speed = link->speed;
eth_link.link_autoneg = ETH_LINK_AUTONEG;
eth_link.link_autoneg = RTE_ETH_LINK_AUTONEG;
eth_link.link_duplex = link->full_duplex;
/* Print link info */
@ -117,17 +117,17 @@ cnxk_nix_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
return 0;
if (roc_nix_is_lbk(&dev->nix)) {
link.link_status = ETH_LINK_UP;
link.link_speed = ETH_SPEED_NUM_100G;
link.link_autoneg = ETH_LINK_FIXED;
link.link_duplex = ETH_LINK_FULL_DUPLEX;
link.link_status = RTE_ETH_LINK_UP;
link.link_speed = RTE_ETH_SPEED_NUM_100G;
link.link_autoneg = RTE_ETH_LINK_FIXED;
link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
} else {
rc = roc_nix_mac_link_info_get(&dev->nix, &info);
if (rc)
return rc;
link.link_status = info.status;
link.link_speed = info.speed;
link.link_autoneg = ETH_LINK_AUTONEG;
link.link_autoneg = RTE_ETH_LINK_AUTONEG;
if (info.full_duplex)
link.link_duplex = info.full_duplex;
}


@ -227,7 +227,7 @@ cnxk_nix_timesync_enable(struct rte_eth_dev *eth_dev)
dev->rx_tstamp_tc.cc_mask = CNXK_CYCLECOUNTER_MASK;
dev->tx_tstamp_tc.cc_mask = CNXK_CYCLECOUNTER_MASK;
dev->rx_offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
rc = roc_nix_ptp_rx_ena_dis(nix, true);
if (!rc) {
@ -257,7 +257,7 @@ int
cnxk_nix_timesync_disable(struct rte_eth_dev *eth_dev)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
uint64_t rx_offloads = DEV_RX_OFFLOAD_TIMESTAMP;
uint64_t rx_offloads = RTE_ETH_RX_OFFLOAD_TIMESTAMP;
struct roc_nix *nix = &dev->nix;
int rc = 0;


@ -69,7 +69,7 @@ npc_rss_action_validate(struct rte_eth_dev *eth_dev,
return -EINVAL;
}
if (eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
if (eth_dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
plt_err("multi-queue mode is disabled");
return -ENOTSUP;
}


@ -28,31 +28,31 @@
#define CXGBE_LINK_STATUS_POLL_CNT 100 /* Max number of times to poll */
#define CXGBE_DEFAULT_RSS_KEY_LEN 40 /* 320-bits */
#define CXGBE_RSS_HF_IPV4_MASK (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
ETH_RSS_NONFRAG_IPV4_OTHER)
#define CXGBE_RSS_HF_IPV6_MASK (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | \
ETH_RSS_NONFRAG_IPV6_OTHER | \
ETH_RSS_IPV6_EX)
#define CXGBE_RSS_HF_TCP_IPV6_MASK (ETH_RSS_NONFRAG_IPV6_TCP | \
ETH_RSS_IPV6_TCP_EX)
#define CXGBE_RSS_HF_UDP_IPV6_MASK (ETH_RSS_NONFRAG_IPV6_UDP | \
ETH_RSS_IPV6_UDP_EX)
#define CXGBE_RSS_HF_ALL (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP)
#define CXGBE_RSS_HF_IPV4_MASK (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
RTE_ETH_RSS_NONFRAG_IPV4_OTHER)
#define CXGBE_RSS_HF_IPV6_MASK (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | \
RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
RTE_ETH_RSS_IPV6_EX)
#define CXGBE_RSS_HF_TCP_IPV6_MASK (RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
RTE_ETH_RSS_IPV6_TCP_EX)
#define CXGBE_RSS_HF_UDP_IPV6_MASK (RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
RTE_ETH_RSS_IPV6_UDP_EX)
#define CXGBE_RSS_HF_ALL (RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP)
/* Tx/Rx Offloads supported */
#define CXGBE_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT | \
DEV_TX_OFFLOAD_IPV4_CKSUM | \
DEV_TX_OFFLOAD_UDP_CKSUM | \
DEV_TX_OFFLOAD_TCP_CKSUM | \
DEV_TX_OFFLOAD_TCP_TSO | \
DEV_TX_OFFLOAD_MULTI_SEGS)
#define CXGBE_TX_OFFLOADS (RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
RTE_ETH_TX_OFFLOAD_TCP_TSO | \
RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
#define CXGBE_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP | \
DEV_RX_OFFLOAD_IPV4_CKSUM | \
DEV_RX_OFFLOAD_UDP_CKSUM | \
DEV_RX_OFFLOAD_TCP_CKSUM | \
DEV_RX_OFFLOAD_SCATTER | \
DEV_RX_OFFLOAD_RSS_HASH)
#define CXGBE_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
RTE_ETH_RX_OFFLOAD_TCP_CKSUM | \
RTE_ETH_RX_OFFLOAD_SCATTER | \
RTE_ETH_RX_OFFLOAD_RSS_HASH)
/* Devargs filtermode and filtermask representation */
enum cxgbe_devargs_filter_mode_flags {


@ -231,9 +231,9 @@ int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
}
new_link.link_status = cxgbe_force_linkup(adapter) ?
ETH_LINK_UP : pi->link_cfg.link_ok;
RTE_ETH_LINK_UP : pi->link_cfg.link_ok;
new_link.link_autoneg = (lc->link_caps & FW_PORT_CAP32_ANEG) ? 1 : 0;
new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
new_link.link_speed = t4_fwcap_to_speed(lc->link_caps);
return rte_eth_linkstatus_set(eth_dev, &new_link);
@ -374,7 +374,7 @@ int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
goto out;
}
if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
eth_dev->data->scattered_rx = 1;
else
eth_dev->data->scattered_rx = 0;
@ -438,9 +438,9 @@ int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
CXGBE_FUNC_TRACE();
if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
eth_dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_RSS_HASH;
RTE_ETH_RX_OFFLOAD_RSS_HASH;
if (!(adapter->flags & FW_QUEUE_BOUND)) {
err = cxgbe_setup_sge_fwevtq(adapter);
@ -1080,13 +1080,13 @@ static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev,
rx_pause = 1;
if (rx_pause && tx_pause)
fc_conf->mode = RTE_FC_FULL;
fc_conf->mode = RTE_ETH_FC_FULL;
else if (rx_pause)
fc_conf->mode = RTE_FC_RX_PAUSE;
fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
else if (tx_pause)
fc_conf->mode = RTE_FC_TX_PAUSE;
fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
else
fc_conf->mode = RTE_FC_NONE;
fc_conf->mode = RTE_ETH_FC_NONE;
return 0;
}
@ -1099,12 +1099,12 @@ static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
u8 tx_pause = 0, rx_pause = 0;
int ret;
if (fc_conf->mode == RTE_FC_FULL) {
if (fc_conf->mode == RTE_ETH_FC_FULL) {
tx_pause = 1;
rx_pause = 1;
} else if (fc_conf->mode == RTE_FC_TX_PAUSE) {
} else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE) {
tx_pause = 1;
} else if (fc_conf->mode == RTE_FC_RX_PAUSE) {
} else if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE) {
rx_pause = 1;
}
@ -1200,9 +1200,9 @@ static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
rss_hf |= CXGBE_RSS_HF_IPV6_MASK;
if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) {
rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
}
if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
@ -1246,8 +1246,8 @@ static int cxgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
rte_memcpy(rss, pi->rss, pi->rss_size * sizeof(u16));
for (i = 0; i < reta_size; i++) {
idx = i / RTE_RETA_GROUP_SIZE;
shift = i % RTE_RETA_GROUP_SIZE;
idx = i / RTE_ETH_RETA_GROUP_SIZE;
shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (!(reta_conf[idx].mask & (1ULL << shift)))
continue;
@ -1277,8 +1277,8 @@ static int cxgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
return -EINVAL;
for (i = 0; i < reta_size; i++) {
idx = i / RTE_RETA_GROUP_SIZE;
shift = i % RTE_RETA_GROUP_SIZE;
idx = i / RTE_ETH_RETA_GROUP_SIZE;
shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (!(reta_conf[idx].mask & (1ULL << shift)))
continue;
@ -1479,7 +1479,7 @@ static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
if (lc->pcaps & FW_PORT_CAP32_SPEED_100G) {
if (capa_arr) {
capa_arr[num].speed = ETH_SPEED_NUM_100G;
capa_arr[num].speed = RTE_ETH_SPEED_NUM_100G;
capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
RTE_ETH_FEC_MODE_CAPA_MASK(RS);
}
@ -1488,7 +1488,7 @@ static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
if (lc->pcaps & FW_PORT_CAP32_SPEED_50G) {
if (capa_arr) {
capa_arr[num].speed = ETH_SPEED_NUM_50G;
capa_arr[num].speed = RTE_ETH_SPEED_NUM_50G;
capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
}
@ -1497,7 +1497,7 @@ static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
if (lc->pcaps & FW_PORT_CAP32_SPEED_25G) {
if (capa_arr) {
capa_arr[num].speed = ETH_SPEED_NUM_25G;
capa_arr[num].speed = RTE_ETH_SPEED_NUM_25G;
capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
RTE_ETH_FEC_MODE_CAPA_MASK(RS);

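cxgbe_fec_get_capa_speed_to_fec() reports per-speed FEC capabilities using the renamed speed constants. A minimal sketch of the caller side, assuming a hypothetical port_id and an illustrative array size:

#include <stdio.h>
#include <rte_ethdev.h>

static void
show_100g_fec(uint16_t port_id)
{
	struct rte_eth_fec_capa capa[8];
	int i, num;

	num = rte_eth_fec_get_capability(port_id, capa, RTE_DIM(capa));
	for (i = 0; i < num; i++)
		if (capa[i].speed == RTE_ETH_SPEED_NUM_100G &&
		    (capa[i].capa & RTE_ETH_FEC_MODE_CAPA_MASK(RS)))
			printf("port %u: RS-FEC supported at 100G\n", port_id);
}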

@ -1670,7 +1670,7 @@ int cxgbe_link_start(struct port_info *pi)
* that step explicitly.
*/
ret = t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, -1, -1, -1,
!!(conf_offloads & DEV_RX_OFFLOAD_VLAN_STRIP),
!!(conf_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP),
true);
if (ret == 0) {
ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt,
@ -1694,7 +1694,7 @@ int cxgbe_link_start(struct port_info *pi)
}
if (ret == 0 && cxgbe_force_linkup(adapter))
pi->eth_dev->data->dev_link.link_status = ETH_LINK_UP;
pi->eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
return ret;
}
@ -1725,10 +1725,10 @@ int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t rss_hf)
if (rss_hf & CXGBE_RSS_HF_IPV4_MASK)
flags |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
F_FW_RSS_VI_CONFIG_CMD_UDPEN;
@ -1865,7 +1865,7 @@ static void fw_caps_to_speed_caps(enum fw_port_type port_type,
{
#define SET_SPEED(__speed_name) \
do { \
*speed_caps |= ETH_LINK_ ## __speed_name; \
*speed_caps |= RTE_ETH_LINK_ ## __speed_name; \
} while (0)
#define FW_CAPS_TO_SPEED(__fw_name) \
@ -1952,7 +1952,7 @@ void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps)
speed_caps);
if (!(pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG))
*speed_caps |= ETH_LINK_SPEED_FIXED;
*speed_caps |= RTE_ETH_LINK_SPEED_FIXED;
}
/**


@ -54,29 +54,29 @@
/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
DEV_RX_OFFLOAD_SCATTER;
RTE_ETH_RX_OFFLOAD_SCATTER;
/* Rx offloads which cannot be disabled */
static uint64_t dev_rx_offloads_nodis =
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_RX_OFFLOAD_RSS_HASH;
RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
RTE_ETH_RX_OFFLOAD_RSS_HASH;
/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup =
DEV_TX_OFFLOAD_MT_LOCKFREE |
DEV_TX_OFFLOAD_MBUF_FAST_FREE;
RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |
RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
/* Tx offloads which cannot be disabled */
static uint64_t dev_tx_offloads_nodis =
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM |
DEV_TX_OFFLOAD_SCTP_CKSUM |
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_TX_OFFLOAD_MULTI_SEGS;
RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
/* Keep track of whether QMAN and BMAN have been globally initialized */
static int is_global_init;
@ -238,7 +238,7 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
fman_if_set_maxfrm(dev->process_private, max_rx_pktlen);
if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
if (rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
DPAA_PMD_DEBUG("enabling scatter mode");
fman_if_set_sg(dev->process_private, 1);
dev->data->scattered_rx = 1;
@ -283,43 +283,43 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
/* Configure link only if link is UP*/
if (link->link_status) {
if (eth_conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
if (eth_conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
/* Start autoneg only if link is not in autoneg mode */
if (!link->link_autoneg)
dpaa_restart_link_autoneg(__fif->node_name);
} else if (eth_conf->link_speeds & ETH_LINK_SPEED_FIXED) {
switch (eth_conf->link_speeds & ~ETH_LINK_SPEED_FIXED) {
case ETH_LINK_SPEED_10M_HD:
speed = ETH_SPEED_NUM_10M;
duplex = ETH_LINK_HALF_DUPLEX;
} else if (eth_conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
switch (eth_conf->link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
case RTE_ETH_LINK_SPEED_10M_HD:
speed = RTE_ETH_SPEED_NUM_10M;
duplex = RTE_ETH_LINK_HALF_DUPLEX;
break;
case ETH_LINK_SPEED_10M:
speed = ETH_SPEED_NUM_10M;
duplex = ETH_LINK_FULL_DUPLEX;
case RTE_ETH_LINK_SPEED_10M:
speed = RTE_ETH_SPEED_NUM_10M;
duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
case ETH_LINK_SPEED_100M_HD:
speed = ETH_SPEED_NUM_100M;
duplex = ETH_LINK_HALF_DUPLEX;
case RTE_ETH_LINK_SPEED_100M_HD:
speed = RTE_ETH_SPEED_NUM_100M;
duplex = RTE_ETH_LINK_HALF_DUPLEX;
break;
case ETH_LINK_SPEED_100M:
speed = ETH_SPEED_NUM_100M;
duplex = ETH_LINK_FULL_DUPLEX;
case RTE_ETH_LINK_SPEED_100M:
speed = RTE_ETH_SPEED_NUM_100M;
duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
case ETH_LINK_SPEED_1G:
speed = ETH_SPEED_NUM_1G;
duplex = ETH_LINK_FULL_DUPLEX;
case RTE_ETH_LINK_SPEED_1G:
speed = RTE_ETH_SPEED_NUM_1G;
duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
case ETH_LINK_SPEED_2_5G:
speed = ETH_SPEED_NUM_2_5G;
duplex = ETH_LINK_FULL_DUPLEX;
case RTE_ETH_LINK_SPEED_2_5G:
speed = RTE_ETH_SPEED_NUM_2_5G;
duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
case ETH_LINK_SPEED_10G:
speed = ETH_SPEED_NUM_10G;
duplex = ETH_LINK_FULL_DUPLEX;
case RTE_ETH_LINK_SPEED_10G:
speed = RTE_ETH_SPEED_NUM_10G;
duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
default:
speed = ETH_SPEED_NUM_NONE;
duplex = ETH_LINK_FULL_DUPLEX;
speed = RTE_ETH_SPEED_NUM_NONE;
duplex = RTE_ETH_LINK_FULL_DUPLEX;
break;
}
/* Set link speed */
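For context on the switch above: an application requests a fixed speed by OR'ing RTE_ETH_LINK_SPEED_FIXED together with exactly one speed flag, so the driver must mask the FIXED bit back out before matching the speed cases (hence the `~` above). A hedged application-side sketch:

	#include <rte_ethdev.h>

	/* Sketch: request a fixed 1G link instead of autonegotiation. */
	static void
	request_fixed_1g(struct rte_eth_conf *port_conf)
	{
		port_conf->link_speeds = RTE_ETH_LINK_SPEED_FIXED |
					 RTE_ETH_LINK_SPEED_1G;
		/* The driver clears RTE_ETH_LINK_SPEED_FIXED and switches on the
		 * remaining bit, landing in the RTE_ETH_LINK_SPEED_1G case. */
	}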
@ -535,30 +535,30 @@ static int dpaa_eth_dev_info(struct rte_eth_dev *dev,
dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
dev_info->max_hash_mac_addrs = 0;
dev_info->max_vfs = 0;
dev_info->max_vmdq_pools = ETH_16_POOLS;
dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
if (fif->mac_type == fman_mac_1g) {
dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
| ETH_LINK_SPEED_10M
| ETH_LINK_SPEED_100M_HD
| ETH_LINK_SPEED_100M
| ETH_LINK_SPEED_1G;
dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
| RTE_ETH_LINK_SPEED_10M
| RTE_ETH_LINK_SPEED_100M_HD
| RTE_ETH_LINK_SPEED_100M
| RTE_ETH_LINK_SPEED_1G;
} else if (fif->mac_type == fman_mac_2_5g) {
dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
| ETH_LINK_SPEED_10M
| ETH_LINK_SPEED_100M_HD
| ETH_LINK_SPEED_100M
| ETH_LINK_SPEED_1G
| ETH_LINK_SPEED_2_5G;
dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
| RTE_ETH_LINK_SPEED_10M
| RTE_ETH_LINK_SPEED_100M_HD
| RTE_ETH_LINK_SPEED_100M
| RTE_ETH_LINK_SPEED_1G
| RTE_ETH_LINK_SPEED_2_5G;
} else if (fif->mac_type == fman_mac_10g) {
dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
| ETH_LINK_SPEED_10M
| ETH_LINK_SPEED_100M_HD
| ETH_LINK_SPEED_100M
| ETH_LINK_SPEED_1G
| ETH_LINK_SPEED_2_5G
| ETH_LINK_SPEED_10G;
dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
| RTE_ETH_LINK_SPEED_10M
| RTE_ETH_LINK_SPEED_100M_HD
| RTE_ETH_LINK_SPEED_100M
| RTE_ETH_LINK_SPEED_1G
| RTE_ETH_LINK_SPEED_2_5G
| RTE_ETH_LINK_SPEED_10G;
} else {
DPAA_PMD_ERR("invalid link_speed: %s, %d",
dpaa_intf->name, fif->mac_type);
@ -591,12 +591,12 @@ dpaa_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
uint64_t flags;
const char *output;
} rx_offload_map[] = {
{DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
{DEV_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
{DEV_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
{DEV_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
{DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
{DEV_RX_OFFLOAD_RSS_HASH, " RSS,"}
{RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"},
{RTE_ETH_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
{RTE_ETH_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
{RTE_ETH_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
{RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
{RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"}
};
/* Update Rx offload info */
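These flag-to-string tables feed the burst-mode string reported through rte_eth_rx_burst_mode_get(); the consuming loop (outside this hunk) is a plain masked scan. A minimal standalone sketch of that pattern:

	#include <stdint.h>
	#include <string.h>

	struct offload_name {
		uint64_t flags;
		const char *output;
	};

	/* Sketch: append the name of every offload bit present in 'flags'. */
	static void
	format_offloads(const struct offload_name *map, size_t n, uint64_t flags,
			char *buf, size_t len)
	{
		size_t i;

		for (i = 0; i < n; i++)
			if (flags & map[i].flags)
				strncat(buf, map[i].output, len - strlen(buf) - 1);
	}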
@ -623,14 +623,14 @@ dpaa_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
uint64_t flags;
const char *output;
} tx_offload_map[] = {
{DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
{DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
{DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
{DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
{DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
{DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
{DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
{DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
{RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
{RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
{RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
{RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
{RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
{RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
{RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
{RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
};
/* Update Tx offload info */
@ -664,7 +664,7 @@ static int dpaa_eth_link_update(struct rte_eth_dev *dev,
ret = dpaa_get_link_status(__fif->node_name, link);
if (ret)
return ret;
if (link->link_status == ETH_LINK_DOWN &&
if (link->link_status == RTE_ETH_LINK_DOWN &&
wait_to_complete)
rte_delay_ms(CHECK_INTERVAL);
else
@ -675,15 +675,15 @@ static int dpaa_eth_link_update(struct rte_eth_dev *dev,
}
if (ioctl_version < 2) {
link->link_duplex = ETH_LINK_FULL_DUPLEX;
link->link_autoneg = ETH_LINK_AUTONEG;
link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
link->link_autoneg = RTE_ETH_LINK_AUTONEG;
if (fif->mac_type == fman_mac_1g)
link->link_speed = ETH_SPEED_NUM_1G;
link->link_speed = RTE_ETH_SPEED_NUM_1G;
else if (fif->mac_type == fman_mac_2_5g)
link->link_speed = ETH_SPEED_NUM_2_5G;
link->link_speed = RTE_ETH_SPEED_NUM_2_5G;
else if (fif->mac_type == fman_mac_10g)
link->link_speed = ETH_SPEED_NUM_10G;
link->link_speed = RTE_ETH_SPEED_NUM_10G;
else
DPAA_PMD_ERR("invalid link_speed: %s, %d",
dpaa_intf->name, fif->mac_type);
@ -962,7 +962,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
if (max_rx_pktlen <= buffsz) {
;
} else if (dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_SCATTER) {
RTE_ETH_RX_OFFLOAD_SCATTER) {
if (max_rx_pktlen > buffsz * DPAA_SGT_MAX_ENTRIES) {
DPAA_PMD_ERR("Maximum Rx packet size %d too big to fit "
"MaxSGlist %d",
@ -1268,7 +1268,7 @@ static int dpaa_link_down(struct rte_eth_dev *dev)
__fif = container_of(fif, struct __fman_if, __if);
if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
dpaa_update_link_status(__fif->node_name, ETH_LINK_DOWN);
dpaa_update_link_status(__fif->node_name, RTE_ETH_LINK_DOWN);
else
return dpaa_eth_dev_stop(dev);
return 0;
@ -1284,7 +1284,7 @@ static int dpaa_link_up(struct rte_eth_dev *dev)
__fif = container_of(fif, struct __fman_if, __if);
if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
dpaa_update_link_status(__fif->node_name, ETH_LINK_UP);
dpaa_update_link_status(__fif->node_name, RTE_ETH_LINK_UP);
else
dpaa_eth_dev_start(dev);
return 0;
@ -1314,10 +1314,10 @@ dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
return -EINVAL;
}
if (fc_conf->mode == RTE_FC_NONE) {
if (fc_conf->mode == RTE_ETH_FC_NONE) {
return 0;
} else if (fc_conf->mode == RTE_FC_TX_PAUSE ||
fc_conf->mode == RTE_FC_FULL) {
} else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE ||
fc_conf->mode == RTE_ETH_FC_FULL) {
fman_if_set_fc_threshold(dev->process_private,
fc_conf->high_water,
fc_conf->low_water,
@ -1361,11 +1361,11 @@ dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
}
ret = fman_if_get_fc_threshold(dev->process_private);
if (ret) {
fc_conf->mode = RTE_FC_TX_PAUSE;
fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
fc_conf->pause_time =
fman_if_get_fc_quanta(dev->process_private);
} else {
fc_conf->mode = RTE_FC_NONE;
fc_conf->mode = RTE_ETH_FC_NONE;
}
return 0;
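Seen from the application, the renamed RTE_ETH_FC_* modes round-trip through the generic flow control API. A hedged usage sketch (single port assumed, minimal error handling):

	#include <string.h>
	#include <rte_ethdev.h>

	/* Sketch: read flow control state and enable TX pause if it is off. */
	static int
	enable_tx_pause(uint16_t port_id)
	{
		struct rte_eth_fc_conf fc_conf;
		int ret;

		memset(&fc_conf, 0, sizeof(fc_conf));
		ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
		if (ret != 0)
			return ret;

		if (fc_conf.mode == RTE_ETH_FC_NONE) {
			fc_conf.mode = RTE_ETH_FC_TX_PAUSE;
			ret = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
		}
		return ret;
	}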
@ -1626,10 +1626,10 @@ static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf,
fc_conf = dpaa_intf->fc_conf;
ret = fman_if_get_fc_threshold(fman_intf);
if (ret) {
fc_conf->mode = RTE_FC_TX_PAUSE;
fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
fc_conf->pause_time = fman_if_get_fc_quanta(fman_intf);
} else {
fc_conf->mode = RTE_FC_NONE;
fc_conf->mode = RTE_ETH_FC_NONE;
}
return 0;

View File

@ -74,11 +74,11 @@
#define DPAA_DEBUG_FQ_TX_ERROR 1
#define DPAA_RSS_OFFLOAD_ALL ( \
ETH_RSS_L2_PAYLOAD | \
ETH_RSS_IP | \
ETH_RSS_UDP | \
ETH_RSS_TCP | \
ETH_RSS_SCTP)
RTE_ETH_RSS_L2_PAYLOAD | \
RTE_ETH_RSS_IP | \
RTE_ETH_RSS_UDP | \
RTE_ETH_RSS_TCP | \
RTE_ETH_RSS_SCTP)
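DPAA_RSS_OFFLOAD_ALL is what the driver exports via dev_info->flow_type_rss_offloads, and applications are expected to intersect their requested hash fields with it. A hedged sketch of building such a request:

	#include <rte_ethdev.h>

	/* Sketch: request IP/UDP/TCP hashing, trimmed to what the port supports. */
	static void
	build_rss_conf(const struct rte_eth_dev_info *dev_info,
		       struct rte_eth_conf *port_conf)
	{
		uint64_t wanted = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP;

		port_conf->rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
		port_conf->rx_adv_conf.rss_conf.rss_key = NULL;
		port_conf->rx_adv_conf.rss_conf.rss_hf =
			wanted & dev_info->flow_type_rss_offloads;
	}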
#define DPAA_TX_CKSUM_OFFLOAD_MASK ( \
PKT_TX_IP_CKSUM | \

View File

@ -394,7 +394,7 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
if (req_dist_set % 2 != 0) {
dist_field = 1U << loop;
switch (dist_field) {
case ETH_RSS_L2_PAYLOAD:
case RTE_ETH_RSS_L2_PAYLOAD:
if (l2_configured)
break;
@ -404,9 +404,9 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
= HEADER_TYPE_ETH;
break;
case ETH_RSS_IPV4:
case ETH_RSS_FRAG_IPV4:
case ETH_RSS_NONFRAG_IPV4_OTHER:
case RTE_ETH_RSS_IPV4:
case RTE_ETH_RSS_FRAG_IPV4:
case RTE_ETH_RSS_NONFRAG_IPV4_OTHER:
if (ipv4_configured)
break;
@ -415,10 +415,10 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
= HEADER_TYPE_IPV4;
break;
case ETH_RSS_IPV6:
case ETH_RSS_FRAG_IPV6:
case ETH_RSS_NONFRAG_IPV6_OTHER:
case ETH_RSS_IPV6_EX:
case RTE_ETH_RSS_IPV6:
case RTE_ETH_RSS_FRAG_IPV6:
case RTE_ETH_RSS_NONFRAG_IPV6_OTHER:
case RTE_ETH_RSS_IPV6_EX:
if (ipv6_configured)
break;
@ -427,9 +427,9 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
= HEADER_TYPE_IPV6;
break;
case ETH_RSS_NONFRAG_IPV4_TCP:
case ETH_RSS_NONFRAG_IPV6_TCP:
case ETH_RSS_IPV6_TCP_EX:
case RTE_ETH_RSS_NONFRAG_IPV4_TCP:
case RTE_ETH_RSS_NONFRAG_IPV6_TCP:
case RTE_ETH_RSS_IPV6_TCP_EX:
if (tcp_configured)
break;
@ -438,9 +438,9 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
= HEADER_TYPE_TCP;
break;
case ETH_RSS_NONFRAG_IPV4_UDP:
case ETH_RSS_NONFRAG_IPV6_UDP:
case ETH_RSS_IPV6_UDP_EX:
case RTE_ETH_RSS_NONFRAG_IPV4_UDP:
case RTE_ETH_RSS_NONFRAG_IPV6_UDP:
case RTE_ETH_RSS_IPV6_UDP_EX:
if (udp_configured)
break;
@ -449,8 +449,8 @@ static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
= HEADER_TYPE_UDP;
break;
case ETH_RSS_NONFRAG_IPV4_SCTP:
case ETH_RSS_NONFRAG_IPV6_SCTP:
case RTE_ETH_RSS_NONFRAG_IPV4_SCTP:
case RTE_ETH_RSS_NONFRAG_IPV6_SCTP:
if (sctp_configured)
break;

View File

@ -220,9 +220,8 @@ dpaa2_distset_to_dpkg_profile_cfg(
if (req_dist_set % 2 != 0) {
dist_field = 1ULL << loop;
switch (dist_field) {
case ETH_RSS_L2_PAYLOAD:
case ETH_RSS_ETH:
case RTE_ETH_RSS_L2_PAYLOAD:
case RTE_ETH_RSS_ETH:
if (l2_configured)
break;
l2_configured = 1;
@ -238,7 +237,7 @@ dpaa2_distset_to_dpkg_profile_cfg(
i++;
break;
case ETH_RSS_PPPOE:
case RTE_ETH_RSS_PPPOE:
if (pppoe_configured)
break;
kg_cfg->extracts[i].extract.from_hdr.prot =
@ -252,7 +251,7 @@ dpaa2_distset_to_dpkg_profile_cfg(
i++;
break;
case ETH_RSS_ESP:
case RTE_ETH_RSS_ESP:
if (esp_configured)
break;
esp_configured = 1;
@ -268,7 +267,7 @@ dpaa2_distset_to_dpkg_profile_cfg(
i++;
break;
case ETH_RSS_AH:
case RTE_ETH_RSS_AH:
if (ah_configured)
break;
ah_configured = 1;
@ -284,8 +283,8 @@ dpaa2_distset_to_dpkg_profile_cfg(
i++;
break;
case ETH_RSS_C_VLAN:
case ETH_RSS_S_VLAN:
case RTE_ETH_RSS_C_VLAN:
case RTE_ETH_RSS_S_VLAN:
if (vlan_configured)
break;
vlan_configured = 1;
@ -301,7 +300,7 @@ dpaa2_distset_to_dpkg_profile_cfg(
i++;
break;
case ETH_RSS_MPLS:
case RTE_ETH_RSS_MPLS:
if (mpls_configured)
break;
@ -338,13 +337,13 @@ dpaa2_distset_to_dpkg_profile_cfg(
i++;
break;
case ETH_RSS_IPV4:
case ETH_RSS_FRAG_IPV4:
case ETH_RSS_NONFRAG_IPV4_OTHER:
case ETH_RSS_IPV6:
case ETH_RSS_FRAG_IPV6:
case ETH_RSS_NONFRAG_IPV6_OTHER:
case ETH_RSS_IPV6_EX:
case RTE_ETH_RSS_IPV4:
case RTE_ETH_RSS_FRAG_IPV4:
case RTE_ETH_RSS_NONFRAG_IPV4_OTHER:
case RTE_ETH_RSS_IPV6:
case RTE_ETH_RSS_FRAG_IPV6:
case RTE_ETH_RSS_NONFRAG_IPV6_OTHER:
case RTE_ETH_RSS_IPV6_EX:
if (l3_configured)
break;
@ -382,12 +381,12 @@ dpaa2_distset_to_dpkg_profile_cfg(
i++;
break;
case ETH_RSS_NONFRAG_IPV4_TCP:
case ETH_RSS_NONFRAG_IPV6_TCP:
case ETH_RSS_NONFRAG_IPV4_UDP:
case ETH_RSS_NONFRAG_IPV6_UDP:
case ETH_RSS_IPV6_TCP_EX:
case ETH_RSS_IPV6_UDP_EX:
case RTE_ETH_RSS_NONFRAG_IPV4_TCP:
case RTE_ETH_RSS_NONFRAG_IPV6_TCP:
case RTE_ETH_RSS_NONFRAG_IPV4_UDP:
case RTE_ETH_RSS_NONFRAG_IPV6_UDP:
case RTE_ETH_RSS_IPV6_TCP_EX:
case RTE_ETH_RSS_IPV6_UDP_EX:
if (l4_configured)
break;
@ -414,8 +413,8 @@ dpaa2_distset_to_dpkg_profile_cfg(
i++;
break;
case ETH_RSS_NONFRAG_IPV4_SCTP:
case ETH_RSS_NONFRAG_IPV6_SCTP:
case RTE_ETH_RSS_NONFRAG_IPV4_SCTP:
case RTE_ETH_RSS_NONFRAG_IPV6_SCTP:
if (sctp_configured)
break;

View File

@ -38,33 +38,33 @@
/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
DEV_RX_OFFLOAD_CHECKSUM |
DEV_RX_OFFLOAD_SCTP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_TIMESTAMP;
RTE_ETH_RX_OFFLOAD_CHECKSUM |
RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
RTE_ETH_RX_OFFLOAD_TIMESTAMP;
/* Rx offloads which cannot be disabled */
static uint64_t dev_rx_offloads_nodis =
DEV_RX_OFFLOAD_RSS_HASH |
DEV_RX_OFFLOAD_SCATTER;
RTE_ETH_RX_OFFLOAD_RSS_HASH |
RTE_ETH_RX_OFFLOAD_SCATTER;
/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM |
DEV_TX_OFFLOAD_SCTP_CKSUM |
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_TX_OFFLOAD_MT_LOCKFREE |
DEV_TX_OFFLOAD_MBUF_FAST_FREE;
RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |
RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
/* Tx offloads which cannot be disabled */
static uint64_t dev_tx_offloads_nodis =
DEV_TX_OFFLOAD_MULTI_SEGS;
RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
/* enable timestamp in mbuf */
bool dpaa2_enable_ts[RTE_MAX_ETHPORTS];
@ -142,7 +142,7 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
PMD_INIT_FUNC_TRACE();
if (mask & ETH_VLAN_FILTER_MASK) {
if (mask & RTE_ETH_VLAN_FILTER_MASK) {
/* VLAN Filter not available */
if (!priv->max_vlan_filters) {
DPAA2_PMD_INFO("VLAN filter not available");
@ -150,7 +150,7 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
}
if (dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_VLAN_FILTER)
RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
priv->token, true);
else
@ -251,13 +251,13 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_rx_offloads_nodis;
dev_info->tx_offload_capa = dev_tx_offloads_sup |
dev_tx_offloads_nodis;
dev_info->speed_capa = ETH_LINK_SPEED_1G |
ETH_LINK_SPEED_2_5G |
ETH_LINK_SPEED_10G;
dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G |
RTE_ETH_LINK_SPEED_2_5G |
RTE_ETH_LINK_SPEED_10G;
dev_info->max_hash_mac_addrs = 0;
dev_info->max_vfs = 0;
dev_info->max_vmdq_pools = ETH_16_POOLS;
dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;
dev_info->default_rxportconf.burst_size = dpaa2_dqrr_size;
@ -270,10 +270,10 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->default_rxportconf.ring_size = DPAA2_RX_DEFAULT_NBDESC;
if (dpaa2_svr_family == SVR_LX2160A) {
dev_info->speed_capa |= ETH_LINK_SPEED_25G |
ETH_LINK_SPEED_40G |
ETH_LINK_SPEED_50G |
ETH_LINK_SPEED_100G;
dev_info->speed_capa |= RTE_ETH_LINK_SPEED_25G |
RTE_ETH_LINK_SPEED_40G |
RTE_ETH_LINK_SPEED_50G |
RTE_ETH_LINK_SPEED_100G;
}
return 0;
@ -291,15 +291,15 @@ dpaa2_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
uint64_t flags;
const char *output;
} rx_offload_map[] = {
{DEV_RX_OFFLOAD_CHECKSUM, " Checksum,"},
{DEV_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
{DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
{DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
{DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
{DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
{DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
{DEV_RX_OFFLOAD_RSS_HASH, " RSS,"},
{DEV_RX_OFFLOAD_SCATTER, " Scattered,"}
{RTE_ETH_RX_OFFLOAD_CHECKSUM, " Checksum,"},
{RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
{RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
{RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
{RTE_ETH_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
{RTE_ETH_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
{RTE_ETH_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
{RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"},
{RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"}
};
/* Update Rx offload info */
@ -326,15 +326,15 @@ dpaa2_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
uint64_t flags;
const char *output;
} tx_offload_map[] = {
{DEV_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
{DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
{DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
{DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
{DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
{DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
{DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
{DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
{DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
{RTE_ETH_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
{RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
{RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
{RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
{RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
{RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
{RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
{RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
{RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
};
/* Update Tx offload info */
@ -573,7 +573,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
return -1;
}
if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
if (eth_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
ret = dpaa2_setup_flow_dist(dev,
eth_conf->rx_adv_conf.rss_conf.rss_hf,
@ -587,12 +587,12 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
}
}
if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
rx_l3_csum_offload = true;
if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) ||
(rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) ||
(rx_offloads & DEV_RX_OFFLOAD_SCTP_CKSUM))
if ((rx_offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) ||
(rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) ||
(rx_offloads & RTE_ETH_RX_OFFLOAD_SCTP_CKSUM))
rx_l4_csum_offload = true;
ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
@ -610,7 +610,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
}
#if !defined(RTE_LIBRTE_IEEE1588)
if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
if (rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
#endif
{
ret = rte_mbuf_dyn_rx_timestamp_register(
@ -623,12 +623,12 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
dpaa2_enable_ts[dev->data->port_id] = true;
}
if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
tx_l3_csum_offload = true;
if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
if ((tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) ||
(tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ||
(tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM))
tx_l4_csum_offload = true;
ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
@ -660,8 +660,8 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
}
}
if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
dpaa2_vlan_offload_set(dev, RTE_ETH_VLAN_FILTER_MASK);
dpaa2_tm_init(dev);
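The configure path above reduces several RTE_ETH_RX_OFFLOAD_* checksum bits to two per-direction hardware toggles (L3 and L4 validation). A standalone sketch of that mapping (the function name is illustrative, not the driver's):

	#include <stdbool.h>
	#include <stdint.h>
	#include <rte_ethdev.h>

	/* Sketch: collapse ethdev Rx checksum offload bits into the two
	 * toggles the hardware understands. Illustrative only. */
	static void
	map_rx_csum_offloads(uint64_t rx_offloads, bool *l3_csum, bool *l4_csum)
	{
		*l3_csum = (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) != 0;
		*l4_csum = (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
					   RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
					   RTE_ETH_RX_OFFLOAD_SCTP_CKSUM)) != 0;
	}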
@ -1856,7 +1856,7 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret);
return -1;
}
if (state.up == ETH_LINK_DOWN &&
if (state.up == RTE_ETH_LINK_DOWN &&
wait_to_complete)
rte_delay_ms(CHECK_INTERVAL);
else
@ -1868,9 +1868,9 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
link.link_speed = state.rate;
if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
link.link_duplex = ETH_LINK_HALF_DUPLEX;
link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
else
link.link_duplex = ETH_LINK_FULL_DUPLEX;
link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
ret = rte_eth_linkstatus_set(dev, &link);
if (ret == -1)
@ -2031,9 +2031,9 @@ dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
* No TX side flow control (send Pause frame disabled)
*/
if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
fc_conf->mode = RTE_FC_FULL;
fc_conf->mode = RTE_ETH_FC_FULL;
else
fc_conf->mode = RTE_FC_RX_PAUSE;
fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
} else {
/* DPNI_LINK_OPT_PAUSE not set
* if ASYM_PAUSE set,
@ -2043,9 +2043,9 @@ dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
* Flow control disabled
*/
if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
fc_conf->mode = RTE_FC_TX_PAUSE;
fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
else
fc_conf->mode = RTE_FC_NONE;
fc_conf->mode = RTE_ETH_FC_NONE;
}
return ret;
@ -2089,14 +2089,14 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
/* update cfg with fc_conf */
switch (fc_conf->mode) {
case RTE_FC_FULL:
case RTE_ETH_FC_FULL:
/* Full flow control;
* OPT_PAUSE set, ASYM_PAUSE not set
*/
cfg.options |= DPNI_LINK_OPT_PAUSE;
cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
break;
case RTE_FC_TX_PAUSE:
case RTE_ETH_FC_TX_PAUSE:
/* Enable RX flow control
* OPT_PAUSE not set;
* ASYM_PAUSE set;
@ -2104,7 +2104,7 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
cfg.options &= ~DPNI_LINK_OPT_PAUSE;
break;
case RTE_FC_RX_PAUSE:
case RTE_ETH_FC_RX_PAUSE:
/* Enable TX Flow control
* OPT_PAUSE set
* ASYM_PAUSE set
@ -2112,7 +2112,7 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
cfg.options |= DPNI_LINK_OPT_PAUSE;
cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
break;
case RTE_FC_NONE:
case RTE_ETH_FC_NONE:
/* Disable Flow control
* OPT_PAUSE not set
* ASYM_PAUSE not set
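The four flow control cases here and in dpaa2_flow_ctrl_get() earlier in this file form a two-bit truth table over DPNI_LINK_OPT_PAUSE and DPNI_LINK_OPT_ASYM_PAUSE. A standalone sketch of the inverse mapping, with the option bits given assumed values for illustration only:

	#include <stdbool.h>
	#include <stdint.h>
	#include <rte_ethdev.h>

	/* Assumed bit values, for illustration only. */
	#define OPT_PAUSE	0x1ULL
	#define OPT_ASYM_PAUSE	0x2ULL

	/* Sketch: option bits back to an ethdev flow control mode, mirroring
	 * the logic of dpaa2_flow_ctrl_get(). */
	static enum rte_eth_fc_mode
	options_to_fc_mode(uint64_t options)
	{
		bool pause = (options & OPT_PAUSE) != 0;
		bool asym = (options & OPT_ASYM_PAUSE) != 0;

		if (pause)
			return asym ? RTE_ETH_FC_RX_PAUSE : RTE_ETH_FC_FULL;
		return asym ? RTE_ETH_FC_TX_PAUSE : RTE_ETH_FC_NONE;
	}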

View File

@ -65,17 +65,17 @@
#define DPAA2_TX_CONF_ENABLE 0x08
#define DPAA2_RSS_OFFLOAD_ALL ( \
ETH_RSS_L2_PAYLOAD | \
ETH_RSS_IP | \
ETH_RSS_UDP | \
ETH_RSS_TCP | \
ETH_RSS_SCTP | \
ETH_RSS_MPLS | \
ETH_RSS_C_VLAN | \
ETH_RSS_S_VLAN | \
ETH_RSS_ESP | \
ETH_RSS_AH | \
ETH_RSS_PPPOE)
RTE_ETH_RSS_L2_PAYLOAD | \
RTE_ETH_RSS_IP | \
RTE_ETH_RSS_UDP | \
RTE_ETH_RSS_TCP | \
RTE_ETH_RSS_SCTP | \
RTE_ETH_RSS_MPLS | \
RTE_ETH_RSS_C_VLAN | \
RTE_ETH_RSS_S_VLAN | \
RTE_ETH_RSS_ESP | \
RTE_ETH_RSS_AH | \
RTE_ETH_RSS_PPPOE)
/* LX2 FRC Parsed values (Little Endian) */
#define DPAA2_PKT_TYPE_ETHER 0x0060

Some files were not shown because too many files have changed in this diff