net: add rte prefix to ether defines
Add 'RTE_' prefix to defines:
- rename ETHER_ADDR_LEN as RTE_ETHER_ADDR_LEN.
- rename ETHER_TYPE_LEN as RTE_ETHER_TYPE_LEN.
- rename ETHER_CRC_LEN as RTE_ETHER_CRC_LEN.
- rename ETHER_HDR_LEN as RTE_ETHER_HDR_LEN.
- rename ETHER_MIN_LEN as RTE_ETHER_MIN_LEN.
- rename ETHER_MAX_LEN as RTE_ETHER_MAX_LEN.
- rename ETHER_MTU as RTE_ETHER_MTU.
- rename ETHER_MAX_VLAN_FRAME_LEN as RTE_ETHER_MAX_VLAN_FRAME_LEN.
- rename ETHER_MAX_VLAN_ID as RTE_ETHER_MAX_VLAN_ID.
- rename ETHER_MAX_JUMBO_FRAME_LEN as RTE_ETHER_MAX_JUMBO_FRAME_LEN.
- rename ETHER_MIN_MTU as RTE_ETHER_MIN_MTU.
- rename ETHER_LOCAL_ADMIN_ADDR as RTE_ETHER_LOCAL_ADMIN_ADDR.
- rename ETHER_GROUP_ADDR as RTE_ETHER_GROUP_ADDR.
- rename ETHER_TYPE_IPv4 as RTE_ETHER_TYPE_IPv4.
- rename ETHER_TYPE_IPv6 as RTE_ETHER_TYPE_IPv6.
- rename ETHER_TYPE_ARP as RTE_ETHER_TYPE_ARP.
- rename ETHER_TYPE_VLAN as RTE_ETHER_TYPE_VLAN.
- rename ETHER_TYPE_RARP as RTE_ETHER_TYPE_RARP.
- rename ETHER_TYPE_QINQ as RTE_ETHER_TYPE_QINQ.
- rename ETHER_TYPE_ETAG as RTE_ETHER_TYPE_ETAG.
- rename ETHER_TYPE_1588 as RTE_ETHER_TYPE_1588.
- rename ETHER_TYPE_SLOW as RTE_ETHER_TYPE_SLOW.
- rename ETHER_TYPE_TEB as RTE_ETHER_TYPE_TEB.
- rename ETHER_TYPE_LLDP as RTE_ETHER_TYPE_LLDP.
- rename ETHER_TYPE_MPLS as RTE_ETHER_TYPE_MPLS.
- rename ETHER_TYPE_MPLSM as RTE_ETHER_TYPE_MPLSM.
- rename ETHER_VXLAN_HLEN as RTE_ETHER_VXLAN_HLEN.
- rename ETHER_ADDR_FMT_SIZE as RTE_ETHER_ADDR_FMT_SIZE.
- rename VXLAN_GPE_TYPE_IPV4 as RTE_VXLAN_GPE_TYPE_IPV4.
- rename VXLAN_GPE_TYPE_IPV6 as RTE_VXLAN_GPE_TYPE_IPV6.
- rename VXLAN_GPE_TYPE_ETH as RTE_VXLAN_GPE_TYPE_ETH.
- rename VXLAN_GPE_TYPE_NSH as RTE_VXLAN_GPE_TYPE_NSH.
- rename VXLAN_GPE_TYPE_MPLS as RTE_VXLAN_GPE_TYPE_MPLS.
- rename VXLAN_GPE_TYPE_GBP as RTE_VXLAN_GPE_TYPE_GBP.
- rename VXLAN_GPE_TYPE_VBNG as RTE_VXLAN_GPE_TYPE_VBNG.
- rename ETHER_VXLAN_GPE_HLEN as RTE_ETHER_VXLAN_GPE_HLEN.

Do not update the command line library to avoid adding a dependency
to librte_net.

Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
Reviewed-by: Stephen Hemminger <stephen@networkplumber.org>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
commit 35b2d13fd6 (parent 538da7a1ca)
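Illustrative note (not part of the commit): for applications that must build against both pre- and post-rename DPDK headers, the old names can be mapped onto the new ones at compile time. A minimal sketch in C; the set_default_max_pkt_len() helper is hypothetical:

    #include <rte_ethdev.h>
    #include <rte_ether.h>

    /* Older DPDK headers only provide the unprefixed names; map them
     * forward so the rest of the code can use the RTE_ variants. */
    #ifndef RTE_ETHER_MAX_LEN
    #define RTE_ETHER_MAX_LEN   ETHER_MAX_LEN   /* 1518 */
    #define RTE_ETHER_ADDR_LEN  ETHER_ADDR_LEN  /* 6 */
    #endif

    static void
    set_default_max_pkt_len(struct rte_eth_conf *conf)
    {
        /* RTE_ETHER_MAX_LEN is the classic 1518-byte Ethernet frame:
         * 1500-byte MTU + 14-byte header + 4-byte CRC. */
        conf->rxmode.max_rx_pkt_len = RTE_ETHER_MAX_LEN;
    }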
@@ -658,7 +658,7 @@ perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
     struct rte_eth_conf port_conf = {
         .rxmode = {
             .mq_mode = ETH_MQ_RX_RSS,
-            .max_rx_pkt_len = ETHER_MAX_LEN,
+            .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
             .split_hdr_size = 0,
         },
         .rx_adv_conf = {
@@ -165,7 +165,7 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
     struct rte_eth_conf port_conf = {
         .rxmode = {
             .mq_mode = ETH_MQ_RX_RSS,
-            .max_rx_pkt_len = ETHER_MAX_LEN,
+            .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
         },
         .rx_adv_conf = {
             .rss_conf = {
@@ -1925,16 +1925,16 @@ cmd_config_max_pkt_len_parsed(void *parsed_result,
     uint64_t rx_offloads = port->dev_conf.rxmode.offloads;
 
     if (!strcmp(res->name, "max-pkt-len")) {
-        if (res->value < ETHER_MIN_LEN) {
+        if (res->value < RTE_ETHER_MIN_LEN) {
             printf("max-pkt-len can not be less than %d\n",
-                ETHER_MIN_LEN);
+                RTE_ETHER_MIN_LEN);
             return;
         }
         if (res->value == port->dev_conf.rxmode.max_rx_pkt_len)
             return;
 
         port->dev_conf.rxmode.max_rx_pkt_len = res->value;
-        if (res->value > ETHER_MAX_LEN)
+        if (res->value > RTE_ETHER_MAX_LEN)
             rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
         else
             rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
@@ -1996,8 +1996,8 @@ cmd_config_mtu_parsed(void *parsed_result,
 {
     struct cmd_config_mtu_result *res = parsed_result;
 
-    if (res->value < ETHER_MIN_LEN) {
-        printf("mtu cannot be less than %d\n", ETHER_MIN_LEN);
+    if (res->value < RTE_ETHER_MIN_LEN) {
+        printf("mtu cannot be less than %d\n", RTE_ETHER_MIN_LEN);
         return;
     }
     port_mtu_set(res->port_id, res->value);
@@ -8236,7 +8236,7 @@ cmd_set_vf_macvlan_parsed(void *parsed_result,
 
     memset(&filter, 0, sizeof(struct rte_eth_mac_filter));
 
-    rte_memcpy(&filter.mac_addr, &res->address, ETHER_ADDR_LEN);
+    rte_memcpy(&filter.mac_addr, &res->address, RTE_ETHER_ADDR_LEN);
 
     /* set VF MAC filter */
     filter.is_vf = 1;
@@ -9210,7 +9210,7 @@ cmd_set_mirror_mask_parsed(void *parsed_result,
         return;
 
     for (i = 0; i < nb_item; i++) {
-        if (vlan_list[i] > ETHER_MAX_VLAN_ID) {
+        if (vlan_list[i] > RTE_ETHER_MAX_VLAN_ID) {
             printf("Invalid vlan_id: must be < 4096\n");
             return;
         }
@@ -15301,9 +15301,9 @@ static void cmd_set_vxlan_parsed(void *parsed_result,
     if (vxlan_encap_conf.select_vlan)
         vxlan_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci);
     rte_memcpy(vxlan_encap_conf.eth_src, res->eth_src.addr_bytes,
-        ETHER_ADDR_LEN);
+        RTE_ETHER_ADDR_LEN);
     rte_memcpy(vxlan_encap_conf.eth_dst, res->eth_dst.addr_bytes,
-        ETHER_ADDR_LEN);
+        RTE_ETHER_ADDR_LEN);
 }
 
 cmdline_parse_inst_t cmd_set_vxlan = {
@@ -15492,9 +15492,9 @@ static void cmd_set_nvgre_parsed(void *parsed_result,
     if (nvgre_encap_conf.select_vlan)
         nvgre_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci);
     rte_memcpy(nvgre_encap_conf.eth_src, res->eth_src.addr_bytes,
-        ETHER_ADDR_LEN);
+        RTE_ETHER_ADDR_LEN);
     rte_memcpy(nvgre_encap_conf.eth_dst, res->eth_dst.addr_bytes,
-        ETHER_ADDR_LEN);
+        RTE_ETHER_ADDR_LEN);
 }
 
 cmdline_parse_inst_t cmd_set_nvgre = {
@@ -15609,9 +15609,9 @@ static void cmd_set_l2_encap_parsed(void *parsed_result,
     if (l2_encap_conf.select_vlan)
         l2_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci);
     rte_memcpy(l2_encap_conf.eth_src, res->eth_src.addr_bytes,
-        ETHER_ADDR_LEN);
+        RTE_ETHER_ADDR_LEN);
     rte_memcpy(l2_encap_conf.eth_dst, res->eth_dst.addr_bytes,
-        ETHER_ADDR_LEN);
+        RTE_ETHER_ADDR_LEN);
 }
 
 cmdline_parse_inst_t cmd_set_l2_encap = {
@@ -15801,9 +15801,9 @@ static void cmd_set_mplsogre_encap_parsed(void *parsed_result,
     if (mplsogre_encap_conf.select_vlan)
         mplsogre_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci);
     rte_memcpy(mplsogre_encap_conf.eth_src, res->eth_src.addr_bytes,
-        ETHER_ADDR_LEN);
+        RTE_ETHER_ADDR_LEN);
     rte_memcpy(mplsogre_encap_conf.eth_dst, res->eth_dst.addr_bytes,
-        ETHER_ADDR_LEN);
+        RTE_ETHER_ADDR_LEN);
 }
 
 cmdline_parse_inst_t cmd_set_mplsogre_encap = {
@@ -16039,9 +16039,9 @@ static void cmd_set_mplsoudp_encap_parsed(void *parsed_result,
     if (mplsoudp_encap_conf.select_vlan)
         mplsoudp_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci);
     rte_memcpy(mplsoudp_encap_conf.eth_src, res->eth_src.addr_bytes,
-        ETHER_ADDR_LEN);
+        RTE_ETHER_ADDR_LEN);
     rte_memcpy(mplsoudp_encap_conf.eth_dst, res->eth_dst.addr_bytes,
-        ETHER_ADDR_LEN);
+        RTE_ETHER_ADDR_LEN);
 }
 
 cmdline_parse_inst_t cmd_set_mplsoudp_encap = {
@@ -3493,9 +3493,9 @@ parse_vc_action_vxlan_encap(struct context *ctx, const struct token *token,
         .item_vxlan.flags = 0,
     };
     memcpy(action_vxlan_encap_data->item_eth.dst.addr_bytes,
-           vxlan_encap_conf.eth_dst, ETHER_ADDR_LEN);
+           vxlan_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
     memcpy(action_vxlan_encap_data->item_eth.src.addr_bytes,
-           vxlan_encap_conf.eth_src, ETHER_ADDR_LEN);
+           vxlan_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
     if (!vxlan_encap_conf.select_ipv4) {
         memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr,
                &vxlan_encap_conf.ipv6_src,
@@ -3616,9 +3616,9 @@ parse_vc_action_nvgre_encap(struct context *ctx, const struct token *token,
         .item_nvgre.flow_id = 0,
     };
     memcpy(action_nvgre_encap_data->item_eth.dst.addr_bytes,
-           nvgre_encap_conf.eth_dst, ETHER_ADDR_LEN);
+           nvgre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
     memcpy(action_nvgre_encap_data->item_eth.src.addr_bytes,
-           nvgre_encap_conf.eth_src, ETHER_ADDR_LEN);
+           nvgre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
     if (!nvgre_encap_conf.select_ipv4) {
         memcpy(&action_nvgre_encap_data->item_ipv6.hdr.src_addr,
                &nvgre_encap_conf.ipv6_src,
@@ -3680,22 +3680,22 @@ parse_vc_action_l2_encap(struct context *ctx, const struct token *token,
     };
     header = action_encap_data->data;
     if (l2_encap_conf.select_vlan)
-        eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+        eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
     else if (l2_encap_conf.select_ipv4)
-        eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+        eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
     else
-        eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+        eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
     memcpy(eth.dst.addr_bytes,
-           l2_encap_conf.eth_dst, ETHER_ADDR_LEN);
+           l2_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
     memcpy(eth.src.addr_bytes,
-           l2_encap_conf.eth_src, ETHER_ADDR_LEN);
+           l2_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
     memcpy(header, &eth, sizeof(eth));
     header += sizeof(eth);
     if (l2_encap_conf.select_vlan) {
         if (l2_encap_conf.select_ipv4)
-            vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+            vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
         else
-            vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+            vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
         memcpy(header, &vlan, sizeof(vlan));
         header += sizeof(vlan);
     }
@@ -3744,7 +3744,7 @@ parse_vc_action_l2_decap(struct context *ctx, const struct token *token,
     };
     header = action_decap_data->data;
     if (l2_decap_conf.select_vlan)
-        eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+        eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
     memcpy(header, &eth, sizeof(eth));
     header += sizeof(eth);
     if (l2_decap_conf.select_vlan) {
@@ -3815,22 +3815,22 @@ parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token,
     };
     header = action_encap_data->data;
     if (mplsogre_encap_conf.select_vlan)
-        eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+        eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
     else if (mplsogre_encap_conf.select_ipv4)
-        eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+        eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
     else
-        eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+        eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
     memcpy(eth.dst.addr_bytes,
-           mplsogre_encap_conf.eth_dst, ETHER_ADDR_LEN);
+           mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
     memcpy(eth.src.addr_bytes,
-           mplsogre_encap_conf.eth_src, ETHER_ADDR_LEN);
+           mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
     memcpy(header, &eth, sizeof(eth));
     header += sizeof(eth);
     if (mplsogre_encap_conf.select_vlan) {
         if (mplsogre_encap_conf.select_ipv4)
-            vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+            vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
         else
-            vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+            vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
         memcpy(header, &vlan, sizeof(vlan));
         header += sizeof(vlan);
     }
@@ -3910,22 +3910,22 @@ parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token,
     };
     header = action_decap_data->data;
     if (mplsogre_decap_conf.select_vlan)
-        eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+        eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
     else if (mplsogre_encap_conf.select_ipv4)
-        eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+        eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
     else
-        eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+        eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
     memcpy(eth.dst.addr_bytes,
-           mplsogre_encap_conf.eth_dst, ETHER_ADDR_LEN);
+           mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
     memcpy(eth.src.addr_bytes,
-           mplsogre_encap_conf.eth_src, ETHER_ADDR_LEN);
+           mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
     memcpy(header, &eth, sizeof(eth));
     header += sizeof(eth);
     if (mplsogre_encap_conf.select_vlan) {
         if (mplsogre_encap_conf.select_ipv4)
-            vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+            vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
         else
-            vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+            vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
         memcpy(header, &vlan, sizeof(vlan));
         header += sizeof(vlan);
     }
@@ -4006,22 +4006,22 @@ parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token,
     };
     header = action_encap_data->data;
     if (mplsoudp_encap_conf.select_vlan)
-        eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+        eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
     else if (mplsoudp_encap_conf.select_ipv4)
-        eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+        eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
     else
-        eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+        eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
     memcpy(eth.dst.addr_bytes,
-           mplsoudp_encap_conf.eth_dst, ETHER_ADDR_LEN);
+           mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
     memcpy(eth.src.addr_bytes,
-           mplsoudp_encap_conf.eth_src, ETHER_ADDR_LEN);
+           mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
     memcpy(header, &eth, sizeof(eth));
     header += sizeof(eth);
     if (mplsoudp_encap_conf.select_vlan) {
         if (mplsoudp_encap_conf.select_ipv4)
-            vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+            vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
         else
-            vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+            vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
         memcpy(header, &vlan, sizeof(vlan));
         header += sizeof(vlan);
     }
@@ -4103,22 +4103,22 @@ parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token,
     };
     header = action_decap_data->data;
     if (mplsoudp_decap_conf.select_vlan)
-        eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+        eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
     else if (mplsoudp_encap_conf.select_ipv4)
-        eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+        eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
     else
-        eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+        eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
     memcpy(eth.dst.addr_bytes,
-           mplsoudp_encap_conf.eth_dst, ETHER_ADDR_LEN);
+           mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
     memcpy(eth.src.addr_bytes,
-           mplsoudp_encap_conf.eth_src, ETHER_ADDR_LEN);
+           mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
     memcpy(header, &eth, sizeof(eth));
     header += sizeof(eth);
     if (mplsoudp_encap_conf.select_vlan) {
         if (mplsoudp_encap_conf.select_ipv4)
-            vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+            vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
         else
-            vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+            vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
         memcpy(header, &vlan, sizeof(vlan));
         header += sizeof(vlan);
     }
@@ -110,8 +110,8 @@ const struct rss_type_info rss_type_table[] = {
 static void
 print_ethaddr(const char *name, struct rte_ether_addr *eth_addr)
 {
-    char buf[ETHER_ADDR_FMT_SIZE];
-    rte_ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
+    char buf[RTE_ETHER_ADDR_FMT_SIZE];
+    rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
     printf("%s%s", name, buf);
 }
 
@@ -92,9 +92,9 @@ struct simple_gre_hdr {
 static uint16_t
 get_udptcp_checksum(void *l3_hdr, void *l4_hdr, uint16_t ethertype)
 {
-    if (ethertype == _htons(ETHER_TYPE_IPv4))
+    if (ethertype == _htons(RTE_ETHER_TYPE_IPv4))
         return rte_ipv4_udptcp_cksum(l3_hdr, l4_hdr);
-    else /* assume ethertype == ETHER_TYPE_IPv6 */
+    else /* assume ethertype == RTE_ETHER_TYPE_IPv6 */
         return rte_ipv6_udptcp_cksum(l3_hdr, l4_hdr);
 }
 
@@ -150,7 +150,7 @@ parse_ethernet(struct rte_ether_hdr *eth_hdr, struct testpmd_offload_info *info)
     info->l2_len = sizeof(struct rte_ether_hdr);
     info->ethertype = eth_hdr->ether_type;
 
-    if (info->ethertype == _htons(ETHER_TYPE_VLAN)) {
+    if (info->ethertype == _htons(RTE_ETHER_TYPE_VLAN)) {
         struct rte_vlan_hdr *vlan_hdr = (
             struct rte_vlan_hdr *)(eth_hdr + 1);
 
@@ -159,11 +159,11 @@ parse_ethernet(struct rte_ether_hdr *eth_hdr, struct testpmd_offload_info *info)
     }
 
     switch (info->ethertype) {
-    case _htons(ETHER_TYPE_IPv4):
+    case _htons(RTE_ETHER_TYPE_IPv4):
         ipv4_hdr = (struct ipv4_hdr *) ((char *)eth_hdr + info->l2_len);
         parse_ipv4(ipv4_hdr, info);
         break;
-    case _htons(ETHER_TYPE_IPv6):
+    case _htons(RTE_ETHER_TYPE_IPv6):
         ipv6_hdr = (struct ipv6_hdr *) ((char *)eth_hdr + info->l2_len);
         parse_ipv6(ipv6_hdr, info);
         break;
@@ -201,7 +201,7 @@ parse_vxlan(struct udp_hdr *udp_hdr,
         sizeof(struct rte_vxlan_hdr));
 
     parse_ethernet(eth_hdr, info);
-    info->l2_len += ETHER_VXLAN_HLEN; /* add udp + vxlan */
+    info->l2_len += RTE_ETHER_VXLAN_HLEN; /* add udp + vxlan */
 }
 
 /* Parse a vxlan-gpe header */
@@ -223,7 +223,7 @@ parse_vxlan_gpe(struct udp_hdr *udp_hdr,
         sizeof(struct udp_hdr));
 
     if (!vxlan_gpe_hdr->proto || vxlan_gpe_hdr->proto ==
-        VXLAN_GPE_TYPE_IPV4) {
+        RTE_VXLAN_GPE_TYPE_IPV4) {
         info->is_tunnel = 1;
         info->outer_ethertype = info->ethertype;
         info->outer_l2_len = info->l2_len;
@@ -234,10 +234,10 @@ parse_vxlan_gpe(struct udp_hdr *udp_hdr,
             vxlan_gpe_len);
 
         parse_ipv4(ipv4_hdr, info);
-        info->ethertype = _htons(ETHER_TYPE_IPv4);
+        info->ethertype = _htons(RTE_ETHER_TYPE_IPv4);
         info->l2_len = 0;
 
-    } else if (vxlan_gpe_hdr->proto == VXLAN_GPE_TYPE_IPV6) {
+    } else if (vxlan_gpe_hdr->proto == RTE_VXLAN_GPE_TYPE_IPV6) {
         info->is_tunnel = 1;
         info->outer_ethertype = info->ethertype;
         info->outer_l2_len = info->l2_len;
@@ -247,11 +247,11 @@ parse_vxlan_gpe(struct udp_hdr *udp_hdr,
         ipv6_hdr = (struct ipv6_hdr *)((char *)vxlan_gpe_hdr +
             vxlan_gpe_len);
 
-        info->ethertype = _htons(ETHER_TYPE_IPv6);
+        info->ethertype = _htons(RTE_ETHER_TYPE_IPv6);
         parse_ipv6(ipv6_hdr, info);
         info->l2_len = 0;
 
-    } else if (vxlan_gpe_hdr->proto == VXLAN_GPE_TYPE_ETH) {
+    } else if (vxlan_gpe_hdr->proto == RTE_VXLAN_GPE_TYPE_ETH) {
         info->is_tunnel = 1;
         info->outer_ethertype = info->ethertype;
         info->outer_l2_len = info->l2_len;
@@ -265,7 +265,7 @@ parse_vxlan_gpe(struct udp_hdr *udp_hdr,
     } else
         return;
 
-    info->l2_len += ETHER_VXLAN_GPE_HLEN;
+    info->l2_len += RTE_ETHER_VXLAN_GPE_HLEN;
 }
 
 /* Parse a gre header */
@@ -286,7 +286,7 @@ parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info)
     if (gre_hdr->flags & _htons(GRE_CHECKSUM_PRESENT))
         gre_len += GRE_EXT_LEN;
 
-    if (gre_hdr->proto == _htons(ETHER_TYPE_IPv4)) {
+    if (gre_hdr->proto == _htons(RTE_ETHER_TYPE_IPv4)) {
         info->is_tunnel = 1;
         info->outer_ethertype = info->ethertype;
         info->outer_l2_len = info->l2_len;
@@ -296,10 +296,10 @@ parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info)
         ipv4_hdr = (struct ipv4_hdr *)((char *)gre_hdr + gre_len);
 
         parse_ipv4(ipv4_hdr, info);
-        info->ethertype = _htons(ETHER_TYPE_IPv4);
+        info->ethertype = _htons(RTE_ETHER_TYPE_IPv4);
         info->l2_len = 0;
 
-    } else if (gre_hdr->proto == _htons(ETHER_TYPE_IPv6)) {
+    } else if (gre_hdr->proto == _htons(RTE_ETHER_TYPE_IPv6)) {
         info->is_tunnel = 1;
         info->outer_ethertype = info->ethertype;
         info->outer_l2_len = info->l2_len;
@@ -308,11 +308,11 @@ parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info)
 
         ipv6_hdr = (struct ipv6_hdr *)((char *)gre_hdr + gre_len);
 
-        info->ethertype = _htons(ETHER_TYPE_IPv6);
+        info->ethertype = _htons(RTE_ETHER_TYPE_IPv6);
         parse_ipv6(ipv6_hdr, info);
         info->l2_len = 0;
 
-    } else if (gre_hdr->proto == _htons(ETHER_TYPE_TEB)) {
+    } else if (gre_hdr->proto == _htons(RTE_ETHER_TYPE_TEB)) {
         info->is_tunnel = 1;
         info->outer_ethertype = info->ethertype;
         info->outer_l2_len = info->l2_len;
@@ -349,10 +349,10 @@ parse_encap_ip(void *encap_ip, struct testpmd_offload_info *info)
 
     if (ip_version == 4) {
         parse_ipv4(ipv4_hdr, info);
-        info->ethertype = _htons(ETHER_TYPE_IPv4);
+        info->ethertype = _htons(RTE_ETHER_TYPE_IPv4);
     } else {
         parse_ipv6(ipv6_hdr, info);
-        info->ethertype = _htons(ETHER_TYPE_IPv6);
+        info->ethertype = _htons(RTE_ETHER_TYPE_IPv6);
     }
     info->l2_len = 0;
 }
@@ -384,7 +384,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
         tso_segsz = info->tunnel_tso_segsz;
     }
 
-    if (info->ethertype == _htons(ETHER_TYPE_IPv4)) {
+    if (info->ethertype == _htons(RTE_ETHER_TYPE_IPv4)) {
         ipv4_hdr = l3_hdr;
         ipv4_hdr->hdr_checksum = 0;
 
@@ -398,7 +398,7 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
             ipv4_hdr->hdr_checksum =
                 rte_ipv4_cksum(ipv4_hdr);
         }
-    } else if (info->ethertype == _htons(ETHER_TYPE_IPv6))
+    } else if (info->ethertype == _htons(RTE_ETHER_TYPE_IPv6))
         ol_flags |= PKT_TX_IPV6;
     else
         return 0; /* packet type not supported, nothing to do */
@@ -459,7 +459,7 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
     struct udp_hdr *udp_hdr;
     uint64_t ol_flags = 0;
 
-    if (info->outer_ethertype == _htons(ETHER_TYPE_IPv4)) {
+    if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPv4)) {
         ipv4_hdr->hdr_checksum = 0;
         ol_flags |= PKT_TX_OUTER_IPV4;
 
@@ -495,7 +495,7 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
     /* do not recalculate udp cksum if it was 0 */
     if (udp_hdr->dgram_cksum != 0) {
         udp_hdr->dgram_cksum = 0;
-        if (info->outer_ethertype == _htons(ETHER_TYPE_IPv4))
+        if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPv4))
             udp_hdr->dgram_cksum =
                 rte_ipv4_udptcp_cksum(ipv4_hdr, udp_hdr);
         else
@@ -173,7 +173,7 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
         eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
         rte_ether_addr_copy(&cfg_ether_dst, &eth_hdr->d_addr);
         rte_ether_addr_copy(&cfg_ether_src, &eth_hdr->s_addr);
-        eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+        eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
 
         /* Initialize IP header. */
         ip_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
@@ -223,9 +223,9 @@ ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf)
 static void
 ether_addr_dump(const char *what, const struct rte_ether_addr *ea)
 {
-    char buf[ETHER_ADDR_FMT_SIZE];
+    char buf[RTE_ETHER_ADDR_FMT_SIZE];
 
-    rte_ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, ea);
+    rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, ea);
     if (what)
         printf("%s", what);
     printf("%s", buf);
@@ -330,7 +330,7 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
             ether_addr_dump(" ETH: src=", &eth_h->s_addr);
             ether_addr_dump(" dst=", &eth_h->d_addr);
         }
-        if (eth_type == ETHER_TYPE_VLAN) {
+        if (eth_type == RTE_ETHER_TYPE_VLAN) {
             vlan_h = (struct rte_vlan_hdr *)
                 ((char *)eth_h + sizeof(struct rte_ether_hdr));
             l2_len += sizeof(struct rte_vlan_hdr);
@@ -346,7 +346,7 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
         }
 
         /* Reply to ARP requests */
-        if (eth_type == ETHER_TYPE_ARP) {
+        if (eth_type == RTE_ETHER_TYPE_ARP) {
             arp_h = (struct rte_arp_hdr *) ((char *)eth_h + l2_len);
             arp_op = RTE_BE_TO_CPU_16(arp_h->arp_opcode);
             arp_pro = RTE_BE_TO_CPU_16(arp_h->arp_protocol);
@@ -360,7 +360,7 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
             }
             if ((RTE_BE_TO_CPU_16(arp_h->arp_hardware) !=
                  RTE_ARP_HRD_ETHER) ||
-                (arp_pro != ETHER_TYPE_IPv4) ||
+                (arp_pro != RTE_ETHER_TYPE_IPv4) ||
                 (arp_h->arp_hlen != 6) ||
                 (arp_h->arp_plen != 4)
                 ) {
@@ -414,7 +414,7 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
             continue;
         }
 
-        if (eth_type != ETHER_TYPE_IPv4) {
+        if (eth_type != RTE_ETHER_TYPE_IPv4) {
             rte_pktmbuf_free(pkt);
             continue;
         }
@@ -115,7 +115,7 @@ ieee1588_packet_fwd(struct fwd_stream *fs)
     eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
 
     if (! (mb->ol_flags & PKT_RX_IEEE1588_PTP)) {
-        if (eth_type == ETHER_TYPE_1588) {
+        if (eth_type == RTE_ETHER_TYPE_1588) {
            printf("Port %u Received PTP packet not filtered"
                   " by hardware\n",
                   fs->rx_port);
@@ -128,7 +128,7 @@ ieee1588_packet_fwd(struct fwd_stream *fs)
         rte_pktmbuf_free(mb);
         return;
     }
-    if (eth_type != ETHER_TYPE_1588) {
+    if (eth_type != RTE_ETHER_TYPE_1588) {
         printf("Port %u Received NON PTP packet incorrectly"
                " detected by hardware\n",
                fs->rx_port);
@@ -879,15 +879,15 @@ launch_args_parse(int argc, char** argv)
             }
             if (!strcmp(lgopts[opt_idx].name, "max-pkt-len")) {
                 n = atoi(optarg);
-                if (n >= ETHER_MIN_LEN) {
+                if (n >= RTE_ETHER_MIN_LEN) {
                     rx_mode.max_rx_pkt_len = (uint32_t) n;
-                    if (n > ETHER_MAX_LEN)
+                    if (n > RTE_ETHER_MAX_LEN)
                         rx_offloads |=
                             DEV_RX_OFFLOAD_JUMBO_FRAME;
                 } else
                     rte_exit(EXIT_FAILURE,
                         "Invalid max-pkt-len=%d - should be > %d\n",
-                        n, ETHER_MIN_LEN);
+                        n, RTE_ETHER_MIN_LEN);
             }
             if (!strcmp(lgopts[opt_idx].name, "pkt-filter-mode")) {
                 if (!strcmp(optarg, "signature"))
@@ -413,7 +413,8 @@ lcoreid_t latencystats_lcore_id = -1;
  * Ethernet device configuration.
  */
 struct rte_eth_rxmode rx_mode = {
-    .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
+    .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
+        /**< Default maximum frame length. */
 };
 
 struct rte_eth_txmode tx_mode = {
@@ -526,7 +527,7 @@ static void dev_event_callback(const char *device_name,
 static int all_ports_started(void);
 
 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
-uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
+uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
 
 /*
  * Helper function to check if socket is already discovered.
@@ -582,7 +583,7 @@ set_def_peer_eth_addrs(void)
     portid_t i;
 
     for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
-        peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
+        peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
         peer_eth_addrs[i].addr_bytes[5] = i;
     }
 }
@@ -1223,8 +1224,8 @@ init_config(void)
         fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
         fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
         fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
-        fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
-            ETHER_CRC_LEN;
+        fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
+            RTE_ETHER_CRC_LEN;
         fwd_lcores[lc_id]->gso_ctx.flag = 0;
     }
 
@@ -501,8 +501,8 @@ struct vxlan_encap_conf {
     rte_be16_t vlan_tci;
     uint8_t ip_tos;
     uint8_t ip_ttl;
-    uint8_t eth_src[ETHER_ADDR_LEN];
-    uint8_t eth_dst[ETHER_ADDR_LEN];
+    uint8_t eth_src[RTE_ETHER_ADDR_LEN];
+    uint8_t eth_dst[RTE_ETHER_ADDR_LEN];
 };
 struct vxlan_encap_conf vxlan_encap_conf;
 
@@ -516,8 +516,8 @@ struct nvgre_encap_conf {
     uint8_t ipv6_src[16];
    uint8_t ipv6_dst[16];
     rte_be16_t vlan_tci;
-    uint8_t eth_src[ETHER_ADDR_LEN];
-    uint8_t eth_dst[ETHER_ADDR_LEN];
+    uint8_t eth_src[RTE_ETHER_ADDR_LEN];
+    uint8_t eth_dst[RTE_ETHER_ADDR_LEN];
 };
 struct nvgre_encap_conf nvgre_encap_conf;
 
@@ -526,8 +526,8 @@ struct l2_encap_conf {
     uint32_t select_ipv4:1;
     uint32_t select_vlan:1;
     rte_be16_t vlan_tci;
-    uint8_t eth_src[ETHER_ADDR_LEN];
-    uint8_t eth_dst[ETHER_ADDR_LEN];
+    uint8_t eth_src[RTE_ETHER_ADDR_LEN];
+    uint8_t eth_dst[RTE_ETHER_ADDR_LEN];
 };
 struct l2_encap_conf l2_encap_conf;
 
@@ -547,8 +547,8 @@ struct mplsogre_encap_conf {
     uint8_t ipv6_src[16];
     uint8_t ipv6_dst[16];
     rte_be16_t vlan_tci;
-    uint8_t eth_src[ETHER_ADDR_LEN];
-    uint8_t eth_dst[ETHER_ADDR_LEN];
+    uint8_t eth_src[RTE_ETHER_ADDR_LEN];
+    uint8_t eth_dst[RTE_ETHER_ADDR_LEN];
 };
 struct mplsogre_encap_conf mplsogre_encap_conf;
 
@@ -571,8 +571,8 @@ struct mplsoudp_encap_conf {
     uint8_t ipv6_src[16];
     uint8_t ipv6_dst[16];
     rte_be16_t vlan_tci;
-    uint8_t eth_src[ETHER_ADDR_LEN];
-    uint8_t eth_dst[ETHER_ADDR_LEN];
+    uint8_t eth_src[RTE_ETHER_ADDR_LEN];
+    uint8_t eth_dst[RTE_ETHER_ADDR_LEN];
 };
 struct mplsoudp_encap_conf mplsoudp_encap_conf;
 
@@ -268,7 +268,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
      */
     rte_ether_addr_copy(&peer_eth_addrs[fs->peer_addr], &eth_hdr.d_addr);
     rte_ether_addr_copy(&ports[fs->tx_port].eth_addr, &eth_hdr.s_addr);
-    eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+    eth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
 
     if (rte_mempool_get_bulk(mbp, (void **)pkts_burst,
                 nb_pkt_per_burst) == 0) {
@@ -16,8 +16,8 @@
 static inline void
 print_ether_addr(const char *what, struct rte_ether_addr *eth_addr)
 {
-    char buf[ETHER_ADDR_FMT_SIZE];
-    rte_ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
+    char buf[RTE_ETHER_ADDR_FMT_SIZE];
+    rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
     printf("%s%s", what, buf);
 }
 
@@ -65,7 +65,7 @@ initialize_eth_header(struct rte_ether_hdr *eth_hdr,
         struct rte_vlan_hdr *vhdr = (struct rte_vlan_hdr *)(
             (uint8_t *)eth_hdr + sizeof(struct rte_ether_hdr));
 
-        eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+        eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
 
         vhdr->eth_proto = rte_cpu_to_be_16(ether_type);
         vhdr->vlan_tci = van_id;
@@ -82,8 +82,8 @@ initialize_arp_header(struct rte_arp_hdr *arp_hdr,
         uint32_t opcode)
 {
     arp_hdr->arp_hardware = rte_cpu_to_be_16(RTE_ARP_HRD_ETHER);
-    arp_hdr->arp_protocol = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
-    arp_hdr->arp_hlen = ETHER_ADDR_LEN;
+    arp_hdr->arp_protocol = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
+    arp_hdr->arp_hlen = RTE_ETHER_ADDR_LEN;
     arp_hdr->arp_plen = sizeof(uint32_t);
     arp_hdr->arp_opcode = rte_cpu_to_be_16(opcode);
     rte_ether_addr_copy(src_mac, &arp_hdr->arp_data.arp_sha);
@@ -322,10 +322,10 @@ nomore_mbuf:
         pkt->l2_len = eth_hdr_size;
 
         if (ipv4) {
-            pkt->vlan_tci = ETHER_TYPE_IPv4;
+            pkt->vlan_tci = RTE_ETHER_TYPE_IPv4;
             pkt->l3_len = sizeof(struct ipv4_hdr);
         } else {
-            pkt->vlan_tci = ETHER_TYPE_IPv6;
+            pkt->vlan_tci = RTE_ETHER_TYPE_IPv6;
             pkt->l3_len = sizeof(struct ipv6_hdr);
         }
 
@@ -437,10 +437,10 @@ nomore_mbuf:
         pkt->l2_len = eth_hdr_size;
 
         if (ipv4) {
-            pkt->vlan_tci = ETHER_TYPE_IPv4;
+            pkt->vlan_tci = RTE_ETHER_TYPE_IPv4;
             pkt->l3_len = sizeof(struct ipv4_hdr);
         } else {
-            pkt->vlan_tci = ETHER_TYPE_IPv6;
+            pkt->vlan_tci = RTE_ETHER_TYPE_IPv6;
             pkt->l3_len = sizeof(struct ipv6_hdr);
         }
 
@@ -85,7 +85,7 @@ static int
 is_addr_different(const struct rte_ether_addr addr, uint64_t num)
 {
     int i;
-    for (i = 0; i < ETHER_ADDR_LEN; i++, num >>= 8)
+    for (i = 0; i < RTE_ETHER_ADDR_LEN; i++, num >>= 8)
         if (addr.addr_bytes[i] != (num & 0xFF)) {
             return 1;
         }
@@ -504,7 +504,7 @@ init_ipv4_udp_traffic(struct rte_mempool *mp,
     printf("Set up IPv4 UDP traffic\n");
     initialize_eth_header(&pkt_eth_hdr,
         (struct rte_ether_addr *)src_mac,
-        (struct rte_ether_addr *)dst_mac, ETHER_TYPE_IPv4, 0, 0);
+        (struct rte_ether_addr *)dst_mac, RTE_ETHER_TYPE_IPv4, 0, 0);
     pktlen = (uint16_t)(sizeof(struct rte_ether_hdr));
     printf("ETH pktlen %u\n", pktlen);
 
@@ -541,7 +541,7 @@ init_ipv4_tcp_traffic(struct rte_mempool *mp,
     printf("Set up IPv4 TCP traffic\n");
     initialize_eth_header(&pkt_eth_hdr,
         (struct rte_ether_addr *)src_mac,
-        (struct rte_ether_addr *)dst_mac, ETHER_TYPE_IPv4, 0, 0);
+        (struct rte_ether_addr *)dst_mac, RTE_ETHER_TYPE_IPv4, 0, 0);
     pktlen = (uint16_t)(sizeof(struct rte_ether_hdr));
     printf("ETH pktlen %u\n", pktlen);
 
@@ -578,7 +578,7 @@ init_ipv4_sctp_traffic(struct rte_mempool *mp,
     printf("Set up IPv4 SCTP traffic\n");
     initialize_eth_header(&pkt_eth_hdr,
         (struct rte_ether_addr *)src_mac,
-        (struct rte_ether_addr *)dst_mac, ETHER_TYPE_IPv4, 0, 0);
+        (struct rte_ether_addr *)dst_mac, RTE_ETHER_TYPE_IPv4, 0, 0);
     pktlen = (uint16_t)(sizeof(struct rte_ether_hdr));
     printf("ETH pktlen %u\n", pktlen);
 
@@ -136,7 +136,7 @@ static struct rte_eth_conf default_pmd_conf = {
     .rxmode = {
         .mq_mode = ETH_MQ_RX_NONE,
         .split_hdr_size = 0,
-        .max_rx_pkt_len = ETHER_MAX_LEN,
+        .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
     },
     .txmode = {
         .mq_mode = ETH_MQ_TX_NONE,
@@ -237,7 +237,7 @@ test_setup(void)
     for (i = 0; i < TEST_MAX_NUMBER_OF_PORTS; i++) {
         char pmd_name[RTE_ETH_NAME_MAX_LEN];
 
-        mac_addr->addr_bytes[ETHER_ADDR_LEN-1] = i;
+        mac_addr->addr_bytes[RTE_ETHER_ADDR_LEN-1] = i;
 
         snprintf(pmd_name, RTE_ETH_NAME_MAX_LEN, "eth_virt_%d", i);
 
@@ -396,7 +396,7 @@ test_remove_slave_from_bonded_device(void)
 
 
     mac_addr = (struct rte_ether_addr *)slave_mac;
-    mac_addr->addr_bytes[ETHER_ADDR_LEN-1] =
+    mac_addr->addr_bytes[RTE_ETHER_ADDR_LEN-1] =
             test_params->bonded_slave_count-1;
 
     rte_eth_macaddr_get(
@@ -752,7 +752,7 @@ test_set_primary_slave(void)
                 test_params->bonded_port_id);
 
         expected_mac_addr = (struct rte_ether_addr *)&slave_mac;
-        expected_mac_addr->addr_bytes[ETHER_ADDR_LEN-1] = i;
+        expected_mac_addr->addr_bytes[RTE_ETHER_ADDR_LEN-1] = i;
 
         /* Check primary slave MAC */
         rte_eth_macaddr_get(test_params->slave_port_ids[i], &read_mac_addr);
@@ -902,7 +902,8 @@ test_set_bonded_port_initialization_mac_assignment(void)
     for (i = 0; i < BONDED_INIT_MAC_ASSIGNMENT_SLAVE_COUNT; i++) {
         char pmd_name[RTE_ETH_NAME_MAX_LEN];
 
-        slave_mac_addr.addr_bytes[ETHER_ADDR_LEN-1] = i + 100;
+        slave_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN - 1] =
+            i + 100;
 
         snprintf(pmd_name, RTE_ETH_NAME_MAX_LEN,
                 "eth_slave_%d", i);
@@ -942,8 +943,8 @@ test_set_bonded_port_initialization_mac_assignment(void)
     /*
      * 3. Set explicit MAC address on bonded ethdev
      */
-    bonded_mac_addr.addr_bytes[ETHER_ADDR_LEN-2] = 0xFF;
-    bonded_mac_addr.addr_bytes[ETHER_ADDR_LEN-1] = 0xAA;
+    bonded_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-2] = 0xFF;
+    bonded_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 0xAA;
 
     TEST_ASSERT_SUCCESS(rte_eth_bond_mac_address_set(
             bonded_port_id, &bonded_mac_addr),
@@ -974,13 +975,13 @@ test_set_bonded_port_initialization_mac_assignment(void)
             sizeof(read_mac_addr)),
             "slave port 0 mac address not as expected");
 
-    slave_mac_addr.addr_bytes[ETHER_ADDR_LEN-1] = 1 + 100;
+    slave_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 1 + 100;
     rte_eth_macaddr_get(slave_port_ids[1], &read_mac_addr);
     TEST_ASSERT_SUCCESS(memcmp(&slave_mac_addr, &read_mac_addr,
             sizeof(read_mac_addr)),
            "slave port 1 mac address not as expected");
 
-    slave_mac_addr.addr_bytes[ETHER_ADDR_LEN-1] = 2 + 100;
+    slave_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 2 + 100;
     rte_eth_macaddr_get(slave_port_ids[2], &read_mac_addr);
     TEST_ASSERT_SUCCESS(memcmp(&slave_mac_addr, &read_mac_addr,
             sizeof(read_mac_addr)),
@@ -1005,13 +1006,13 @@ test_set_bonded_port_initialization_mac_assignment(void)
             sizeof(read_mac_addr)),
            "bonded port mac address not as expected");
 
-    slave_mac_addr.addr_bytes[ETHER_ADDR_LEN-1] = 0 + 100;
+    slave_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 0 + 100;
     rte_eth_macaddr_get(slave_port_ids[0], &read_mac_addr);
     TEST_ASSERT_SUCCESS(memcmp(&slave_mac_addr, &read_mac_addr,
            sizeof(read_mac_addr)),
            "slave port 0 mac address not as expected");
 
-    slave_mac_addr.addr_bytes[ETHER_ADDR_LEN-1] = 1 + 100;
+    slave_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 1 + 100;
     rte_eth_macaddr_get(slave_port_ids[1], &read_mac_addr);
     TEST_ASSERT_SUCCESS(memcmp(&slave_mac_addr, &read_mac_addr,
            sizeof(read_mac_addr)),
@@ -1042,19 +1043,19 @@ test_set_bonded_port_initialization_mac_assignment(void)
            "Number of slaves (%d) is great than expected (%d).",
           slave_count, 0);
 
-    slave_mac_addr.addr_bytes[ETHER_ADDR_LEN-1] = 0 + 100;
+    slave_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 0 + 100;
     rte_eth_macaddr_get(slave_port_ids[0], &read_mac_addr);
     TEST_ASSERT_SUCCESS(memcmp(&slave_mac_addr, &read_mac_addr,
            sizeof(read_mac_addr)),
           "slave port 0 mac address not as expected");
 
-    slave_mac_addr.addr_bytes[ETHER_ADDR_LEN-1] = 1 + 100;
+    slave_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 1 + 100;
     rte_eth_macaddr_get(slave_port_ids[1], &read_mac_addr);
     TEST_ASSERT_SUCCESS(memcmp(&slave_mac_addr, &read_mac_addr,
           sizeof(read_mac_addr)),
          "slave port 1 mac address not as expected");
 
-    slave_mac_addr.addr_bytes[ETHER_ADDR_LEN-1] = 2 + 100;
+    slave_mac_addr.addr_bytes[RTE_ETHER_ADDR_LEN-1] = 2 + 100;
     rte_eth_macaddr_get(slave_port_ids[2], &read_mac_addr);
     TEST_ASSERT_SUCCESS(memcmp(&slave_mac_addr, &read_mac_addr,
          sizeof(read_mac_addr)),
@@ -1271,9 +1272,9 @@ generate_test_burst(struct rte_mbuf **pkts_burst, uint16_t burst_size,
     void *ip_hdr;
 
     if (ipv4)
-        ether_type = ETHER_TYPE_IPv4;
+        ether_type = RTE_ETHER_TYPE_IPv4;
     else
-        ether_type = ETHER_TYPE_IPv6;
+        ether_type = RTE_ETHER_TYPE_IPv6;
 
     if (toggle_dst_mac)
        initialize_eth_header(test_params->pkt_eth_hdr,
@@ -1953,7 +1954,7 @@ test_roundrobin_verfiy_polling_slave_link_status_change(void)
     for (i = 0; i < TEST_RR_POLLING_LINK_STATUS_SLAVE_COUNT; i++) {
        /* Generate slave name / MAC address */
        snprintf(slave_name, RTE_ETH_NAME_MAX_LEN, "eth_virt_poll_%d", i);
-       mac_addr->addr_bytes[ETHER_ADDR_LEN-1] = i;
+       mac_addr->addr_bytes[RTE_ETHER_ADDR_LEN-1] = i;
 
        /* Create slave devices with no ISR Support */
        if (polling_test_slaves[i] == -1) {
@@ -2046,7 +2047,7 @@ test_activebackup_tx_burst(void)
     initialize_eth_header(test_params->pkt_eth_hdr,
         (struct rte_ether_addr *)src_mac,
         (struct rte_ether_addr *)dst_mac_0,
-        ETHER_TYPE_IPv4, 0, 0);
+        RTE_ETHER_TYPE_IPv4, 0, 0);
     pktlen = initialize_udp_header(test_params->pkt_udp_hdr, src_port,
                     dst_port_0, 16);
     pktlen = initialize_ipv4_header(test_params->pkt_ipv4_hdr, src_addr,
@@ -2583,7 +2584,7 @@ test_balance_l2_tx_burst(void)
     initialize_eth_header(test_params->pkt_eth_hdr,
         (struct rte_ether_addr *)src_mac,
         (struct rte_ether_addr *)dst_mac_0,
-        ETHER_TYPE_IPv4, 0, 0);
+        RTE_ETHER_TYPE_IPv4, 0, 0);
     pktlen = initialize_udp_header(test_params->pkt_udp_hdr, src_port,
                     dst_port_0, 16);
     pktlen = initialize_ipv4_header(test_params->pkt_ipv4_hdr, src_addr,
@@ -2599,7 +2600,7 @@ test_balance_l2_tx_burst(void)
     initialize_eth_header(test_params->pkt_eth_hdr,
         (struct rte_ether_addr *)src_mac,
         (struct rte_ether_addr *)dst_mac_1,
-        ETHER_TYPE_IPv4, 0, 0);
+        RTE_ETHER_TYPE_IPv4, 0, 0);
 
     /* Generate a burst 2 of packets to transmit */
     TEST_ASSERT_EQUAL(generate_packet_burst(test_params->mbuf_pool, &pkts_burst[1][0],
@@ -3425,7 +3426,7 @@ test_broadcast_tx_burst(void)
     initialize_eth_header(test_params->pkt_eth_hdr,
         (struct rte_ether_addr *)src_mac,
        (struct rte_ether_addr *)dst_mac_0,
-        ETHER_TYPE_IPv4, 0, 0);
+        RTE_ETHER_TYPE_IPv4, 0, 0);
 
     pktlen = initialize_udp_header(test_params->pkt_udp_hdr, src_port,
                     dst_port_0, 16);
@@ -4011,12 +4012,12 @@ test_tlb_tx_burst(void)
             initialize_eth_header(test_params->pkt_eth_hdr,
                 (struct rte_ether_addr *)src_mac,
                 (struct rte_ether_addr *)dst_mac_0,
-                ETHER_TYPE_IPv4, 0, 0);
+                RTE_ETHER_TYPE_IPv4, 0, 0);
         } else {
             initialize_eth_header(test_params->pkt_eth_hdr,
                 (struct rte_ether_addr *)test_params->default_slave_mac,
                 (struct rte_ether_addr *)dst_mac_0,
-                ETHER_TYPE_IPv4, 0, 0);
+                RTE_ETHER_TYPE_IPv4, 0, 0);
         }
         pktlen = initialize_udp_header(test_params->pkt_udp_hdr, src_port,
                 dst_port_0, 16);
@@ -4519,10 +4520,10 @@ test_alb_change_mac_in_reply_sent(void)
      * them through the bonding port.
      */
     pkt = rte_pktmbuf_alloc(test_params->mbuf_pool);
-    memcpy(client_mac.addr_bytes, mac_client1, ETHER_ADDR_LEN);
+    memcpy(client_mac.addr_bytes, mac_client1, RTE_ETHER_ADDR_LEN);
     eth_pkt = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
-    initialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_ARP, 0,
-            0);
+    initialize_eth_header(eth_pkt, &bond_mac, &client_mac,
+            RTE_ETHER_TYPE_ARP, 0, 0);
     arp_pkt = (struct rte_arp_hdr *)((char *)eth_pkt +
                     sizeof(struct rte_ether_hdr));
     initialize_arp_header(arp_pkt, &bond_mac, &client_mac, ip_host, ip_client1,
@@ -4530,10 +4531,10 @@ test_alb_change_mac_in_reply_sent(void)
     rte_eth_tx_burst(test_params->bonded_port_id, 0, &pkt, 1);
 
     pkt = rte_pktmbuf_alloc(test_params->mbuf_pool);
-    memcpy(client_mac.addr_bytes, mac_client2, ETHER_ADDR_LEN);
+    memcpy(client_mac.addr_bytes, mac_client2, RTE_ETHER_ADDR_LEN);
     eth_pkt = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
-    initialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_ARP, 0,
-            0);
+    initialize_eth_header(eth_pkt, &bond_mac, &client_mac,
+            RTE_ETHER_TYPE_ARP, 0, 0);
     arp_pkt = (struct rte_arp_hdr *)((char *)eth_pkt +
                     sizeof(struct rte_ether_hdr));
     initialize_arp_header(arp_pkt, &bond_mac, &client_mac, ip_host, ip_client2,
@@ -4541,10 +4542,10 @@ test_alb_change_mac_in_reply_sent(void)
     rte_eth_tx_burst(test_params->bonded_port_id, 0, &pkt, 1);
 
     pkt = rte_pktmbuf_alloc(test_params->mbuf_pool);
-    memcpy(client_mac.addr_bytes, mac_client3, ETHER_ADDR_LEN);
+    memcpy(client_mac.addr_bytes, mac_client3, RTE_ETHER_ADDR_LEN);
     eth_pkt = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
-    initialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_ARP, 0,
-            0);
+    initialize_eth_header(eth_pkt, &bond_mac, &client_mac,
+            RTE_ETHER_TYPE_ARP, 0, 0);
     arp_pkt = (struct rte_arp_hdr *)((char *)eth_pkt +
                     sizeof(struct rte_ether_hdr));
     initialize_arp_header(arp_pkt, &bond_mac, &client_mac, ip_host, ip_client3,
@@ -4552,10 +4553,10 @@ test_alb_change_mac_in_reply_sent(void)
     rte_eth_tx_burst(test_params->bonded_port_id, 0, &pkt, 1);
 
     pkt = rte_pktmbuf_alloc(test_params->mbuf_pool);
-    memcpy(client_mac.addr_bytes, mac_client4, ETHER_ADDR_LEN);
+    memcpy(client_mac.addr_bytes, mac_client4, RTE_ETHER_ADDR_LEN);
     eth_pkt = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
-    initialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_ARP, 0,
-            0);
+    initialize_eth_header(eth_pkt, &bond_mac, &client_mac,
+            RTE_ETHER_TYPE_ARP, 0, 0);
     arp_pkt = (struct rte_arp_hdr *)((char *)eth_pkt +
                     sizeof(struct rte_ether_hdr));
     initialize_arp_header(arp_pkt, &bond_mac, &client_mac, ip_host, ip_client4,
@@ -4640,10 +4641,10 @@ test_alb_reply_from_client(void)
      * them in the rx queue to be received by the bonding driver on rx_burst.
      */
    pkt = rte_pktmbuf_alloc(test_params->mbuf_pool);
-    memcpy(client_mac.addr_bytes, mac_client1, ETHER_ADDR_LEN);
+    memcpy(client_mac.addr_bytes, mac_client1, RTE_ETHER_ADDR_LEN);
    eth_pkt = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
-    initialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_ARP, 0,
-            0);
+    initialize_eth_header(eth_pkt, &bond_mac, &client_mac,
+            RTE_ETHER_TYPE_ARP, 0, 0);
    arp_pkt = (struct rte_arp_hdr *)((char *)eth_pkt +
                    sizeof(struct rte_ether_hdr));
    initialize_arp_header(arp_pkt, &client_mac, &bond_mac, ip_client1, ip_host,
@@ -4652,10 +4653,10 @@ test_alb_reply_from_client(void)
            1);
 
    pkt = rte_pktmbuf_alloc(test_params->mbuf_pool);
-    memcpy(client_mac.addr_bytes, mac_client2, ETHER_ADDR_LEN);
+    memcpy(client_mac.addr_bytes, mac_client2, RTE_ETHER_ADDR_LEN);
    eth_pkt = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
-    initialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_ARP, 0,
-            0);
+    initialize_eth_header(eth_pkt, &bond_mac, &client_mac,
+            RTE_ETHER_TYPE_ARP, 0, 0);
    arp_pkt = (struct rte_arp_hdr *)((char *)eth_pkt +
                    sizeof(struct rte_ether_hdr));
    initialize_arp_header(arp_pkt, &client_mac, &bond_mac, ip_client2, ip_host,
@@ -4664,10 +4665,10 @@ test_alb_reply_from_client(void)
            1);
 
    pkt = rte_pktmbuf_alloc(test_params->mbuf_pool);
-    memcpy(client_mac.addr_bytes, mac_client3, ETHER_ADDR_LEN);
+    memcpy(client_mac.addr_bytes, mac_client3, RTE_ETHER_ADDR_LEN);
    eth_pkt = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
-    initialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_ARP, 0,
-            0);
+    initialize_eth_header(eth_pkt, &bond_mac, &client_mac,
+            RTE_ETHER_TYPE_ARP, 0, 0);
    arp_pkt = (struct rte_arp_hdr *)((char *)eth_pkt +
                    sizeof(struct rte_ether_hdr));
    initialize_arp_header(arp_pkt, &client_mac, &bond_mac, ip_client3, ip_host,
@@ -4676,10 +4677,10 @@ test_alb_reply_from_client(void)
            1);
 
    pkt = rte_pktmbuf_alloc(test_params->mbuf_pool);
-    memcpy(client_mac.addr_bytes, mac_client4, ETHER_ADDR_LEN);
+    memcpy(client_mac.addr_bytes, mac_client4, RTE_ETHER_ADDR_LEN);
    eth_pkt = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
-    initialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_ARP, 0,
-            0);
+    initialize_eth_header(eth_pkt, &bond_mac, &client_mac,
+            RTE_ETHER_TYPE_ARP, 0, 0);
    arp_pkt = (struct rte_arp_hdr *)((char *)eth_pkt +
                    sizeof(struct rte_ether_hdr));
    initialize_arp_header(arp_pkt, &client_mac, &bond_mac, ip_client4, ip_host,
@@ -4774,16 +4775,16 @@ test_alb_receive_vlan_reply(void)
     * Generating packet with double VLAN header and placing it in the rx queue.
     */
    pkt = rte_pktmbuf_alloc(test_params->mbuf_pool);
-    memcpy(client_mac.addr_bytes, mac_client1, ETHER_ADDR_LEN);
+    memcpy(client_mac.addr_bytes, mac_client1, RTE_ETHER_ADDR_LEN);
    eth_pkt = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
-    initialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_VLAN, 0,
-            0);
+    initialize_eth_header(eth_pkt, &bond_mac, &client_mac,
+            RTE_ETHER_TYPE_VLAN, 0, 0);
    vlan_pkt = (struct rte_vlan_hdr *)((char *)(eth_pkt + 1));
    vlan_pkt->vlan_tci = rte_cpu_to_be_16(1);
-    vlan_pkt->eth_proto = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+    vlan_pkt->eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
    vlan_pkt = vlan_pkt+1;
    vlan_pkt->vlan_tci = rte_cpu_to_be_16(2);
-    vlan_pkt->eth_proto = rte_cpu_to_be_16(ETHER_TYPE_ARP);
+    vlan_pkt->eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP);
    arp_pkt = (struct rte_arp_hdr *)((char *)(vlan_pkt + 1));
    initialize_arp_header(arp_pkt, &client_mac, &bond_mac, ip_client1, ip_host,
            RTE_ARP_OP_REPLY);
@@ -4810,7 +4811,8 @@ test_alb_receive_vlan_reply(void)
            retval = -1;
            goto test_end;
        }
-        if (vlan_pkt->eth_proto != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
+        if (vlan_pkt->eth_proto != rte_cpu_to_be_16(
+                RTE_ETHER_TYPE_VLAN)) {
            retval = -1;
            goto test_end;
        }
@@ -4819,7 +4821,8 @@ test_alb_receive_vlan_reply(void)
            retval = -1;
            goto test_end;
        }
-        if (vlan_pkt->eth_proto != rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
+        if (vlan_pkt->eth_proto != rte_cpu_to_be_16(
+                RTE_ETHER_TYPE_ARP)) {
            retval = -1;
            goto test_end;
        }
@@ -108,7 +108,7 @@ static struct link_bonding_unittest_params test_params = {
 static struct rte_eth_conf default_pmd_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_NONE,
-        .max_rx_pkt_len = ETHER_MAX_LEN,
+        .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
        .split_hdr_size = 0,
    },
    .txmode = {
@@ -233,7 +233,7 @@ add_slave(struct slave_conf *slave, uint8_t start)
    RTE_VERIFY(slave->port_id != INVALID_PORT_ID);
 
    rte_ether_addr_copy(&slave_mac_default, &addr);
-    addr.addr_bytes[ETHER_ADDR_LEN - 1] = slave->port_id;
+    addr.addr_bytes[RTE_ETHER_ADDR_LEN - 1] = slave->port_id;
 
    rte_eth_dev_mac_addr_remove(slave->port_id, &addr);
 
@@ -299,7 +299,7 @@ lacp_recv_cb(uint16_t slave_id, struct rte_mbuf *lacp_pkt)
    RTE_VERIFY(lacp_pkt != NULL);
 
    hdr = rte_pktmbuf_mtod(lacp_pkt, struct rte_ether_hdr *);
-    RTE_VERIFY(hdr->ether_type == rte_cpu_to_be_16(ETHER_TYPE_SLOW));
+    RTE_VERIFY(hdr->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_SLOW));
 
    slow_hdr = rte_pktmbuf_mtod(lacp_pkt, struct slow_protocol_frame *);
    RTE_VERIFY(slow_hdr->slow_protocol.subtype == SLOW_SUBTYPE_LACP);
@@ -480,7 +480,7 @@ make_lacp_reply(struct slave_conf *slave, struct rte_mbuf *pkt)
 
    /* look for LACP */
    hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
-    if (hdr->ether_type != rte_cpu_to_be_16(ETHER_TYPE_SLOW))
+    if (hdr->ether_type != rte_cpu_to_be_16(RTE_ETHER_TYPE_SLOW))
        return 1;
 
    slow_hdr = rte_pktmbuf_mtod(pkt, struct slow_protocol_frame *);
@@ -492,7 +492,8 @@ make_lacp_reply(struct slave_conf *slave, struct rte_mbuf *pkt)
 
    /* Change source address to partner address */
    rte_ether_addr_copy(&parnter_mac_default, &slow_hdr->eth_hdr.s_addr);
-    slow_hdr->eth_hdr.s_addr.addr_bytes[ETHER_ADDR_LEN - 1] = slave->port_id;
+    slow_hdr->eth_hdr.s_addr.addr_bytes[RTE_ETHER_ADDR_LEN - 1] =
+        slave->port_id;
 
    lacp = (struct lacpdu *) &slow_hdr->slow_protocol;
    /* Save last received state */
@@ -930,11 +931,11 @@ test_mode4_rx(void)
    FOR_EACH_SLAVE(i, slave) {
        void *pkt = NULL;
 
-        dst_mac.addr_bytes[ETHER_ADDR_LEN - 1] = slave->port_id;
+        dst_mac.addr_bytes[RTE_ETHER_ADDR_LEN - 1] = slave->port_id;
        retval = generate_and_put_packets(slave, &src_mac, &dst_mac, 1);
        TEST_ASSERT_SUCCESS(retval, "Failed to generate test packet burst.");
 
-        src_mac.addr_bytes[ETHER_ADDR_LEN - 1] = slave->port_id;
+        src_mac.addr_bytes[RTE_ETHER_ADDR_LEN - 1] = slave->port_id;
        retval = generate_and_put_packets(slave, &src_mac, &bonded_mac, 1);
        TEST_ASSERT_SUCCESS(retval, "Failed to generate test packet burst.");
 
@@ -995,7 +996,7 @@ test_mode4_tx_burst(void)
 
    /* Prepare burst */
    for (pkts_cnt = 0; pkts_cnt < RTE_DIM(pkts); pkts_cnt++) {
-        dst_mac.addr_bytes[ETHER_ADDR_LEN - 1] = pkts_cnt;
+        dst_mac.addr_bytes[RTE_ETHER_ADDR_LEN - 1] = pkts_cnt;
        retval = generate_packets(&bonded_mac, &dst_mac, 1, &pkts[pkts_cnt]);
 
        if (retval != 1)
@@ -1068,7 +1069,7 @@ test_mode4_tx_burst(void)
 
    /* Prepare burst. */
    for (pkts_cnt = 0; pkts_cnt < RTE_DIM(pkts); pkts_cnt++) {
-        dst_mac.addr_bytes[ETHER_ADDR_LEN - 1] = pkts_cnt;
+        dst_mac.addr_bytes[RTE_ETHER_ADDR_LEN - 1] = pkts_cnt;
        retval = generate_packets(&bonded_mac, &dst_mac, 1, &pkts[pkts_cnt]);
 
        if (retval != 1)
@@ -1140,9 +1141,10 @@ init_marker(struct rte_mbuf *pkt, struct slave_conf *slave)
 
    /* Init source address */
    rte_ether_addr_copy(&parnter_mac_default, &marker_hdr->eth_hdr.s_addr);
-    marker_hdr->eth_hdr.s_addr.addr_bytes[ETHER_ADDR_LEN-1] = slave->port_id;
+    marker_hdr->eth_hdr.s_addr.addr_bytes[RTE_ETHER_ADDR_LEN - 1] =
+        slave->port_id;
 
-    marker_hdr->eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_SLOW);
+    marker_hdr->eth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_SLOW);
 
    marker_hdr->marker.subtype = SLOW_SUBTYPE_MARKER;
    marker_hdr->marker.version_number = 1;
@@ -1168,7 +1170,7 @@ test_mode4_marker(void)
    int retval;
    uint16_t nb_pkts;
    uint8_t i, j;
-    const uint16_t ethtype_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
+    const uint16_t ethtype_slow_be = rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);
 
    retval = initialize_bonded_device_with_slaves(TEST_MARKER_SLAVE_COUT,
            0);
@@ -1368,7 +1370,7 @@ test_mode4_ext_ctrl(void)
    rte_ether_addr_copy(&slow_protocol_mac_addr, &dst_mac);
 
    initialize_eth_header(&lacpdu.eth_hdr, &src_mac, &dst_mac,
-            ETHER_TYPE_SLOW, 0, 0);
+            RTE_ETHER_TYPE_SLOW, 0, 0);
 
    for (i = 0; i < SLAVE_COUNT; i++) {
        lacp_tx_buf[i] = rte_pktmbuf_alloc(test_params.mbuf_pool);
@@ -1422,7 +1424,7 @@ test_mode4_ext_lacp(void)
    rte_ether_addr_copy(&slow_protocol_mac_addr, &dst_mac);
 
    initialize_eth_header(&lacpdu.eth_hdr, &src_mac, &dst_mac,
-            ETHER_TYPE_SLOW, 0, 0);
+            RTE_ETHER_TYPE_SLOW, 0, 0);
 
    for (i = 0; i < SLAVE_COUNT; i++) {
        lacp_tx_buf[i] = rte_pktmbuf_alloc(test_params.mbuf_pool);
@@ -81,7 +81,7 @@ static struct link_bonding_rssconf_unittest_params test_params = {
 static struct rte_eth_conf default_pmd_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_NONE,
-        .max_rx_pkt_len = ETHER_MAX_LEN,
+        .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
        .split_hdr_size = 0,
    },
    .txmode = {
@@ -93,7 +93,7 @@ static struct rte_eth_conf default_pmd_conf = {
 static struct rte_eth_conf rss_pmd_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_RSS,
-        .max_rx_pkt_len = ETHER_MAX_LEN,
+        .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
        .split_hdr_size = 0,
    },
    .txmode = {
@ -63,7 +63,7 @@ static struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
|
||||
static struct rte_eth_conf port_conf = {
|
||||
.rxmode = {
|
||||
.mq_mode = ETH_MQ_RX_NONE,
|
||||
.max_rx_pkt_len = ETHER_MAX_LEN,
|
||||
.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
|
||||
.split_hdr_size = 0,
|
||||
},
|
||||
.txmode = {
|
||||
@ -173,8 +173,8 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
|
||||
static void
|
||||
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
|
||||
{
|
||||
char buf[ETHER_ADDR_FMT_SIZE];
|
||||
rte_ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
|
||||
char buf[RTE_ETHER_ADDR_FMT_SIZE];
|
||||
rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
|
||||
printf("%s%s", name, buf);
|
||||
}
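For reference, the renamed formatting macro is used exactly like the old one. A minimal standalone sketch of the pattern above (the helper name is ours, not from the commit):

    #include <stdio.h>
    #include <rte_ether.h>

    /* Format and print a MAC address; RTE_ETHER_ADDR_FMT_SIZE (18)
     * covers "XX:XX:XX:XX:XX:XX" plus the terminating NUL. */
    static void dump_mac(const struct rte_ether_addr *addr)
    {
            char buf[RTE_ETHER_ADDR_FMT_SIZE];

            rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
            printf("MAC: %s\n", buf);
    }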
@ -192,7 +192,7 @@ init_traffic(struct rte_mempool *mp,
initialize_eth_header(&pkt_eth_hdr,
(struct rte_ether_addr *)src_mac,
(struct rte_ether_addr *)dst_mac, ETHER_TYPE_IPv4, 0, 0);
(struct rte_ether_addr *)dst_mac, RTE_ETHER_TYPE_IPv4, 0, 0);
pktlen = initialize_ipv4_header(&pkt_ipv4_hdr,
IPV4_ADDR(10, 0, 0, 1),

@ -95,7 +95,7 @@ prepare_pkt(struct rte_sched_port *port, struct rte_mbuf *mbuf)
vlan1->vlan_tci = rte_cpu_to_be_16(SUBPORT);
vlan2->vlan_tci = rte_cpu_to_be_16(PIPE);
eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
ip_hdr->dst_addr = IPv4(0,0,TC,QUEUE);

@ -566,7 +566,7 @@ virtual_ethdev_create(const char *name, struct rte_ether_addr *mac_addr,
eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
eth_dev->data->mac_addrs = rte_zmalloc(name, ETHER_ADDR_LEN, 0);
eth_dev->data->mac_addrs = rte_zmalloc(name, RTE_ETHER_ADDR_LEN, 0);
if (eth_dev->data->mac_addrs == NULL)
goto err;

@ -56,7 +56,7 @@ configuration:
Interface name: kni#
force bind kernel thread to a core : NO
mbuf size: (rte_pktmbuf_data_room_size(pktmbuf_pool) - RTE_PKTMBUF_HEADROOM)
mtu: (conf.mbuf_size - ETHER_HDR_LEN)
mtu: (conf.mbuf_size - RTE_ETHER_HDR_LEN)
KNI control path is not supported with the PMD, since there is no physical
backend device by default.
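The MTU formula in the snippet above, written out as C for reference (a sketch under the doc's assumptions; the helper name is ours):

    #include <rte_ether.h>
    #include <rte_mbuf.h>

    /* Derive the MTU a KNI interface gets from its mempool: the mbuf
     * data room minus headroom gives conf.mbuf_size, then the Ethernet
     * header length is subtracted, as described above. */
    static uint16_t kni_mtu_from_pool(struct rte_mempool *pktmbuf_pool)
    {
            uint16_t mbuf_size = rte_pktmbuf_data_room_size(pktmbuf_pool) -
                            RTE_PKTMBUF_HEADROOM;

            return mbuf_size - RTE_ETHER_HDR_LEN;
    }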
@ -863,7 +863,7 @@ Item: ``VLAN``
Matches an 802.1Q/ad VLAN tag.
The corresponding standard outer EtherType (TPID) values are
``ETHER_TYPE_VLAN`` or ``ETHER_TYPE_QINQ``. It can be overridden by the
``RTE_ETHER_TYPE_VLAN`` or ``RTE_ETHER_TYPE_QINQ``. It can be overridden by the
preceding pattern item.
- ``tci``: tag control information.
@ -940,7 +940,7 @@ Item: ``E_TAG``
Matches an IEEE 802.1BR E-Tag header.
The corresponding standard outer EtherType (TPID) value is
``ETHER_TYPE_ETAG``. It can be overridden by the preceding pattern item.
``RTE_ETHER_TYPE_ETAG``. It can be overridden by the preceding pattern item.
- ``epcp_edei_in_ecid_b``: E-Tag control information (E-TCI), E-PCP (3b),
E-DEI (1b), ingress E-CID base (12b).
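For context, the override mentioned in both doc passages happens through the preceding ETH item's type field; a sketch (the variable name is ours; values in rte_flow_item_eth are big-endian, hence RTE_BE16()):

    #include <rte_flow.h>
    #include <rte_ether.h>

    /* An ETH spec whose type field carries the standard VLAN TPID;
     * placing it before a VLAN item overrides the default TPID. */
    static const struct rte_flow_item_eth eth_vlan_tpid_spec = {
            .type = RTE_BE16(RTE_ETHER_TYPE_VLAN),
    };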
@ -326,7 +326,7 @@ The Ethernet ports are configured with default settings using the
.. code-block:: c

static const struct rte_eth_conf port_conf_default = {
.rxmode = { .max_rx_pkt_len = ETHER_MAX_LEN }
.rxmode = { .max_rx_pkt_len = RTE_ETHER_MAX_LEN }
};
For this example the ports are set up with 1 RX and 1 TX queue using the

@ -229,7 +229,7 @@ The actual packet transmission is done in the mcast_send_pkt() function:
rte_ether_addr_copy(dest_addr, &ethdr->d_addr);
rte_ether_addr_copy(&ports_eth_addr[port], &ethdr->s_addr);
ethdr->ether_type = rte_be_to_cpu_16(ETHER_TYPE_IPv4);
ethdr->ether_type = rte_be_to_cpu_16(RTE_ETHER_TYPE_IPv4);
/* Put new packet into the output queue */

@ -160,7 +160,7 @@ The Ethernet ports are configured with default settings using the
.. code-block:: c

static const struct rte_eth_conf port_conf_default = {
.rxmode = { .max_rx_pkt_len = ETHER_MAX_LEN }
.rxmode = { .max_rx_pkt_len = RTE_ETHER_MAX_LEN }
};
For this example the ports are set up with 1 RX and 1 TX queue using the

@ -109,6 +109,8 @@ typedef uint32_t phandle;
typedef uint32_t gfp_t;
typedef uint32_t irqreturn_t;
#define ETHER_ADDR_LEN 6
#define IRQ_HANDLED 0
#define request_irq qbman_request_irq
#define free_irq qbman_free_irq

@ -352,7 +352,7 @@ eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_queues = 1;
dev_info->max_tx_queues = 1;
dev_info->min_mtu = ETHER_MIN_MTU;
dev_info->min_mtu = RTE_ETHER_MIN_MTU;
dev_info->max_mtu = ETH_AF_XDP_FRAME_SIZE - ETH_AF_XDP_DATA_HEADROOM;
dev_info->default_rxportconf.nb_queues = 1;
@ -816,7 +816,7 @@ get_iface_info(const char *if_name,
if (ioctl(sock, SIOCGIFHWADDR, &ifr))
goto error;
rte_memcpy(eth_addr, ifr.ifr_hwaddr.sa_data, ETHER_ADDR_LEN);
rte_memcpy(eth_addr, ifr.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN);
close(sock);
return 0;

@ -318,7 +318,7 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
dev->dev_ops = &ark_eth_dev_ops;
dev->data->mac_addrs = rte_zmalloc("ark", ETHER_ADDR_LEN, 0);
dev->data->mac_addrs = rte_zmalloc("ark", RTE_ETHER_ADDR_LEN, 0);
if (!dev->data->mac_addrs) {
PMD_DRV_LOG(ERR,
"Failed to allocated memory for storing mac address"
@ -385,7 +385,8 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
rte_eth_copy_pci_info(eth_dev, pci_dev);
eth_dev->data->mac_addrs = rte_zmalloc(name, ETHER_ADDR_LEN, 0);
eth_dev->data->mac_addrs = rte_zmalloc(name,
RTE_ETHER_ADDR_LEN, 0);
if (!eth_dev->data->mac_addrs) {
PMD_DRV_LOG(ERR,
"Memory allocation for MAC failed!"

@ -416,7 +416,8 @@ eth_atl_dev_init(struct rte_eth_dev *eth_dev)
atl_disable_intr(hw);
/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc("atlantic", ETHER_ADDR_LEN, 0);
eth_dev->data->mac_addrs = rte_zmalloc("atlantic",
RTE_ETHER_ADDR_LEN, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR, "MAC Malloc failed");
return -ENOMEM;
@ -897,7 +898,8 @@ int atl_macsec_config_txsc(struct rte_eth_dev *dev, uint8_t *mac)
ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
memset(&cfg->aq_macsec.txsc.mac, 0, sizeof(cfg->aq_macsec.txsc.mac));
memcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac, ETHER_ADDR_LEN);
memcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac,
RTE_ETHER_ADDR_LEN);
return 0;
}
@ -909,7 +911,8 @@ int atl_macsec_config_rxsc(struct rte_eth_dev *dev,
ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
memset(&cfg->aq_macsec.rxsc.mac, 0, sizeof(cfg->aq_macsec.rxsc.mac));
memcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac, ETHER_ADDR_LEN);
memcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac,
RTE_ETHER_ADDR_LEN);
cfg->aq_macsec.rxsc.pi = pi;
return 0;
@ -1604,11 +1607,11 @@ static int
atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
struct rte_eth_dev_info dev_info;
uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
atl_dev_info_get(dev, &dev_info);
if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
return -EINVAL;
/* update max frame size */
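The same validation recurs across the drivers below; a generic sketch of the check (helper name and parameter are ours; max_rx_pktlen would come from rte_eth_dev_info_get()):

    #include <rte_ether.h>

    /* An MTU is acceptable when it is at least RTE_ETHER_MIN_MTU and
     * the resulting L2 frame (MTU + header + CRC) fits the device limit. */
    static int mtu_is_valid(uint16_t mtu, uint32_t max_rx_pktlen)
    {
            uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

            return mtu >= RTE_ETHER_MIN_MTU && frame_size <= max_rx_pktlen;
    }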
@ -88,7 +88,7 @@ static void avp_dev_stats_reset(struct rte_eth_dev *dev);
#define AVP_MAX_RX_BURST 64
#define AVP_MAX_TX_BURST 64
#define AVP_MAX_MAC_ADDRS 1
#define AVP_MIN_RX_BUFSIZE ETHER_MIN_LEN
#define AVP_MIN_RX_BUFSIZE RTE_ETHER_MIN_LEN
/*
@ -867,7 +867,7 @@ avp_dev_create(struct rte_pci_device *pci_dev,
avp->host_features = host_info->features;
rte_spinlock_init(&avp->lock);
memcpy(&avp->ethaddr.addr_bytes[0],
host_info->ethaddr, ETHER_ADDR_LEN);
host_info->ethaddr, RTE_ETHER_ADDR_LEN);
/* adjust max values to not exceed our max */
avp->max_tx_queues =
RTE_MIN(host_info->max_tx_queues, RTE_AVP_MAX_QUEUES);
@ -1006,10 +1006,11 @@ eth_avp_dev_init(struct rte_eth_dev *eth_dev)
}
/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc("avp_ethdev", ETHER_ADDR_LEN, 0);
eth_dev->data->mac_addrs = rte_zmalloc("avp_ethdev",
RTE_ETHER_ADDR_LEN, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_DRV_LOG(ERR, "Failed to allocate %d bytes needed to store MAC addresses\n",
ETHER_ADDR_LEN);
RTE_ETHER_ADDR_LEN);
return -ENOMEM;
}

@ -345,7 +345,7 @@ struct rte_avp_device_info {
/* Ethernet info */
char ethaddr[ETH_ALEN];
#else
char ethaddr[ETHER_ADDR_LEN];
char ethaddr[RTE_ETHER_ADDR_LEN];
#endif
uint8_t mode; /**< device mode, i.e guest, host, trace */

@ -10,8 +10,8 @@
static inline unsigned int axgbe_get_max_frame(struct axgbe_port *pdata)
{
return pdata->eth_dev->data->mtu + ETHER_HDR_LEN +
ETHER_CRC_LEN + VLAN_HLEN;
return pdata->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
RTE_ETHER_CRC_LEN + VLAN_HLEN;
}
/* query busy bit */

@ -626,11 +626,11 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;
eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr",
ETHER_ADDR_LEN, 0);
RTE_ETHER_ADDR_LEN, 0);
if (!eth_dev->data->mac_addrs) {
PMD_INIT_LOG(ERR,
"Failed to alloc %u bytes needed to store MAC addr tbl",
ETHER_ADDR_LEN);
RTE_ETHER_ADDR_LEN);
return -ENOMEM;
}

@ -15,7 +15,7 @@
#define AXGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
#define AXGBE_RX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
#define AXGBE_RX_MIN_BUF_SIZE (ETHER_MAX_LEN + VLAN_HLEN)
#define AXGBE_RX_MIN_BUF_SIZE (RTE_ETHER_MAX_LEN + VLAN_HLEN)
#define AXGBE_MAX_MAC_ADDRS 1
#define AXGBE_RX_BUF_ALIGN 64

@ -75,7 +75,7 @@ int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
rxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs +
DMA_CH_RDTR_LO);
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = ETHER_CRC_LEN;
rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;

@ -9787,13 +9787,13 @@ int bnx2x_attach(struct bnx2x_softc *sc)
bnx2x_get_phy_info(sc);
} else {
/* Left mac of VF unfilled, PF should set it for VF */
memset(sc->link_params.mac_addr, 0, ETHER_ADDR_LEN);
memset(sc->link_params.mac_addr, 0, RTE_ETHER_ADDR_LEN);
}
sc->wol = 0;
/* set the default MTU (changed via ifconfig) */
sc->mtu = ETHER_MTU;
sc->mtu = RTE_ETHER_MTU;
bnx2x_set_modes_bitmap(sc);

@ -38,7 +38,7 @@ typedef rte_iova_t ecore_dma_addr_t; /* expected to be 64 bit wide */
typedef volatile int ecore_atomic_t;
#define ETH_ALEN ETHER_ADDR_LEN /* 6 */
#define ETH_ALEN RTE_ETHER_ADDR_LEN /* 6 */
#define ECORE_SWCID_SHIFT 17
#define ECORE_SWCID_MASK ((0x1 << ECORE_SWCID_SHIFT) - 1)

@ -309,7 +309,7 @@ struct bnxt {
struct bnxt_irq *irq_tbl;
#define MAX_NUM_MAC_ADDR 32
uint8_t mac_addr[ETHER_ADDR_LEN];
uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
uint16_t hwrm_cmd_seq;
uint16_t kong_cmd_seq;
@ -326,7 +326,7 @@ struct bnxt {
uint8_t tx_cosq_id;
uint16_t fw_fid;
uint8_t dflt_mac_addr[ETHER_ADDR_LEN];
uint8_t dflt_mac_addr[RTE_ETHER_ADDR_LEN];
uint16_t max_rsscos_ctx;
uint16_t max_cp_rings;
uint16_t max_tx_rings;

@ -214,7 +214,7 @@ static int bnxt_init_chip(struct bnxt *bp)
/* disable uio/vfio intr/eventfd mapping */
rte_intr_disable(intr_handle);
if (bp->eth_dev->data->mtu > ETHER_MTU) {
if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) {
bp->eth_dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
bp->flags |= BNXT_FLAG_JUMBO;
@ -462,8 +462,8 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
/* Fast path specifics */
dev_info->min_rx_bufsize = 1;
dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN
+ VLAN_TAG_SIZE * 2;
dev_info->max_rx_pktlen = BNXT_MAX_MTU + RTE_ETHER_HDR_LEN +
RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
@ -595,9 +595,9 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
eth_dev->data->mtu =
eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE *
BNXT_NUM_VLANS;
eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE *
BNXT_NUM_VLANS;
bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
}
return 0;
@ -750,7 +750,7 @@ static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
bnxt_filter_info, next);
bnxt_hwrm_clear_l2_filter(bp, filter);
filter->mac_index = INVALID_MAC_INDEX;
memset(&filter->l2_addr, 0, ETHER_ADDR_LEN);
memset(&filter->l2_addr, 0, RTE_ETHER_ADDR_LEN);
STAILQ_INSERT_TAIL(&bp->free_filter_list,
filter, next);
}
@ -791,7 +791,7 @@ static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
}
STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
filter->mac_index = index;
memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);
memcpy(filter->l2_addr, mac_addr, RTE_ETHER_ADDR_LEN);
return bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
}
@ -1312,7 +1312,7 @@ static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
new_filter->mac_index =
filter->mac_index;
memcpy(new_filter->l2_addr, filter->l2_addr,
ETHER_ADDR_LEN);
RTE_ETHER_ADDR_LEN);
/* MAC only filter */
rc = bnxt_hwrm_set_l2_filter(bp,
vnic->fw_vnic_id,
@ -1381,7 +1381,7 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
/* Inherit MAC from the previous filter */
new_filter->mac_index = filter->mac_index;
memcpy(new_filter->l2_addr, filter->l2_addr,
ETHER_ADDR_LEN);
RTE_ETHER_ADDR_LEN);
/* MAC + VLAN ID filter */
new_filter->l2_ivlan = vlan_id;
new_filter->l2_ivlan_mask = 0xF000;
@ -1472,8 +1472,8 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev,
rc = bnxt_hwrm_clear_l2_filter(bp, filter);
if (rc)
return rc;
memcpy(filter->l2_addr, bp->mac_addr, ETHER_ADDR_LEN);
memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
memcpy(filter->l2_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
filter->enables |=
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
@ -1508,8 +1508,9 @@ bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
/* TODO Check for Duplicate mcast addresses */
vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
for (i = 0; i < nb_mc_addr; i++) {
memcpy(vnic->mc_list + off, &mc_addr_list[i], ETHER_ADDR_LEN);
off += ETHER_ADDR_LEN;
memcpy(vnic->mc_list + off, &mc_addr_list[i],
RTE_ETHER_ADDR_LEN);
off += RTE_ETHER_ADDR_LEN;
}
vnic->mc_addr_cnt = i;
@ -1582,13 +1583,13 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
bnxt_dev_info_get_op(eth_dev, &dev_info);
if (new_mtu < ETHER_MIN_MTU || new_mtu > BNXT_MAX_MTU) {
if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > BNXT_MAX_MTU) {
PMD_DRV_LOG(ERR, "MTU requested must be within (%d, %d)\n",
ETHER_MIN_MTU, BNXT_MAX_MTU);
RTE_ETHER_MIN_MTU, BNXT_MAX_MTU);
return -EINVAL;
}
if (new_mtu > ETHER_MTU) {
if (new_mtu > RTE_ETHER_MTU) {
bp->flags |= BNXT_FLAG_JUMBO;
bp->eth_dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
@ -1599,7 +1600,8 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
}
eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
VLAN_TAG_SIZE * 2;
eth_dev->data->mtu = new_mtu;
PMD_DRV_LOG(INFO, "New MTU is %d\n", eth_dev->data->mtu);
@ -1608,8 +1610,8 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
uint16_t size = 0;
vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
vnic->mru = bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
rc = bnxt_hwrm_vnic_cfg(bp, vnic);
if (rc)
break;
@ -1794,8 +1796,8 @@ bnxt_match_and_validate_ether_filter(struct bnxt *bp,
int match = 0;
*ret = 0;
if (efilter->ether_type == ETHER_TYPE_IPv4 ||
efilter->ether_type == ETHER_TYPE_IPv6) {
if (efilter->ether_type == RTE_ETHER_TYPE_IPv4 ||
efilter->ether_type == RTE_ETHER_TYPE_IPv6) {
PMD_DRV_LOG(ERR, "invalid ether_type(0x%04x) in"
" ethertype filter.", efilter->ether_type);
*ret = -EINVAL;
@ -1818,7 +1820,7 @@ bnxt_match_and_validate_ether_filter(struct bnxt *bp,
if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
STAILQ_FOREACH(mfilter, &vnic0->filter, next) {
if ((!memcmp(efilter->mac_addr.addr_bytes,
mfilter->l2_addr, ETHER_ADDR_LEN) &&
mfilter->l2_addr, RTE_ETHER_ADDR_LEN) &&
mfilter->flags ==
HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP &&
mfilter->ethertype == efilter->ether_type)) {
@ -1829,7 +1831,7 @@ bnxt_match_and_validate_ether_filter(struct bnxt *bp,
} else {
STAILQ_FOREACH(mfilter, &vnic->filter, next)
if ((!memcmp(efilter->mac_addr.addr_bytes,
mfilter->l2_addr, ETHER_ADDR_LEN) &&
mfilter->l2_addr, RTE_ETHER_ADDR_LEN) &&
mfilter->ethertype == efilter->ether_type &&
mfilter->flags ==
HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) {
@ -1884,9 +1886,9 @@ bnxt_ethertype_filter(struct rte_eth_dev *dev,
}
bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
memcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes,
ETHER_ADDR_LEN);
RTE_ETHER_ADDR_LEN);
memcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes,
ETHER_ADDR_LEN);
RTE_ETHER_ADDR_LEN);
bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
bfilter->ethertype = efilter->ether_type;
bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
@ -2397,7 +2399,7 @@ bnxt_parse_fdir_filter(struct bnxt *bp,
//filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
} else {
filter->dst_id = vnic->fw_vnic_id;
for (i = 0; i < ETHER_ADDR_LEN; i++)
for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
if (filter->dst_macaddr[i] == 0x00)
filter1 = STAILQ_FIRST(&vnic0->filter);
else
@ -2441,13 +2443,14 @@ bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf,
mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
mf->l2_ivlan == nf->l2_ivlan &&
mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
!memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
!memcmp(mf->l2_addr, nf->l2_addr,
RTE_ETHER_ADDR_LEN) &&
!memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
ETHER_ADDR_LEN) &&
RTE_ETHER_ADDR_LEN) &&
!memcmp(mf->src_macaddr, nf->src_macaddr,
ETHER_ADDR_LEN) &&
RTE_ETHER_ADDR_LEN) &&
!memcmp(mf->dst_macaddr, nf->dst_macaddr,
ETHER_ADDR_LEN) &&
RTE_ETHER_ADDR_LEN) &&
!memcmp(mf->src_ipaddr, nf->src_ipaddr,
sizeof(nf->src_ipaddr)) &&
!memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
@ -3354,16 +3357,16 @@ skip_ext_stats:
goto error_free;
}
eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
ETHER_ADDR_LEN * bp->max_l2_ctx, 0);
RTE_ETHER_ADDR_LEN * bp->max_l2_ctx, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_DRV_LOG(ERR,
"Failed to alloc %u bytes needed to store MAC addr tbl",
ETHER_ADDR_LEN * bp->max_l2_ctx);
RTE_ETHER_ADDR_LEN * bp->max_l2_ctx);
rc = -ENOMEM;
goto error_free;
}
if (bnxt_check_zero_bytes(bp->dflt_mac_addr, ETHER_ADDR_LEN)) {
if (bnxt_check_zero_bytes(bp->dflt_mac_addr, RTE_ETHER_ADDR_LEN)) {
PMD_DRV_LOG(ERR,
"Invalid MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
bp->dflt_mac_addr[0], bp->dflt_mac_addr[1],
@ -3374,7 +3377,7 @@ skip_ext_stats:
}
/* Copy the permanent MAC from the qcap response address now. */
memcpy(bp->mac_addr, bp->dflt_mac_addr, sizeof(bp->mac_addr));
memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN);
if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
/* 1 ring is for default completion ring */
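Note that the commit ships no backward-compatibility aliases, so out-of-tree code using the old names stops compiling against this tree. A hypothetical migration shim (illustrative only, not part of DPDK or of this commit) could bridge the gap:

    /* Hypothetical shim -- NOT part of this commit: map a few removed
     * names onto their RTE_-prefixed replacements during a migration. */
    #include <rte_ether.h>

    #ifndef ETHER_ADDR_LEN
    #define ETHER_ADDR_LEN RTE_ETHER_ADDR_LEN
    #define ETHER_HDR_LEN  RTE_ETHER_HDR_LEN
    #define ETHER_CRC_LEN  RTE_ETHER_CRC_LEN
    #define ETHER_MTU      RTE_ETHER_MTU
    #define ETHER_MAX_LEN  RTE_ETHER_MAX_LEN
    #define ETHER_MIN_MTU  RTE_ETHER_MIN_MTU
    #endif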
@ -39,8 +39,8 @@ struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp)
filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
memcpy(filter->l2_addr, bp->eth_dev->data->mac_addrs->addr_bytes,
ETHER_ADDR_LEN);
memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
RTE_ETHER_ADDR_LEN);
memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
return filter;
}

@ -25,14 +25,14 @@ struct bnxt_filter_info {
/* Filter Characteristics */
uint32_t flags;
uint32_t enables;
uint8_t l2_addr[ETHER_ADDR_LEN];
uint8_t l2_addr_mask[ETHER_ADDR_LEN];
uint8_t l2_addr[RTE_ETHER_ADDR_LEN];
uint8_t l2_addr_mask[RTE_ETHER_ADDR_LEN];
uint16_t l2_ovlan;
uint16_t l2_ovlan_mask;
uint16_t l2_ivlan;
uint16_t l2_ivlan_mask;
uint8_t t_l2_addr[ETHER_ADDR_LEN];
uint8_t t_l2_addr_mask[ETHER_ADDR_LEN];
uint8_t t_l2_addr[RTE_ETHER_ADDR_LEN];
uint8_t t_l2_addr_mask[RTE_ETHER_ADDR_LEN];
uint16_t t_l2_ovlan;
uint16_t t_l2_ovlan_mask;
uint16_t t_l2_ivlan;

@ -682,7 +682,7 @@ bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
f0 = STAILQ_FIRST(&vnic0->filter);
/* This flow has same DST MAC as the port/l2 filter. */
if (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0)
if (memcmp(f0->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN) == 0)
return f0;
/* This flow needs DST MAC which is not same as port/l2 */
@ -694,8 +694,8 @@ bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
memcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN);
memset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
memcpy(filter1->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN);
memset(filter1->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
filter1);
if (rc) {
@ -951,13 +951,14 @@ bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
mf->l2_ivlan == nf->l2_ivlan &&
mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
!memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
!memcmp(mf->l2_addr, nf->l2_addr,
RTE_ETHER_ADDR_LEN) &&
!memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
ETHER_ADDR_LEN) &&
RTE_ETHER_ADDR_LEN) &&
!memcmp(mf->src_macaddr, nf->src_macaddr,
ETHER_ADDR_LEN) &&
RTE_ETHER_ADDR_LEN) &&
!memcmp(mf->dst_macaddr, nf->dst_macaddr,
ETHER_ADDR_LEN) &&
RTE_ETHER_ADDR_LEN) &&
!memcmp(mf->src_ipaddr, nf->src_ipaddr,
sizeof(nf->src_ipaddr)) &&
!memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,

@ -393,11 +393,11 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
if (enables &
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
memcpy(req.l2_addr, filter->l2_addr,
ETHER_ADDR_LEN);
RTE_ETHER_ADDR_LEN);
if (enables &
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
memcpy(req.l2_addr_mask, filter->l2_addr_mask,
ETHER_ADDR_LEN);
RTE_ETHER_ADDR_LEN);
if (enables &
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
req.l2_ovlan = filter->l2_ovlan;
@ -571,7 +571,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
}
bp->fw_fid = rte_le_to_cpu_32(resp->fid);
memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
memcpy(bp->dflt_mac_addr, &resp->mac_address, RTE_ETHER_ADDR_LEN);
bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
@ -1329,8 +1329,8 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
ETHER_CRC_LEN + VLAN_TAG_SIZE;
vnic->mru = bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
HWRM_PREP(req, VNIC_ALLOC, BNXT_USE_CHIMP_MB);
if (vnic->func_default)
@ -2516,8 +2516,8 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
ETHER_CRC_LEN + VLAN_TAG_SIZE *
req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
BNXT_NUM_VLANS);
req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
@ -2554,11 +2554,11 @@ static void populate_vf_func_cfg_req(struct bnxt *bp,
HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
ETHER_CRC_LEN + VLAN_TAG_SIZE *
req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
BNXT_NUM_VLANS);
req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
ETHER_CRC_LEN + VLAN_TAG_SIZE *
req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
BNXT_NUM_VLANS);
req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
(num_vfs + 1));
@ -2589,7 +2589,8 @@ static void add_random_mac_if_needed(struct bnxt *bp,
rte_eth_random_addr(cfg_req->dflt_mac_addr);
bp->pf.vf_info[vf].random_mac = true;
} else {
memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes,
RTE_ETHER_ADDR_LEN);
}
}
@ -3125,7 +3126,7 @@ int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
HWRM_CHECK_RESULT();
memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
memcpy(mac->addr_bytes, resp->mac_address, RTE_ETHER_ADDR_LEN);
HWRM_UNLOCK();
@ -3696,11 +3697,11 @@ int bnxt_hwrm_set_em_filter(struct bnxt *bp,
if (enables &
HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
memcpy(req.src_macaddr, filter->src_macaddr,
ETHER_ADDR_LEN);
RTE_ETHER_ADDR_LEN);
if (enables &
HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
memcpy(req.dst_macaddr, filter->dst_macaddr,
ETHER_ADDR_LEN);
RTE_ETHER_ADDR_LEN);
if (enables &
HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
req.ovlan_vid = filter->l2_ovlan;
@ -3799,11 +3800,11 @@ int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
if (enables &
HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
memcpy(req.src_macaddr, filter->src_macaddr,
ETHER_ADDR_LEN);
RTE_ETHER_ADDR_LEN);
//if (enables &
//HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
//memcpy(req.dst_macaddr, filter->dst_macaddr,
//ETHER_ADDR_LEN);
//RTE_ETHER_ADDR_LEN);
if (enables &
HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
req.ethertype = rte_cpu_to_be_16(filter->ethertype);

@ -344,8 +344,8 @@ int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
bp->grp_info[queue_index].ag_fw_ring_id = ring->fw_ring_id;
B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
rxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN +
ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);
rxq->rx_buf_use_size = BNXT_MAX_MTU + RTE_ETHER_HDR_LEN +
RTE_ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);
if (bp->eth_dev->data->rx_queue_state[queue_index] ==
RTE_ETH_QUEUE_STATE_STARTED) {
@ -452,8 +452,8 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
bp->grp_info[i].ag_fw_ring_id = ring->fw_ring_id;
B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
rxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN +
ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);
rxq->rx_buf_use_size = BNXT_MAX_MTU + RTE_ETHER_HDR_LEN +
RTE_ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);
if (bnxt_init_one_rx_ring(rxq)) {
PMD_DRV_LOG(ERR, "bnxt_init_one_rx_ring failed!\n");
bnxt_rx_queue_release_op(rxq);

@ -334,7 +334,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
rxq->queue_id = queue_idx;
rxq->port_id = eth_dev->data->port_id;
if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = ETHER_CRC_LEN;
rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;

@ -640,8 +640,8 @@ int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
struct bnxt_rx_ring_info *rxr;
struct bnxt_ring *ring;
rxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN +
(2 * VLAN_TAG_SIZE);
rxq->rx_buf_use_size = BNXT_MAX_MTU + RTE_ETHER_HDR_LEN +
RTE_ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);
rxq->rx_buf_size = rxq->rx_buf_use_size + sizeof(struct rte_mbuf);
rxr = rte_zmalloc_socket("bnxt_rx_ring",

@ -116,7 +116,7 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
uint32_t entry_length = RTE_CACHE_LINE_ROUNDUP(
HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table) +
HW_HASH_KEY_SIZE +
BNXT_MAX_MC_ADDRS * ETHER_ADDR_LEN);
BNXT_MAX_MC_ADDRS * RTE_ETHER_ADDR_LEN);
uint16_t max_vnics;
int i;
rte_iova_t mz_phys_addr;

@ -698,7 +698,7 @@ int rte_pmd_bnxt_mac_addr_add(uint16_t port, struct rte_ether_addr *addr,
filter->enables ==
(HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK) &&
memcmp(addr, filter->l2_addr, ETHER_ADDR_LEN) == 0) {
memcmp(addr, filter->l2_addr, RTE_ETHER_ADDR_LEN) == 0) {
bnxt_hwrm_clear_l2_filter(bp, filter);
break;
}
@ -711,12 +711,12 @@ int rte_pmd_bnxt_mac_addr_add(uint16_t port, struct rte_ether_addr *addr,
filter->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
memcpy(filter->l2_addr, addr, ETHER_ADDR_LEN);
memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
memcpy(filter->l2_addr, addr, RTE_ETHER_ADDR_LEN);
memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
/* Do not add a filter for the default MAC */
if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf_id, &dflt_mac) ||
memcmp(filter->l2_addr, dflt_mac.addr_bytes, ETHER_ADDR_LEN))
memcmp(filter->l2_addr, dflt_mac.addr_bytes, RTE_ETHER_ADDR_LEN))
rc = bnxt_hwrm_set_l2_filter(bp, vnic.fw_vnic_id, filter);
exit:

@ -577,7 +577,7 @@ tx_machine(struct bond_dev_private *internals, uint16_t slave_id)
/* Source and destination MAC */
rte_ether_addr_copy(&lacp_mac_addr, &hdr->eth_hdr.d_addr);
rte_eth_macaddr_get(slave_id, &hdr->eth_hdr.s_addr);
hdr->eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_SLOW);
hdr->eth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_SLOW);
lacpdu = &hdr->lacpdu;
memset(lacpdu, 0, sizeof(*lacpdu));

@ -216,9 +216,9 @@ bond_mode_alb_arp_upd(struct client_data *client_info,
rte_ether_addr_copy(&client_info->app_mac, &eth_h->s_addr);
rte_ether_addr_copy(&client_info->cli_mac, &eth_h->d_addr);
if (client_info->vlan_count > 0)
eth_h->ether_type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
eth_h->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
else
eth_h->ether_type = rte_cpu_to_be_16(ETHER_TYPE_ARP);
eth_h->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP);
arp_h = (struct rte_arp_hdr *)(
(char *)eth_h + sizeof(struct rte_ether_hdr)
@ -233,8 +233,8 @@ bond_mode_alb_arp_upd(struct client_data *client_info,
arp_h->arp_data.arp_tip = client_info->cli_ip;
arp_h->arp_hardware = rte_cpu_to_be_16(RTE_ARP_HRD_ETHER);
arp_h->arp_protocol = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
arp_h->arp_hlen = ETHER_ADDR_LEN;
arp_h->arp_protocol = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
arp_h->arp_hlen = RTE_ETHER_ADDR_LEN;
arp_h->arp_plen = sizeof(uint32_t);
arp_h->arp_opcode = rte_cpu_to_be_16(RTE_ARP_OP_REPLY);

@ -37,15 +37,15 @@ get_vlan_offset(struct rte_ether_hdr *eth_hdr, uint16_t *proto)
{
size_t vlan_offset = 0;
if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto ||
rte_cpu_to_be_16(ETHER_TYPE_QINQ) == *proto) {
if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto ||
rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) == *proto) {
struct rte_vlan_hdr *vlan_hdr =
(struct rte_vlan_hdr *)(eth_hdr + 1);
vlan_offset = sizeof(struct rte_vlan_hdr);
*proto = vlan_hdr->eth_proto;
if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
if (rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) == *proto) {
vlan_hdr = vlan_hdr + 1;
*proto = vlan_hdr->eth_proto;
vlan_offset += sizeof(struct rte_vlan_hdr);
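A sketch of how a helper like this is typically used: skip the VLAN/QinQ tags, then dispatch on the remaining EtherType (the function name is ours; get_vlan_offset() refers to the bonding-internal helper in the hunk above, so this only compiles in the same file):

    #include <rte_ether.h>
    #include <rte_byteorder.h>
    #include <rte_mbuf.h>

    /* Returns nonzero when the mbuf carries IPv4, even behind one or
     * two VLAN tags. */
    static int pkt_is_ipv4(struct rte_mbuf *m)
    {
            struct rte_ether_hdr *eth =
                    rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
            uint16_t proto = eth->ether_type;

            (void)get_vlan_offset(eth, &proto); /* advances proto past tags */
            return proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
    }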
@ -108,7 +108,8 @@ bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
static inline uint8_t
is_lacp_packets(uint16_t ethertype, uint8_t subtype, struct rte_mbuf *mbuf)
{
const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
const uint16_t ether_type_slow_be =
rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);
return !((mbuf->ol_flags & PKT_RX_VLAN) ? mbuf->vlan_tci : 0) &&
(ethertype == ether_type_slow_be &&
@ -122,7 +123,7 @@ is_lacp_packets(uint16_t ethertype, uint8_t subtype, struct rte_mbuf *mbuf)
static struct rte_flow_item_eth flow_item_eth_type_8023ad = {
.dst.addr_bytes = { 0 },
.src.addr_bytes = { 0 },
.type = RTE_BE16(ETHER_TYPE_SLOW),
.type = RTE_BE16(RTE_ETHER_TYPE_SLOW),
};
static struct rte_flow_item_eth flow_item_eth_mask_type_8023ad = {
@ -398,7 +399,8 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
struct rte_ether_addr *bond_mac = bonded_eth_dev->data->mac_addrs;
struct rte_ether_hdr *hdr;
const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
const uint16_t ether_type_slow_be =
rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);
uint16_t num_rx_total = 0; /* Total number of received packets */
uint16_t slaves[RTE_MAX_ETHPORTS];
uint16_t slave_count, idx;
@ -605,7 +607,7 @@ mode6_debug(const char __attribute__((unused)) *info,
strlcpy(buf, info, 16);
#endif
if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4)) {
ipv4_h = (struct ipv4_hdr *)((char *)(eth_h + 1) + offset);
ipv4_addr_to_dot(ipv4_h->src_addr, src_ip, MaxIPv4String);
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
@ -615,7 +617,7 @@ mode6_debug(const char __attribute__((unused)) *info,
update_client_stats(ipv4_h->src_addr, port, burstnumber);
}
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
arp_h = (struct rte_arp_hdr *)((char *)(eth_h + 1) + offset);
ipv4_addr_to_dot(arp_h->arp_data.arp_sip, src_ip, MaxIPv4String);
ipv4_addr_to_dot(arp_h->arp_data.arp_tip, dst_ip, MaxIPv4String);
@ -644,14 +646,14 @@ bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
ether_type = eth_h->ether_type;
offset = get_vlan_offset(eth_h, &ether_type);
if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
mode6_debug("RX ARP:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
bond_mode_alb_arp_recv(eth_h, offset, internals);
}
#if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4))
mode6_debug("RX IPv4:", eth_h, bufs[i]->port, &burstnumberRX);
#endif
}
@ -809,12 +811,12 @@ burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
vlan_offset = get_vlan_offset(eth_hdr, &proto);
if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4) == proto) {
struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
((char *)(eth_hdr + 1) + vlan_offset);
l3hash = ipv4_hash(ipv4_hdr);
} else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
} else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6) == proto) {
struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
((char *)(eth_hdr + 1) + vlan_offset);
l3hash = ipv6_hash(ipv6_hdr);
@ -849,7 +851,7 @@ burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
l3hash = 0;
l4hash = 0;
if (rte_cpu_to_be_16(ETHER_TYPE_IPv4) == proto) {
if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4) == proto) {
struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)
((char *)(eth_hdr + 1) + vlan_offset);
size_t ip_hdr_offset;
@ -880,7 +882,7 @@ burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
l4hash = HASH_L4_PORTS(udp_hdr);
}
}
} else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
} else if (rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6) == proto) {
struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
((char *)(eth_hdr + 1) + vlan_offset);
l3hash = ipv6_hash(ipv6_hdr);
@ -1107,7 +1109,7 @@ bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
ether_type = eth_h->ether_type;
offset = get_vlan_offset(eth_h, &ether_type);
if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) {
slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);
/* Change src mac in eth header */
@ -2252,7 +2254,7 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen ?
internals->candidate_max_rx_pktlen :
ETHER_MAX_JUMBO_FRAME_LEN;
RTE_ETHER_MAX_JUMBO_FRAME_LEN;
/* Max number of tx/rx queues that the bonded device can support is the
* minimum values of the bonded slaves, as all slaves must be capable
@ -3084,12 +3086,12 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
eth_dev->data->nb_tx_queues = (uint16_t)1;
/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc_socket(name, ETHER_ADDR_LEN *
eth_dev->data->mac_addrs = rte_zmalloc_socket(name, RTE_ETHER_ADDR_LEN *
BOND_MAX_MAC_ADDRS, 0, socket_id);
if (eth_dev->data->mac_addrs == NULL) {
RTE_BOND_LOG(ERR,
"Failed to allocate %u bytes needed to store MAC addresses",
ETHER_ADDR_LEN * BOND_MAX_MAC_ADDRS);
RTE_ETHER_ADDR_LEN * BOND_MAX_MAC_ADDRS);
goto err;
}
@ -3148,7 +3150,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
}
vlan_filter_bmp_size =
rte_bitmap_get_memory_footprint(ETHER_MAX_VLAN_ID + 1);
rte_bitmap_get_memory_footprint(RTE_ETHER_MAX_VLAN_ID + 1);
internals->vlan_filter_bmpmem = rte_malloc(name, vlan_filter_bmp_size,
RTE_CACHE_LINE_SIZE);
if (internals->vlan_filter_bmpmem == NULL) {
@ -3158,7 +3160,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
goto err;
}
internals->vlan_filter_bmp = rte_bitmap_init(ETHER_MAX_VLAN_ID + 1,
internals->vlan_filter_bmp = rte_bitmap_init(RTE_ETHER_MAX_VLAN_ID + 1,
internals->vlan_filter_bmpmem, vlan_filter_bmp_size);
if (internals->vlan_filter_bmp == NULL) {
RTE_BOND_LOG(ERR,

@ -15,8 +15,9 @@
#define CXGBE_DEFAULT_TX_DESC_SIZE 1024 /* Default TX ring size */
#define CXGBE_DEFAULT_RX_DESC_SIZE 1024 /* Default RX ring size */
#define CXGBE_MIN_RX_BUFSIZE ETHER_MIN_MTU /* min buf size */
#define CXGBE_MAX_RX_PKTLEN (9000 + ETHER_HDR_LEN + ETHER_CRC_LEN) /* max pkt */
#define CXGBE_MIN_RX_BUFSIZE RTE_ETHER_MIN_MTU /* min buf size */
#define CXGBE_MAX_RX_PKTLEN (9000 + RTE_ETHER_HDR_LEN + \
RTE_ETHER_CRC_LEN) /* max pkt */
/* Max poll time is 100 * 100msec = 10 sec */
#define CXGBE_LINK_STATUS_POLL_MS 100 /* 100ms */

@ -101,6 +101,7 @@
#define PTR_ALIGN(p, a) ((typeof(p))CXGBE_ALIGN((unsigned long)(p), (a)))
#define VLAN_HLEN 4
#define ETHER_ADDR_LEN 6
#define rmb() rte_rmb() /* dpdk rte provided rmb */
#define wmb() rte_wmb() /* dpdk rte provided wmb */

@ -277,16 +277,16 @@ int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
struct adapter *adapter = pi->adapter;
struct rte_eth_dev_info dev_info;
int err;
uint16_t new_mtu = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
uint16_t new_mtu = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
cxgbe_dev_info_get(eth_dev, &dev_info);
/* Must accommodate at least ETHER_MIN_MTU */
if ((new_mtu < ETHER_MIN_MTU) || (new_mtu > dev_info.max_rx_pktlen))
/* Must accommodate at least RTE_ETHER_MIN_MTU */
if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > dev_info.max_rx_pktlen)
return -EINVAL;
/* set to jumbo mode if needed */
if (new_mtu > ETHER_MAX_LEN)
if (new_mtu > RTE_ETHER_MAX_LEN)
eth_dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
@ -587,7 +587,7 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
cxgbe_dev_info_get(eth_dev, &dev_info);
/* Must accommodate at least ETHER_MIN_MTU */
/* Must accommodate at least RTE_ETHER_MIN_MTU */
if ((pkt_len < dev_info.min_rx_bufsize) ||
(pkt_len > dev_info.max_rx_pktlen)) {
dev_err(adap, "%s: max pkt len must be > %d and <= %d\n",
@ -626,7 +626,7 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
rxq->fl.size = temp_nb_desc;
/* Set to jumbo mode if necessary */
if (pkt_len > ETHER_MAX_LEN)
if (pkt_len > RTE_ETHER_MAX_LEN)
eth_dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else

@ -102,7 +102,7 @@ struct ch_filter_specification {
uint32_t eport:2; /* egress port to switch packet out */
uint32_t swapmac:1; /* swap SMAC/DMAC for loopback packet */
uint32_t newvlan:2; /* rewrite VLAN Tag */
uint8_t dmac[ETHER_ADDR_LEN]; /* new destination MAC address */
uint8_t dmac[RTE_ETHER_ADDR_LEN]; /* new destination MAC address */
uint16_t vlan; /* VLAN Tag to insert */
/*

@ -233,7 +233,7 @@ ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,
item, "ttl/tos are not supported");
fs->type = FILTER_TYPE_IPV4;
CXGBE_FILL_FS(ETHER_TYPE_IPv4, 0xffff, ethtype);
CXGBE_FILL_FS(RTE_ETHER_TYPE_IPv4, 0xffff, ethtype);
if (!val)
return 0; /* ipv4 wild card */
@ -262,7 +262,7 @@ ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
"tc/flow/hop are not supported");
fs->type = FILTER_TYPE_IPV6;
CXGBE_FILL_FS(ETHER_TYPE_IPv6, 0xffff, ethtype);
CXGBE_FILL_FS(RTE_ETHER_TYPE_IPv6, 0xffff, ethtype);
if (!val)
return 0; /* ipv6 wild card */
@ -448,7 +448,7 @@ ch_rte_parse_atype_switch(const struct rte_flow_action *a,
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
pushvlan = (const struct rte_flow_action_of_push_vlan *)
a->conf;
if (pushvlan->ethertype != ETHER_TYPE_VLAN)
if (pushvlan->ethertype != RTE_ETHER_TYPE_VLAN)
return rte_flow_error_set(e, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, a,
"only ethertype 0x8100 "

@ -1348,7 +1348,7 @@ int cxgbe_link_start(struct port_info *pi)
int ret;
mtu = pi->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
(ETHER_HDR_LEN + ETHER_CRC_LEN);
(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN);
conf_offloads = pi->eth_dev->data->dev_conf.rxmode.offloads;
@ -1841,7 +1841,7 @@ allocate_mac:
rte_eth_copy_pci_info(pi->eth_dev, adapter->pdev);
pi->eth_dev->data->mac_addrs = rte_zmalloc(name,
ETHER_ADDR_LEN, 0);
RTE_ETHER_ADDR_LEN, 0);
if (!pi->eth_dev->data->mac_addrs) {
dev_err(adapter, "%s: Mem allocation failed for storing mac addr, aborting\n",
__func__);

@ -245,7 +245,7 @@ allocate_mac:
rte_eth_copy_pci_info(pi->eth_dev, adapter->pdev);
pi->eth_dev->data->mac_addrs = rte_zmalloc(name,
ETHER_ADDR_LEN, 0);
RTE_ETHER_ADDR_LEN, 0);
if (!pi->eth_dev->data->mac_addrs) {
dev_err(adapter, "%s: Mem allocation failed for storing mac addr, aborting\n",
__func__);

@ -81,10 +81,10 @@ static int write_l2e(struct rte_eth_dev *dev, struct l2t_entry *e, int sync,
V_L2T_W_NOREPLY(!sync));
req->l2t_idx = cpu_to_be16(l2t_idx);
req->vlan = cpu_to_be16(e->vlan);
rte_memcpy(req->dst_mac, e->dmac, ETHER_ADDR_LEN);
rte_memcpy(req->dst_mac, e->dmac, RTE_ETHER_ADDR_LEN);
if (loopback)
memset(req->dst_mac, 0, ETHER_ADDR_LEN);
memset(req->dst_mac, 0, RTE_ETHER_ADDR_LEN);
t4_mgmt_tx(ctrlq, mbuf);
@ -116,7 +116,7 @@ static struct l2t_entry *find_or_alloc_l2e(struct l2t_data *d, u16 vlan,
first_free = e;
} else {
if (e->state == L2T_STATE_SWITCHING) {
if ((!memcmp(e->dmac, dmac, ETHER_ADDR_LEN)) &&
if ((!memcmp(e->dmac, dmac, RTE_ETHER_ADDR_LEN)) &&
e->vlan == vlan && e->lport == port)
goto exists;
}
@ -154,7 +154,7 @@ static struct l2t_entry *t4_l2t_alloc_switching(struct rte_eth_dev *dev,
e->state = L2T_STATE_SWITCHING;
e->vlan = vlan;
e->lport = port;
rte_memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);
rte_memcpy(e->dmac, eth_addr, RTE_ETHER_ADDR_LEN);
rte_atomic32_set(&e->refcnt, 1);
ret = write_l2e(dev, e, 0, !L2T_LPBK, !L2T_ARPMISS);
if (ret < 0)

@ -28,7 +28,7 @@ struct l2t_entry {
u16 idx; /* entry index within in-memory table */
u16 vlan; /* VLAN TCI (id: bits 0-11, prio: 13-15 */
u8 lport; /* destination port */
u8 dmac[ETHER_ADDR_LEN]; /* destination MAC address */
u8 dmac[RTE_ETHER_ADDR_LEN]; /* destination MAC address */
rte_spinlock_t lock; /* entry lock */
rte_atomic32_t refcnt; /* entry reference count */
};

@ -8,8 +8,8 @@
static inline bool
match_entry(struct mps_tcam_entry *entry, const u8 *eth_addr, const u8 *mask)
{
if (!memcmp(eth_addr, entry->eth_addr, ETHER_ADDR_LEN) &&
!memcmp(mask, entry->mask, ETHER_ADDR_LEN))
if (!memcmp(eth_addr, entry->eth_addr, RTE_ETHER_ADDR_LEN) &&
!memcmp(mask, entry->mask, RTE_ETHER_ADDR_LEN))
return true;
return false;
}
@ -95,8 +95,8 @@ int cxgbe_mpstcam_alloc(struct port_info *pi, const u8 *eth_addr,
/* Fill in the new values */
entry = &mpstcam->entry[ret];
memcpy(entry->eth_addr, eth_addr, ETHER_ADDR_LEN);
memcpy(entry->mask, mask, ETHER_ADDR_LEN);
memcpy(entry->eth_addr, eth_addr, RTE_ETHER_ADDR_LEN);
memcpy(entry->mask, mask, RTE_ETHER_ADDR_LEN);
rte_atomic32_set(&entry->refcnt, 1);
entry->state = MPS_ENTRY_USED;
@ -139,7 +139,7 @@ int cxgbe_mpstcam_modify(struct port_info *pi, int idx, const u8 *addr)
/* idx can now be different from what user provided */
entry = &mpstcam->entry[idx];
memcpy(entry->eth_addr, addr, ETHER_ADDR_LEN);
memcpy(entry->eth_addr, addr, RTE_ETHER_ADDR_LEN);
/* NOTE: we have considered the case that idx returned by t4_change_mac
* will be different from the user provided value only if user
* provided value is -1
@ -161,8 +161,8 @@ int cxgbe_mpstcam_modify(struct port_info *pi, int idx, const u8 *addr)
*/
static inline void reset_mpstcam_entry(struct mps_tcam_entry *entry)
{
memset(entry->eth_addr, 0, ETHER_ADDR_LEN);
memset(entry->mask, 0, ETHER_ADDR_LEN);
memset(entry->eth_addr, 0, RTE_ETHER_ADDR_LEN);
memset(entry->mask, 0, RTE_ETHER_ADDR_LEN);
rte_atomic32_clear(&entry->refcnt);
entry->state = MPS_ENTRY_UNUSED;
}

@ -24,8 +24,8 @@ struct mps_tcam_entry {
u16 idx;
/* add data here which uniquely defines an entry */
u8 eth_addr[ETHER_ADDR_LEN];
u8 mask[ETHER_ADDR_LEN];
u8 eth_addr[RTE_ETHER_ADDR_LEN];
u8 mask[RTE_ETHER_ADDR_LEN];
struct mpstcam_table *mpstcam; /* backptr */
rte_atomic32_t refcnt;

@ -73,7 +73,7 @@ static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
{
struct sge *s = &adapter->sge;
return CXGBE_ALIGN(s->pktshift + ETHER_HDR_LEN + VLAN_HLEN + mtu,
return CXGBE_ALIGN(s->pktshift + RTE_ETHER_HDR_LEN + VLAN_HLEN + mtu,
s->fl_align);
}
@ -1128,7 +1128,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
* The chip min packet length is 10 octets but play safe and reject
* anything shorter than an Ethernet header.
*/
if (unlikely(m->pkt_len < ETHER_HDR_LEN)) {
if (unlikely(m->pkt_len < RTE_ETHER_HDR_LEN)) {
out_free:
rte_pktmbuf_free(m);
return 0;
@ -1145,7 +1145,8 @@ out_free:
/* align the end of coalesce WR to a 512 byte boundary */
txq->q.coalesce.max = (8 - (txq->q.pidx & 7)) * 8;
if (!((m->ol_flags & PKT_TX_TCP_SEG) || (m->pkt_len > ETHER_MAX_LEN))) {
if (!((m->ol_flags & PKT_TX_TCP_SEG) ||
m->pkt_len > RTE_ETHER_MAX_LEN)) {
if (should_tx_packet_coalesce(txq, mbuf, &cflits, adap)) {
if (unlikely(map_mbuf(mbuf, addr) < 0)) {
dev_warn(adap, "%s: mapping err for coalesce\n",
@ -1230,7 +1231,7 @@ out_free:
v6 = (m->ol_flags & PKT_TX_IPV6) != 0;
l3hdr_len = m->l3_len;
l4hdr_len = m->l4_len;
eth_xtra_len = m->l2_len - ETHER_HDR_LEN;
eth_xtra_len = m->l2_len - RTE_ETHER_HDR_LEN;
len += sizeof(*lso);
wr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ?
FW_ETH_TX_PKT_WR :

@ -146,13 +146,13 @@ static int
dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
struct dpaa_if *dpaa_intf = dev->data->dev_private;
uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
+ VLAN_TAG_SIZE;
uint32_t buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
PMD_INIT_FUNC_TRACE();
if (mtu < ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
return -EINVAL;
/*
* Refuse mtu that requires the support of scattered packets
@ -172,7 +172,7 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return -EINVAL;
}
if (frame_size > ETHER_MAX_LEN)
if (frame_size > RTE_ETHER_MAX_LEN)
dev->data->dev_conf.rxmode.offloads &=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
@ -230,7 +230,7 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
fman_if_set_maxfrm(dpaa_intf->fif, max_len);
dev->data->mtu = max_len
- ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE;
- RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE;
}
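The jumbo toggle in dpaa_mtu_set() above follows a pattern common to the PMDs in this patch; a sketch of the idea (the helper name is ours):

    #include <rte_ethdev.h>
    #include <rte_ether.h>

    /* Frames longer than the standard 1518-byte maximum need the
     * jumbo-frame Rx offload; shorter ones clear it. */
    static void toggle_jumbo(struct rte_eth_dev *dev, uint32_t frame_size)
    {
            if (frame_size > RTE_ETHER_MAX_LEN)
                    dev->data->dev_conf.rxmode.offloads |=
                            DEV_RX_OFFLOAD_JUMBO_FRAME;
            else
                    dev->data->dev_conf.rxmode.offloads &=
                            ~DEV_RX_OFFLOAD_JUMBO_FRAME;
    }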
|
||||
|
||||
if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
|
||||
@ -1364,11 +1364,11 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
|
||||
|
||||
/* Allocate memory for storing MAC addresses */
|
||||
eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
|
||||
ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
|
||||
RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
|
||||
if (eth_dev->data->mac_addrs == NULL) {
|
||||
DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
|
||||
"store MAC addresses",
|
||||
ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
|
||||
RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
|
||||
ret = -ENOMEM;
|
||||
goto free_tx;
|
||||
}
|
||||
@ -1396,7 +1396,7 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
|
||||
fman_if_stats_reset(fman_intf);
|
||||
/* Disable SG by default */
|
||||
fman_if_set_sg(fman_intf, 0);
|
||||
fman_if_set_maxfrm(fman_intf, ETHER_MAX_LEN + VLAN_TAG_SIZE);
|
||||
fman_if_set_maxfrm(fman_intf, RTE_ETHER_MAX_LEN + VLAN_TAG_SIZE);
|
||||
|
||||
return 0;
|
||||
|
||||
|
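The MTU handlers touched above all follow one pattern: convert the requested MTU to a worst-case wire frame size, bounds-check it, and flip the jumbo offload flag accordingly. A condensed sketch of that pattern with the renamed macros (VLAN_TAG_SIZE and MY_MAX_RX_PKT_LEN are local stand-ins, not taken from any one driver):

#include <rte_ethdev.h>
#include <rte_ether.h>

#define VLAN_TAG_SIZE       4       /* stand-in for the driver macro */
#define MY_MAX_RX_PKT_LEN   10240   /* hypothetical device limit */

static int
my_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
        /* MTU counts only the payload; add L2 header, CRC and one VLAN tag */
        uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
                              VLAN_TAG_SIZE;

        if (mtu < RTE_ETHER_MIN_MTU || frame_size > MY_MAX_RX_PKT_LEN)
                return -EINVAL;

        /* beyond a standard 1518-byte frame, the jumbo offload is needed */
        if (frame_size > RTE_ETHER_MAX_LEN)
                dev->data->dev_conf.rxmode.offloads |=
                        DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                dev->data->dev_conf.rxmode.offloads &=
                        ~DEV_RX_OFFLOAD_JUMBO_FRAME;

        dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
        return 0;
}
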
@ -222,10 +222,10 @@ static inline void dpaa_checksum(struct rte_mbuf *mbuf)
struct tcp_hdr *tcp_hdr = (struct tcp_hdr *)(l3_hdr +
mbuf->l3_len);
tcp_hdr->cksum = 0;
if (eth_hdr->ether_type == htons(ETHER_TYPE_IPv4))
if (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_IPv4))
tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
tcp_hdr);
else /* assume ethertype == ETHER_TYPE_IPv6 */
else /* assume ethertype == RTE_ETHER_TYPE_IPv6 */
tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
tcp_hdr);
} else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) ==
@ -233,10 +233,10 @@ static inline void dpaa_checksum(struct rte_mbuf *mbuf)
struct udp_hdr *udp_hdr = (struct udp_hdr *)(l3_hdr +
mbuf->l3_len);
udp_hdr->dgram_cksum = 0;
if (eth_hdr->ether_type == htons(ETHER_TYPE_IPv4))
if (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_IPv4))
udp_hdr->dgram_cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
udp_hdr);
else /* assume ethertype == ETHER_TYPE_IPv6 */
else /* assume ethertype == RTE_ETHER_TYPE_IPv6 */
udp_hdr->dgram_cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
udp_hdr);
}

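The checksum helper above dispatches on the frame's ethertype, which is big-endian on the wire, so the host-order RTE_ETHER_TYPE_* constant is converted exactly once (htons() in the driver, rte_cpu_to_be_16() in DPDK style). A standalone sketch of the same dispatch, assuming a plain untunneled frame with an rte_ether_hdr at the start of the mbuf:

#include <rte_byteorder.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>

/* software L4 checksum for an IPv4/IPv6 frame; 0 if the ethertype
 * is neither */
static uint16_t
l4_cksum_of(struct rte_mbuf *m, const void *l3_hdr, const void *l4_hdr)
{
        const struct rte_ether_hdr *eth =
                rte_pktmbuf_mtod(m, const struct rte_ether_hdr *);

        if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4))
                return rte_ipv4_udptcp_cksum(l3_hdr, l4_hdr);
        if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6))
                return rte_ipv6_udptcp_cksum(l3_hdr, l4_hdr);
        return 0;
}
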
@ -1086,7 +1086,7 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
int ret;
struct dpaa2_dev_priv *priv = dev->data->dev_private;
struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
+ VLAN_TAG_SIZE;

PMD_INIT_FUNC_TRACE();
@ -1097,10 +1097,10 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}

/* check that mtu is within the allowed range */
if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN))
if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA2_MAX_RX_PKT_LEN)
return -EINVAL;

if (frame_size > ETHER_MAX_LEN)
if (frame_size > RTE_ETHER_MAX_LEN)
dev->data->dev_conf.rxmode.offloads &=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
@ -2186,11 +2186,11 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
* can add MAC entries when rte_eth_dev_mac_addr_add is called.
*/
eth_dev->data->mac_addrs = rte_zmalloc("dpni",
ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
RTE_ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
if (eth_dev->data->mac_addrs == NULL) {
DPAA2_PMD_ERR(
"Failed to allocate %d bytes needed to store MAC addresses",
ETHER_ADDR_LEN * attr.mac_filter_entries);
RTE_ETHER_ADDR_LEN * attr.mac_filter_entries);
ret = -ENOMEM;
goto init_err;
}

@ -92,7 +92,8 @@
* The overhead from MTU to max frame size.
* Considering VLAN so a tag needs to be counted.
*/
#define E1000_ETH_OVERHEAD (ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE)
#define E1000_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \
VLAN_TAG_SIZE)

/*
* Maximum number of Ring Descriptors.
@ -155,7 +156,7 @@ struct e1000_vfta {
*/
#define E1000_MAX_VF_MC_ENTRIES 30
struct e1000_vf_info {
uint8_t vf_mac_addresses[ETHER_ADDR_LEN];
uint8_t vf_mac_addresses[RTE_ETHER_ADDR_LEN];
uint16_t vf_mc_hashes[E1000_MAX_VF_MC_ENTRIES];
uint16_t num_vf_mc_hashes;
uint16_t default_vf_vlan_id;

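Drivers wrap the per-frame cost in an *_ETH_OVERHEAD macro like the one above so that MTU and max frame length convert both ways with a single constant. A small sketch under that assumption (ETH_OVERHEAD here is a local stand-in for E1000_ETH_OVERHEAD and friends):

#include <stdint.h>
#include <rte_ether.h>

/* what the driver adds on top of the MTU on the wire:
 * L2 header + CRC + one VLAN tag */
#define ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 4)

static uint32_t mtu_to_frame_len(uint16_t mtu)   { return mtu + ETH_OVERHEAD; }
static uint16_t frame_len_to_mtu(uint32_t frame) { return frame - ETH_OVERHEAD; }
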
@ -284,12 +284,12 @@ eth_em_dev_init(struct rte_eth_dev *eth_dev)
}

/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc("e1000", ETHER_ADDR_LEN *
eth_dev->data->mac_addrs = rte_zmalloc("e1000", RTE_ETHER_ADDR_LEN *
hw->mac.rar_entry_count, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
"store MAC addresses",
ETHER_ADDR_LEN * hw->mac.rar_entry_count);
RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
return -ENOMEM;
}

@ -576,7 +576,7 @@ eth_em_start(struct rte_eth_dev *dev)
return -EIO;
}

E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN);
E1000_WRITE_REG(hw, E1000_VET, RTE_ETHER_TYPE_VLAN);

/* Configure for OS presence */
em_init_manageability(hw);
@ -821,7 +821,8 @@ em_hardware_init(struct e1000_hw *hw)
*/
rx_buf_size = em_get_rx_buffer_size(hw);

hw->fc.high_water = rx_buf_size - PMD_ROUNDUP(ETHER_MAX_LEN * 2, 1024);
hw->fc.high_water = rx_buf_size -
PMD_ROUNDUP(RTE_ETHER_MAX_LEN * 2, 1024);
hw->fc.low_water = hw->fc.high_water - 1500;

if (hw->mac.type == e1000_80003es2lan)
@ -1037,7 +1038,7 @@ em_get_max_pktlen(struct rte_eth_dev *dev)
return 0x1000;
/* Adapters that do not support jumbo frames */
case e1000_ich8lan:
return ETHER_MAX_LEN;
return RTE_ETHER_MAX_LEN;
default:
return MAX_JUMBO_FRAME_SIZE;
}
@ -1697,7 +1698,7 @@ eth_em_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);

/* At least reserve one Ethernet frame for watermark */
max_high_water = rx_buf_size - ETHER_MAX_LEN;
max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;
if ((fc_conf->high_water > max_high_water) ||
(fc_conf->high_water < fc_conf->low_water)) {
PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
@ -1747,7 +1748,7 @@ eth_em_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
static void
eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index)
{
uint8_t addr[ETHER_ADDR_LEN];
uint8_t addr[RTE_ETHER_ADDR_LEN];
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

memset(addr, 0, sizeof(addr));
@ -1773,10 +1774,11 @@ eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
uint32_t rctl;

eth_em_infos_get(dev, &dev_info);
frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE;
frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
VLAN_TAG_SIZE;

/* check that mtu is within the allowed range */
if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
return -EINVAL;

/* refuse mtu that requires the support of scattered packets when this
@ -1789,7 +1791,7 @@ eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
rctl = E1000_READ_REG(hw, E1000_RCTL);

/* switch to jumbo mode if needed */
if (frame_size > ETHER_MAX_LEN) {
if (frame_size > RTE_ETHER_MAX_LEN) {
dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
rctl |= E1000_RCTL_LPE;

|
||||
*/
|
||||
rxm->next = NULL;
|
||||
if (unlikely(rxq->crc_len > 0)) {
|
||||
first_seg->pkt_len -= ETHER_CRC_LEN;
|
||||
if (data_len <= ETHER_CRC_LEN) {
|
||||
first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
|
||||
if (data_len <= RTE_ETHER_CRC_LEN) {
|
||||
rte_pktmbuf_free_seg(rxm);
|
||||
first_seg->nb_segs--;
|
||||
last_seg->data_len = (uint16_t)
|
||||
(last_seg->data_len -
|
||||
(ETHER_CRC_LEN - data_len));
|
||||
(RTE_ETHER_CRC_LEN - data_len));
|
||||
last_seg->next = NULL;
|
||||
} else
|
||||
rxm->data_len =
|
||||
(uint16_t) (data_len - ETHER_CRC_LEN);
|
||||
rxm->data_len = (uint16_t)
|
||||
(data_len - RTE_ETHER_CRC_LEN);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1368,7 +1368,7 @@ em_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
|
||||
DEV_RX_OFFLOAD_TCP_CKSUM |
|
||||
DEV_RX_OFFLOAD_KEEP_CRC |
|
||||
DEV_RX_OFFLOAD_SCATTER;
|
||||
if (max_rx_pktlen > ETHER_MAX_LEN)
|
||||
if (max_rx_pktlen > RTE_ETHER_MAX_LEN)
|
||||
rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
|
||||
return rx_offload_capa;
|
||||
@ -1463,7 +1463,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
|
||||
rxq->queue_id = queue_idx;
|
||||
rxq->port_id = dev->data->port_id;
|
||||
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
|
||||
rxq->crc_len = ETHER_CRC_LEN;
|
||||
rxq->crc_len = RTE_ETHER_CRC_LEN;
|
||||
else
|
||||
rxq->crc_len = 0;
|
||||
|
||||
@ -1799,7 +1799,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
|
||||
* call to configure
|
||||
*/
|
||||
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
|
||||
rxq->crc_len = ETHER_CRC_LEN;
|
||||
rxq->crc_len = RTE_ETHER_CRC_LEN;
|
||||
else
|
||||
rxq->crc_len = 0;
|
||||
|
||||
@ -1832,7 +1832,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
|
||||
* one buffer.
|
||||
*/
|
||||
if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ||
|
||||
rctl_bsize < ETHER_MAX_LEN) {
|
||||
rctl_bsize < RTE_ETHER_MAX_LEN) {
|
||||
if (!dev->data->scattered_rx)
|
||||
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
|
||||
dev->rx_pkt_burst =
|
||||
|
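When KEEP_CRC is enabled, the scattered-Rx paths in this diff shave the 4 CRC bytes off the end of an mbuf chain, possibly dropping a final segment that holds nothing but CRC. A standalone sketch of that trim, assuming first_seg..last_seg form the already-linked chain and tail is the just-completed final segment:

#include <rte_ether.h>
#include <rte_mbuf.h>

static void
strip_crc(struct rte_mbuf *first_seg, struct rte_mbuf *last_seg,
          struct rte_mbuf *tail, uint16_t tail_len)
{
        first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
        if (tail_len <= RTE_ETHER_CRC_LEN) {
                /* the last segment is all CRC: drop it and trim the
                 * remainder of the CRC from the previous segment */
                rte_pktmbuf_free_seg(tail);
                first_seg->nb_segs--;
                last_seg->data_len = (uint16_t)(last_seg->data_len -
                                (RTE_ETHER_CRC_LEN - tail_len));
                last_seg->next = NULL;
        } else {
                tail->data_len = (uint16_t)(tail_len - RTE_ETHER_CRC_LEN);
        }
}
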
@ -830,11 +830,11 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev)

/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc("e1000",
ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
"store MAC addresses",
ETHER_ADDR_LEN * hw->mac.rar_entry_count);
RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
error = -ENOMEM;
goto err_late;
}
@ -1028,13 +1028,13 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
diag = hw->mac.ops.reset_hw(hw);

/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc("igbvf", ETHER_ADDR_LEN *
eth_dev->data->mac_addrs = rte_zmalloc("igbvf", RTE_ETHER_ADDR_LEN *
hw->mac.rar_entry_count, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
"Failed to allocate %d bytes needed to store MAC "
"addresses",
ETHER_ADDR_LEN * hw->mac.rar_entry_count);
RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
return -ENOMEM;
}

@ -1322,7 +1322,8 @@ eth_igb_start(struct rte_eth_dev *dev)
}
adapter->stopped = 0;

E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
E1000_WRITE_REG(hw, E1000_VET,
RTE_ETHER_TYPE_VLAN << 16 | RTE_ETHER_TYPE_VLAN);

ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
/* Set PF Reset Done bit so PF/VF Mail Ops can work */
@ -1689,7 +1690,7 @@ igb_hardware_init(struct e1000_hw *hw)
*/
rx_buf_size = igb_get_rx_buffer_size(hw);

hw->fc.high_water = rx_buf_size - (ETHER_MAX_LEN * 2);
hw->fc.high_water = rx_buf_size - (RTE_ETHER_MAX_LEN * 2);
hw->fc.low_water = hw->fc.high_water - 1500;
hw->fc.pause_time = IGB_FC_PAUSE_TIME;
hw->fc.send_xon = 1;
@ -1708,7 +1709,8 @@ igb_hardware_init(struct e1000_hw *hw)
if (diag < 0)
return diag;

E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
E1000_WRITE_REG(hw, E1000_VET,
RTE_ETHER_TYPE_VLAN << 16 | RTE_ETHER_TYPE_VLAN);
e1000_get_phy_info(hw);
e1000_check_for_link(hw);

@ -1772,10 +1774,10 @@ igb_read_stats_registers(struct e1000_hw *hw, struct e1000_hw_stats *stats)
/* Workaround CRC bytes included in size, take away 4 bytes/packet */
stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
stats->gorc -= (stats->gprc - old_gprc) * ETHER_CRC_LEN;
stats->gorc -= (stats->gprc - old_gprc) * RTE_ETHER_CRC_LEN;
stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);
stats->gotc -= (stats->gptc - old_gptc) * ETHER_CRC_LEN;
stats->gotc -= (stats->gptc - old_gptc) * RTE_ETHER_CRC_LEN;

stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
stats->ruc += E1000_READ_REG(hw, E1000_RUC);
@ -1788,10 +1790,10 @@ igb_read_stats_registers(struct e1000_hw *hw, struct e1000_hw_stats *stats)

stats->tor += E1000_READ_REG(hw, E1000_TORL);
stats->tor += ((uint64_t)E1000_READ_REG(hw, E1000_TORH) << 32);
stats->tor -= (stats->tpr - old_tpr) * ETHER_CRC_LEN;
stats->tor -= (stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN;
stats->tot += E1000_READ_REG(hw, E1000_TOTL);
stats->tot += ((uint64_t)E1000_READ_REG(hw, E1000_TOTH) << 32);
stats->tot -= (stats->tpt - old_tpt) * ETHER_CRC_LEN;
stats->tot -= (stats->tpt - old_tpt) * RTE_ETHER_CRC_LEN;

stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
@ -1825,10 +1827,10 @@ igb_read_stats_registers(struct e1000_hw *hw, struct e1000_hw_stats *stats)
stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL);
stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32);
stats->hgorc -= (stats->rpthc - old_rpthc) * ETHER_CRC_LEN;
stats->hgorc -= (stats->rpthc - old_rpthc) * RTE_ETHER_CRC_LEN;
stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL);
stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32);
stats->hgotc -= (stats->hgptc - old_hgptc) * ETHER_CRC_LEN;
stats->hgotc -= (stats->hgptc - old_hgptc) * RTE_ETHER_CRC_LEN;
stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);
@ -2288,7 +2290,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
ETH_LINK_SPEED_1G;

dev_info->max_mtu = dev_info->max_rx_pktlen - E1000_ETH_OVERHEAD;
dev_info->min_mtu = ETHER_MIN_MTU;
dev_info->min_mtu = RTE_ETHER_MIN_MTU;

}

@ -3081,7 +3083,7 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);

/* At least reserve one Ethernet frame for watermark */
max_high_water = rx_buf_size - ETHER_MAX_LEN;
max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;
if ((fc_conf->high_water > max_high_water) ||
(fc_conf->high_water < fc_conf->low_water)) {
PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
@ -3137,7 +3139,7 @@ eth_igb_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
static void
eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
{
uint8_t addr[ETHER_ADDR_LEN];
uint8_t addr[RTE_ETHER_ADDR_LEN];
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

memset(addr, 0, sizeof(addr));
@ -4485,8 +4487,8 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
eth_igb_infos_get(dev, &dev_info);

/* check that mtu is within the allowed range */
if ((mtu < ETHER_MIN_MTU) ||
(frame_size > dev_info.max_rx_pktlen))
if (mtu < RTE_ETHER_MIN_MTU ||
frame_size > dev_info.max_rx_pktlen)
return -EINVAL;

/* refuse mtu that requires the support of scattered packets when this
@ -4498,7 +4500,7 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
rctl = E1000_READ_REG(hw, E1000_RCTL);

/* switch to jumbo mode if needed */
if (frame_size > ETHER_MAX_LEN) {
if (frame_size > RTE_ETHER_MAX_LEN) {
dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
rctl |= E1000_RCTL_LPE;
@ -4744,8 +4746,8 @@ igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
uint32_t etqf = 0;
int ret;

if (filter->ether_type == ETHER_TYPE_IPv4 ||
filter->ether_type == ETHER_TYPE_IPv6) {
if (filter->ether_type == RTE_ETHER_TYPE_IPv4 ||
filter->ether_type == RTE_ETHER_TYPE_IPv6) {
PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
" ethertype filter.", filter->ether_type);
return -EINVAL;
@ -5156,7 +5158,7 @@ igb_timesync_enable(struct rte_eth_dev *dev)

/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588),
(ETHER_TYPE_1588 |
(RTE_ETHER_TYPE_1588 |
E1000_ETQF_FILTER_ENABLE |
E1000_ETQF_1588));

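Both the igb and i40e paths in this diff reject ethertype filters that claim IPv4 or IPv6, since those flows are steered by the L3 filter stages instead. A condensed sketch of that guard, with the error reporting trimmed to a return code:

#include <rte_ether.h>

static int
ethertype_filter_ok(uint16_t ether_type)
{
        /* IPv4/IPv6 may not be claimed by an ethertype filter */
        if (ether_type == RTE_ETHER_TYPE_IPv4 ||
            ether_type == RTE_ETHER_TYPE_IPv6)
                return -EINVAL;
        return 0;
}
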
@ -700,8 +700,8 @@ igb_parse_ethertype_filter(struct rte_eth_dev *dev,
}
}

if (filter->ether_type == ETHER_TYPE_IPv4 ||
filter->ether_type == ETHER_TYPE_IPv6) {
if (filter->ether_type == RTE_ETHER_TYPE_IPv4 ||
filter->ether_type == RTE_ETHER_TYPE_IPv6) {
memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,

@ -37,7 +37,7 @@ dev_num_vf(struct rte_eth_dev *eth_dev)
static inline
int igb_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)
{
unsigned char vf_mac_addr[ETHER_ADDR_LEN];
unsigned char vf_mac_addr[RTE_ETHER_ADDR_LEN];
struct e1000_vf_info *vfinfo =
*E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
uint16_t vfn;
@ -46,7 +46,7 @@ int igb_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)
rte_eth_random_addr(vf_mac_addr);
/* keep the random address as default */
memcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr,
ETHER_ADDR_LEN);
RTE_ETHER_ADDR_LEN);
}

return 0;
@ -290,7 +290,7 @@ igb_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf)

/* reply to reset with ack and vf mac address */
msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
rte_memcpy(new_mac, vf_mac, ETHER_ADDR_LEN);
rte_memcpy(new_mac, vf_mac, RTE_ETHER_ADDR_LEN);
e1000_write_mbx(hw, msgbuf, 3, vf);

return 0;
@ -400,10 +400,11 @@ igb_vf_set_rlpml(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t rlpml = msgbuf[1] & E1000_VMOLR_RLPML_MASK;
uint32_t max_frame = rlpml + ETHER_HDR_LEN + ETHER_CRC_LEN;
uint32_t max_frame = rlpml + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
uint32_t vmolr;

if ((max_frame < ETHER_MIN_LEN) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
if (max_frame < RTE_ETHER_MIN_LEN ||
max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)
return -1;

vmolr = E1000_READ_REG(hw, E1000_VMOLR(vf));

|
||||
*/
|
||||
rxm->next = NULL;
|
||||
if (unlikely(rxq->crc_len > 0)) {
|
||||
first_seg->pkt_len -= ETHER_CRC_LEN;
|
||||
if (data_len <= ETHER_CRC_LEN) {
|
||||
first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
|
||||
if (data_len <= RTE_ETHER_CRC_LEN) {
|
||||
rte_pktmbuf_free_seg(rxm);
|
||||
first_seg->nb_segs--;
|
||||
last_seg->data_len = (uint16_t)
|
||||
(last_seg->data_len -
|
||||
(ETHER_CRC_LEN - data_len));
|
||||
(RTE_ETHER_CRC_LEN - data_len));
|
||||
last_seg->next = NULL;
|
||||
} else
|
||||
rxm->data_len =
|
||||
(uint16_t) (data_len - ETHER_CRC_LEN);
|
||||
rxm->data_len = (uint16_t)
|
||||
(data_len - RTE_ETHER_CRC_LEN);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1725,7 +1725,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
|
||||
queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
|
||||
rxq->port_id = dev->data->port_id;
|
||||
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
|
||||
rxq->crc_len = ETHER_CRC_LEN;
|
||||
rxq->crc_len = RTE_ETHER_CRC_LEN;
|
||||
else
|
||||
rxq->crc_len = 0;
|
||||
|
||||
@ -2378,7 +2378,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
|
||||
* call to configure
|
||||
*/
|
||||
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
|
||||
rxq->crc_len = ETHER_CRC_LEN;
|
||||
rxq->crc_len = RTE_ETHER_CRC_LEN;
|
||||
else
|
||||
rxq->crc_len = 0;
|
||||
|
||||
|
@ -198,7 +198,7 @@ struct ena_adapter {
|
||||
|
||||
int id_number;
|
||||
char name[ENA_NAME_MAX_LEN];
|
||||
u8 mac_addr[ETHER_ADDR_LEN];
|
||||
u8 mac_addr[RTE_ETHER_ADDR_LEN];
|
||||
|
||||
void *regs;
|
||||
void *dev_mem_base;
|
||||
|
@ -215,8 +215,8 @@ struct enetc_hw {
|
||||
};
|
||||
|
||||
struct enetc_eth_mac_info {
|
||||
uint8_t addr[ETHER_ADDR_LEN];
|
||||
uint8_t perm_addr[ETHER_ADDR_LEN];
|
||||
uint8_t addr[RTE_ETHER_ADDR_LEN];
|
||||
uint8_t perm_addr[RTE_ETHER_ADDR_LEN];
|
||||
uint8_t get_link_status;
|
||||
};
|
||||
|
||||
|
@ -417,7 +417,7 @@ enetc_rx_queue_setup(struct rte_eth_dev *dev,
|
||||
}
|
||||
|
||||
rx_ring->crc_len = (uint8_t)((rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
|
||||
ETHER_CRC_LEN : 0);
|
||||
RTE_ETHER_CRC_LEN : 0);
|
||||
|
||||
return 0;
|
||||
fail:
|
||||
@ -595,7 +595,7 @@ enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
struct enetc_eth_hw *hw =
|
||||
ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
|
||||
struct enetc_hw *enetc_hw = &hw->hw;
|
||||
uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
|
||||
uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
|
||||
|
||||
/* check that mtu is within the allowed range */
|
||||
if (mtu < ENETC_MAC_MINFRM_SIZE || frame_size > ENETC_MAC_MAXFRM_SIZE)
|
||||
@ -612,7 +612,7 @@ enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (frame_size > ETHER_MAX_LEN)
|
||||
if (frame_size > RTE_ETHER_MAX_LEN)
|
||||
dev->data->dev_conf.rxmode.offloads &=
|
||||
DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
else
|
||||
@ -654,7 +654,8 @@ enetc_dev_configure(struct rte_eth_dev *dev)
|
||||
ENETC_MAC_MAXFRM_SIZE);
|
||||
enetc_port_wr(enetc_hw, ENETC_PTXMBAR,
|
||||
2 * ENETC_MAC_MAXFRM_SIZE);
|
||||
dev->data->mtu = ETHER_MAX_LEN - ETHER_HDR_LEN - ETHER_CRC_LEN;
|
||||
dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
|
||||
RTE_ETHER_CRC_LEN;
|
||||
}
|
||||
|
||||
if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
|
||||
@ -830,11 +831,12 @@ enetc_dev_init(struct rte_eth_dev *eth_dev)
|
||||
}
|
||||
|
||||
/* Allocate memory for storing MAC addresses */
|
||||
eth_dev->data->mac_addrs = rte_zmalloc("enetc_eth", ETHER_ADDR_LEN, 0);
|
||||
eth_dev->data->mac_addrs = rte_zmalloc("enetc_eth",
|
||||
RTE_ETHER_ADDR_LEN, 0);
|
||||
if (!eth_dev->data->mac_addrs) {
|
||||
ENETC_PMD_ERR("Failed to allocate %d bytes needed to "
|
||||
"store MAC addresses",
|
||||
ETHER_ADDR_LEN * 1);
|
||||
RTE_ETHER_ADDR_LEN * 1);
|
||||
error = -ENOMEM;
|
||||
return -1;
|
||||
}
|
||||
@ -845,8 +847,9 @@ enetc_dev_init(struct rte_eth_dev *eth_dev)
|
||||
|
||||
/* Set MTU */
|
||||
enetc_port_wr(&hw->hw, ENETC_PM0_MAXFRM,
|
||||
ENETC_SET_MAXFRM(ETHER_MAX_LEN));
|
||||
eth_dev->data->mtu = ETHER_MAX_LEN - ETHER_HDR_LEN - ETHER_CRC_LEN;
|
||||
ENETC_SET_MAXFRM(RTE_ETHER_MAX_LEN));
|
||||
eth_dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
|
||||
RTE_ETHER_CRC_LEN;
|
||||
|
||||
ENETC_PMD_DEBUG("port_id %d vendorID=0x%x deviceID=0x%x",
|
||||
eth_dev->data->port_id, pci_dev->id.vendor_id,
|
||||
|
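The enetc default above, RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN, is exactly how rte_ether.h derives RTE_ETHER_MTU, so the two spellings are interchangeable. A one-line sanity check of that identity:

#include <rte_ether.h>

/* 1518 - 14 - 4 == 1500 */
_Static_assert(RTE_ETHER_MTU ==
        RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN,
        "RTE_ETHER_MTU is derived from the max untagged frame");
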
@ -204,7 +204,7 @@ struct enic {
static inline uint32_t enic_mtu_to_max_rx_pktlen(uint32_t mtu)
{
/* ethdev max size includes eth whereas NIC MTU does not */
return mtu + ETHER_HDR_LEN;
return mtu + RTE_ETHER_HDR_LEN;
}

/* Get the CQ index from a Start of Packet(SOP) RQ index */

@ -646,9 +646,9 @@ static int enicpmd_set_mac_addr(struct rte_eth_dev *eth_dev,

static void debug_log_add_del_addr(struct rte_ether_addr *addr, bool add)
{
char mac_str[ETHER_ADDR_FMT_SIZE];
char mac_str[RTE_ETHER_ADDR_FMT_SIZE];

rte_ether_format_addr(mac_str, ETHER_ADDR_FMT_SIZE, addr);
rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, addr);
PMD_INIT_LOG(DEBUG, " %s address %s\n",
add ? "add" : "remove", mac_str);
}
@ -658,7 +658,7 @@ static int enicpmd_set_mc_addr_list(struct rte_eth_dev *eth_dev,
uint32_t nb_mc_addr)
{
struct enic *enic = pmd_priv(eth_dev);
char mac_str[ETHER_ADDR_FMT_SIZE];
char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
struct rte_ether_addr *addr;
uint32_t i, j;
int ret;
@ -671,7 +671,7 @@ static int enicpmd_set_mc_addr_list(struct rte_eth_dev *eth_dev,
if (!rte_is_multicast_ether_addr(addr) ||
rte_is_broadcast_ether_addr(addr)) {
rte_ether_format_addr(mac_str,
ETHER_ADDR_FMT_SIZE, addr);
RTE_ETHER_ADDR_FMT_SIZE, addr);
PMD_INIT_LOG(ERR, " invalid multicast address %s\n",
mac_str);
return -EINVAL;

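RTE_ETHER_ADDR_FMT_SIZE (18) is sized for the canonical XX:XX:XX:XX:XX:XX form plus the NUL terminator, so a stack buffer of that size is always enough for rte_ether_format_addr(). A sketch of the usual pattern:

#include <stdio.h>
#include <rte_ether.h>

static void
print_mac(const struct rte_ether_addr *addr)
{
        char buf[RTE_ETHER_ADDR_FMT_SIZE];  /* "XX:XX:XX:XX:XX:XX" + NUL */

        rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr);
        printf("MAC: %s\n", buf);
}
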
@ -593,7 +593,7 @@ enic_copy_item_inner_vlan_v2(struct copy_item_args *arg)
arg->l2_proto_off = *off + offsetof(struct rte_vlan_hdr, eth_proto);
return copy_inner_common(&arg->filter->u.generic_1, off,
arg->item->spec, mask, sizeof(struct rte_vlan_hdr),
eth_type_off, rte_cpu_to_be_16(ETHER_TYPE_VLAN), 2);
eth_type_off, rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN), 2);
}

static int
@ -609,7 +609,7 @@ enic_copy_item_inner_ipv4_v2(struct copy_item_args *arg)
arg->l3_proto_off = *off + offsetof(struct ipv4_hdr, next_proto_id);
return copy_inner_common(&arg->filter->u.generic_1, off,
arg->item->spec, mask, sizeof(struct ipv4_hdr),
arg->l2_proto_off, rte_cpu_to_be_16(ETHER_TYPE_IPv4), 2);
arg->l2_proto_off, rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4), 2);
}

static int
@ -625,7 +625,7 @@ enic_copy_item_inner_ipv6_v2(struct copy_item_args *arg)
arg->l3_proto_off = *off + offsetof(struct ipv6_hdr, proto);
return copy_inner_common(&arg->filter->u.generic_1, off,
arg->item->spec, mask, sizeof(struct ipv6_hdr),
arg->l2_proto_off, rte_cpu_to_be_16(ETHER_TYPE_IPv6), 2);
arg->l2_proto_off, rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6), 2);
}

static int
@ -679,14 +679,14 @@ enic_copy_item_eth_v2(struct copy_item_args *arg)
mask = &rte_flow_item_eth_mask;

memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,
ETHER_ADDR_LEN);
RTE_ETHER_ADDR_LEN);
memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,
ETHER_ADDR_LEN);
RTE_ETHER_ADDR_LEN);

memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,
ETHER_ADDR_LEN);
RTE_ETHER_ADDR_LEN);
memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,
ETHER_ADDR_LEN);
RTE_ETHER_ADDR_LEN);
enic_spec.ether_type = spec->type;
enic_mask.ether_type = mask->type;

@ -61,9 +61,10 @@ int enic_get_vnic_config(struct enic *enic)
* and will be 0 for legacy firmware and VICs
*/
if (c->max_pkt_size > ENIC_DEFAULT_RX_MAX_PKT_SIZE)
enic->max_mtu = c->max_pkt_size - ETHER_HDR_LEN;
enic->max_mtu = c->max_pkt_size - RTE_ETHER_HDR_LEN;
else
enic->max_mtu = ENIC_DEFAULT_RX_MAX_PKT_SIZE - ETHER_HDR_LEN;
enic->max_mtu = ENIC_DEFAULT_RX_MAX_PKT_SIZE -
RTE_ETHER_HDR_LEN;
if (c->mtu == 0)
c->mtu = 1500;

@ -376,7 +376,7 @@ fs_get_mac_addr_arg(const char *key __rte_unused,
&ea->addr_bytes[0], &ea->addr_bytes[1],
&ea->addr_bytes[2], &ea->addr_bytes[3],
&ea->addr_bytes[4], &ea->addr_bytes[5]);
return ret != ETHER_ADDR_LEN;
return ret != RTE_ETHER_ADDR_LEN;
}

int

@ -172,9 +172,10 @@ fs_eth_dev_conf_apply(struct rte_eth_dev *dev,
ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), ea,
PRIV(dev)->mac_addr_pool[i]);
if (ret) {
char ea_fmt[ETHER_ADDR_FMT_SIZE];
char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];

rte_ether_format_addr(ea_fmt, ETHER_ADDR_FMT_SIZE, ea);
rte_ether_format_addr(ea_fmt,
RTE_ETHER_ADDR_FMT_SIZE, ea);
ERROR("Adding MAC address %s failed", ea_fmt);
return ret;
}

@ -305,7 +305,7 @@ fm10k_addr_alignment_valid(struct rte_mbuf *mb)
/* 8B aligned, and max Ethernet frame would not cross a 4KB boundary? */
if (RTE_ALIGN(addr, 8) == addr) {
boundary1 = RTE_ALIGN_FLOOR(addr, 4096);
boundary2 = RTE_ALIGN_FLOOR(addr + ETHER_MAX_VLAN_FRAME_LEN,
boundary2 = RTE_ALIGN_FLOOR(addr + RTE_ETHER_MAX_VLAN_FRAME_LEN,
4096);
if (boundary1 == boundary2)
return 1;

@ -613,7 +613,7 @@ fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)

/* reset MAC/VLAN as it's based on VMDQ or PF main VSI */
memset(dev->data->mac_addrs, 0,
ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);
RTE_ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);
rte_ether_addr_copy((const struct rte_ether_addr *)hw->mac.addr,
&dev->data->mac_addrs[0]);
memset(macvlan, 0, sizeof(*macvlan));
@ -3082,7 +3082,7 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev)

/* Initialize MAC address(es) */
dev->data->mac_addrs = rte_zmalloc("fm10k",
ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
RTE_ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
if (dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
return -ENOMEM;

@ -1479,7 +1479,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
/* Set the global registers with default ether type value */
if (!pf->support_multi_driver) {
ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
ETHER_TYPE_VLAN);
RTE_ETHER_TYPE_VLAN);
if (ret != I40E_SUCCESS) {
PMD_INIT_LOG(ERR,
"Failed to set the default outer "
@ -1510,9 +1510,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
}

if (!vsi->max_macaddrs)
len = ETHER_ADDR_LEN;
len = RTE_ETHER_ADDR_LEN;
else
len = ETHER_ADDR_LEN * vsi->max_macaddrs;
len = RTE_ETHER_ADDR_LEN * vsi->max_macaddrs;

/* Should be after VSI initialized */
dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
@ -2835,7 +2835,7 @@ i40e_update_vsi_stats(struct i40e_vsi *vsi)
&nes->rx_broadcast);
/* exclude CRC bytes */
nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
nes->rx_broadcast) * ETHER_CRC_LEN;
nes->rx_broadcast) * RTE_ETHER_CRC_LEN;

i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
&oes->rx_discards, &nes->rx_discards);
@ -2935,7 +2935,7 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
/* exclude CRC size */
pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
pf->internal_stats.rx_multicast +
pf->internal_stats.rx_broadcast) * ETHER_CRC_LEN;
pf->internal_stats.rx_broadcast) * RTE_ETHER_CRC_LEN;

/* Get statistics of struct i40e_eth_stats */
i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
@ -2955,10 +2955,11 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
pf->offset_loaded, &os->eth.rx_broadcast,
&ns->eth.rx_broadcast);
/* Workaround: CRC size should not be included in byte statistics,
* so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
* so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
* packet.
*/
ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
ns->eth.rx_broadcast) * ETHER_CRC_LEN;
ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;

/* exclude internal rx bytes
* Workaround: it is possible I40E_GLV_GORCH[H/L] is updated before
@ -3012,7 +3013,7 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
pf->offset_loaded, &os->eth.tx_broadcast,
&ns->eth.tx_broadcast);
ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
ns->eth.tx_broadcast) * ETHER_CRC_LEN;
ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;

/* exclude internal tx bytes
* Workaround: it is possible I40E_GLV_GOTCH[H/L] is updated before
@ -3511,7 +3512,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_mac_addrs = vsi->max_macaddrs;
dev_info->max_vfs = pci_dev->max_vfs;
dev_info->max_mtu = dev_info->max_rx_pktlen - I40E_ETH_OVERHEAD;
dev_info->min_mtu = ETHER_MIN_MTU;
dev_info->min_mtu = RTE_ETHER_MIN_MTU;
dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
@ -3777,9 +3778,9 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
i40e_vsi_config_double_vlan(vsi, TRUE);
/* Set global registers with default ethertype. */
i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
ETHER_TYPE_VLAN);
RTE_ETHER_TYPE_VLAN);
i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
ETHER_TYPE_VLAN);
RTE_ETHER_TYPE_VLAN);
}
else
i40e_vsi_config_double_vlan(vsi, FALSE);
@ -4037,7 +4038,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
return -EINVAL;
}

rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
rte_memcpy(&mac_filter.mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
else
@ -4142,11 +4143,11 @@ i40e_vf_mac_filter_set(struct i40e_pf *pf,
}

if (add) {
rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
rte_memcpy(&old_mac, hw->mac.addr, RTE_ETHER_ADDR_LEN);
rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
ETHER_ADDR_LEN);
RTE_ETHER_ADDR_LEN);
rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
ETHER_ADDR_LEN);
RTE_ETHER_ADDR_LEN);

mac_filter.filter_type = filter->filter_type;
ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
@ -4157,7 +4158,7 @@ i40e_vf_mac_filter_set(struct i40e_pf *pf,
rte_ether_addr_copy(new_mac, &pf->dev_addr);
} else {
rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
ETHER_ADDR_LEN);
RTE_ETHER_ADDR_LEN);
ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
@ -5825,7 +5826,7 @@ i40e_vsi_setup(struct i40e_pf *pf,
}

/* MAC/VLAN configuration */
rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;

ret = i40e_vsi_add_mac(vsi, &filter);
@ -7111,7 +7112,7 @@ i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
int mac_num;
int ret = I40E_SUCCESS;

if (!vsi || vlan > ETHER_MAX_VLAN_ID)
if (!vsi || vlan > RTE_ETHER_MAX_VLAN_ID)
return I40E_ERR_PARAM;

/* If it's already set, just return */
@ -7162,7 +7163,7 @@ i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
* Vlan 0 is the generic filter for untagged packets
* and can't be removed.
*/
if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
if (!vsi || vlan == 0 || vlan > RTE_ETHER_MAX_VLAN_ID)
return I40E_ERR_PARAM;

/* If can't find it, just return */
@ -8623,7 +8624,7 @@ i40e_tunnel_filter_param_check(struct i40e_pf *pf,
return -EINVAL;
}

if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
if (filter->inner_vlan > RTE_ETHER_MAX_VLAN_ID) {
PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
return -EINVAL;
}
@ -9903,7 +9904,8 @@ static int
i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
struct i40e_ethertype_filter *filter)
{
rte_memcpy(&filter->input.mac_addr, &input->mac_addr, ETHER_ADDR_LEN);
rte_memcpy(&filter->input.mac_addr, &input->mac_addr,
RTE_ETHER_ADDR_LEN);
filter->input.ether_type = input->ether_type;
filter->flags = input->flags;
filter->queue = input->queue;
@ -9995,14 +9997,14 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
PMD_DRV_LOG(ERR, "Invalid queue ID");
return -EINVAL;
}
if (filter->ether_type == ETHER_TYPE_IPv4 ||
filter->ether_type == ETHER_TYPE_IPv6) {
if (filter->ether_type == RTE_ETHER_TYPE_IPv4 ||
filter->ether_type == RTE_ETHER_TYPE_IPv6) {
PMD_DRV_LOG(ERR,
"unsupported ether_type(0x%04x) in control packet filter.",
filter->ether_type);
return -EINVAL;
}
if (filter->ether_type == ETHER_TYPE_VLAN)
if (filter->ether_type == RTE_ETHER_TYPE_VLAN)
PMD_DRV_LOG(WARNING,
"filter vlan ether_type in first tag is not supported.");

@ -12011,7 +12013,7 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
int ret = 0;

/* check if mtu is within the allowed range */
if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
if (mtu < RTE_ETHER_MIN_MTU || frame_size > I40E_FRAME_SIZE_MAX)
return -EINVAL;

/* mtu setting is forbidden if port is start */
@ -12021,7 +12023,7 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return -EBUSY;
}

if (frame_size > ETHER_MAX_LEN)
if (frame_size > RTE_ETHER_MAX_LEN)
dev_data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else

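RTE_ETHER_MAX_VLAN_ID is 4095 (a 12-bit VID field of all ones), and VID 0 is the catch-all for untagged/priority-tagged traffic, which is why i40e_vsi_delete_vlan above refuses both ends of the range. A sketch of that shared check:

#include <stdbool.h>
#include <stdint.h>
#include <rte_ether.h>

/* a VID is deletable as a filter only in 1..4095; 0 means
 * "untagged / priority tag only" and cannot be removed */
static bool
vlan_id_deletable(uint16_t vlan_id)
{
        return vlan_id != 0 && vlan_id <= RTE_ETHER_MAX_VLAN_ID;
}
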
@ -268,7 +268,7 @@ enum i40e_flxpld_layer_idx {
* Considering QinQ packet, the VLAN tag needs to be counted twice.
*/
#define I40E_ETH_OVERHEAD \
(ETHER_HDR_LEN + ETHER_CRC_LEN + I40E_VLAN_TAG_SIZE * 2)
(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + I40E_VLAN_TAG_SIZE * 2)

struct i40e_adapter;
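With QinQ both VLAN tags count, so the i40e/iavf/ice overhead is 14 + 4 + 2*4 = 26 bytes, and a standard 1500-byte MTU implies a 1526-byte max frame. A worked sketch of the arithmetic (the macro is restated locally):

#include <rte_ether.h>

#define VLAN_TAG_SZ     4
#define QINQ_OVERHEAD   (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \
                         VLAN_TAG_SZ * 2)

_Static_assert(QINQ_OVERHEAD == 26, "14B header + 4B CRC + 2 VLAN tags");
/* => a 1500-byte MTU needs room for 1526-byte frames */
_Static_assert(RTE_ETHER_MTU + QINQ_OVERHEAD == 1526, "worked example");
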
@ -1505,12 +1505,12 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
|
||||
|
||||
/* copy mac addr */
|
||||
eth_dev->data->mac_addrs = rte_zmalloc("i40evf_mac",
|
||||
ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX,
|
||||
0);
|
||||
RTE_ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX,
|
||||
0);
|
||||
if (eth_dev->data->mac_addrs == NULL) {
|
||||
PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
|
||||
" store MAC addresses",
|
||||
ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX);
|
||||
RTE_ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX);
|
||||
return -ENOMEM;
|
||||
}
|
||||
rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
|
||||
@ -1767,21 +1767,22 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
|
||||
* Check if the jumbo frame and maximum packet length are set correctly
|
||||
*/
|
||||
if (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
|
||||
if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
|
||||
if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
|
||||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
|
||||
PMD_DRV_LOG(ERR, "maximum packet length must be "
|
||||
"larger than %u and smaller than %u, as jumbo "
|
||||
"frame is enabled", (uint32_t)ETHER_MAX_LEN,
|
||||
"frame is enabled", (uint32_t)RTE_ETHER_MAX_LEN,
|
||||
(uint32_t)I40E_FRAME_SIZE_MAX);
|
||||
return I40E_ERR_CONFIG;
|
||||
}
|
||||
} else {
|
||||
if (rxq->max_pkt_len < ETHER_MIN_LEN ||
|
||||
rxq->max_pkt_len > ETHER_MAX_LEN) {
|
||||
if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
|
||||
rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {
|
||||
PMD_DRV_LOG(ERR, "maximum packet length must be "
|
||||
"larger than %u and smaller than %u, as jumbo "
|
||||
"frame is disabled", (uint32_t)ETHER_MIN_LEN,
|
||||
(uint32_t)ETHER_MAX_LEN);
|
||||
"frame is disabled",
|
||||
(uint32_t)RTE_ETHER_MIN_LEN,
|
||||
(uint32_t)RTE_ETHER_MAX_LEN);
|
||||
return I40E_ERR_CONFIG;
|
||||
}
|
||||
}
|
||||
@ -2218,7 +2219,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
|
||||
dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
|
||||
dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
|
||||
dev_info->max_mtu = dev_info->max_rx_pktlen - I40E_ETH_OVERHEAD;
|
||||
dev_info->min_mtu = ETHER_MIN_MTU;
|
||||
dev_info->min_mtu = RTE_ETHER_MIN_MTU;
|
||||
dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
|
||||
dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
|
||||
dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask;
|
||||
@ -2680,7 +2681,7 @@ i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
int ret = 0;
|
||||
|
||||
/* check if mtu is within the allowed range */
|
||||
if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
|
||||
if (mtu < RTE_ETHER_MIN_MTU || frame_size > I40E_FRAME_SIZE_MAX)
|
||||
return -EINVAL;
|
||||
|
||||
/* mtu setting is forbidden if port is start */
|
||||
@ -2690,7 +2691,7 @@ i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
if (frame_size > ETHER_MAX_LEN)
|
||||
if (frame_size > RTE_ETHER_MAX_LEN)
|
||||
dev_data->dev_conf.rxmode.offloads |=
|
||||
DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
else
|
||||
|
@ -113,7 +113,7 @@ i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
#endif
rx_ctx.dtype = i40e_header_split_none;
rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
rx_ctx.rxmax = ETHER_MAX_LEN;
rx_ctx.rxmax = RTE_ETHER_MAX_LEN;
rx_ctx.tphrdesc_ena = 1;
rx_ctx.tphwdesc_ena = 1;
rx_ctx.tphdata_ena = 1;
@ -725,7 +725,7 @@ i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,
case RTE_ETH_FLOW_FRAG_IPV4:
ip = (struct ipv4_hdr *)raw_pkt;

*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
*ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
/* set len to by default */
ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
@ -752,7 +752,7 @@ i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,
case RTE_ETH_FLOW_FRAG_IPV6:
ip6 = (struct ipv6_hdr *)raw_pkt;

*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
*ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
ip6->vtc_flow =
rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
(fdir_input->flow.ipv6_flow.tc <<
@ -910,7 +910,7 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf,
* starts after the whole ARP header
*/
if (fdir_input->flow.l2_flow.ether_type ==
rte_cpu_to_be_16(ETHER_TYPE_ARP))
rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
payload += sizeof(struct rte_arp_hdr);
set_idx = I40E_FLXPLD_L2_IDX;
break;
@ -1009,7 +1009,7 @@ i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
is_customized_pctype) {
ip = (struct ipv4_hdr *)raw_pkt;

*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
*ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4);
ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
/* set len to by default */
ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
@ -1042,7 +1042,7 @@ i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
ip6 = (struct ipv6_hdr *)raw_pkt;

*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
*ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6);
ip6->vtc_flow =
rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
(fdir_input->flow.ipv6_flow.tc <<
@ -1196,7 +1196,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
* starts after the whole ARP header
*/
if (fdir_input->flow.l2_flow.ether_type ==
rte_cpu_to_be_16(ETHER_TYPE_ARP))
rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
payload += sizeof(struct rte_arp_hdr);
set_idx = I40E_FLXPLD_L2_IDX;
} else if (fdir_input->flow_ext.customized_pctype) {

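The RTE_ETHER_TYPE_* constants are defined in host byte order, so every on-wire use above converts them exactly once with rte_cpu_to_be_16() (or htons()). A sketch of writing an ethertype into a template frame:

#include <rte_byteorder.h>
#include <rte_ether.h>

static void
set_template_ethertype(struct rte_ether_hdr *eth, uint16_t ether_type_host)
{
        /* ether_type is big-endian on the wire; the macros
         * (e.g. RTE_ETHER_TYPE_ARP == 0x0806) are host-order values */
        eth->ether_type = rte_cpu_to_be_16(ether_type_host);
}
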
@ -2035,9 +2035,9 @@ i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
}
filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

if (filter->ether_type == ETHER_TYPE_IPv4 ||
filter->ether_type == ETHER_TYPE_IPv6 ||
filter->ether_type == ETHER_TYPE_LLDP ||
if (filter->ether_type == RTE_ETHER_TYPE_IPv4 ||
filter->ether_type == RTE_ETHER_TYPE_IPv6 ||
filter->ether_type == RTE_ETHER_TYPE_LLDP ||
filter->ether_type == outer_tpid) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@ -2507,9 +2507,9 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
ether_type = rte_be_to_cpu_16(eth_spec->type);

if (next == RTE_FLOW_ITEM_TYPE_VLAN ||
ether_type == ETHER_TYPE_IPv4 ||
ether_type == ETHER_TYPE_IPv6 ||
ether_type == ETHER_TYPE_ARP ||
ether_type == RTE_ETHER_TYPE_IPv4 ||
ether_type == RTE_ETHER_TYPE_IPv6 ||
ether_type == RTE_ETHER_TYPE_ARP ||
ether_type == outer_tpid) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@ -2552,9 +2552,9 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
ether_type =
rte_be_to_cpu_16(vlan_spec->inner_type);

if (ether_type == ETHER_TYPE_IPv4 ||
ether_type == ETHER_TYPE_IPv6 ||
ether_type == ETHER_TYPE_ARP ||
if (ether_type == RTE_ETHER_TYPE_IPv4 ||
ether_type == RTE_ETHER_TYPE_IPv6 ||
ether_type == RTE_ETHER_TYPE_ARP ||
ether_type == outer_tpid) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@ -3338,12 +3338,12 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
if (!vxlan_flag) {
rte_memcpy(&filter->outer_mac,
&eth_spec->dst,
ETHER_ADDR_LEN);
RTE_ETHER_ADDR_LEN);
filter_type |= ETH_TUNNEL_FILTER_OMAC;
} else {
rte_memcpy(&filter->inner_mac,
&eth_spec->dst,
ETHER_ADDR_LEN);
RTE_ETHER_ADDR_LEN);
filter_type |= ETH_TUNNEL_FILTER_IMAC;
}
}
@ -3568,12 +3568,12 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
if (!nvgre_flag) {
rte_memcpy(&filter->outer_mac,
&eth_spec->dst,
ETHER_ADDR_LEN);
RTE_ETHER_ADDR_LEN);
filter_type |= ETH_TUNNEL_FILTER_OMAC;
} else {
rte_memcpy(&filter->inner_mac,
&eth_spec->dst,
ETHER_ADDR_LEN);
RTE_ETHER_ADDR_LEN);
filter_type |= ETH_TUNNEL_FILTER_IMAC;
}
}

@ -843,7 +843,7 @@ i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,

for (i = 0; i < addr_list->num_elements; i++) {
mac = (struct rte_ether_addr *)(addr_list->list[i].addr);
rte_memcpy(&filter.mac_addr, mac, ETHER_ADDR_LEN);
rte_memcpy(&filter.mac_addr, mac, RTE_ETHER_ADDR_LEN);
filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
if (rte_is_zero_ether_addr(mac) ||
i40e_vsi_add_mac(vf->vsi, &filter)) {

|
||||
*/
|
||||
rxm->next = NULL;
|
||||
if (unlikely(rxq->crc_len > 0)) {
|
||||
first_seg->pkt_len -= ETHER_CRC_LEN;
|
||||
if (rx_packet_len <= ETHER_CRC_LEN) {
|
||||
first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
|
||||
if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
|
||||
rte_pktmbuf_free_seg(rxm);
|
||||
first_seg->nb_segs--;
|
||||
last_seg->data_len =
|
||||
(uint16_t)(last_seg->data_len -
|
||||
(ETHER_CRC_LEN - rx_packet_len));
|
||||
(RTE_ETHER_CRC_LEN - rx_packet_len));
|
||||
last_seg->next = NULL;
|
||||
} else
|
||||
rxm->data_len = (uint16_t)(rx_packet_len -
|
||||
ETHER_CRC_LEN);
|
||||
RTE_ETHER_CRC_LEN);
|
||||
}
|
||||
|
||||
first_seg->port = rxq->port_id;
|
||||
@ -1839,7 +1839,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
|
||||
rxq->reg_idx = reg_idx;
|
||||
rxq->port_id = dev->data->port_id;
|
||||
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
|
||||
rxq->crc_len = ETHER_CRC_LEN;
|
||||
rxq->crc_len = RTE_ETHER_CRC_LEN;
|
||||
else
|
||||
rxq->crc_len = 0;
|
||||
rxq->drop_en = rx_conf->rx_drop_en;
|
||||
@ -2634,23 +2634,23 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
|
||||
len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;
|
||||
rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len);
|
||||
if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
|
||||
if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
|
||||
if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
|
||||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
|
||||
PMD_DRV_LOG(ERR, "maximum packet length must "
|
||||
"be larger than %u and smaller than %u,"
|
||||
"as jumbo frame is enabled",
|
||||
(uint32_t)ETHER_MAX_LEN,
|
||||
(uint32_t)RTE_ETHER_MAX_LEN,
|
||||
(uint32_t)I40E_FRAME_SIZE_MAX);
|
||||
return I40E_ERR_CONFIG;
|
||||
}
|
||||
} else {
|
||||
if (rxq->max_pkt_len < ETHER_MIN_LEN ||
|
||||
rxq->max_pkt_len > ETHER_MAX_LEN) {
|
||||
if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
|
||||
rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {
|
||||
PMD_DRV_LOG(ERR, "maximum packet length must be "
|
||||
"larger than %u and smaller than %u, "
|
||||
"as jumbo frame is disabled",
|
||||
(uint32_t)ETHER_MIN_LEN,
|
||||
(uint32_t)ETHER_MAX_LEN);
|
||||
(uint32_t)RTE_ETHER_MIN_LEN,
|
||||
(uint32_t)RTE_ETHER_MAX_LEN);
|
||||
return I40E_ERR_CONFIG;
|
||||
}
|
||||
}
|
||||
|
@ -663,7 +663,7 @@ int rte_pmd_i40e_set_vf_vlan_insert(uint16_t port, uint16_t vf_id,

RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

if (vlan_id > ETHER_MAX_VLAN_ID) {
if (vlan_id > RTE_ETHER_MAX_VLAN_ID) {
PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
return -EINVAL;
}
@ -765,7 +765,7 @@ int rte_pmd_i40e_set_vf_broadcast(uint16_t port, uint16_t vf_id,
}

if (on) {
rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
ret = i40e_vsi_add_mac(vsi, &filter);
} else {
@ -893,7 +893,7 @@ int rte_pmd_i40e_set_vf_vlan_filter(uint16_t port, uint16_t vlan_id,
if (!is_i40e_supported(dev))
return -ENOTSUP;

if (vlan_id > ETHER_MAX_VLAN_ID || !vlan_id) {
if (vlan_id > RTE_ETHER_MAX_VLAN_ID || !vlan_id) {
PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
return -EINVAL;
}

|
||||
*/
|
||||
#define IAVF_VLAN_TAG_SIZE 4
|
||||
#define IAVF_ETH_OVERHEAD \
|
||||
(ETHER_HDR_LEN + ETHER_CRC_LEN + IAVF_VLAN_TAG_SIZE * 2)
|
||||
(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + IAVF_VLAN_TAG_SIZE * 2)
|
||||
|
||||
#define IAVF_32_BIT_WIDTH (CHAR_BIT * 4)
|
||||
#define IAVF_48_BIT_WIDTH (CHAR_BIT * 6)
|
||||
|
@ -225,23 +225,23 @@ iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
|
||||
* correctly.
|
||||
*/
|
||||
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
|
||||
if (max_pkt_len <= ETHER_MAX_LEN ||
|
||||
if (max_pkt_len <= RTE_ETHER_MAX_LEN ||
|
||||
max_pkt_len > IAVF_FRAME_SIZE_MAX) {
|
||||
PMD_DRV_LOG(ERR, "maximum packet length must be "
|
||||
"larger than %u and smaller than %u, "
|
||||
"as jumbo frame is enabled",
|
||||
(uint32_t)ETHER_MAX_LEN,
|
||||
(uint32_t)RTE_ETHER_MAX_LEN,
|
||||
(uint32_t)IAVF_FRAME_SIZE_MAX);
|
||||
return -EINVAL;
|
||||
}
|
||||
} else {
|
||||
if (max_pkt_len < ETHER_MIN_LEN ||
|
||||
max_pkt_len > ETHER_MAX_LEN) {
|
||||
if (max_pkt_len < RTE_ETHER_MIN_LEN ||
|
||||
max_pkt_len > RTE_ETHER_MAX_LEN) {
|
||||
PMD_DRV_LOG(ERR, "maximum packet length must be "
|
||||
"larger than %u and smaller than %u, "
|
||||
"as jumbo frame is disabled",
|
||||
(uint32_t)ETHER_MIN_LEN,
|
||||
(uint32_t)ETHER_MAX_LEN);
|
||||
(uint32_t)RTE_ETHER_MIN_LEN,
|
||||
(uint32_t)RTE_ETHER_MAX_LEN);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
@ -917,7 +917,7 @@ iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
uint32_t frame_size = mtu + IAVF_ETH_OVERHEAD;
|
||||
int ret = 0;
|
||||
|
||||
if (mtu < ETHER_MIN_MTU || frame_size > IAVF_FRAME_SIZE_MAX)
|
||||
if (mtu < RTE_ETHER_MIN_MTU || frame_size > IAVF_FRAME_SIZE_MAX)
|
||||
return -EINVAL;
|
||||
|
||||
/* mtu setting is forbidden if port is start */
|
||||
@ -926,7 +926,7 @@ iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
if (frame_size > ETHER_MAX_LEN)
|
||||
if (frame_size > RTE_ETHER_MAX_LEN)
|
||||
dev->data->dev_conf.rxmode.offloads |=
|
||||
DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
else
|
||||
@ -1305,13 +1305,11 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
|
||||
|
||||
/* copy mac addr */
|
||||
eth_dev->data->mac_addrs = rte_zmalloc(
|
||||
"iavf_mac",
|
||||
ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX,
|
||||
0);
|
||||
"iavf_mac", RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX, 0);
|
||||
if (!eth_dev->data->mac_addrs) {
|
||||
PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
|
||||
" store MAC addresses",
|
||||
ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX);
|
||||
RTE_ETHER_ADDR_LEN * IAVF_NUM_MACADDR_MAX);
|
||||
return -ENOMEM;
|
||||
}
|
||||
/* If the MAC address is not configured by host,
|
||||
|
@ -1025,17 +1025,17 @@ iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
*/
rxm->next = NULL;
if (unlikely(rxq->crc_len > 0)) {
first_seg->pkt_len -= ETHER_CRC_LEN;
if (rx_packet_len <= ETHER_CRC_LEN) {
first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
rte_pktmbuf_free_seg(rxm);
first_seg->nb_segs--;
last_seg->data_len =
(uint16_t)(last_seg->data_len -
(ETHER_CRC_LEN - rx_packet_len));
(RTE_ETHER_CRC_LEN - rx_packet_len));
last_seg->next = NULL;
} else
rxm->data_len = (uint16_t)(rx_packet_len -
ETHER_CRC_LEN);
RTE_ETHER_CRC_LEN);
}

first_seg->port = rxq->port_id;

@ -656,7 +656,7 @@ ice_add_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
struct ice_hw *hw;
int ret = 0;

if (!vsi || vlan_id > ETHER_MAX_VLAN_ID)
if (!vsi || vlan_id > RTE_ETHER_MAX_VLAN_ID)
return -EINVAL;

hw = ICE_VSI_TO_HW(vsi);
@ -727,7 +727,7 @@ ice_remove_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
* Vlan 0 is the generic filter for untagged packets
* and can't be removed.
*/
if (!vsi || vlan_id == 0 || vlan_id > ETHER_MAX_VLAN_ID)
if (!vsi || vlan_id == 0 || vlan_id > RTE_ETHER_MAX_VLAN_ID)
return -EINVAL;

hw = ICE_VSI_TO_HW(vsi);
@ -1235,12 +1235,12 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
hw->port_info->mac.perm_addr,
ETH_ADDR_LEN);

rte_memcpy(&mac_addr, &pf->dev_addr, ETHER_ADDR_LEN);
rte_memcpy(&mac_addr, &pf->dev_addr, RTE_ETHER_ADDR_LEN);
ret = ice_add_mac_filter(vsi, &mac_addr);
if (ret != ICE_SUCCESS)
PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");

rte_memcpy(&mac_addr, &broadcast, ETHER_ADDR_LEN);
rte_memcpy(&mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
ret = ice_add_mac_filter(vsi, &mac_addr);
if (ret != ICE_SUCCESS)
PMD_INIT_LOG(ERR, "Failed to add MAC filter");
@ -2025,7 +2025,7 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_mac_addrs = vsi->max_macaddrs;
dev_info->max_vfs = pci_dev->max_vfs;
dev_info->max_mtu = dev_info->max_rx_pktlen - ICE_ETH_OVERHEAD;
dev_info->min_mtu = ETHER_MIN_MTU;
dev_info->min_mtu = RTE_ETHER_MIN_MTU;

dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
@ -2326,7 +2326,7 @@ ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
uint32_t frame_size = mtu + ICE_ETH_OVERHEAD;

/* check if mtu is within the allowed range */
if (mtu < ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
if (mtu < RTE_ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
return -EINVAL;

/* mtu setting is forbidden if port is start */
@ -2337,7 +2337,7 @@ ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return -EBUSY;
}

if (frame_size > ETHER_MAX_LEN)
if (frame_size > RTE_ETHER_MAX_LEN)
dev_data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
@ -3201,7 +3201,7 @@ ice_update_vsi_stats(struct ice_vsi *vsi)
&nes->rx_broadcast);
/* exclude CRC bytes */
nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
nes->rx_broadcast) * ETHER_CRC_LEN;
nes->rx_broadcast) * RTE_ETHER_CRC_LEN;

ice_stat_update_32(hw, GLV_RDPC(idx), vsi->offset_loaded,
&oes->rx_discards, &nes->rx_discards);
@ -3274,10 +3274,11 @@ ice_read_stats_registers(struct ice_pf *pf, struct ice_hw *hw)
&ns->eth.rx_discards);

/* Workaround: CRC size should not be included in byte statistics,
* so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
* so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
* packet.
*/
ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
ns->eth.rx_broadcast) * ETHER_CRC_LEN;
ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;

/* GLPRT_REPC not supported */
/* GLPRT_RMPC not supported */
@ -3302,7 +3303,7 @@ ice_read_stats_registers(struct ice_pf *pf, struct ice_hw *hw)
pf->offset_loaded, &os->eth.tx_broadcast,
&ns->eth.tx_broadcast);
ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
ns->eth.tx_broadcast) * ETHER_CRC_LEN;
ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;

/* GLPRT_TEPC not supported */

@ -122,7 +122,7 @@
* Considering QinQ packet, the VLAN tag needs to be counted twice.
*/
#define ICE_ETH_OVERHEAD \
(ETHER_HDR_LEN + ETHER_CRC_LEN + ICE_VLAN_TAG_SIZE * 2)
(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + ICE_VLAN_TAG_SIZE * 2)

struct ice_adapter;

|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user