net/enic: use the new ethdev offloads API
The following commits deprecate the use of the offload bit fields (e.g. header_split) in rte_eth_rxmode and txq_flags in rte_eth_txconf:

commit ce17eddefc ("ethdev: introduce Rx queue offloads API")
commit cba7f53b71 ("ethdev: introduce Tx queue offloads API")

For enic, the required changes are mechanical: use the new 'offloads' field in rxmode instead of the bit fields. No changes are needed with respect to txq_flags, as enic does not use it at all. Per-queue Rx offload capabilities are not set, as all offloads are per-port at the moment.

Signed-off-by: Hyong Youb Kim <hyonkim@cisco.com>
Reviewed-by: John Daley <johndale@cisco.com>
commit a062bafa62
parent 5d4f3ad644
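
For orientation before the diff, here is a minimal sketch (not part of this commit; the port id, queue counts, and the particular offload selection are illustrative) of how an application requests Rx offloads through the consolidated 'offloads' mask instead of the deprecated per-feature bit fields:

/*
 * Sketch only, not from this commit: an application selecting Rx
 * offloads with the new API. Port id and queue counts are made up.
 */
#include <rte_ethdev.h>

static int
configure_rx_offloads(uint16_t port_id)
{
	struct rte_eth_conf conf = { 0 };

	/* Previously: conf.rxmode.hw_vlan_strip = 1, hw_ip_checksum = 1,
	 * enable_scatter = 1. Now a single bit mask carries the requests.
	 */
	conf.rxmode.offloads = DEV_RX_OFFLOAD_VLAN_STRIP |
			       DEV_RX_OFFLOAD_CHECKSUM |
			       DEV_RX_OFFLOAD_SCATTER;

	/* One Rx and one Tx queue, for brevity. */
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}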
@@ -370,7 +370,8 @@ static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 	ENICPMD_FUNC_TRACE();
 
 	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
+		if (eth_dev->data->dev_conf.rxmode.offloads &
+		    DEV_RX_OFFLOAD_VLAN_STRIP)
 			enic->ig_vlan_strip_en = 1;
 		else
 			enic->ig_vlan_strip_en = 0;
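
The hunk above shows the substitution pattern that repeats throughout the patch: each deprecated rxmode bit-field test becomes a test of the corresponding DEV_RX_OFFLOAD_* flag. A hedged illustration of the same check pulled out into a helper (the helper name is hypothetical and does not exist in the driver):

/* Hypothetical helper, for illustration only. */
static inline int
rx_vlan_strip_requested(struct rte_eth_dev *eth_dev)
{
	/* Before: eth_dev->data->dev_conf.rxmode.hw_vlan_strip
	 * After:  test the flag in the consolidated offloads mask.
	 */
	return !!(eth_dev->data->dev_conf.rxmode.offloads &
		  DEV_RX_OFFLOAD_VLAN_STRIP);
}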
@@ -407,13 +408,15 @@ static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
 	}
 
 	if (eth_dev->data->dev_conf.rxmode.split_hdr_size &&
-	    eth_dev->data->dev_conf.rxmode.header_split) {
+	    (eth_dev->data->dev_conf.rxmode.offloads &
+	     DEV_RX_OFFLOAD_HEADER_SPLIT)) {
 		/* Enable header-data-split */
 		enic_set_hdr_split_size(enic,
 			eth_dev->data->dev_conf.rxmode.split_hdr_size);
 	}
 
-	enic->hw_ip_checksum = eth_dev->data->dev_conf.rxmode.hw_ip_checksum;
+	enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
+				  DEV_RX_OFFLOAD_CHECKSUM);
 	ret = enicpmd_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK);
 
 	return ret;
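
One detail worth noting in the checksum change: as far as the headers of this era go, DEV_RX_OFFLOAD_CHECKSUM is a convenience mask grouping the IPv4/UDP/TCP checksum flags, so the masked value can have several bits set; the !! folds it into the 0/1 value that enic->hw_ip_checksum stores. A small sketch of the idiom:

#include <stdint.h>
#include <rte_ethdev.h>

/* Assumed: DEV_RX_OFFLOAD_CHECKSUM groups the per-protocol checksum
 * flags, so the AND result may be any non-zero subset of those bits.
 * !! normalizes it to 0 or 1 before it lands in a one-byte flag.
 */
static inline uint8_t
rx_checksum_requested(uint64_t rx_offloads)
{
	return !!(rx_offloads & DEV_RX_OFFLOAD_CHECKSUM);
}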
@@ -634,7 +634,8 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
 	mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
 			       RTE_PKTMBUF_HEADROOM);
 
-	if (enic->rte_dev->data->dev_conf.rxmode.enable_scatter) {
+	if (enic->rte_dev->data->dev_conf.rxmode.offloads &
+	    DEV_RX_OFFLOAD_SCATTER) {
 		dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
 		/* ceil((mtu + ETHER_HDR_LEN + 4)/mbuf_size) */
 		mbufs_per_pkt = ((mtu + ETHER_HDR_LEN + 4) +
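
The last line of the hunk above is cut off by the viewer, but the preceding comment states the intent: with scatter enabled, the receive queue provisions ceil((mtu + ETHER_HDR_LEN + 4) / mbuf_size) buffers per packet. A worked sketch of that ceiling division (the numbers are illustrative, not taken from the commit):

#include <stdint.h>

/* Integer ceiling division as described by the driver comment, e.g.
 * mtu = 9000, ETHER_HDR_LEN = 14, plus the 4 extra bytes from the
 * formula, with mbuf_size = 2048: ceil(9018 / 2048) = 5 mbufs/packet.
 */
static inline uint32_t
mbufs_per_packet(uint16_t mtu, uint16_t mbuf_size)
{
	uint32_t frame_size = (uint32_t)mtu + 14 /* ETHER_HDR_LEN */ + 4;

	return (frame_size + mbuf_size - 1) / mbuf_size;
}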
@@ -1208,7 +1209,8 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
 	/* The easy case is when scatter is disabled. However if the MTU
 	 * becomes greater than the mbuf data size, packet drops will ensue.
 	 */
-	if (!enic->rte_dev->data->dev_conf.rxmode.enable_scatter) {
+	if (!(enic->rte_dev->data->dev_conf.rxmode.offloads &
+	      DEV_RX_OFFLOAD_SCATTER)) {
 		eth_dev->data->mtu = new_mtu;
 		goto set_mtu_done;
 	}
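
Finally, since the commit message notes that enic advertises only per-port Rx offload capabilities for now, an application would discover them through the usual device-info query rather than per-queue fields. A sketch (the port id is arbitrary):

#include <rte_ethdev.h>

/* Returns the per-port Rx offload capability mask reported by the PMD. */
static uint64_t
port_rx_offload_capa(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	return dev_info.rx_offload_capa;
}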