net/e1000: convert to new Rx offloads API

Ethdev Rx offloads API has changed since:
commit ce17eddefc ("ethdev: introduce Rx queue offloads API")
This commit supports the new Rx offloads API.

Signed-off-by: Wei Dai <wei.dai@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
commit ef990fb56e
parent 51215925a3
Author: Wei Dai
AuthorDate: 2018-04-03 10:54:55 +08:00
Commit: Ferruh Yigit
5 changed files with 240 additions and 57 deletions
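For background: the new API replaces the old per-feature bit-fields of struct rte_eth_rxmode (hw_ip_checksum, hw_vlan_strip, hw_vlan_filter, jumbo_frame, enable_scatter, hw_strip_crc) with a single DEV_RX_OFFLOAD_* bitmask at port level (rxmode.offloads) plus a second bitmask per queue (rte_eth_rxconf.offloads). A minimal application-side sketch of the new usage, not part of this patch (port id, ring size and mempool are assumed):

#include <string.h>
#include <rte_ethdev.h>
#include <rte_lcore.h>

static int
setup_port_with_offloads(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf conf;
	struct rte_eth_rxconf rxconf;
	int ret;

	memset(&conf, 0, sizeof(conf));
	rte_eth_dev_info_get(port_id, &dev_info);

	/* Port-level request, masked by the reported capabilities so that
	 * eth_em_configure()/eth_igb_configure() do not return -ENOTSUP. */
	conf.rxmode.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
				DEV_RX_OFFLOAD_VLAN_STRIP) &
				dev_info.rx_offload_capa;
	/* transitional flag of this era: use offloads, not the old bits */
	conf.rxmode.ignore_offload_bitfield = 1;

	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret != 0)
		return ret;

	/* Queue-level request: for port-wide offloads it must match the
	 * port configuration (see the *_check_rx_queue_offloads() helpers
	 * added below). */
	rxconf = dev_info.default_rxconf;
	rxconf.offloads = conf.rxmode.offloads;
	return rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
				      &rxconf, mb_pool);
}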

drivers/net/e1000/e1000_ethdev.h

@@ -357,6 +357,9 @@ void eth_igb_rx_queue_release(void *rxq);
void igb_dev_clear_queues(struct rte_eth_dev *dev);
void igb_dev_free_queues(struct rte_eth_dev *dev);
uint64_t igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
uint64_t igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);
int eth_igb_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
@@ -417,6 +420,8 @@ void igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
void igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_txq_info *qinfo);
uint32_t em_get_max_pktlen(struct rte_eth_dev *dev);
/*
* RX/TX EM function prototypes
*/
@@ -426,6 +431,9 @@ void eth_em_rx_queue_release(void *rxq);
void em_dev_clear_queues(struct rte_eth_dev *dev);
void em_dev_free_queues(struct rte_eth_dev *dev);
uint64_t em_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
uint64_t em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);
int eth_em_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
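The header hunks above add split capability getters: *_get_rx_port_offloads_capa() for offloads that apply to the whole port, *_get_rx_queue_offloads_capa() for offloads an individual queue may carry. eth_em_infos_get() and eth_igb_infos_get() below publish the queue set in dev_info->rx_queue_offload_capa and the union in dev_info->rx_offload_capa, so an application can derive the port-only set; a one-function sketch under that assumption:

#include <rte_ethdev.h>

/* Offloads that can only be requested port-wide: reported in
 * rx_offload_capa but absent from rx_queue_offload_capa. */
static uint64_t
port_only_rx_offloads(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	return dev_info.rx_offload_capa & ~dev_info.rx_queue_offload_capa;
}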

drivers/net/e1000/em_ethdev.c

@@ -451,9 +451,21 @@ eth_em_configure(struct rte_eth_dev *dev)
{
struct e1000_interrupt *intr =
E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
struct rte_eth_dev_info dev_info;
uint64_t rx_offloads;
PMD_INIT_FUNC_TRACE();
intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
eth_em_infos_get(dev, &dev_info);
rx_offloads = dev->data->dev_conf.rxmode.offloads;
if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
"requested 0x%" PRIx64 " supported 0x%" PRIx64,
rx_offloads, dev_info.rx_offload_capa);
return -ENOTSUP;
}
PMD_INIT_FUNC_TRACE();
return 0;
@@ -1017,9 +1029,11 @@ eth_em_rx_queue_intr_disable(struct rte_eth_dev *dev, __rte_unused uint16_t queu
return 0;
}
static uint32_t
em_get_max_pktlen(const struct e1000_hw *hw)
uint32_t
em_get_max_pktlen(struct rte_eth_dev *dev)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
@@ -1050,13 +1064,8 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
dev_info->max_rx_pktlen = em_get_max_pktlen(hw);
dev_info->max_rx_pktlen = em_get_max_pktlen(dev);
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_IPV4_CKSUM |
@@ -1083,6 +1092,10 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_queues = 1;
dev_info->max_tx_queues = 1;
dev_info->rx_queue_offload_capa = em_get_rx_queue_offloads_capa(dev);
dev_info->rx_offload_capa = em_get_rx_port_offloads_capa(dev) |
dev_info->rx_queue_offload_capa;
dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
.nb_max = E1000_MAX_RING_DESC,
.nb_min = E1000_MIN_RING_DESC,
@@ -1400,15 +1413,18 @@ em_vlan_hw_strip_enable(struct rte_eth_dev *dev)
static int
eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
struct rte_eth_rxmode *rxmode;
rxmode = &dev->data->dev_conf.rxmode;
if(mask & ETH_VLAN_STRIP_MASK){
if (dev->data->dev_conf.rxmode.hw_vlan_strip)
if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
em_vlan_hw_strip_enable(dev);
else
em_vlan_hw_strip_disable(dev);
}
if(mask & ETH_VLAN_FILTER_MASK){
if (dev->data->dev_conf.rxmode.hw_vlan_filter)
if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
em_vlan_hw_filter_enable(dev);
else
em_vlan_hw_filter_disable(dev);
@@ -1775,10 +1791,12 @@ eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
/* switch to jumbo mode if needed */
if (frame_size > ETHER_MAX_LEN) {
dev->data->dev_conf.rxmode.jumbo_frame = 1;
dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
rctl |= E1000_RCTL_LPE;
} else {
dev->data->dev_conf.rxmode.jumbo_frame = 0;
dev->data->dev_conf.rxmode.offloads &=
~DEV_RX_OFFLOAD_JUMBO_FRAME;
rctl &= ~E1000_RCTL_LPE;
}
E1000_WRITE_REG(hw, E1000_RCTL, rctl);
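The main behavioral addition in this file is the configure-time validation: the port-level request must be a subset of dev_info.rx_offload_capa or eth_em_configure() returns -ENOTSUP. The subset test is a single mask comparison; a standalone restatement with made-up example values:

#include <stdint.h>

/* Mirrors the check added to eth_em_configure(): every requested bit
 * must also be set in the supported mask. */
static int
rx_offloads_supported(uint64_t requested, uint64_t supported)
{
	return (requested & supported) == requested;
}

/* Example with supported = 0x0f:
 *   requested = 0x05 -> (0x05 & 0x0f) == 0x05 -> accepted
 *   requested = 0x15 -> (0x15 & 0x0f) == 0x05 -> rejected (-ENOTSUP)
 */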

drivers/net/e1000/em_rxtx.c

@@ -85,6 +85,7 @@ struct em_rx_queue {
struct em_rx_entry *sw_ring; /**< address of RX software ring. */
struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
uint64_t offloads; /**< Offloads of DEV_RX_OFFLOAD_* */
uint16_t nb_rx_desc; /**< number of RX descriptors. */
uint16_t rx_tail; /**< current value of RDT register. */
uint16_t nb_rx_hold; /**< number of held free RX desc. */
@@ -1313,6 +1314,59 @@ em_reset_rx_queue(struct em_rx_queue *rxq)
rxq->pkt_last_seg = NULL;
}
uint64_t
em_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
{
uint64_t rx_offload_capa;
uint32_t max_rx_pktlen;
max_rx_pktlen = em_get_max_pktlen(dev);
rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_CRC_STRIP |
DEV_RX_OFFLOAD_SCATTER;
if (max_rx_pktlen > ETHER_MAX_LEN)
rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
return rx_offload_capa;
}
uint64_t
em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
{
uint64_t rx_queue_offload_capa;
/*
* As only one Rx queue can be used, let per queue offloading
* capability be same to per port queue offloading capability
* for better convenience.
*/
rx_queue_offload_capa = em_get_rx_port_offloads_capa(dev);
return rx_queue_offload_capa;
}
static int
em_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
{
uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
uint64_t queue_supported = em_get_rx_queue_offloads_capa(dev);
uint64_t port_supported = em_get_rx_port_offloads_capa(dev);
if ((requested & (queue_supported | port_supported)) != requested)
return 0;
if ((port_offloads ^ requested) & port_supported)
return 0;
return 1;
}
int
eth_em_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1328,6 +1382,19 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (!em_check_rx_queue_offloads(dev, rx_conf->offloads)) {
PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
" don't match port offloads 0x%" PRIx64
" or supported port offloads 0x%" PRIx64
" or supported queue offloads 0x%" PRIx64,
(void *)dev,
rx_conf->offloads,
dev->data->dev_conf.rxmode.offloads,
em_get_rx_port_offloads_capa(dev),
em_get_rx_queue_offloads_capa(dev));
return -ENOTSUP;
}
/*
* Validate number of receive descriptors.
* It must not exceed hardware maximum, and must be multiple
@@ -1382,8 +1449,8 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
rxq->queue_id = queue_idx;
rxq->port_id = dev->data->port_id;
rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
0 : ETHER_CRC_LEN);
rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx));
@@ -1395,6 +1462,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
dev->data->rx_queues[queue_idx] = rxq;
em_reset_rx_queue(rxq);
rxq->offloads = rx_conf->offloads;
return 0;
}
@@ -1646,6 +1714,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
{
struct e1000_hw *hw;
struct em_rx_queue *rxq;
struct rte_eth_rxmode *rxmode;
uint32_t rctl;
uint32_t rfctl;
uint32_t rxcsum;
@@ -1654,6 +1723,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
int ret;
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
rxmode = &dev->data->dev_conf.rxmode;
/*
* Make sure receives are disabled while setting
@@ -1714,8 +1784,8 @@ eth_em_rx_init(struct rte_eth_dev *dev)
* call to configure
*/
rxq->crc_len =
(uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
0 : ETHER_CRC_LEN);
(uint8_t)(dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_CRC_STRIP ? 0 : ETHER_CRC_LEN);
bus_addr = rxq->rx_ring_phys_addr;
E1000_WRITE_REG(hw, E1000_RDLEN(i),
@@ -1745,7 +1815,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
* to avoid splitting packets that don't fit into
* one buffer.
*/
if (dev->data->dev_conf.rxmode.jumbo_frame ||
if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ||
rctl_bsize < ETHER_MAX_LEN) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
@@ -1755,7 +1825,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
}
}
if (dev->data->dev_conf.rxmode.enable_scatter) {
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->rx_pkt_burst = eth_em_recv_scattered_pkts;
@@ -1768,7 +1838,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
*/
rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
if (dev->data->dev_conf.rxmode.hw_ip_checksum)
if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
rxcsum |= E1000_RXCSUM_IPOFL;
else
rxcsum &= ~E1000_RXCSUM_IPOFL;
@@ -1780,21 +1850,21 @@ eth_em_rx_init(struct rte_eth_dev *dev)
if ((hw->mac.type == e1000_ich9lan ||
hw->mac.type == e1000_pch2lan ||
hw->mac.type == e1000_ich10lan) &&
dev->data->dev_conf.rxmode.jumbo_frame == 1) {
rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3);
E1000_WRITE_REG(hw, E1000_ERT, 0x100 | (1 << 13));
}
if (hw->mac.type == e1000_pch2lan) {
if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
e1000_lv_jumbo_workaround_ich8lan(hw, TRUE);
else
e1000_lv_jumbo_workaround_ich8lan(hw, FALSE);
}
/* Setup the Receive Control Register. */
if (dev->data->dev_conf.rxmode.hw_strip_crc)
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)
rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
else
rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
@@ -1814,7 +1884,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
/*
* Configure support of jumbo frames, if any.
*/
if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
rctl |= E1000_RCTL_LPE;
else
rctl &= ~E1000_RCTL_LPE;
@@ -1894,6 +1964,7 @@ em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->scattered_rx = dev->data->scattered_rx;
qinfo->nb_desc = rxq->nb_rx_desc;
qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
qinfo->conf.offloads = rxq->offloads;
}
void
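em_check_rx_queue_offloads() above encodes two rules: every requested bit must appear in the queue or port capability set, and for port-supported bits the queue request must equal the current port configuration, because one queue can neither switch a port-wide offload on nor off by itself. A standalone restatement with hypothetical bit values:

#include <stdint.h>

#define OFF_PORT_WIDE 0x1ULL /* hypothetical: port-supported only */
#define OFF_PER_QUEUE 0x2ULL /* hypothetical: queue-supported */

static int
rx_queue_offloads_ok(uint64_t requested, uint64_t port_offloads,
		     uint64_t queue_supported, uint64_t port_supported)
{
	/* rule 1: no bit may be unsupported everywhere */
	if ((requested & (queue_supported | port_supported)) != requested)
		return 0;
	/* rule 2: port-wide bits must match the port configuration */
	if ((port_offloads ^ requested) & port_supported)
		return 0;
	return 1;
}

/* With port_offloads = OFF_PORT_WIDE, port_supported = OFF_PORT_WIDE,
 * queue_supported = OFF_PER_QUEUE:
 *   requested = OFF_PORT_WIDE | OFF_PER_QUEUE -> accepted
 *   requested = OFF_PER_QUEUE -> rejected: it would drop a port-wide bit
 */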

drivers/net/e1000/igb_ethdev.c

@@ -2148,11 +2148,9 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM;
dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev);
dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) |
dev_info->rx_queue_offload_capa;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_IPV4_CKSUM |
@@ -2222,6 +2220,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
.offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -2277,10 +2276,6 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM;
dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
@@ -2301,6 +2296,10 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
break;
}
dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev);
dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) |
dev_info->rx_queue_offload_capa;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
.pthresh = IGB_DEFAULT_RX_PTHRESH,
@@ -2309,6 +2308,7 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
.offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -2644,7 +2644,7 @@ igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
/* Update maximum packet length */
if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
E1000_WRITE_REG(hw, E1000_RLPML,
dev->data->dev_conf.rxmode.max_rx_pkt_len +
VLAN_TAG_SIZE);
@@ -2663,7 +2663,7 @@ igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
/* Update maximum packet length */
if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
E1000_WRITE_REG(hw, E1000_RLPML,
dev->data->dev_conf.rxmode.max_rx_pkt_len +
2 * VLAN_TAG_SIZE);
@@ -2672,22 +2672,25 @@ igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
static int
eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
struct rte_eth_rxmode *rxmode;
rxmode = &dev->data->dev_conf.rxmode;
if(mask & ETH_VLAN_STRIP_MASK){
if (dev->data->dev_conf.rxmode.hw_vlan_strip)
if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
igb_vlan_hw_strip_enable(dev);
else
igb_vlan_hw_strip_disable(dev);
}
if(mask & ETH_VLAN_FILTER_MASK){
if (dev->data->dev_conf.rxmode.hw_vlan_filter)
if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
igb_vlan_hw_filter_enable(dev);
else
igb_vlan_hw_filter_disable(dev);
}
if(mask & ETH_VLAN_EXTEND_MASK){
if (dev->data->dev_conf.rxmode.hw_vlan_extend)
if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
igb_vlan_hw_extend_enable(dev);
else
igb_vlan_hw_extend_disable(dev);
@@ -3189,14 +3192,14 @@ igbvf_dev_configure(struct rte_eth_dev *dev)
* Keep the persistent behavior the same as Host PF
*/
#ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
if (!conf->rxmode.hw_strip_crc) {
if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
conf->rxmode.hw_strip_crc = 1;
conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
}
#else
if (conf->rxmode.hw_strip_crc) {
if (conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
conf->rxmode.hw_strip_crc = 0;
conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
}
#endif
@@ -4438,10 +4441,12 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
/* switch to jumbo mode if needed */
if (frame_size > ETHER_MAX_LEN) {
dev->data->dev_conf.rxmode.jumbo_frame = 1;
dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
rctl |= E1000_RCTL_LPE;
} else {
dev->data->dev_conf.rxmode.jumbo_frame = 0;
dev->data->dev_conf.rxmode.offloads &=
~DEV_RX_OFFLOAD_JUMBO_FRAME;
rctl &= ~E1000_RCTL_LPE;
}
E1000_WRITE_REG(hw, E1000_RCTL, rctl);
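As in the em path, eth_igb_mtu_set() keeps rxmode.offloads coherent with the hardware jumbo state instead of toggling the removed jumbo_frame bit-field; the DEV_RX_OFFLOAD_JUMBO_FRAME bit and RCTL.LPE are always flipped together. Reduced to its core (a sketch; the stand-in macros below are illustrative, not the rte_ethdev.h values):

#include <stdint.h>

#define EX_MAX_STD_FRAME 1518         /* plays the role of ETHER_MAX_LEN */
#define EX_OFFLOAD_JUMBO (1ULL << 11) /* stand-in for DEV_RX_OFFLOAD_JUMBO_FRAME */

/* Same bookkeeping as eth_igb_mtu_set(): the offload flag follows the
 * frame size, and the caller sets/clears RCTL.LPE accordingly. */
static uint64_t
sync_jumbo_offload(uint64_t offloads, uint32_t frame_size)
{
	if (frame_size > EX_MAX_STD_FRAME)
		return offloads | EX_OFFLOAD_JUMBO;
	return offloads & ~EX_OFFLOAD_JUMBO;
}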

drivers/net/e1000/igb_rxtx.c

@@ -107,6 +107,7 @@ struct igb_rx_queue {
uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
uint32_t flags; /**< RX flags. */
uint64_t offloads; /**< offloads of DEV_RX_OFFLOAD_* */
};
/**
@@ -1593,6 +1594,61 @@ igb_reset_rx_queue(struct igb_rx_queue *rxq)
rxq->pkt_last_seg = NULL;
}
uint64_t
igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
{
uint64_t rx_offload_capa;
RTE_SET_USED(dev);
rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_CRC_STRIP |
DEV_RX_OFFLOAD_SCATTER;
return rx_offload_capa;
}
uint64_t
igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint64_t rx_queue_offload_capa;
switch (hw->mac.type) {
case e1000_vfadapt_i350:
/*
* As only one Rx queue can be used, let per queue offloading
* capability be same to per port queue offloading capability
* for better convenience.
*/
rx_queue_offload_capa = igb_get_rx_port_offloads_capa(dev);
break;
default:
rx_queue_offload_capa = 0;
}
return rx_queue_offload_capa;
}
static int
igb_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
{
uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
uint64_t queue_supported = igb_get_rx_queue_offloads_capa(dev);
uint64_t port_supported = igb_get_rx_port_offloads_capa(dev);
if ((requested & (queue_supported | port_supported)) != requested)
return 0;
if ((port_offloads ^ requested) & port_supported)
return 0;
return 1;
}
int
eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1606,6 +1662,19 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
struct e1000_hw *hw;
unsigned int size;
if (!igb_check_rx_queue_offloads(dev, rx_conf->offloads)) {
PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
" don't match port offloads 0x%" PRIx64
" or supported port offloads 0x%" PRIx64
" or supported queue offloads 0x%" PRIx64,
(void *)dev,
rx_conf->offloads,
dev->data->dev_conf.rxmode.offloads,
igb_get_rx_port_offloads_capa(dev),
igb_get_rx_queue_offloads_capa(dev));
return -ENOTSUP;
}
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/*
@@ -1630,6 +1699,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
RTE_CACHE_LINE_SIZE);
if (rxq == NULL)
return -ENOMEM;
rxq->offloads = rx_conf->offloads;
rxq->mb_pool = mp;
rxq->nb_rx_desc = nb_desc;
rxq->pthresh = rx_conf->rx_thresh.pthresh;
@@ -1644,8 +1714,8 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
rxq->port_id = dev->data->port_id;
rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
ETHER_CRC_LEN);
rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
/*
* Allocate RX ring hardware descriptors. A memzone large enough to
@@ -2227,6 +2297,7 @@ igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
int
eth_igb_rx_init(struct rte_eth_dev *dev)
{
struct rte_eth_rxmode *rxmode;
struct e1000_hw *hw;
struct igb_rx_queue *rxq;
uint32_t rctl;
@@ -2247,10 +2318,12 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
rctl = E1000_READ_REG(hw, E1000_RCTL);
E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
rxmode = &dev->data->dev_conf.rxmode;
/*
* Configure support of jumbo frames, if any.
*/
if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
rctl |= E1000_RCTL_LPE;
/*
@@ -2292,9 +2365,8 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
* Reset crc_len in case it was changed after queue setup by a
* call to configure
*/
rxq->crc_len =
(uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
0 : ETHER_CRC_LEN);
rxq->crc_len = (uint8_t)(dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_CRC_STRIP ? 0 : ETHER_CRC_LEN);
bus_addr = rxq->rx_ring_phys_addr;
E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
@@ -2362,7 +2434,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
}
if (dev->data->dev_conf.rxmode.enable_scatter) {
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
@@ -2406,16 +2478,24 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
rxcsum |= E1000_RXCSUM_PCSD;
/* Enable both L3/L4 rx checksum offload */
if (dev->data->dev_conf.rxmode.hw_ip_checksum)
rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
E1000_RXCSUM_CRCOFL);
if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
rxcsum |= E1000_RXCSUM_IPOFL;
else
rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
E1000_RXCSUM_CRCOFL);
rxcsum &= ~E1000_RXCSUM_IPOFL;
if (rxmode->offloads &
(DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
rxcsum |= E1000_RXCSUM_TUOFL;
else
rxcsum &= ~E1000_RXCSUM_TUOFL;
if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
rxcsum |= E1000_RXCSUM_CRCOFL;
else
rxcsum &= ~E1000_RXCSUM_CRCOFL;
E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
/* Setup the Receive Control Register. */
if (dev->data->dev_conf.rxmode.hw_strip_crc) {
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
/* set STRCRC bit in all queues */
@@ -2654,7 +2734,7 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
}
if (dev->data->dev_conf.rxmode.enable_scatter) {
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
@@ -2741,6 +2821,7 @@ igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
qinfo->conf.rx_drop_en = rxq->drop_en;
qinfo->conf.offloads = rxq->offloads;
}
void
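Because eth_em_rx_queue_setup() and eth_igb_rx_queue_setup() now record rx_conf->offloads in the queue structure, and em_rxq_info_get()/igb_rxq_info_get() export it, an application can read back what a configured queue actually carries. A small sketch, not part of this patch:

#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>

static void
print_rxq_offloads(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info qinfo;

	if (rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo) != 0)
		return;
	printf("port %" PRIu16 " rxq %" PRIu16 " offloads: 0x%" PRIx64 "\n",
	       port_id, queue_id, qinfo.conf.offloads);
}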