net/e1000: convert to new Tx offloads API
Ethdev Tx offloads API has changed since:

commit cba7f53b71 ("ethdev: introduce Tx queue offloads API")

This commit adds support for the new Tx offloads API.
Signed-off-by: Wei Dai <wei.dai@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
commit e5c05e6590 (parent ef990fb56e)
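For context: under the new API an application requests port-level Tx offloads through rte_eth_conf.txmode.offloads and per-queue Tx offloads through rte_eth_txconf.offloads. A minimal application-side sketch, not part of this commit (port id, queue counts and ring size are illustrative assumptions, error handling trimmed):

#include <rte_ethdev.h>

/* Minimal sketch: request Tx offloads under the new API.
 * Port id, ring size and queue counts are assumptions. */
static int
setup_port_tx_offloads(uint16_t port_id)
{
	struct rte_eth_conf conf = { 0 };
	struct rte_eth_txconf txconf;
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Port-level Tx offloads go into txmode.offloads... */
	conf.txmode.offloads = DEV_TX_OFFLOAD_VLAN_INSERT |
			       DEV_TX_OFFLOAD_IPV4_CKSUM;
	if ((conf.txmode.offloads & dev_info.tx_offload_capa) !=
			conf.txmode.offloads)
		return -1;	/* a requested offload is unsupported */

	if (rte_eth_dev_configure(port_id, 1, 1, &conf) != 0)
		return -1;

	/* ...and per-queue offloads into rte_eth_txconf.offloads.
	 * e1000 expects the queue set to match the port set. */
	txconf = dev_info.default_txconf;
	txconf.offloads = conf.txmode.offloads;
	return rte_eth_tx_queue_setup(port_id, 0, 512,
			rte_eth_dev_socket_id(port_id), &txconf);
}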
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -373,6 +373,9 @@ int eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset);
 int eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset);
 int eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset);
 
+uint64_t igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
+uint64_t igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);
+
 int eth_igb_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 		uint16_t nb_tx_desc, unsigned int socket_id,
 		const struct rte_eth_txconf *tx_conf);
@@ -447,6 +450,9 @@ int eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset);
 int eth_em_rx_descriptor_status(void *rx_queue, uint16_t offset);
 int eth_em_tx_descriptor_status(void *tx_queue, uint16_t offset);
 
+uint64_t em_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
+uint64_t em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);
+
 int eth_em_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 		uint16_t nb_tx_desc, unsigned int socket_id,
 		const struct rte_eth_txconf *tx_conf);
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -453,6 +453,7 @@ eth_em_configure(struct rte_eth_dev *dev)
 		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 	struct rte_eth_dev_info dev_info;
 	uint64_t rx_offloads;
+	uint64_t tx_offloads;
 
 	PMD_INIT_FUNC_TRACE();
 	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
@@ -465,6 +466,13 @@ eth_em_configure(struct rte_eth_dev *dev)
 			rx_offloads, dev_info.rx_offload_capa);
 		return -ENOTSUP;
 	}
+	tx_offloads = dev->data->dev_conf.txmode.offloads;
+	if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
+		PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
+			    "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+			    tx_offloads, dev_info.tx_offload_capa);
+		return -ENOTSUP;
+	}
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1066,11 +1074,6 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
 	dev_info->max_rx_pktlen = em_get_max_pktlen(dev);
 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
-	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM;
 
 	/*
 	 * Starting with 631xESB hw supports 2 TX/RX queues per port.
@@ -1095,6 +1098,9 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->rx_queue_offload_capa = em_get_rx_queue_offloads_capa(dev);
 	dev_info->rx_offload_capa = em_get_rx_port_offloads_capa(dev) |
 				    dev_info->rx_queue_offload_capa;
+	dev_info->tx_queue_offload_capa = em_get_tx_queue_offloads_capa(dev);
+	dev_info->tx_offload_capa = em_get_tx_port_offloads_capa(dev) |
+				    dev_info->tx_queue_offload_capa;
 
 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
 		.nb_max = E1000_MAX_RING_DESC,
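The port-level check added to eth_em_configure() above is a plain bitmask containment test: every requested bit must be present in the reported capability mask. A standalone illustration (mask values invented for the example):

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint64_t capa = 0x0f;	/* invented: device supports bits 0-3 */
	uint64_t ok_req = 0x05;	/* bits 0 and 2: contained in capa */
	uint64_t bad_req = 0x15;	/* bit 4: not supported */

	assert((ok_req & capa) == ok_req);	/* accepted */
	assert((bad_req & capa) != bad_req);	/* the -ENOTSUP path */
	return 0;
}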
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -164,6 +164,7 @@ struct em_tx_queue {
 	uint8_t                wthresh;  /**< Write-back threshold register. */
 	struct em_ctx_info ctx_cache;
 	/**< Hardware context history.*/
+	uint64_t               offloads; /**< offloads of DEV_TX_OFFLOAD_* */
 };
 
 #if 1
@@ -1152,6 +1153,52 @@ em_reset_tx_queue(struct em_tx_queue *txq)
 	memset((void*)&txq->ctx_cache, 0, sizeof (txq->ctx_cache));
 }
 
+uint64_t
+em_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+	uint64_t tx_offload_capa;
+
+	RTE_SET_USED(dev);
+	tx_offload_capa =
+		DEV_TX_OFFLOAD_VLAN_INSERT |
+		DEV_TX_OFFLOAD_IPV4_CKSUM |
+		DEV_TX_OFFLOAD_UDP_CKSUM |
+		DEV_TX_OFFLOAD_TCP_CKSUM;
+
+	return tx_offload_capa;
+}
+
+uint64_t
+em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+	uint64_t tx_queue_offload_capa;
+
+	/*
+	 * As only one Tx queue can be used, make the per-queue offload
+	 * capability identical to the per-port capability for convenience.
+	 */
+	tx_queue_offload_capa = em_get_tx_port_offloads_capa(dev);
+
+	return tx_queue_offload_capa;
+}
+
+static int
+em_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
+	uint64_t queue_supported = em_get_tx_queue_offloads_capa(dev);
+	uint64_t port_supported = em_get_tx_port_offloads_capa(dev);
+
+	if ((requested & (queue_supported | port_supported)) != requested)
+		return 0;
+
+	if ((port_offloads ^ requested) & port_supported)
+		return 0;
+
+	return 1;
+}
+
 int
 eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 		uint16_t queue_idx,
@@ -1167,6 +1214,19 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	if (!em_check_tx_queue_offloads(dev, tx_conf->offloads)) {
+		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
+			" don't match port offloads 0x%" PRIx64
+			" or supported port offloads 0x%" PRIx64
+			" or supported queue offloads 0x%" PRIx64,
+			(void *)dev,
+			tx_conf->offloads,
+			dev->data->dev_conf.txmode.offloads,
+			em_get_tx_port_offloads_capa(dev),
+			em_get_tx_queue_offloads_capa(dev));
+		return -ENOTSUP;
+	}
+
 	/*
 	 * Validate number of transmit descriptors.
 	 * It must not exceed hardware maximum, and must be multiple
@@ -1270,6 +1330,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	em_reset_tx_queue(txq);
 
 	dev->data->tx_queues[queue_idx] = txq;
+	txq->offloads = tx_conf->offloads;
 	return 0;
 }
 
@@ -1982,4 +2043,5 @@ em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
 	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
 	qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+	qinfo->conf.offloads = txq->offloads;
 }
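That completes the em half. Note the reporting convention it establishes, mirrored below for igb: dev_info->tx_offload_capa is filled with the union of port-level and queue-level capabilities, while dev_info->tx_queue_offload_capa holds only what an individual queue may enable. A hypothetical probe (assumes a valid, probed port id):

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: query the two Tx capability sets the driver now reports. */
static void
dump_tx_offload_capa(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	printf("port  Tx offload capa: 0x%" PRIx64 "\n",
	       dev_info.tx_offload_capa);
	printf("queue Tx offload capa: 0x%" PRIx64 "\n",
	       dev_info.tx_queue_offload_capa);
}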
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -2151,13 +2151,9 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev);
 	dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) |
 				    dev_info->rx_queue_offload_capa;
-	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO;
+	dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev);
+	dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) |
+				    dev_info->tx_queue_offload_capa;
 
 	switch (hw->mac.type) {
 	case e1000_82575:
@@ -2230,6 +2226,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 			.wthresh = IGB_DEFAULT_TX_WTHRESH,
 		},
 		.txq_flags = 0,
+		.offloads = 0,
 	};
 
 	dev_info->rx_desc_lim = rx_desc_lim;
@@ -2299,6 +2296,9 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev);
 	dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) |
 				    dev_info->rx_queue_offload_capa;
+	dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev);
+	dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) |
+				    dev_info->tx_queue_offload_capa;
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -2318,6 +2318,7 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 			.wthresh = IGB_DEFAULT_TX_WTHRESH,
 		},
 		.txq_flags = 0,
+		.offloads = 0,
 	};
 
 	dev_info->rx_desc_lim = rx_desc_lim;
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -181,6 +181,7 @@ struct igb_tx_queue {
 	/**< Start context position for transmit queue. */
 	struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
 	/**< Hardware context history.*/
+	uint64_t               offloads; /**< offloads of DEV_TX_OFFLOAD_* */
 };
 
 #if 1
@@ -1448,6 +1449,48 @@ igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
 	igb_reset_tx_queue_stat(txq);
 }
 
+uint64_t
+igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+	uint64_t tx_offload_capa;
+
+	RTE_SET_USED(dev);
+	tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
+			  DEV_TX_OFFLOAD_IPV4_CKSUM |
+			  DEV_TX_OFFLOAD_UDP_CKSUM |
+			  DEV_TX_OFFLOAD_TCP_CKSUM |
+			  DEV_TX_OFFLOAD_SCTP_CKSUM |
+			  DEV_TX_OFFLOAD_TCP_TSO;
+
+	return tx_offload_capa;
+}
+
+uint64_t
+igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+	uint64_t tx_queue_offload_capa;
+
+	tx_queue_offload_capa = igb_get_tx_port_offloads_capa(dev);
+
+	return tx_queue_offload_capa;
+}
+
+static int
+igb_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
+	uint64_t queue_supported = igb_get_tx_queue_offloads_capa(dev);
+	uint64_t port_supported = igb_get_tx_port_offloads_capa(dev);
+
+	if ((requested & (queue_supported | port_supported)) != requested)
+		return 0;
+
+	if ((port_offloads ^ requested) & port_supported)
+		return 0;
+
+	return 1;
+}
+
 int
 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 		uint16_t queue_idx,
@@ -1460,6 +1503,19 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	struct e1000_hw     *hw;
 	uint32_t size;
 
+	if (!igb_check_tx_queue_offloads(dev, tx_conf->offloads)) {
+		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
+			" don't match port offloads 0x%" PRIx64
+			" or supported port offloads 0x%" PRIx64
+			" or supported queue offloads 0x%" PRIx64,
+			(void *)dev,
+			tx_conf->offloads,
+			dev->data->dev_conf.txmode.offloads,
+			igb_get_tx_port_offloads_capa(dev),
+			igb_get_tx_queue_offloads_capa(dev));
+		return -ENOTSUP;
+	}
+
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	/*
@@ -1543,6 +1599,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	dev->tx_pkt_burst = eth_igb_xmit_pkts;
 	dev->tx_pkt_prepare = &eth_igb_prep_pkts;
 	dev->data->tx_queues[queue_idx] = txq;
+	txq->offloads = tx_conf->offloads;
 
 	return 0;
 }
@@ -2837,6 +2894,7 @@ igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
 	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
 	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+	qinfo->conf.offloads = txq->offloads;
 }
 
 int
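A closing note on em_check_tx_queue_offloads() and igb_check_tx_queue_offloads(): the first test rejects any bit the device supports at neither level; the second rejects a queue request that disagrees with the port configuration on a port-level bit, since a queue can neither add nor drop a pure port offload. A self-contained restatement with invented bit values (in e1000 itself the two capability masks are identical, so the split below is purely illustrative):

#include <stdint.h>
#include <stdio.h>

#define OFF_A (1ULL << 0)	/* invented: per-queue and per-port */
#define OFF_B (1ULL << 1)	/* invented: per-port only */

/* Restatement of the driver's validation pattern. */
static int
check_tx_queue_offloads(uint64_t requested, uint64_t port_offloads,
		uint64_t queue_supported, uint64_t port_supported)
{
	/* Reject bits unsupported at both levels. */
	if ((requested & (queue_supported | port_supported)) != requested)
		return 0;
	/* Port-level bits must equal the port configuration. */
	if ((port_offloads ^ requested) & port_supported)
		return 0;
	return 1;
}

int
main(void)
{
	uint64_t port_cfg = OFF_B;	/* port configured with B only */

	/* Prints 1: the queue repeats the port-level B. */
	printf("%d\n", check_tx_queue_offloads(OFF_B, port_cfg,
			OFF_A, OFF_A | OFF_B));
	/* Prints 0: the queue tries to drop the port-level B. */
	printf("%d\n", check_tx_queue_offloads(0, port_cfg,
			OFF_A, OFF_A | OFF_B));
	return 0;
}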