net/txgbe: support VF Rx/Tx

Configure the VF device and its Rx port. Initialize the receive and
transmit units, set the receive and transmit functions, and support
checking the status of Rx and Tx descriptors.

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
Author: Jiawen Wu <jiawenwu@trustnetic.com>
Date: 2021-02-25 16:08:51 +08:00
Committed-by: Ferruh Yigit
Parent: 7d9c9667b7
Commit: 92144bb36c
7 changed files with 250 additions and 2 deletions
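For context, here is a minimal sketch (not part of this patch) of how an application would drive the new VF data path through the 20.11-era ethdev API; the port and queue counts are illustrative, and the helper name is hypothetical:

#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_mempool.h>

/* Illustrative only: one Rx and one Tx queue on a txgbevf port. */
static int
setup_vf_port(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf conf = { 0 };
	int ret;

	/* txgbevf_dev_configure() runs behind this call */
	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret < 0)
		return ret;
	ret = rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
			NULL, mb_pool);
	if (ret < 0)
		return ret;
	ret = rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL);
	if (ret < 0)
		return ret;
	/* txgbevf_dev_rx_init()/txgbevf_dev_tx_init() are expected to run
	 * from the (separate) start path */
	return rte_eth_dev_start(port_id);
}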


@@ -7,6 +7,10 @@
Link status = Y
Unicast MAC filter = Y
Rx interrupt = Y
Jumbo frame = Y
Scattered Rx = Y
LRO = Y
TSO = Y
CRC offload = P
VLAN offload = P
QinQ offload = P
@@ -14,6 +18,8 @@ L3 checksum offload = P
L4 checksum offload = P
Inner L3 checksum = P
Inner L4 checksum = P
Rx descriptor status = Y
Tx descriptor status = Y
Multiprocess aware = Y
Linux = Y
ARMv8 = Y
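The Rx/Tx descriptor status features advertised above map to the generic ethdev helpers rte_eth_rx_descriptor_status() and rte_eth_tx_descriptor_status(); a minimal polling sketch, with illustrative port, queue, and offset values:

#include <stdio.h>
#include <rte_ethdev.h>

/* Probe the first descriptor of queue 0 on port 0. */
static void
poll_descriptor_status(void)
{
	int rx = rte_eth_rx_descriptor_status(0, 0, 0);
	int tx = rte_eth_tx_descriptor_status(0, 0, 0);

	if (rx == RTE_ETH_RX_DESC_DONE)
		printf("Rx descriptor holds a received packet\n");
	if (tx == RTE_ETH_TX_DESC_DONE)
		printf("Tx descriptor transmitted; slot can be reused\n");
}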


@@ -63,6 +63,10 @@ Please note that enabling debugging options may affect system performance.
Toggle display of transmit descriptor clean messages.
- ``RTE_LIBRTE_TXGBE_PF_DISABLE_STRIP_CRC`` (undefined by default)
Decide to enable or disable HW CRC in VF PMD.
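For illustration, the flag is consumed at compile time in the VF configure path, roughly as in the ethdev hunk later in this patch (the wrapper function is hypothetical):

#include <rte_ethdev.h>

/* Sketch of the compile-time effect (mirrors txgbevf_dev_configure()
 * later in this patch). */
static void
apply_vf_crc_policy(struct rte_eth_conf *conf)
{
#ifndef RTE_LIBRTE_TXGBE_PF_DISABLE_STRIP_CRC
	/* default build: HW CRC strip stays on, KEEP_CRC is refused */
	conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
#else
	/* flag defined: HW CRC strip is off, KEEP_CRC is forced on */
	conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
#endif
}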
Dynamic Logging Parameters
~~~~~~~~~~~~~~~~~~~~~~~~~~


@@ -33,6 +33,7 @@ s32 txgbe_init_ops_vf(struct txgbe_hw *hw)
/* RAR, Multicast, VLAN */
mac->set_rar = txgbe_set_rar_vf;
mac->set_uc_addr = txgbevf_set_uc_addr_vf;
mac->set_rlpml = txgbevf_rlpml_set_vf;
mac->max_tx_queues = 1;
mac->max_rx_queues = 1;
@@ -396,6 +397,29 @@ out:
return ret_val;
}
/**
* txgbevf_rlpml_set_vf - Set the maximum receive packet length
* @hw: pointer to the HW structure
* @max_size: value to assign to max frame size
**/
s32 txgbevf_rlpml_set_vf(struct txgbe_hw *hw, u16 max_size)
{
u32 msgbuf[2];
s32 retval;
msgbuf[0] = TXGBE_VF_SET_LPE;
msgbuf[1] = max_size;
retval = txgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
if (retval)
return retval;
if ((msgbuf[0] & TXGBE_VF_SET_LPE) &&
(msgbuf[0] & TXGBE_VT_MSGTYPE_NACK))
return TXGBE_ERR_MBX;
return 0;
}
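As a usage sketch: the exchange above is a two-word mailbox request (opcode, payload) answered in place, with a PF NACK surfaced as TXGBE_ERR_MBX. A hypothetical caller (not in this patch; assumes the driver's base headers such as txgbe_vf.h are in scope) could fall back to the standard frame size when the PF refuses:

/* Hypothetical wrapper: try the requested limit first, then fall back
 * to the default 1518-byte max frame size if the PF NACKs the request
 * (e.g. because of the PF MTU policy). */
static s32
txgbevf_rlpml_set_with_fallback(struct txgbe_hw *hw, u16 wanted)
{
	s32 err = txgbevf_rlpml_set_vf(hw, wanted);

	if (err == TXGBE_ERR_MBX)
		err = txgbevf_rlpml_set_vf(hw, 1518);
	return err;
}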
/**
* txgbevf_negotiate_api_version - Negotiate supported API version
* @hw: pointer to the HW structure


@@ -20,6 +20,7 @@ s32 txgbe_check_mac_link_vf(struct txgbe_hw *hw, u32 *speed,
s32 txgbe_set_rar_vf(struct txgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
u32 enable_addr);
s32 txgbevf_set_uc_addr_vf(struct txgbe_hw *hw, u32 index, u8 *addr);
s32 txgbevf_rlpml_set_vf(struct txgbe_hw *hw, u16 max_size);
int txgbevf_negotiate_api_version(struct txgbe_hw *hw, int api);
int txgbevf_get_queues(struct txgbe_hw *hw, unsigned int *num_tcs,
unsigned int *default_tc);


@@ -475,6 +475,10 @@ void txgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
void txgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_txq_info *qinfo);
int txgbevf_dev_rx_init(struct rte_eth_dev *dev);
void txgbevf_dev_tx_init(struct rte_eth_dev *dev);
uint16_t txgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);


@@ -17,6 +17,7 @@
static int txgbevf_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int txgbevf_dev_configure(struct rte_eth_dev *dev);
static int txgbevf_dev_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
static int txgbevf_dev_close(struct rte_eth_dev *dev);
@@ -110,6 +111,10 @@ eth_txgbevf_dev_init(struct rte_eth_dev *eth_dev)
PMD_INIT_FUNC_TRACE();
eth_dev->dev_ops = &txgbevf_eth_dev_ops;
eth_dev->rx_descriptor_status = txgbe_dev_rx_descriptor_status;
eth_dev->tx_descriptor_status = txgbe_dev_tx_descriptor_status;
eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
/* for secondary processes, we don't initialise any further as primary
* has already done this work. Only check we don't need a different
@@ -363,6 +368,43 @@ txgbevf_intr_enable(struct rte_eth_dev *dev)
intr->mask_misc = 0;
}
static int
txgbevf_dev_configure(struct rte_eth_dev *dev)
{
struct rte_eth_conf *conf = &dev->data->dev_conf;
struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
dev->data->port_id);
if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
/*
* VF has no ability to enable/disable HW CRC
* Keep the persistent behavior the same as Host PF
*/
#ifndef RTE_LIBRTE_TXGBE_PF_DISABLE_STRIP_CRC
if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
}
#else
if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
}
#endif
/*
* Initialize to TRUE. If any Rx queue doesn't meet the bulk
* allocation or vector Rx preconditions, we will reset it.
*/
adapter->rx_bulk_alloc_allowed = true;
return 0;
}
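From the application side, the code above means a KEEP_CRC request is silently dropped on the default build. A hedged sketch of how an application could avoid relying on it, using standard ethdev calls (the helper name is hypothetical):

#include <rte_ethdev.h>

/* Request DEV_RX_OFFLOAD_KEEP_CRC only if the port advertises it;
 * txgbevf (default build) does not, so the request would be cleared
 * by txgbevf_dev_configure() anyway. */
static void
maybe_request_keep_crc(uint16_t port_id, struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info info;

	if (rte_eth_dev_info_get(port_id, &info) != 0)
		return;
	if (info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC)
		conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
}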
static int
txgbevf_dev_close(struct rte_eth_dev *dev)
{
@@ -384,6 +426,9 @@ txgbevf_dev_close(struct rte_eth_dev *dev)
**/
txgbevf_remove_mac_addr(dev, 0);
dev->rx_pkt_burst = NULL;
dev->tx_pkt_burst = NULL;
/* Disable the interrupts for VF */
txgbevf_intr_disable(dev);
@@ -660,6 +705,7 @@ txgbevf_dev_interrupt_handler(void *param)
* operation have been implemented
*/
static const struct eth_dev_ops txgbevf_eth_dev_ops = {
.dev_configure = txgbevf_dev_configure,
.link_update = txgbevf_dev_link_update,
.dev_infos_get = txgbevf_dev_info_get,
.rx_queue_intr_enable = txgbevf_dev_rx_queue_intr_enable,


@@ -2837,8 +2837,10 @@ txgbe_rss_disable(struct rte_eth_dev *dev)
struct txgbe_hw *hw;
hw = TXGBE_DEV_HW(dev);
if (hw->mac.type == txgbe_mac_raptor_vf)
wr32m(hw, TXGBE_VFPLCFG, TXGBE_VFPLCFG_RSSENA, 0);
else
wr32m(hw, TXGBE_RACTL, TXGBE_RACTL_RSSENA, 0);
}
int
@@ -4722,6 +4724,167 @@ txgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}
/*
* [VF] Initializes Receive Unit.
*/
int __rte_cold
txgbevf_dev_rx_init(struct rte_eth_dev *dev)
{
struct txgbe_hw *hw;
struct txgbe_rx_queue *rxq;
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
uint64_t bus_addr;
uint32_t srrctl, psrtype;
uint16_t buf_size;
uint16_t i;
int ret;
PMD_INIT_FUNC_TRACE();
hw = TXGBE_DEV_HW(dev);
if (rte_is_power_of_2(dev->data->nb_rx_queues) == 0) {
PMD_INIT_LOG(ERR, "The number of Rx queues is invalid, "
"it should be a power of 2");
return -1;
}
if (dev->data->nb_rx_queues > hw->mac.max_rx_queues) {
PMD_INIT_LOG(ERR, "The number of Rx queues is invalid, "
"it should be equal to or less than %d",
hw->mac.max_rx_queues);
return -1;
}
/*
* When the VF driver issues a TXGBE_VF_RESET request, the PF driver
* disables VF packet reception if the PF MTU is > 1500.
* This is done to deal with the limitation that requires
* the PF and all VFs to share the same MTU.
* The PF driver then re-enables VF packet reception when
* the VF driver issues a TXGBE_VF_SET_LPE request.
* In the meantime, the VF device cannot be used, even if the VF driver
* and the guest VM network stack are ready to accept packets with a
* size up to the PF MTU.
* As a workaround to this PF behaviour, force the call to
* txgbevf_rlpml_set_vf even if jumbo frames are not used. This way,
* VF packet reception works in all cases.
*/
if (txgbevf_rlpml_set_vf(hw,
(uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len)) {
PMD_INIT_LOG(ERR, "Set max packet length to %d failed.",
dev->data->dev_conf.rxmode.max_rx_pkt_len);
return -EINVAL;
}
/*
* Assume no header split and no VLAN strip support
* on any Rx queue first.
*/
rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
/* Set PSR type for VF RSS according to max Rx queue */
psrtype = TXGBE_VFPLCFG_PSRL4HDR |
TXGBE_VFPLCFG_PSRL3HDR |
TXGBE_VFPLCFG_PSRL2HDR |
TXGBE_VFPLCFG_PSRTUNHDR |
TXGBE_VFPLCFG_PSRTUNMAC;
wr32(hw, TXGBE_VFPLCFG, TXGBE_VFPLCFG_PSR(psrtype));
/* Setup RX queues */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
/* Allocate buffers for descriptor rings */
ret = txgbe_alloc_rx_queue_mbufs(rxq);
if (ret)
return ret;
/* Setup the Base and Length of the Rx Descriptor Rings */
bus_addr = rxq->rx_ring_phys_addr;
wr32(hw, TXGBE_RXBAL(i),
(uint32_t)(bus_addr & BIT_MASK32));
wr32(hw, TXGBE_RXBAH(i),
(uint32_t)(bus_addr >> 32));
wr32(hw, TXGBE_RXRP(i), 0);
wr32(hw, TXGBE_RXWP(i), 0);
/* Configure the RXCFG register */
srrctl = TXGBE_RXCFG_RNGLEN(rxq->nb_rx_desc);
/* Set if packets are dropped when no descriptors are available */
if (rxq->drop_en)
srrctl |= TXGBE_RXCFG_DROP;
/*
* Configure the RX buffer size in the PKTLEN field of
* the RXCFG register of the queue.
* The value is in 1 KB resolution. Valid values can be from
* 1 KB to 16 KB.
*/
buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
RTE_PKTMBUF_HEADROOM);
buf_size = ROUND_UP(buf_size, 1 << 10);
srrctl |= TXGBE_RXCFG_PKTLEN(buf_size);
/*
* VF modification to write virtual function RXCFG register
*/
wr32(hw, TXGBE_RXCFG(i), srrctl);
if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
/* It adds dual VLAN length for supporting dual VLAN */
(rxmode->max_rx_pkt_len +
2 * TXGBE_VLAN_TAG_SIZE) > buf_size) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->data->scattered_rx = 1;
}
if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
}
/*
* Device configured with multiple RX queues.
*/
txgbe_dev_mq_rx_configure(dev);
txgbe_set_rx_function(dev);
return 0;
}
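To make the 1 KB PKTLEN rounding above concrete, here is a small worked sketch; the values assume a mempool created with RTE_MBUF_DEFAULT_BUF_SIZE (a 2176-byte data room including the 128-byte headroom), and the helper name is hypothetical:

#include <rte_mbuf.h>

/* Mirror of the buf_size computation in txgbevf_dev_rx_init():
 * 2176 - 128 = 2048 usable bytes, already a 1 KB multiple, so the
 * RXCFG PKTLEN field is programmed for 2 KB buffers. */
static uint16_t
rx_buf_size_1k(struct rte_mempool *mb_pool)
{
	uint16_t buf_size = (uint16_t)(rte_pktmbuf_data_room_size(mb_pool) -
			RTE_PKTMBUF_HEADROOM);

	return (uint16_t)((buf_size + 1023u) & ~1023u); /* round up to 1 KB */
}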
/*
* [VF] Initializes Transmit Unit.
*/
void __rte_cold
txgbevf_dev_tx_init(struct rte_eth_dev *dev)
{
struct txgbe_hw *hw;
struct txgbe_tx_queue *txq;
uint64_t bus_addr;
uint16_t i;
PMD_INIT_FUNC_TRACE();
hw = TXGBE_DEV_HW(dev);
/* Setup the Base and Length of the Tx Descriptor Rings */
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
bus_addr = txq->tx_ring_phys_addr;
wr32(hw, TXGBE_TXBAL(i),
(uint32_t)(bus_addr & BIT_MASK32));
wr32(hw, TXGBE_TXBAH(i),
(uint32_t)(bus_addr >> 32));
wr32m(hw, TXGBE_TXCFG(i), TXGBE_TXCFG_BUFLEN_MASK,
TXGBE_TXCFG_BUFLEN(txq->nb_tx_desc));
/* Setup the HW Tx Head and TX Tail descriptor pointers */
wr32(hw, TXGBE_TXRP(i), 0);
wr32(hw, TXGBE_TXWP(i), 0);
}
}
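These two routines are expected to be called from the VF start path (added separately); a hedged sketch of the assumed ordering, by analogy with the PF driver:

/* Hypothetical excerpt of a txgbevf start routine: program the Tx rings
 * first, then the Rx rings (which also sets the max receive packet
 * length via the PF mailbox). */
static int
txgbevf_start_sketch(struct rte_eth_dev *dev)
{
	int err;

	txgbevf_dev_tx_init(dev);
	err = txgbevf_dev_rx_init(dev);
	if (err)
		return err;
	/* per-queue start and interrupt setup would follow */
	return 0;
}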
int
txgbe_rss_conf_init(struct txgbe_rte_flow_rss_conf *out,
const struct rte_flow_action_rss *in)