e1000: update RX/TX queue configuration

Queues are allocated by rte_ether, so the driver no longer allocates the
RX/TX queue-pointer arrays in its configure step. Provide rx/tx queue
release callbacks instead, switch the burst functions to the generic
void * queue handles, and clean up queues on start errors.

Signed-off-by: Intel
Author: Intel
Date:   2012-12-20 00:00:00 +01:00
Committed by: Thomas Monjalon

parent 7da9ffee83
commit 0197e3ecf5
3 changed files with 39 additions and 108 deletions
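
Note: with this change the queue-pointer arrays live in rte_ether, and the
PMD only fills individual slots through its setup callbacks. For context, a
minimal sketch of the application-side flow that drives those callbacks is
below. The port id, descriptor counts and mempool are placeholders, not
values from this commit, and the NULL queue configs rely on library
defaults (contemporary code passed explicit rte_eth_rxconf/rte_eth_txconf
structures).

#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_mempool.h>

static int
setup_port(uint8_t port_id, struct rte_mempool *mb_pool)
{
	static const struct rte_eth_conf port_conf; /* zeroed defaults */
	int ret;

	/* rte_ether allocates dev->data->rx_queues/tx_queues here;
	 * after this commit the PMD no longer does it in dev_configure. */
	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (ret < 0)
		return ret;

	/* Each setup call asks the PMD to create one queue and store it
	 * in the array slot owned by rte_ether. */
	ret = rte_eth_rx_queue_setup(port_id, 0, 128, rte_socket_id(),
			NULL, mb_pool);
	if (ret < 0)
		return ret;

	ret = rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL);
	if (ret < 0)
		return ret;

	return rte_eth_dev_start(port_id);
}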


@@ -85,10 +85,10 @@ struct e1000_adapter {
 	(&((struct e1000_adapter *)adapter)->shadow_vfta)
 
 /*
- * RX/TX function prototypes
+ * RX/TX IGB function prototypes
  */
-int igb_dev_tx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_queues);
-int igb_dev_rx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_queues);
+void eth_igb_tx_queue_release(void *txq);
+void eth_igb_rx_queue_release(void *rxq);
 void igb_dev_clear_queues(struct rte_eth_dev *dev);
 
 int eth_igb_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
@@ -104,13 +104,13 @@ int eth_igb_rx_init(struct rte_eth_dev *dev);
 
 void eth_igb_tx_init(struct rte_eth_dev *dev);
 
-uint16_t eth_igb_xmit_pkts(struct igb_tx_queue *txq, struct rte_mbuf **tx_pkts,
+uint16_t eth_igb_xmit_pkts(void *txq, struct rte_mbuf **tx_pkts,
 		uint16_t nb_pkts);
 
-uint16_t eth_igb_recv_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+uint16_t eth_igb_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts);
 
-uint16_t eth_igb_recv_scattered_pkts(struct igb_rx_queue *rxq,
+uint16_t eth_igb_recv_scattered_pkts(void *rxq,
 		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
 
 int eth_igbvf_rx_init(struct rte_eth_dev *dev);
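
Note: the prototypes switch from typed queue pointers to void * so they
match the generic burst-callback types through which rte_ether dispatches.
Roughly, paraphrasing rte_ethdev.h (not part of this diff):

typedef uint16_t (*eth_rx_burst_t)(void *rxq,
		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
typedef uint16_t (*eth_tx_burst_t)(void *txq,
		struct rte_mbuf **tx_pkts, uint16_t nb_pkts);

/* rte_eth_rx_burst() fetches the opaque handle from the array owned
 * by rte_ether and hands it back to the PMD, roughly: */
nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
		rx_pkts, nb_pkts);

With the void * signatures, the driver can assign eth_igb_recv_pkts and
eth_igb_xmit_pkts to these pointers without casts.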


@@ -57,8 +57,7 @@
 #include "e1000/e1000_api.h"
 #include "e1000_ethdev.h"
 
-static int eth_igb_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q,
-		uint16_t nb_tx_q);
+static int eth_igb_configure(struct rte_eth_dev *dev);
 static int eth_igb_start(struct rte_eth_dev *dev);
 static void eth_igb_stop(struct rte_eth_dev *dev);
 static void eth_igb_close(struct rte_eth_dev *dev);
@@ -177,7 +176,9 @@ static struct eth_dev_ops eth_igb_ops = {
 	.vlan_tpid_set        = eth_igb_vlan_tpid_set,
 	.vlan_offload_set     = eth_igb_vlan_offload_set,
 	.rx_queue_setup       = eth_igb_rx_queue_setup,
+	.rx_queue_release     = eth_igb_rx_queue_release,
 	.tx_queue_setup       = eth_igb_tx_queue_setup,
+	.tx_queue_release     = eth_igb_tx_queue_release,
 	.dev_led_on           = eth_igb_led_on,
 	.dev_led_off          = eth_igb_led_off,
 	.flow_ctrl_set        = eth_igb_flow_ctrl_set,
@@ -500,35 +501,15 @@ rte_igbvf_pmd_init(void)
 }
 
 static int
-eth_igb_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q, uint16_t nb_tx_q)
+eth_igb_configure(struct rte_eth_dev *dev)
 {
 	struct e1000_interrupt *intr =
 		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-	int diag;
 
 	PMD_INIT_LOG(DEBUG, ">>");
 
 	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
 
-	/* Allocate the array of pointers to RX structures */
-	diag = igb_dev_rx_queue_alloc(dev, nb_rx_q);
-	if (diag != 0) {
-		PMD_INIT_LOG(ERR, "ethdev port_id=%u allocation of array of %u"
-			" pointers to RX queues failed",
-			dev->data->port_id, nb_rx_q);
-		return diag;
-	}
-
-	/* Allocate the array of pointers to TX structures */
-	diag = igb_dev_tx_queue_alloc(dev, nb_tx_q);
-	if (diag != 0) {
-		PMD_INIT_LOG(ERR, "ethdev port_id=%u allocation of array of %u"
-			" pointers to TX queues failed",
-			dev->data->port_id, nb_tx_q);
-		return diag;
-	}
-
 	PMD_INIT_LOG(DEBUG, "<<");
 
 	return (0);
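
Note: the allocation logic deleted above is not lost; an equivalent,
PMD-agnostic version lives in rte_ether. A hedged sketch of that logic,
modelled on the removed code (illustrative names, not the verbatim
rte_ethdev.c internals):

static int
eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	void **rxq;
	uint16_t i;

	if (dev->data->rx_queues == NULL) {
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(void *) * nb_queues, CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL)
			return -ENOMEM;
	} else if (nb_queues != old_nb_queues) {
		/* Shrinking: ask the PMD to free surplus queues through
		 * the new rx_queue_release callback. */
		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(
					dev->data->rx_queues[i]);
		rxq = rte_realloc(dev->data->rx_queues,
				sizeof(void *) * nb_queues, CACHE_LINE_SIZE);
		if (rxq == NULL)
			return -ENOMEM;
		dev->data->rx_queues = rxq;
	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}

Because the array holds void *, one helper serves every PMD, which is why
the per-driver igb_dev_rx/tx_queue_alloc() copies could be removed.
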
@@ -539,7 +520,7 @@ eth_igb_start(struct rte_eth_dev *dev)
 {
 	struct e1000_hw *hw =
 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	int ret, i;
+	int ret, i, mask;
 
 	PMD_INIT_LOG(DEBUG, ">>");
@@ -566,7 +547,7 @@ eth_igb_start(struct rte_eth_dev *dev)
 
 	/* Initialize the hardware */
 	if (igb_hardware_init(hw)) {
 		PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
-		return (-1);
+		return (-EIO);
 	}
 
 	E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN);
@@ -580,6 +561,7 @@ eth_igb_start(struct rte_eth_dev *dev)
 	ret = eth_igb_rx_init(dev);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
+		igb_dev_clear_queues(dev);
 		return ret;
 	}
@@ -685,7 +667,8 @@ eth_igb_start(struct rte_eth_dev *dev)
 	PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u\n",
 			dev->data->dev_conf.link_speed,
 			dev->data->dev_conf.link_duplex, dev->data->port_id);
-	return -1;
+	igb_dev_clear_queues(dev);
+	return (-EINVAL);
 }
 
 /*********************************************************************
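
Note: the start path now reports negated errno values (-EIO, -EINVAL)
instead of a bare -1, and it releases queue resources via
igb_dev_clear_queues() before failing, so callers can decode the failure.
An application-side sketch:

	int ret = rte_eth_dev_start(port_id);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			"port %u: rte_eth_dev_start() failed: %s\n",
			(unsigned)port_id, strerror(-ret));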


@@ -315,9 +315,10 @@ tx_desc_vlan_flags_to_cmdtype(uint16_t ol_flags)
 }
 
 uint16_t
-eth_igb_xmit_pkts(struct igb_tx_queue *txq, struct rte_mbuf **tx_pkts,
+eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		uint16_t nb_pkts)
 {
+	struct igb_tx_queue *txq;
 	struct igb_tx_entry *sw_ring;
 	struct igb_tx_entry *txe, *txn;
 	volatile union e1000_adv_tx_desc *txr;
@@ -339,6 +340,7 @@ eth_igb_xmit_pkts(struct igb_tx_queue *txq, struct rte_mbuf **tx_pkts,
 	uint32_t ctx;
 	uint32_t vlan_macip_lens;
 
+	txq = tx_queue;
 	sw_ring = txq->sw_ring;
 	txr     = txq->tx_ring;
 	tx_id   = txq->tx_tail;
@@ -610,9 +612,10 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
 }
 
 uint16_t
-eth_igb_recv_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts)
 {
+	struct igb_rx_queue *rxq;
 	volatile union e1000_adv_rx_desc *rx_ring;
 	volatile union e1000_adv_rx_desc *rxdp;
 	struct igb_rx_entry *sw_ring;
@@ -631,6 +634,7 @@ eth_igb_recv_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 
 	nb_rx = 0;
 	nb_hold = 0;
+	rxq = rx_queue;
 	rx_id = rxq->rx_tail;
 	rx_ring = rxq->rx_ring;
 	sw_ring = rxq->sw_ring;
@@ -786,9 +790,10 @@ eth_igb_recv_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 }
 
 uint16_t
-eth_igb_recv_scattered_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts)
 {
+	struct igb_rx_queue *rxq;
 	volatile union e1000_adv_rx_desc *rx_ring;
 	volatile union e1000_adv_rx_desc *rxdp;
 	struct igb_rx_entry *sw_ring;
@@ -809,6 +814,7 @@ eth_igb_recv_scattered_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 
 	nb_rx = 0;
 	nb_hold = 0;
+	rxq = rx_queue;
 	rx_id = rxq->rx_tail;
 	rx_ring = rxq->rx_ring;
 	sw_ring = rxq->sw_ring;
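
Note: each burst function now takes the opaque handle and immediately
assigns it to a typed local (txq = tx_queue; rxq = rx_queue;), so the hot
path below the assignment is untouched. From the application the calls are
unchanged, e.g. a plain receive loop:

	struct rte_mbuf *pkts[32];
	uint16_t i, nb;

	/* Dispatches through dev->rx_pkt_burst into eth_igb_recv_pkts(). */
	nb = rte_eth_rx_burst(port_id, 0, pkts, 32);
	for (i = 0; i < nb; i++)
		rte_pktmbuf_free(pkts[i]);
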
@@ -1092,47 +1098,17 @@ igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
 static void
 igb_tx_queue_release(struct igb_tx_queue *txq)
 {
+	if (txq != NULL) {
+		igb_tx_queue_release_mbufs(txq);
+		rte_free(txq->sw_ring);
+		rte_free(txq);
+	}
 }
 
-int
-igb_dev_tx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_queues)
+void
+eth_igb_tx_queue_release(void *txq)
 {
-	uint16_t i, old_nb_queues = dev->data->nb_tx_queues;
-	struct igb_tx_queue **txq;
-
-	if (dev->data->tx_queues == NULL) {
-		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
-				sizeof(struct igb_tx_queue *) * nb_queues,
-				CACHE_LINE_SIZE);
-		if (dev->data->tx_queues == NULL) {
-			dev->data->nb_tx_queues = 0;
-			return -ENOMEM;
-		}
-	} else {
-		if (nb_queues < old_nb_queues)
-			for (i = nb_queues; i < old_nb_queues; i++)
-				igb_tx_queue_release(dev->data->tx_queues[i]);
-
-		if (nb_queues != old_nb_queues) {
-			txq = rte_realloc(dev->data->tx_queues,
-					sizeof(struct igb_tx_queue *) * nb_queues,
-					CACHE_LINE_SIZE);
-			if (txq == NULL)
-				return -ENOMEM;
-			else
-				dev->data->tx_queues = txq;
-
-			if (nb_queues > old_nb_queues)
-				memset(&(txq[old_nb_queues]), 0,
-					sizeof(struct igb_tx_queue *) *
-					(nb_queues - old_nb_queues));
-		}
-	}
-	dev->data->nb_tx_queues = nb_queues;
-
-	return 0;
+	igb_tx_queue_release(txq);
 }
 
 static void
@@ -1293,47 +1269,17 @@ igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
 static void
 igb_rx_queue_release(struct igb_rx_queue *rxq)
 {
+	if (rxq != NULL) {
+		igb_rx_queue_release_mbufs(rxq);
+		rte_free(rxq->sw_ring);
+		rte_free(rxq);
+	}
 }
 
-int
-igb_dev_rx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_queues)
+void
+eth_igb_rx_queue_release(void *rxq)
 {
-	uint16_t i, old_nb_queues = dev->data->nb_rx_queues;
-	struct igb_rx_queue **rxq;
-
-	if (dev->data->rx_queues == NULL) {
-		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
-				sizeof(struct igb_rx_queue *) * nb_queues,
-				CACHE_LINE_SIZE);
-		if (dev->data->rx_queues == NULL) {
-			dev->data->nb_rx_queues = 0;
-			return -ENOMEM;
-		}
-	} else {
-		for (i = nb_queues; i < old_nb_queues; i++) {
-			igb_rx_queue_release(dev->data->rx_queues[i]);
-			dev->data->rx_queues[i] = NULL;
-		}
-
-		if (nb_queues != old_nb_queues) {
-			rxq = rte_realloc(dev->data->rx_queues,
-					sizeof(struct igb_rx_queue *) * nb_queues,
-					CACHE_LINE_SIZE);
-			if (rxq == NULL)
-				return -ENOMEM;
-			else
-				dev->data->rx_queues = rxq;
-
-			if (nb_queues > old_nb_queues)
-				memset(&(rxq[old_nb_queues]), 0,
-					sizeof(struct igb_rx_queue *) *
-					(nb_queues - old_nb_queues));
-		}
-	}
-	dev->data->nb_rx_queues = nb_queues;
-
-	return 0;
+	igb_rx_queue_release(rxq);
 }
 
 static void
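
Note: both release paths follow the same pattern: a public void * entry
point (eth_igb_rx/tx_queue_release) matching struct eth_dev_ops wraps a
typed static helper that frees the mbufs, the software ring and the queue
itself. rte_ether invokes these callbacks when a port is reconfigured with
fewer queues, so shrinking no longer leaks. A sketch with placeholder
counts:

	rte_eth_dev_stop(port_id);
	/* Going from 4/4 to 1/1 queues releases queues 1-3 through the
	 * eth_igb_*_queue_release callbacks registered above. */
	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
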
@@ -1442,15 +1388,19 @@ igb_dev_clear_queues(struct rte_eth_dev *dev)
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		txq = dev->data->tx_queues[i];
+		if (txq != NULL) {
 			igb_tx_queue_release_mbufs(txq);
 			igb_reset_tx_queue(txq, dev);
+		}
 	}
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
+		if (rxq != NULL) {
 			igb_rx_queue_release_mbufs(rxq);
 			igb_reset_rx_queue(rxq);
+		}
 	}
 }
 
 /**
@@ -1647,10 +1597,8 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 
 		/* Allocate buffers for descriptor rings and set up queue */
 		ret = igb_alloc_rx_queue_mbufs(rxq);
-		if (ret) {
-			igb_dev_clear_queues(dev);
+		if (ret)
 			return ret;
-		}
 
 		/*
 		 * Reset crc_len in case it was changed after queue setup by a
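
Note: eth_igb_rx_init() no longer calls igb_dev_clear_queues() itself;
cleanup after a failed start is centralized in eth_igb_start() (see the
hunks above), so a descriptor-ring population failure simply propagates.
For illustration, the rough shape of igb_alloc_rx_queue_mbufs(), as a
reconstruction rather than the verbatim driver code:

static int
igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
{
	unsigned i;

	for (i = 0; i < rxq->nb_rx_desc; i++) {
		struct rte_mbuf *mbuf = rte_pktmbuf_alloc(rxq->mb_pool);

		if (mbuf == NULL) {
			PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
				"queue_id=%hu", rxq->queue_id);
			/* Just report; eth_igb_start() now owns the
			 * queue cleanup on this path. */
			return -ENOMEM;
		}
		/* ... program the descriptor's buffer addresses ... */
	}
	return 0;
}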