net/qede: fix port (re)configuration
Some applications set port configuration parameters, such as promiscuous mode, before calling dev_start(). This results in a firmware exception, because the configuration internally translates into sending a VPORT-UPDATE ramrod before the VPORT-START ramrod, which the firmware considers illegal. The fix is to send the VPORT-START ramrod earlier, in dev_configure(), rather than deferring it to dev_start().
This requires some reshuffling of the code to move the sending of
VPORT-START from qede_start_queues() to qede_dev_configure()
and of VPORT-STOP from qede_stop_queues() to qede_dev_stop().
This sequence change also exposes a flaw in the port restart
flows: the fastpath resource allocation routine qede_init_fp()
has to be split so that the appropriate action is taken based on
the current port state, e.g. the status block is not re-initialized
in the port restart case. This change ensures that port start/stop
can be paired.
A new port state QEDE_DEV_CONFIG is added to distinguish a port
started from scratch from a port requiring a reconfiguration (such as an MTU change).
The function qede_config_rx_mode() is removed, since the individual
port configurations are replayed anyway on a restart.
Fixes: 2ea6f76aff ("qede: add core driver")
Signed-off-by: Harish Patil <harish.patil@qlogic.com>
commit dbac54c2d3 (parent cfe28a9885)
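In essence, the patch reorders the ethdev callbacks so that VPORT-START is issued from dev_configure() and a small state machine (init/config/start/stop) decides whether a restart needs a vport teardown first. The sketch below is only an illustration of that ordering under those assumptions; the helper names (send_vport_start_ramrod() and friends) are stand-ins for the real driver calls (qdev->ops->vport_start(), qede_alloc_fp_resc(), ...) and do not exist in the driver.

/* Illustrative flow sketch, not driver code. */
enum port_state { DEV_INIT, DEV_CONFIG, DEV_START, DEV_STOP };
static enum port_state state = DEV_INIT;

static void send_vport_start_ramrod(void) { /* stands in for qdev->ops->vport_start() */ }
static void send_vport_stop_ramrod(void)  { /* stands in for qdev->ops->vport_stop()  */ }
static void alloc_fp_resources(void)      { /* stands in for qede_alloc_fp_resc()     */ }
static void dealloc_fp_resources(void)    { /* stands in for qede_dealloc_fp_resc()   */ }
static void start_rx_tx_queues(void)      { /* stands in for qede_start_queues()      */ }
static void stop_rx_tx_queues(void)       { /* stands in for qede_stop_queues()       */ }

static int dev_configure(void)
{
	if (state != DEV_INIT) {
		/* Port restart (e.g. MTU change): stop the old vport and
		 * release fastpath resources before re-creating them. */
		send_vport_stop_ramrod();
		dealloc_fp_resources();
	}
	alloc_fp_resources();      /* status blocks must exist before VPORT-START (VF case) */
	send_vport_start_ramrod(); /* subsequent VPORT-UPDATEs (promisc etc.) are now legal */
	state = DEV_CONFIG;
	return 0;
}

static int dev_start(void)
{
	start_rx_tx_queues();      /* VPORT-START is no longer sent from here */
	state = DEV_START;
	return 0;
}

static void dev_stop(void)
{
	stop_rx_tx_queues();       /* queue stop no longer sends VPORT-STOP */
	state = DEV_STOP;
}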
@@ -40,8 +40,6 @@ qed_start_vport(struct ecore_dev *edev, struct qed_start_vport_params *p_params)
return rc;
}

ecore_hw_start_fastpath(p_hwfn);

DP_VERBOSE(edev, ECORE_MSG_SPQ,
"Started V-PORT %d with MTU %d\n",
p_params->vport_id, p_params->mtu);
@@ -295,6 +293,17 @@ static int qed_fastpath_stop(struct ecore_dev *edev)
return 0;
}

static void qed_fastpath_start(struct ecore_dev *edev)
{
struct ecore_hwfn *p_hwfn;
int i;

for_each_hwfn(edev, i) {
p_hwfn = &edev->hwfns[i];
ecore_hw_start_fastpath(p_hwfn);
}
}

static void
qed_get_vport_stats(struct ecore_dev *edev, struct ecore_eth_stats *stats)
{
@@ -444,6 +453,7 @@ static const struct qed_eth_ops qed_eth_ops_pass = {
INIT_STRUCT_FIELD(q_tx_stop, &qed_stop_txq),
INIT_STRUCT_FIELD(eth_cqe_completion, &qed_fp_cqe_completion),
INIT_STRUCT_FIELD(fastpath_stop, &qed_fastpath_stop),
INIT_STRUCT_FIELD(fastpath_start, &qed_fastpath_start),
INIT_STRUCT_FIELD(get_vport_stats, &qed_get_vport_stats),
INIT_STRUCT_FIELD(filter_config, &qed_configure_filter),
};

@@ -158,6 +158,8 @@ struct qed_eth_ops {

int (*fastpath_stop)(struct ecore_dev *edev);

void (*fastpath_start)(struct ecore_dev *edev);

void (*get_vport_stats)(struct ecore_dev *edev,
struct ecore_eth_stats *stats);
@@ -348,52 +348,6 @@ static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)
}
}

void qede_config_rx_mode(struct rte_eth_dev *eth_dev)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
/* TODO: - QED_FILTER_TYPE_UCAST */
enum qed_filter_rx_mode_type accept_flags =
QED_FILTER_RX_MODE_TYPE_REGULAR;
struct qed_filter_params rx_mode;
int rc;

/* Configure the struct for the Rx mode */
memset(&rx_mode, 0, sizeof(struct qed_filter_params));
rx_mode.type = QED_FILTER_TYPE_RX_MODE;

rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_REPLACE,
eth_dev->data->mac_addrs[0].addr_bytes);
if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) {
accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
} else {
rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
eth_dev->data->
mac_addrs[0].addr_bytes);
if (rc) {
DP_ERR(edev, "Unable to add filter\n");
return;
}
}

/* take care of VLAN mode */
if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) {
qede_config_accept_any_vlan(qdev, true);
} else if (!qdev->non_configured_vlans) {
/* If we dont have non-configured VLANs and promisc
* is not set, then check if we need to disable
* accept_any_vlan mode.
* Because in this case, accept_any_vlan mode is set
* as part of IFF_RPOMISC flag handling.
*/
qede_config_accept_any_vlan(qdev, false);
}
rx_mode.filter.accept_flags = accept_flags;
rc = qdev->ops->filter_config(edev, &rx_mode);
if (rc)
DP_ERR(edev, "Filter config failed rc=%d\n", rc);
}

static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping)
{
struct qed_update_vport_params vport_update_params;
@@ -488,11 +442,39 @@ static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
return rc;
}

static int qede_init_vport(struct qede_dev *qdev)
{
struct ecore_dev *edev = &qdev->edev;
struct qed_start_vport_params start = {0};
int rc;

start.remove_inner_vlan = 1;
start.gro_enable = 0;
start.mtu = ETHER_MTU + QEDE_ETH_OVERHEAD;
start.vport_id = 0;
start.drop_ttl0 = false;
start.clear_stats = 1;
start.handle_ptp_pkts = 0;

rc = qdev->ops->vport_start(edev, &start);
if (rc) {
DP_ERR(edev, "Start V-PORT failed %d\n", rc);
return rc;
}

DP_INFO(edev,
"Start vport ramrod passed, vport_id = %d, MTU = %u\n",
start.vport_id, ETHER_MTU);

return 0;
}

static int qede_dev_configure(struct rte_eth_dev *eth_dev)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
int rc;

PMD_INIT_FUNC_TRACE(edev);
@@ -517,11 +499,7 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
qdev->fp_num_rx = eth_dev->data->nb_rx_queues;
qdev->num_queues = qdev->fp_num_tx + qdev->fp_num_rx;

/* Initial state */
qdev->state = QEDE_CLOSE;

/* Sanity checks and throw warnings */

if (rxmode->enable_scatter == 1) {
DP_ERR(edev, "RX scatter packets is not supported\n");
return -EINVAL;
@@ -539,16 +517,33 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
"in hw\n");

/* Check for the port restart case */
if (qdev->state != QEDE_DEV_INIT) {
rc = qdev->ops->vport_stop(edev, 0);
if (rc != 0)
return rc;
qede_dealloc_fp_resc(eth_dev);
}

DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
QEDE_RSS_CNT(qdev), qdev->num_tc);
/* Fastpath status block should be initialized before sending
* VPORT-START in the case of VF. Anyway, do it for both VF/PF.
*/
rc = qede_alloc_fp_resc(qdev);
if (rc != 0)
return rc;

DP_INFO(edev, "my_id %u rel_pf_id %u abs_pf_id %u"
" port %u first_on_engine %d\n",
edev->hwfns[0].my_id,
edev->hwfns[0].rel_pf_id,
edev->hwfns[0].abs_pf_id,
edev->hwfns[0].port_id, edev->hwfns[0].first_on_engine);
/* Issue VPORT-START with default config values to allow
* other port configurations early on.
*/
rc = qede_init_vport(qdev);
if (rc != 0)
return rc;

/* Add primary mac for PF */
if (IS_PF(edev))
qede_mac_addr_set(eth_dev, &qdev->primary_mac);

qdev->state = QEDE_DEV_CONFIG;

return 0;
}
@@ -719,8 +714,9 @@ static void qede_poll_sp_sb_cb(void *param)

static void qede_dev_close(struct rte_eth_dev *eth_dev)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
int rc;

PMD_INIT_FUNC_TRACE(edev);

@@ -729,16 +725,16 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
* by the app without reconfiguration. However, in dev_close() we
* can release all the resources and device can be brought up newly
*/
if (qdev->state != QEDE_STOP)
if (qdev->state != QEDE_DEV_STOP)
qede_dev_stop(eth_dev);
else
DP_INFO(edev, "Device is already stopped\n");

qede_free_mem_load(qdev);
rc = qdev->ops->vport_stop(edev, 0);
if (rc != 0)
DP_ERR(edev, "Failed to stop VPORT\n");

qede_free_fp_arrays(qdev);

qede_dev_set_link_state(eth_dev, false);
qede_dealloc_fp_resc(eth_dev);

qdev->ops->common->slowpath_stop(edev);

@@ -752,7 +748,7 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
if (edev->num_hwfns > 1)
rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);

qdev->state = QEDE_CLOSE;
qdev->state = QEDE_DEV_INIT; /* Go back to init state */
}

static void
@@ -1388,6 +1384,8 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
do_once = false;
}

adapter->state = QEDE_DEV_INIT;

DP_NOTICE(edev, false, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
adapter->primary_mac.addr_bytes[0],
adapter->primary_mac.addr_bytes[1],
@@ -109,10 +109,11 @@
extern char fw_file[];

/* Port/function states */
enum dev_state {
QEDE_START,
QEDE_STOP,
QEDE_CLOSE
enum qede_dev_state {
QEDE_DEV_INIT, /* Init the chip and Slowpath */
QEDE_DEV_CONFIG, /* Create Vport/Fastpath resources */
QEDE_DEV_START, /* Start RX/TX queues, enable traffic */
QEDE_DEV_STOP, /* Deactivate vport and stop traffic */
};

struct qed_int_param {
@@ -148,7 +149,7 @@ struct qede_dev {
uint8_t fp_num_tx;
uint8_t fp_num_rx;

enum dev_state state;
enum qede_dev_state state;

/* Vlans */
osal_list_t vlan_list;
@@ -165,6 +166,5 @@ struct qede_dev {
int qed_fill_eth_dev_info(struct ecore_dev *edev,
struct qed_dev_eth_info *info);
int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up);
void qede_config_rx_mode(struct rte_eth_dev *eth_dev);

#endif /* _QEDE_ETHDEV_H_ */
@@ -324,11 +324,10 @@ qede_tx_queue_setup(struct rte_eth_dev *dev,
}

/* This function inits fp content and resets the SB, RXQ and TXQ arrays */
static void qede_init_fp(struct rte_eth_dev *eth_dev)
static void qede_init_fp(struct qede_dev *qdev)
{
struct qede_fastpath *fp;
uint8_t i, rss_id, index, tc;
struct qede_dev *qdev = eth_dev->data->dev_private;
uint8_t i, rss_id, tc;
int fp_rx = qdev->fp_num_rx, rxq = 0, txq = 0;

memset((void *)qdev->fp_array, 0, (QEDE_QUEUE_CNT(qdev) *
@@ -343,30 +342,9 @@ static void qede_init_fp(struct rte_eth_dev *eth_dev)
} else{
fp->type = QEDE_FASTPATH_TX;
}
}

for_each_queue(i) {
fp = &qdev->fp_array[i];
fp->qdev = qdev;
fp->id = i;

/* Point rxq to generic rte queues that was created
* as part of queue creation.
*/
if (fp->type & QEDE_FASTPATH_RX) {
fp->rxq = eth_dev->data->rx_queues[i];
fp->rxq->queue_id = rxq++;
}
fp->sb_info = &qdev->sb_array[i];

if (fp->type & QEDE_FASTPATH_TX) {
for (tc = 0; tc < qdev->num_tc; tc++) {
index = tc * QEDE_TSS_CNT(qdev) + txq;
fp->txqs[tc] = eth_dev->data->tx_queues[index];
fp->txqs[tc]->queue_id = index;
}
txq++;
}
snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", "qdev", i);
}

@@ -444,6 +422,39 @@ qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
return 0;
}

int qede_alloc_fp_resc(struct qede_dev *qdev)
{
struct qede_fastpath *fp;
int rc, i;

if (qdev->fp_array)
qede_free_fp_arrays(qdev);

rc = qede_alloc_fp_array(qdev);
if (rc != 0)
return rc;

qede_init_fp(qdev);

for (i = 0; i < QEDE_QUEUE_CNT(qdev); i++) {
fp = &qdev->fp_array[i];
if (qede_alloc_mem_sb(qdev, fp->sb_info, i)) {
qede_free_fp_arrays(qdev);
return -ENOMEM;
}
}

return 0;
}

void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);

qede_free_mem_load(eth_dev);
qede_free_fp_arrays(qdev);
}

static inline void
qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
@@ -564,43 +575,21 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
struct qed_update_vport_rss_params *rss_params = &qdev->rss_params;
struct qed_dev_info *qed_info = &qdev->dev_info.common;
struct qed_update_vport_params vport_update_params;
struct qed_start_vport_params start = { 0 };
struct qede_tx_queue *txq;
struct qede_fastpath *fp;
dma_addr_t p_phys_table;
int txq_index;
uint16_t page_cnt;
int vlan_removal_en = 1;
int rc, tc, i;

if (!qdev->fp_num_rx) {
DP_ERR(edev,
"Cannot update V-VPORT as active as "
"there are no Rx queues\n");
return -EINVAL;
}

start.remove_inner_vlan = vlan_removal_en;
start.gro_enable = !qdev->gro_disable;
start.mtu = qdev->mtu;
start.vport_id = 0;
start.drop_ttl0 = true;
start.clear_stats = clear_stats;

rc = qdev->ops->vport_start(edev, &start);
if (rc) {
DP_ERR(edev, "Start V-PORT failed %d\n", rc);
return rc;
}

DP_INFO(edev,
"Start vport ramrod passed, vport_id = %d,"
" MTU = %d, vlan_removal_en = %d\n",
start.vport_id, qdev->mtu, vlan_removal_en);

for_each_queue(i) {
struct qede_fastpath *fp = &qdev->fp_array[i];
dma_addr_t tbl;
uint16_t cnt;

fp = &qdev->fp_array[i];
if (fp->type & QEDE_FASTPATH_RX) {
tbl = ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring);
cnt = ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring);
p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->
rx_comp_ring);
page_cnt = ecore_chain_get_page_cnt(&fp->rxq->
rx_comp_ring);

ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);

@@ -610,17 +599,17 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
RX_PI,
fp->rxq->rx_buf_size,
fp->rxq->rx_bd_ring.p_phys_addr,
tbl,
cnt,
&fp->rxq->hw_rxq_prod_addr);
p_phys_table,
page_cnt,
&fp->rxq->hw_rxq_prod_addr);
if (rc) {
DP_ERR(edev,
"Start rxq #%d failed %d\n",
DP_ERR(edev, "Start rxq #%d failed %d\n",
fp->rxq->queue_id, rc);
return rc;
}
return rc;
}

fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
fp->rxq->hw_cons_ptr =
&fp->sb_info->sb_virt->pi_array[RX_PI];

qede_update_rx_prod(qdev, fp->rxq);
}
@@ -628,16 +617,16 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
if (!(fp->type & QEDE_FASTPATH_TX))
continue;
for (tc = 0; tc < qdev->num_tc; tc++) {
struct qede_tx_queue *txq = fp->txqs[tc];
int txq_index = tc * QEDE_RSS_CNT(qdev) + i;
txq = fp->txqs[tc];
txq_index = tc * QEDE_RSS_CNT(qdev) + i;

tbl = ecore_chain_get_pbl_phys(&txq->tx_pbl);
cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl);
page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
rc = qdev->ops->q_tx_start(edev, i, txq->queue_id,
0,
fp->sb_info->igu_sb_id,
TX_PI(tc),
tbl, cnt,
p_phys_table, page_cnt,
&txq->doorbell_addr);
if (rc) {
DP_ERR(edev, "Start txq %u failed %d\n",
@@ -661,7 +650,7 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)

/* Prepare and send the vport enable */
memset(&vport_update_params, 0, sizeof(vport_update_params));
vport_update_params.vport_id = start.vport_id;
vport_update_params.vport_id = 0;
vport_update_params.update_vport_active_flg = 1;
vport_update_params.vport_active_flg = 1;
@@ -1116,6 +1105,32 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
return nb_pkt_sent;
}

static void qede_init_fp_queue(struct rte_eth_dev *eth_dev)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct qede_fastpath *fp;
uint8_t i, rss_id, txq_index, tc;
int rxq = 0, txq = 0;

for_each_queue(i) {
fp = &qdev->fp_array[i];
if (fp->type & QEDE_FASTPATH_RX) {
fp->rxq = eth_dev->data->rx_queues[i];
fp->rxq->queue_id = rxq++;
}

if (fp->type & QEDE_FASTPATH_TX) {
for (tc = 0; tc < qdev->num_tc; tc++) {
txq_index = tc * QEDE_TSS_CNT(qdev) + txq;
fp->txqs[tc] =
eth_dev->data->tx_queues[txq_index];
fp->txqs[tc]->queue_id = txq_index;
}
txq++;
}
}
}

int qede_dev_start(struct rte_eth_dev *eth_dev)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
@@ -1126,47 +1141,34 @@ int qede_dev_start(struct rte_eth_dev *eth_dev)

DP_INFO(edev, "Device state is %d\n", qdev->state);

switch (qdev->state) {
case QEDE_START:
DP_INFO(edev, "Device already started\n");
if (qdev->state == QEDE_DEV_START) {
DP_INFO(edev, "Port is already started\n");
return 0;
case QEDE_CLOSE:
if (qede_alloc_fp_array(qdev))
return -ENOMEM;
qede_init_fp(eth_dev);
/* Fall-thru */
case QEDE_STOP:
for (i = 0; i < QEDE_QUEUE_CNT(qdev); i++) {
fp = &qdev->fp_array[i];
if (qede_alloc_mem_sb(qdev, fp->sb_info, i))
return -ENOMEM;
}
break;
default:
DP_INFO(edev, "Unknown state for port %u\n",
eth_dev->data->port_id);
return -EINVAL;
}

if (qdev->state == QEDE_DEV_CONFIG)
qede_init_fp_queue(eth_dev);

rc = qede_start_queues(eth_dev, true);
if (rc) {
DP_ERR(edev, "Failed to start queues\n");
/* TBD: free */
return rc;
}
DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
QEDE_RSS_CNT(qdev), qdev->num_tc);

/* Bring-up the link */
qede_dev_set_link_state(eth_dev, true);
qdev->state = QEDE_START;
qede_config_rx_mode(eth_dev);

/* Init the queues */
/* Reset ring */
if (qede_reset_fp_rings(qdev))
return -ENOMEM;

DP_INFO(edev, "dev_state is QEDE_START\n");
/* Start/resume traffic */
qdev->ops->fastpath_start(edev);

qdev->state = QEDE_DEV_START;

DP_INFO(edev, "dev_state is QEDE_DEV_START\n");

return 0;
}
@@ -1222,7 +1224,7 @@ static int qede_stop_queues(struct qede_dev *qdev)
vport_update_params.vport_active_flg = 0;
vport_update_params.update_rss_flg = 0;

DP_INFO(edev, "vport_update\n");
DP_INFO(edev, "Deactivate vport\n");

rc = qdev->ops->vport_update(edev, &vport_update_params);
if (rc) {
@@ -1288,14 +1290,7 @@ static int qede_stop_queues(struct qede_dev *qdev)
}
}

DP_INFO(edev, "Stopping vports\n");

/* Stop the vport */
rc = qdev->ops->vport_stop(edev, 0);
if (rc)
DP_ERR(edev, "Failed to stop VPORT\n");

return rc;
return 0;
}

int qede_reset_fp_rings(struct qede_dev *qdev)
@@ -1306,15 +1301,17 @@ int qede_reset_fp_rings(struct qede_dev *qdev)
uint16_t id, i;

for_each_queue(id) {
DP_INFO(&qdev->edev, "Reset FP chain for RSS %u\n", id);
fp = &qdev->fp_array[id];

if (fp->type & QEDE_FASTPATH_RX) {
DP_INFO(&qdev->edev,
"Reset FP chain for RSS %u\n", id);
qede_rx_queue_release_mbufs(fp->rxq);
ecore_chain_reset(&fp->rxq->rx_bd_ring);
ecore_chain_reset(&fp->rxq->rx_comp_ring);
fp->rxq->sw_rx_prod = 0;
fp->rxq->sw_rx_cons = 0;
*fp->rxq->hw_cons_ptr = 0;
for (i = 0; i < fp->rxq->nb_rx_desc; i++) {
if (qede_alloc_rx_buffer(fp->rxq)) {
DP_ERR(&qdev->edev,
@@ -1329,6 +1326,7 @@ int qede_reset_fp_rings(struct qede_dev *qdev)
ecore_chain_reset(&txq->tx_pbl);
txq->sw_tx_cons = 0;
txq->sw_tx_prod = 0;
*txq->hw_cons_ptr = 0;
}
}
}
@@ -1337,24 +1335,30 @@ int qede_reset_fp_rings(struct qede_dev *qdev)
}

/* This function frees all memory of a single fp */
static void qede_free_mem_fp(struct qede_dev *qdev, struct qede_fastpath *fp)
static void qede_free_mem_fp(struct rte_eth_dev *eth_dev,
struct qede_fastpath *fp)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
uint8_t tc;

qede_rx_queue_release(fp->rxq);
for (tc = 0; tc < qdev->num_tc; tc++)
for (tc = 0; tc < qdev->num_tc; tc++) {
qede_tx_queue_release(fp->txqs[tc]);
eth_dev->data->tx_queues[tc] = NULL;
}
}

void qede_free_mem_load(struct qede_dev *qdev)
void qede_free_mem_load(struct rte_eth_dev *eth_dev)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct qede_fastpath *fp;
uint8_t rss_id;

for_each_queue(rss_id) {
struct qede_fastpath *fp = &qdev->fp_array[rss_id];
qede_free_mem_fp(qdev, fp);
fp = &qdev->fp_array[rss_id];
qede_free_mem_fp(eth_dev, fp);
eth_dev->data->rx_queues[rss_id] = NULL;
}
/* qdev->num_rss = 0; */
}

void qede_dev_stop(struct rte_eth_dev *eth_dev)
@@ -1364,7 +1368,7 @@ void qede_dev_stop(struct rte_eth_dev *eth_dev)

DP_INFO(edev, "port %u\n", eth_dev->data->port_id);

if (qdev->state != QEDE_START) {
if (qdev->state != QEDE_DEV_START) {
DP_INFO(edev, "Device not yet started\n");
return;
}
@@ -1379,7 +1383,7 @@ void qede_dev_stop(struct rte_eth_dev *eth_dev)
/* Bring the link down */
qede_dev_set_link_state(eth_dev, false);

qdev->state = QEDE_STOP;
qdev->state = QEDE_DEV_STOP;

DP_INFO(edev, "dev_state is QEDE_STOP\n");
DP_INFO(edev, "dev_state is QEDE_DEV_STOP\n");
}
@@ -173,7 +173,7 @@ int qede_reset_fp_rings(struct qede_dev *qdev);

void qede_free_fp_arrays(struct qede_dev *qdev);

void qede_free_mem_load(struct qede_dev *qdev);
void qede_free_mem_load(struct rte_eth_dev *eth_dev);

uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
@@ -181,4 +181,9 @@ uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);

/* Fastpath resource alloc/dealloc helpers */
int qede_alloc_fp_resc(struct qede_dev *qdev);

void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev);

#endif /* _QEDE_RXTX_H_ */