net/qede: refactoring vport handling code
The refactoring is mainly for two reasons:
- To remove an additional layer of internal callbacks for all vport-related
  operations from the struct qed_eth_ops_pass; instead, we can invoke the base
  APIs directly.
- To split a single large vport-update configuration into multiple, independent
  vport-update operations, each touching only the config bits that need an
  update.

Signed-off-by: Harish Patil <harish.patil@cavium.com>
This commit is contained in:
parent
a1158a04b5
commit
9a6d30ae6d
@ -8,112 +8,6 @@
|
||||
|
||||
#include "qede_ethdev.h"
|
||||
|
||||
static int
|
||||
qed_start_vport(struct ecore_dev *edev, struct qed_start_vport_params *p_params)
|
||||
{
|
||||
int rc, i;
|
||||
|
||||
for_each_hwfn(edev, i) {
|
||||
struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
|
||||
u8 tx_switching = 0;
|
||||
struct ecore_sp_vport_start_params start = { 0 };
|
||||
|
||||
start.tpa_mode = p_params->enable_lro ? ECORE_TPA_MODE_RSC :
|
||||
ECORE_TPA_MODE_NONE;
|
||||
start.remove_inner_vlan = p_params->remove_inner_vlan;
|
||||
start.tx_switching = tx_switching;
|
||||
start.only_untagged = false; /* untagged only */
|
||||
start.drop_ttl0 = p_params->drop_ttl0;
|
||||
start.concrete_fid = p_hwfn->hw_info.concrete_fid;
|
||||
start.opaque_fid = p_hwfn->hw_info.opaque_fid;
|
||||
start.concrete_fid = p_hwfn->hw_info.concrete_fid;
|
||||
start.handle_ptp_pkts = p_params->handle_ptp_pkts;
|
||||
start.vport_id = p_params->vport_id;
|
||||
start.mtu = p_params->mtu;
|
||||
/* @DPDK - Disable FW placement */
|
||||
start.zero_placement_offset = 1;
|
||||
|
||||
rc = ecore_sp_vport_start(p_hwfn, &start);
|
||||
if (rc) {
|
||||
DP_ERR(edev, "Failed to start VPORT\n");
|
||||
return rc;
|
||||
}
|
||||
|
||||
DP_VERBOSE(edev, ECORE_MSG_SPQ,
|
||||
"Started V-PORT %d with MTU %d\n",
|
||||
p_params->vport_id, p_params->mtu);
|
||||
}
|
||||
|
||||
ecore_reset_vport_stats(edev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Stop the vport identified by @vport_id on each HW function of @edev.
 * Returns 0 on success, or the first hwfn's ecore error code on failure.
 */
static int qed_stop_vport(struct ecore_dev *edev, uint8_t vport_id)
{
	struct ecore_hwfn *p_hwfn;
	int rc;
	int hwfn_idx;

	for_each_hwfn(edev, hwfn_idx) {
		p_hwfn = &edev->hwfns[hwfn_idx];
		rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
					 vport_id);
		if (rc) {
			DP_ERR(edev, "Failed to stop VPORT\n");
			return rc;
		}
	}

	return 0;
}
|
||||
|
||||
static int
|
||||
qed_update_vport(struct ecore_dev *edev, struct qed_update_vport_params *params)
|
||||
{
|
||||
struct ecore_sp_vport_update_params sp_params;
|
||||
struct ecore_rss_params sp_rss_params;
|
||||
int rc, i;
|
||||
|
||||
memset(&sp_params, 0, sizeof(sp_params));
|
||||
memset(&sp_rss_params, 0, sizeof(sp_rss_params));
|
||||
|
||||
/* Translate protocol params into sp params */
|
||||
sp_params.vport_id = params->vport_id;
|
||||
sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
|
||||
sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
|
||||
sp_params.vport_active_rx_flg = params->vport_active_flg;
|
||||
sp_params.vport_active_tx_flg = params->vport_active_flg;
|
||||
sp_params.update_inner_vlan_removal_flg =
|
||||
params->update_inner_vlan_removal_flg;
|
||||
sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;
|
||||
sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
|
||||
sp_params.tx_switching_flg = params->tx_switching_flg;
|
||||
sp_params.accept_any_vlan = params->accept_any_vlan;
|
||||
sp_params.update_accept_any_vlan_flg =
|
||||
params->update_accept_any_vlan_flg;
|
||||
sp_params.mtu = params->mtu;
|
||||
sp_params.sge_tpa_params = params->sge_tpa_params;
|
||||
|
||||
for_each_hwfn(edev, i) {
|
||||
struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
|
||||
|
||||
sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
|
||||
rc = ecore_sp_vport_update(p_hwfn, &sp_params,
|
||||
ECORE_SPQ_MODE_EBLOCK, NULL);
|
||||
if (rc) {
|
||||
DP_ERR(edev, "Failed to update VPORT\n");
|
||||
return rc;
|
||||
}
|
||||
|
||||
DP_VERBOSE(edev, ECORE_MSG_SPQ,
|
||||
"Updated V-PORT %d: active_flag %d [update %d]\n",
|
||||
params->vport_id, params->vport_active_flg,
|
||||
params->update_vport_active_flg);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
qed_start_rxq(struct ecore_dev *edev,
|
||||
uint8_t rss_num,
|
||||
@ -299,9 +193,6 @@ int qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
|
||||
static const struct qed_eth_ops qed_eth_ops_pass = {
|
||||
INIT_STRUCT_FIELD(common, &qed_common_ops_pass),
|
||||
INIT_STRUCT_FIELD(fill_dev_info, &qed_fill_eth_dev_info),
|
||||
INIT_STRUCT_FIELD(vport_start, &qed_start_vport),
|
||||
INIT_STRUCT_FIELD(vport_stop, &qed_stop_vport),
|
||||
INIT_STRUCT_FIELD(vport_update, &qed_update_vport),
|
||||
INIT_STRUCT_FIELD(q_rx_start, &qed_start_rxq),
|
||||
INIT_STRUCT_FIELD(q_tx_start, &qed_start_txq),
|
||||
INIT_STRUCT_FIELD(q_rx_stop, &qed_stop_rxq),
|
||||
|
@ -16,8 +16,6 @@ struct eth_slow_path_rx_cqe;
|
||||
|
||||
#define INIT_STRUCT_FIELD(field, value) .field = value
|
||||
|
||||
#define QED_ETH_INTERFACE_VERSION 609
|
||||
|
||||
#define QEDE_MAX_MCAST_FILTERS 64
|
||||
|
||||
enum qed_filter_rx_mode_type {
|
||||
@ -47,45 +45,12 @@ struct qed_dev_eth_info {
|
||||
bool is_legacy;
|
||||
};
|
||||
|
||||
/* Driver-level vport-update request; translated field-by-field into
 * ecore_sp_vport_update_params by qed_update_vport(). Each update_*_flg
 * member selects whether the paired value is applied by the ramrod.
 */
struct qed_update_vport_params {
	uint8_t vport_id;
	/* Applied to both the RX and TX active flags in the translation. */
	uint8_t update_vport_active_flg;
	uint8_t vport_active_flg;
	uint8_t update_inner_vlan_removal_flg;
	uint8_t inner_vlan_removal_flg;
	uint8_t update_tx_switching_flg;
	uint8_t tx_switching_flg;
	uint8_t update_accept_any_vlan_flg;
	uint8_t accept_any_vlan;
	uint8_t update_rss_flg;
	uint16_t mtu;
	/* Optional TPA/LRO configuration; passed through unchanged. */
	struct ecore_sge_tpa_params *sge_tpa_params;
};
|
||||
|
||||
/* Parameters for the initial VPORT-START ramrod; consumed by
 * qed_start_vport(), which copies them into ecore_sp_vport_start_params.
 */
struct qed_start_vport_params {
	bool remove_inner_vlan;
	bool handle_ptp_pkts;
	/* When set, qed_start_vport() selects ECORE_TPA_MODE_RSC. */
	bool enable_lro;
	bool drop_ttl0;
	uint8_t vport_id;
	uint16_t mtu;
	bool clear_stats;
};
|
||||
|
||||
struct qed_eth_ops {
|
||||
const struct qed_common_ops *common;
|
||||
|
||||
int (*fill_dev_info)(struct ecore_dev *edev,
|
||||
struct qed_dev_eth_info *info);
|
||||
|
||||
int (*vport_start)(struct ecore_dev *edev,
|
||||
struct qed_start_vport_params *params);
|
||||
|
||||
int (*vport_stop)(struct ecore_dev *edev, uint8_t vport_id);
|
||||
|
||||
int (*vport_update)(struct ecore_dev *edev,
|
||||
struct qed_update_vport_params *params);
|
||||
|
||||
int (*q_rx_start)(struct ecore_dev *cdev,
|
||||
uint8_t rss_num,
|
||||
struct ecore_queue_start_common_params *p_params,
|
||||
|
@ -329,6 +329,178 @@ static void qede_print_adapter_info(struct qede_dev *qdev)
|
||||
}
|
||||
#endif
|
||||
|
||||
static int
|
||||
qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
|
||||
{
|
||||
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
|
||||
struct ecore_sp_vport_start_params params;
|
||||
struct ecore_hwfn *p_hwfn;
|
||||
int rc;
|
||||
int i;
|
||||
|
||||
memset(¶ms, 0, sizeof(params));
|
||||
params.vport_id = 0;
|
||||
params.mtu = mtu;
|
||||
/* @DPDK - Disable FW placement */
|
||||
params.zero_placement_offset = 1;
|
||||
for_each_hwfn(edev, i) {
|
||||
p_hwfn = &edev->hwfns[i];
|
||||
params.concrete_fid = p_hwfn->hw_info.concrete_fid;
|
||||
params.opaque_fid = p_hwfn->hw_info.opaque_fid;
|
||||
rc = ecore_sp_vport_start(p_hwfn, ¶ms);
|
||||
if (rc != ECORE_SUCCESS) {
|
||||
DP_ERR(edev, "Start V-PORT failed %d\n", rc);
|
||||
return rc;
|
||||
}
|
||||
}
|
||||
ecore_reset_vport_stats(edev);
|
||||
DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
qede_stop_vport(struct ecore_dev *edev)
|
||||
{
|
||||
struct ecore_hwfn *p_hwfn;
|
||||
uint8_t vport_id;
|
||||
int rc;
|
||||
int i;
|
||||
|
||||
vport_id = 0;
|
||||
for_each_hwfn(edev, i) {
|
||||
p_hwfn = &edev->hwfns[i];
|
||||
rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
|
||||
vport_id);
|
||||
if (rc != ECORE_SUCCESS) {
|
||||
DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
|
||||
return rc;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Activate or deactivate vport via vport-update */
|
||||
int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
|
||||
{
|
||||
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
|
||||
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
|
||||
struct ecore_sp_vport_update_params params;
|
||||
struct ecore_hwfn *p_hwfn;
|
||||
uint8_t i;
|
||||
int rc = -1;
|
||||
|
||||
memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params));
|
||||
params.vport_id = 0;
|
||||
params.update_vport_active_rx_flg = 1;
|
||||
params.update_vport_active_tx_flg = 1;
|
||||
params.vport_active_rx_flg = flg;
|
||||
params.vport_active_tx_flg = flg;
|
||||
for_each_hwfn(edev, i) {
|
||||
p_hwfn = &edev->hwfns[i];
|
||||
params.opaque_fid = p_hwfn->hw_info.opaque_fid;
|
||||
rc = ecore_sp_vport_update(p_hwfn, ¶ms,
|
||||
ECORE_SPQ_MODE_EBLOCK, NULL);
|
||||
if (rc != ECORE_SUCCESS) {
|
||||
DP_ERR(edev, "Failed to update vport\n");
|
||||
break;
|
||||
}
|
||||
}
|
||||
DP_INFO(edev, "vport %s\n", flg ? "activated" : "deactivated");
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void
|
||||
qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
|
||||
uint16_t mtu, bool enable)
|
||||
{
|
||||
/* Enable LRO in split mode */
|
||||
sge_tpa_params->tpa_ipv4_en_flg = enable;
|
||||
sge_tpa_params->tpa_ipv6_en_flg = enable;
|
||||
sge_tpa_params->tpa_ipv4_tunn_en_flg = false;
|
||||
sge_tpa_params->tpa_ipv6_tunn_en_flg = false;
|
||||
/* set if tpa enable changes */
|
||||
sge_tpa_params->update_tpa_en_flg = 1;
|
||||
/* set if tpa parameters should be handled */
|
||||
sge_tpa_params->update_tpa_param_flg = enable;
|
||||
|
||||
sge_tpa_params->max_buffers_per_cqe = 20;
|
||||
/* Enable TPA in split mode. In this mode each TPA segment
|
||||
* starts on the new BD, so there is one BD per segment.
|
||||
*/
|
||||
sge_tpa_params->tpa_pkt_split_flg = 1;
|
||||
sge_tpa_params->tpa_hdr_data_split_flg = 0;
|
||||
sge_tpa_params->tpa_gro_consistent_flg = 0;
|
||||
sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
|
||||
sge_tpa_params->tpa_max_size = 0x7FFF;
|
||||
sge_tpa_params->tpa_min_size_to_start = mtu / 2;
|
||||
sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
|
||||
}
|
||||
|
||||
/* Enable/disable LRO via vport-update */
|
||||
int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
|
||||
{
|
||||
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
|
||||
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
|
||||
struct ecore_sp_vport_update_params params;
|
||||
struct ecore_sge_tpa_params tpa_params;
|
||||
struct ecore_hwfn *p_hwfn;
|
||||
int rc;
|
||||
int i;
|
||||
|
||||
memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params));
|
||||
memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
|
||||
qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg);
|
||||
params.vport_id = 0;
|
||||
params.sge_tpa_params = &tpa_params;
|
||||
for_each_hwfn(edev, i) {
|
||||
p_hwfn = &edev->hwfns[i];
|
||||
params.opaque_fid = p_hwfn->hw_info.opaque_fid;
|
||||
rc = ecore_sp_vport_update(p_hwfn, ¶ms,
|
||||
ECORE_SPQ_MODE_EBLOCK, NULL);
|
||||
if (rc != ECORE_SUCCESS) {
|
||||
DP_ERR(edev, "Failed to update LRO\n");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Update MTU via vport-update without doing port restart.
|
||||
* The vport must be deactivated before calling this API.
|
||||
*/
|
||||
int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
|
||||
{
|
||||
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
|
||||
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
|
||||
struct ecore_sp_vport_update_params params;
|
||||
struct ecore_hwfn *p_hwfn;
|
||||
int rc;
|
||||
int i;
|
||||
|
||||
memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params));
|
||||
params.vport_id = 0;
|
||||
params.mtu = mtu;
|
||||
params.vport_id = 0;
|
||||
for_each_hwfn(edev, i) {
|
||||
p_hwfn = &edev->hwfns[i];
|
||||
params.opaque_fid = p_hwfn->hw_info.opaque_fid;
|
||||
rc = ecore_sp_vport_update(p_hwfn, ¶ms,
|
||||
ECORE_SPQ_MODE_EBLOCK, NULL);
|
||||
if (rc != ECORE_SUCCESS) {
|
||||
DP_ERR(edev, "Failed to update MTU\n");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
DP_INFO(edev, "MTU updated to %u\n", mtu);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
|
||||
{
|
||||
memset(ucast, 0, sizeof(struct ecore_filter_ucast));
|
||||
@ -565,49 +737,57 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
|
||||
qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
|
||||
}
|
||||
|
||||
static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)
|
||||
static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
|
||||
{
|
||||
struct ecore_dev *edev = &qdev->edev;
|
||||
struct qed_update_vport_params params = {
|
||||
.vport_id = 0,
|
||||
.accept_any_vlan = action,
|
||||
.update_accept_any_vlan_flg = 1,
|
||||
};
|
||||
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
|
||||
struct ecore_sp_vport_update_params params;
|
||||
struct ecore_hwfn *p_hwfn;
|
||||
uint8_t i;
|
||||
int rc;
|
||||
|
||||
/* Proceed only if action actually needs to be performed */
|
||||
if (qdev->accept_any_vlan == action)
|
||||
return;
|
||||
|
||||
rc = qdev->ops->vport_update(edev, ¶ms);
|
||||
if (rc) {
|
||||
DP_ERR(edev, "Failed to %s accept-any-vlan\n",
|
||||
action ? "enable" : "disable");
|
||||
} else {
|
||||
DP_INFO(edev, "%s accept-any-vlan\n",
|
||||
action ? "enabled" : "disabled");
|
||||
qdev->accept_any_vlan = action;
|
||||
memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params));
|
||||
params.vport_id = 0;
|
||||
params.update_accept_any_vlan_flg = 1;
|
||||
params.accept_any_vlan = flg;
|
||||
for_each_hwfn(edev, i) {
|
||||
p_hwfn = &edev->hwfns[i];
|
||||
params.opaque_fid = p_hwfn->hw_info.opaque_fid;
|
||||
rc = ecore_sp_vport_update(p_hwfn, ¶ms,
|
||||
ECORE_SPQ_MODE_EBLOCK, NULL);
|
||||
if (rc != ECORE_SUCCESS) {
|
||||
DP_ERR(edev, "Failed to configure accept-any-vlan\n");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
DP_INFO(edev, "%s accept-any-vlan\n", flg ? "enabled" : "disabled");
|
||||
}
|
||||
|
||||
static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping)
|
||||
static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg)
|
||||
{
|
||||
struct qed_update_vport_params vport_update_params;
|
||||
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
|
||||
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
|
||||
struct ecore_sp_vport_update_params params;
|
||||
struct ecore_hwfn *p_hwfn;
|
||||
uint8_t i;
|
||||
int rc;
|
||||
|
||||
memset(&vport_update_params, 0, sizeof(vport_update_params));
|
||||
vport_update_params.vport_id = 0;
|
||||
vport_update_params.update_inner_vlan_removal_flg = 1;
|
||||
vport_update_params.inner_vlan_removal_flg = set_stripping;
|
||||
rc = qdev->ops->vport_update(edev, &vport_update_params);
|
||||
if (rc) {
|
||||
DP_ERR(edev, "Update V-PORT failed %d\n", rc);
|
||||
return rc;
|
||||
memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params));
|
||||
params.vport_id = 0;
|
||||
params.update_inner_vlan_removal_flg = 1;
|
||||
params.inner_vlan_removal_flg = flg;
|
||||
for_each_hwfn(edev, i) {
|
||||
p_hwfn = &edev->hwfns[i];
|
||||
params.opaque_fid = p_hwfn->hw_info.opaque_fid;
|
||||
rc = ecore_sp_vport_update(p_hwfn, ¶ms,
|
||||
ECORE_SPQ_MODE_EBLOCK, NULL);
|
||||
if (rc != ECORE_SUCCESS) {
|
||||
DP_ERR(edev, "Failed to update vport\n");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
qdev->vlan_strip_flg = set_stripping;
|
||||
|
||||
DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled");
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -741,33 +921,6 @@ static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
|
||||
mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
|
||||
}
|
||||
|
||||
static int qede_init_vport(struct qede_dev *qdev)
|
||||
{
|
||||
struct ecore_dev *edev = &qdev->edev;
|
||||
struct qed_start_vport_params start = {0};
|
||||
int rc;
|
||||
|
||||
start.remove_inner_vlan = 1;
|
||||
start.enable_lro = qdev->enable_lro;
|
||||
start.mtu = ETHER_MTU + QEDE_ETH_OVERHEAD;
|
||||
start.vport_id = 0;
|
||||
start.drop_ttl0 = false;
|
||||
start.clear_stats = 1;
|
||||
start.handle_ptp_pkts = 0;
|
||||
|
||||
rc = qdev->ops->vport_start(edev, &start);
|
||||
if (rc) {
|
||||
DP_ERR(edev, "Start V-PORT failed %d\n", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
DP_INFO(edev,
|
||||
"Start vport ramrod passed, vport_id = %d, MTU = %u\n",
|
||||
start.vport_id, ETHER_MTU);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void qede_prandom_bytes(uint32_t *buff)
|
||||
{
|
||||
uint8_t i;
|
||||
@ -863,9 +1016,7 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
|
||||
|
||||
/* Check for the port restart case */
|
||||
if (qdev->state != QEDE_DEV_INIT) {
|
||||
rc = qdev->ops->vport_stop(edev, 0);
|
||||
if (rc != 0)
|
||||
return rc;
|
||||
qede_stop_vport(edev);
|
||||
qede_dealloc_fp_resc(eth_dev);
|
||||
}
|
||||
|
||||
@ -880,17 +1031,24 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
|
||||
if (rc != 0)
|
||||
return rc;
|
||||
|
||||
/* Issue VPORT-START with default config values to allow
|
||||
* other port configurations early on.
|
||||
/* VF's MTU has to be set using vport-start where as
|
||||
* PF's MTU can be updated via vport-update.
|
||||
*/
|
||||
rc = qede_init_vport(qdev);
|
||||
if (rc != 0)
|
||||
return rc;
|
||||
if (IS_VF(edev)) {
|
||||
if (qede_start_vport(qdev, rxmode->max_rx_pkt_len))
|
||||
return -1;
|
||||
} else {
|
||||
if (qede_update_mtu(eth_dev, rxmode->max_rx_pkt_len))
|
||||
return -1;
|
||||
}
|
||||
|
||||
qdev->mtu = rxmode->max_rx_pkt_len;
|
||||
qdev->new_mtu = qdev->mtu;
|
||||
|
||||
if (!(rxmode->mq_mode == ETH_MQ_RX_RSS ||
|
||||
rxmode->mq_mode == ETH_MQ_RX_NONE)) {
|
||||
DP_ERR(edev, "Unsupported RSS mode\n");
|
||||
qdev->ops->vport_stop(edev, 0);
|
||||
qede_stop_vport(edev);
|
||||
qede_dealloc_fp_resc(eth_dev);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -898,7 +1056,7 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
|
||||
/* Flow director mode check */
|
||||
rc = qede_check_fdir_support(eth_dev);
|
||||
if (rc) {
|
||||
qdev->ops->vport_stop(edev, 0);
|
||||
qede_stop_vport(edev);
|
||||
qede_dealloc_fp_resc(eth_dev);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -1106,7 +1264,6 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
|
||||
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
|
||||
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
|
||||
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
|
||||
int rc;
|
||||
|
||||
PMD_INIT_FUNC_TRACE(edev);
|
||||
|
||||
@ -1122,9 +1279,7 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
|
||||
else
|
||||
DP_INFO(edev, "Device is already stopped\n");
|
||||
|
||||
rc = qdev->ops->vport_stop(edev, 0);
|
||||
if (rc != 0)
|
||||
DP_ERR(edev, "Failed to stop VPORT\n");
|
||||
qede_stop_vport(edev);
|
||||
|
||||
qede_dealloc_fp_resc(eth_dev);
|
||||
|
||||
@ -2322,6 +2477,11 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
|
||||
}
|
||||
|
||||
adapter->state = QEDE_DEV_INIT;
|
||||
adapter->mtu = ETHER_MTU;
|
||||
adapter->new_mtu = ETHER_MTU;
|
||||
if (!is_vf)
|
||||
if (qede_start_vport(adapter, adapter->mtu))
|
||||
return -1;
|
||||
|
||||
DP_NOTICE(edev, false, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
|
||||
adapter->primary_mac.addr_bytes[0],
|
||||
|
@ -190,6 +190,7 @@ struct qede_dev {
|
||||
struct qede_fastpath *fp_array;
|
||||
uint8_t num_tc;
|
||||
uint16_t mtu;
|
||||
uint16_t new_mtu;
|
||||
bool rss_enable;
|
||||
struct rte_eth_rss_conf rss_conf;
|
||||
uint16_t rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
|
||||
@ -248,4 +249,10 @@ uint16_t qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
|
||||
|
||||
void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev);
|
||||
|
||||
int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg);
|
||||
|
||||
int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu);
|
||||
|
||||
int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg);
|
||||
|
||||
#endif /* _QEDE_ETHDEV_H_ */
|
||||
|
@ -506,42 +506,11 @@ qede_update_rx_prod(__rte_unused struct qede_dev *edev,
|
||||
PMD_RX_LOG(DEBUG, rxq, "bd_prod %u cqe_prod %u", bd_prod, cqe_prod);
|
||||
}
|
||||
|
||||
static void
|
||||
qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
|
||||
uint16_t mtu, bool enable)
|
||||
{
|
||||
/* Enable LRO in split mode */
|
||||
sge_tpa_params->tpa_ipv4_en_flg = enable;
|
||||
sge_tpa_params->tpa_ipv6_en_flg = enable;
|
||||
sge_tpa_params->tpa_ipv4_tunn_en_flg = false;
|
||||
sge_tpa_params->tpa_ipv6_tunn_en_flg = false;
|
||||
/* set if tpa enable changes */
|
||||
sge_tpa_params->update_tpa_en_flg = 1;
|
||||
/* set if tpa parameters should be handled */
|
||||
sge_tpa_params->update_tpa_param_flg = enable;
|
||||
|
||||
sge_tpa_params->max_buffers_per_cqe = 20;
|
||||
/* Enable TPA in split mode. In this mode each TPA segment
|
||||
* starts on the new BD, so there is one BD per segment.
|
||||
*/
|
||||
sge_tpa_params->tpa_pkt_split_flg = 1;
|
||||
sge_tpa_params->tpa_hdr_data_split_flg = 0;
|
||||
sge_tpa_params->tpa_gro_consistent_flg = 0;
|
||||
sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
|
||||
sge_tpa_params->tpa_max_size = 0x7FFF;
|
||||
sge_tpa_params->tpa_min_size_to_start = mtu / 2;
|
||||
sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
|
||||
}
|
||||
|
||||
static int qede_start_queues(struct rte_eth_dev *eth_dev,
|
||||
__rte_unused bool clear_stats)
|
||||
static int qede_start_queues(struct rte_eth_dev *eth_dev)
|
||||
{
|
||||
struct qede_dev *qdev = eth_dev->data->dev_private;
|
||||
struct ecore_dev *edev = &qdev->edev;
|
||||
struct ecore_queue_start_common_params q_params;
|
||||
struct qed_dev_info *qed_info = &qdev->dev_info.common;
|
||||
struct qed_update_vport_params vport_update_params;
|
||||
struct ecore_sge_tpa_params tpa_params;
|
||||
struct qede_tx_queue *txq;
|
||||
struct qede_fastpath *fp;
|
||||
dma_addr_t p_phys_table;
|
||||
@ -635,35 +604,6 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev,
|
||||
}
|
||||
}
|
||||
|
||||
/* Prepare and send the vport enable */
|
||||
memset(&vport_update_params, 0, sizeof(vport_update_params));
|
||||
/* Update MTU via vport update */
|
||||
vport_update_params.mtu = qdev->mtu;
|
||||
vport_update_params.vport_id = 0;
|
||||
vport_update_params.update_vport_active_flg = 1;
|
||||
vport_update_params.vport_active_flg = 1;
|
||||
|
||||
/* @DPDK */
|
||||
if (qed_info->mf_mode == MF_NPAR && qed_info->tx_switching) {
|
||||
/* TBD: Check SRIOV enabled for VF */
|
||||
vport_update_params.update_tx_switching_flg = 1;
|
||||
vport_update_params.tx_switching_flg = 1;
|
||||
}
|
||||
|
||||
/* TPA */
|
||||
if (qdev->enable_lro) {
|
||||
DP_INFO(edev, "Enabling LRO\n");
|
||||
memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
|
||||
qede_update_sge_tpa_params(&tpa_params, qdev->mtu, true);
|
||||
vport_update_params.sge_tpa_params = &tpa_params;
|
||||
}
|
||||
|
||||
rc = qdev->ops->vport_update(edev, &vport_update_params);
|
||||
if (rc) {
|
||||
DP_ERR(edev, "Update V-PORT failed %d\n", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1703,7 +1643,18 @@ int qede_dev_start(struct rte_eth_dev *eth_dev)
|
||||
if (qdev->state == QEDE_DEV_CONFIG)
|
||||
qede_init_fp_queue(eth_dev);
|
||||
|
||||
rc = qede_start_queues(eth_dev, true);
|
||||
/* Update MTU only if it has changed */
|
||||
if (qdev->mtu != qdev->new_mtu) {
|
||||
if (qede_update_mtu(eth_dev, qdev->new_mtu))
|
||||
return -1;
|
||||
qdev->mtu = qdev->new_mtu;
|
||||
/* If MTU has changed then update TPA too */
|
||||
if (qdev->enable_lro)
|
||||
if (qede_enable_tpa(eth_dev, true))
|
||||
return -1;
|
||||
}
|
||||
|
||||
rc = qede_start_queues(eth_dev);
|
||||
if (rc) {
|
||||
DP_ERR(edev, "Failed to start queues\n");
|
||||
/* TBD: free */
|
||||
@ -1719,6 +1670,10 @@ int qede_dev_start(struct rte_eth_dev *eth_dev)
|
||||
if (qede_config_rss(eth_dev))
|
||||
return -1;
|
||||
|
||||
/* Enable vport*/
|
||||
if (qede_activate_vport(eth_dev, true))
|
||||
return -1;
|
||||
|
||||
/* Bring-up the link */
|
||||
qede_dev_set_link_state(eth_dev, true);
|
||||
|
||||
@ -1769,33 +1724,10 @@ static int qede_drain_txq(struct qede_dev *qdev,
|
||||
|
||||
static int qede_stop_queues(struct qede_dev *qdev)
|
||||
{
|
||||
struct qed_update_vport_params vport_update_params;
|
||||
struct ecore_dev *edev = &qdev->edev;
|
||||
struct ecore_sge_tpa_params tpa_params;
|
||||
struct qede_fastpath *fp;
|
||||
int rc, tc, i;
|
||||
|
||||
/* Disable the vport */
|
||||
memset(&vport_update_params, 0, sizeof(vport_update_params));
|
||||
vport_update_params.vport_id = 0;
|
||||
vport_update_params.update_vport_active_flg = 1;
|
||||
vport_update_params.vport_active_flg = 0;
|
||||
vport_update_params.update_rss_flg = 0;
|
||||
/* Disable TPA */
|
||||
if (qdev->enable_lro) {
|
||||
DP_INFO(edev, "Disabling LRO\n");
|
||||
memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
|
||||
qede_update_sge_tpa_params(&tpa_params, qdev->mtu, false);
|
||||
vport_update_params.sge_tpa_params = &tpa_params;
|
||||
}
|
||||
|
||||
DP_INFO(edev, "Deactivate vport\n");
|
||||
rc = qdev->ops->vport_update(edev, &vport_update_params);
|
||||
if (rc) {
|
||||
DP_ERR(edev, "Failed to update vport\n");
|
||||
return rc;
|
||||
}
|
||||
|
||||
DP_INFO(edev, "Flushing tx queues\n");
|
||||
|
||||
/* Flush Tx queues. If needed, request drain from MCP */
|
||||
@ -1928,6 +1860,10 @@ void qede_dev_stop(struct rte_eth_dev *eth_dev)
|
||||
return;
|
||||
}
|
||||
|
||||
/* Disable vport */
|
||||
if (qede_activate_vport(eth_dev, false))
|
||||
return;
|
||||
|
||||
if (qede_stop_queues(qdev))
|
||||
DP_ERR(edev, "Didn't succeed to close queues\n");
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user