net/qede: fix VF reload
On ungraceful termination of a DPDK application, the VF PMD fails to reload because the PF sees the VF in an unexpected state during the VF acquisition handshake. Fix this by letting the VF request a soft FLR from the PF during load in such cases, so that the VF can be cleanly reloaded.

Fixes: 2ea6f76aff40 ("qede: add core driver")
Cc: stable@dpdk.org

Signed-off-by: Manish Chopra <manishc@marvell.com>
Signed-off-by: Shahed Shaikh <shshaikh@marvell.com>
Signed-off-by: Rasesh Mody <rmody@marvell.com>
Acked-by: Rasesh Mody <rmody@marvell.com>
commit f44ca48c81
parent 819d0d1d57
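For readers unfamiliar with the ecore VF channel, the control flow this patch adds to the VF load path can be summarized as: retry the ACQUIRE message a bounded number of times on a channel timeout, and if the PF reports the VF as already acquired (stale state left by an ungraceful exit), request a soft FLR and redo the acquisition. The sketch below is a minimal, self-contained model of that logic only, not driver code; every identifier in it is hypothetical, apart from the retry count mirroring the patch's ECORE_VF_ACQUIRE_THRESH value of 3.

/*
 * Illustrative model only -- not qede/ecore code. It mimics the acquire
 * loop added by this patch: bounded retries on a channel timeout, and a
 * soft-FLR-then-reacquire step when the PF still holds stale VF state.
 */
#include <stdbool.h>
#include <stdio.h>

enum pf_status { PF_OK, PF_TIMEOUT, PF_ALREADY_ACQUIRED, PF_ERR };

#define ACQUIRE_RETRY_CNT 3 /* plays the role of ECORE_VF_ACQUIRE_THRESH */

static int attempt_nr; /* lets the stub PF change behavior across calls */

/* Stub PF: the first attempt hits stale VF state, later attempts succeed. */
static enum pf_status send_acquire(void)
{
	return attempt_nr++ == 0 ? PF_ALREADY_ACQUIRED : PF_OK;
}

static bool vf_acquire(void);

/* Ask the PF for a soft FLR, wait it out, then redo the whole acquisition. */
static bool soft_flr_then_reacquire(void)
{
	puts("soft FLR requested; waiting for PF/MFW, then re-acquiring");
	return vf_acquire();
}

static bool vf_acquire(void)
{
	int retries = ACQUIRE_RETRY_CNT;

	for (;;) {
		switch (send_acquire()) {
		case PF_TIMEOUT:
			if (retries--) /* transient channel timeout: retry */
				continue;
			return false;
		case PF_ALREADY_ACQUIRED:
			/* stale state from an ungraceful VF exit */
			return soft_flr_then_reacquire();
		case PF_OK:
			return true;
		default:
			return false;
		}
	}
}

int main(void)
{
	printf("VF acquire %s\n", vf_acquire() ? "succeeded" : "failed");
	return 0;
}

The actual driver changes follow in the hunks below.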
@@ -5617,7 +5617,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview,
 	p_hwfn->db_phys_addr = db_phys_addr;
 
 	if (IS_VF(p_dev))
-		return ecore_vf_hw_prepare(p_hwfn);
+		return ecore_vf_hw_prepare(p_hwfn, p_params);
 
 	/* Validate that chip access is feasible */
 	if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
@@ -277,6 +277,9 @@ struct ecore_hw_prepare_params {
 
 	/* Indicates whether this PF serves a storage target */
 	bool b_is_target;
+
+	/* retry count for VF acquire on channel timeout */
+	u8 acquire_retry_cnt;
 };
 
 /**
@@ -51,6 +51,7 @@ enum ecore_iov_pf_to_vf_status {
 	PFVF_STATUS_NO_RESOURCE,
 	PFVF_STATUS_FORCED,
 	PFVF_STATUS_MALICIOUS,
+	PFVF_STATUS_ACQUIRED,
 };
 
 struct ecore_mcp_link_params;
@@ -61,6 +61,39 @@ const char *qede_ecore_channel_tlvs_string[] = {
 	"CHANNEL_TLV_COALESCE_READ",
 	"CHANNEL_TLV_BULLETIN_UPDATE_MAC",
 	"CHANNEL_TLV_UPDATE_MTU",
+	"CHANNEL_TLV_RDMA_ACQUIRE",
+	"CHANNEL_TLV_RDMA_START",
+	"CHANNEL_TLV_RDMA_STOP",
+	"CHANNEL_TLV_RDMA_ADD_USER",
+	"CHANNEL_TLV_RDMA_REMOVE_USER",
+	"CHANNEL_TLV_RDMA_QUERY_COUNTERS",
+	"CHANNEL_TLV_RDMA_ALLOC_TID",
+	"CHANNEL_TLV_RDMA_REGISTER_TID",
+	"CHANNEL_TLV_RDMA_DEREGISTER_TID",
+	"CHANNEL_TLV_RDMA_FREE_TID",
+	"CHANNEL_TLV_RDMA_CREATE_CQ",
+	"CHANNEL_TLV_RDMA_RESIZE_CQ",
+	"CHANNEL_TLV_RDMA_DESTROY_CQ",
+	"CHANNEL_TLV_RDMA_CREATE_QP",
+	"CHANNEL_TLV_RDMA_MODIFY_QP",
+	"CHANNEL_TLV_RDMA_QUERY_QP",
+	"CHANNEL_TLV_RDMA_DESTROY_QP",
+	"CHANNEL_TLV_RDMA_CREATE_SRQ",
+	"CHANNEL_TLV_RDMA_MODIFY_SRQ",
+	"CHANNEL_TLV_RDMA_DESTROY_SRQ",
+	"CHANNEL_TLV_RDMA_QUERY_PORT",
+	"CHANNEL_TLV_RDMA_QUERY_DEVICE",
+	"CHANNEL_TLV_RDMA_IWARP_CONNECT",
+	"CHANNEL_TLV_RDMA_IWARP_ACCEPT",
+	"CHANNEL_TLV_RDMA_IWARP_CREATE_LISTEN",
+	"CHANNEL_TLV_RDMA_IWARP_DESTROY_LISTEN",
+	"CHANNEL_TLV_RDMA_IWARP_PAUSE_LISTEN",
+	"CHANNEL_TLV_RDMA_IWARP_REJECT",
+	"CHANNEL_TLV_RDMA_IWARP_SEND_RTR",
+	"CHANNEL_TLV_ESTABLISH_LL2_CONN",
+	"CHANNEL_TLV_TERMINATE_LL2_CONN",
+	"CHANNEL_TLV_ASYNC_EVENT",
+	"CHANNEL_TLV_SOFT_FLR",
 	"CHANNEL_TLV_MAX"
 };
 
@@ -226,7 +226,6 @@ enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)
 	return _ecore_vf_pf_release(p_hwfn, true);
 }
 
-#define VF_ACQUIRE_THRESH 3
 static void ecore_vf_pf_acquire_reduce_resc(struct ecore_hwfn *p_hwfn,
 					    struct vf_pf_resc_request *p_req,
 					    struct pf_vf_resc *p_resp)
@@ -251,13 +250,47 @@ static void ecore_vf_pf_acquire_reduce_resc(struct ecore_hwfn *p_hwfn,
 	p_req->num_cids = p_resp->num_cids;
 }
 
-static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
+static enum _ecore_status_t
+ecore_vf_pf_soft_flr_acquire(struct ecore_hwfn *p_hwfn)
+{
+	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+	struct pfvf_def_resp_tlv *resp;
+	struct vfpf_soft_flr_tlv *req;
+	enum _ecore_status_t rc;
+
+	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_SOFT_FLR, sizeof(*req));
+
+	/* add list termination tlv */
+	ecore_add_tlv(&p_iov->offset,
+		      CHANNEL_TLV_LIST_END,
+		      sizeof(struct channel_list_end_tlv));
+
+	resp = &p_iov->pf2vf_reply->default_resp;
+	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "rc=0x%x\n", rc);
+
+	/* to release the mutex as ecore_vf_pf_acquire() take the mutex */
+	ecore_vf_pf_req_end(p_hwfn, ECORE_AGAIN);
+
+	/* As of today, there is no mechanism in place for VF to know the FLR
+	 * status, so sufficiently (worst case time) wait for FLR to complete,
+	 * as mailbox request to MFW by the PF for initiating VF flr and PF
+	 * processing VF FLR could take time.
+	 */
+	OSAL_MSLEEP(3000);
+
+	return ecore_vf_pf_acquire(p_hwfn);
+}
+
+enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
 {
 	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
 	struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
 	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
 	struct ecore_vf_acquire_sw_info vf_sw_info;
 	struct ecore_dev *p_dev = p_hwfn->p_dev;
+	u8 retry_cnt = p_iov->acquire_retry_cnt;
 	struct vf_pf_resc_request *p_resc;
 	bool resources_acquired = false;
 	struct vfpf_acquire_tlv *req;
@@ -318,6 +351,14 @@ static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
 		/* send acquire request */
 		rc = ecore_send_msg2pf(p_hwfn,
 				       &resp->hdr.status, sizeof(*resp));
+
+		if (retry_cnt && rc == ECORE_TIMEOUT) {
+			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+				   "VF retrying to acquire due to VPC timeout\n");
+			retry_cnt--;
+			continue;
+		}
+
 		if (rc != ECORE_SUCCESS)
 			goto exit;
 
@@ -343,7 +384,7 @@ static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
 			resources_acquired = true;
 		} /* PF refuses to allocate our resources */
 		else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
-			 attempts < VF_ACQUIRE_THRESH) {
+			 attempts < ECORE_VF_ACQUIRE_THRESH) {
 			ecore_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
 							&resp->resc);
 
@@ -391,6 +432,9 @@ static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
 				  "PF rejected acquisition by VF\n");
 			rc = ECORE_INVAL;
 			goto exit;
+		} else if (resp->hdr.status == PFVF_STATUS_ACQUIRED) {
+			ecore_vf_pf_req_end(p_hwfn, ECORE_AGAIN);
+			return ecore_vf_pf_soft_flr_acquire(p_hwfn);
 		} else {
 			DP_ERR(p_hwfn,
 			       "PF returned err %d to VF acquisition request\n",
@@ -477,7 +521,9 @@ u32 ecore_vf_hw_bar_size(struct ecore_hwfn *p_hwfn,
 	return 0;
 }
 
-enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
+enum _ecore_status_t
+ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn,
+		    struct ecore_hw_prepare_params *p_params)
 {
 	struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_hwfn->p_dev);
 	struct ecore_vf_iov *p_iov;
@@ -583,6 +629,7 @@ enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
 #endif
 	OSAL_MUTEX_INIT(&p_iov->mutex);
 
+	p_iov->acquire_retry_cnt = p_params->acquire_retry_cnt;
 	p_hwfn->vf_iov_info = p_iov;
 
 	p_hwfn->hw_info.personality = ECORE_PCI_ETH;
@@ -11,6 +11,7 @@
 #include "ecore_vf_api.h"
 #include "ecore_l2_api.h"
 #include "ecore_vfpf_if.h"
+#include "ecore_dev_api.h"
 
 /* Default number of CIDs [total of both Rx and Tx] to be requested
  * by default.
@@ -59,6 +60,9 @@ struct ecore_vf_iov {
 	 * bar or via the doorbell bar.
 	 */
 	bool b_doorbell_bar;
+
+	/* retry count for VF acquire on channel timeout */
+	u8 acquire_retry_cnt;
 };
 
 /**
@@ -72,6 +76,8 @@ struct ecore_vf_iov {
 enum _ecore_status_t ecore_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
 					      u16 *p_coal,
 					      struct ecore_queue_cid *p_cid);
+
+enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn);
 /**
  * @brief VF - Set Rx/Tx coalesce per VF's relative queue.
  *	Coalesce value '0' will omit the configuration.
@@ -92,10 +98,13 @@ enum _ecore_status_t ecore_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
  *	sends ACQUIRE message
  *
  * @param p_hwfn
+ * @param p_params
  *
  * @return enum _ecore_status_t
  */
-enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn);
+enum _ecore_status_t
+ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn,
+		    struct ecore_hw_prepare_params *p_params);
 
 /**
  * @brief VF - start the RX Queue by sending a message to the PF
@@ -11,6 +11,9 @@
 #include "ecore_mcp_api.h"
 
 #ifdef CONFIG_ECORE_SRIOV
+
+#define ECORE_VF_ACQUIRE_THRESH 3
+
 /**
  * @brief Read the VF bulletin and act on it if needed
  *
@@ -251,6 +251,13 @@ struct vfpf_qid_tlv {
 	u8 padding[3];
 };
 
+/* Soft FLR req */
+struct vfpf_soft_flr_tlv {
+	struct vfpf_first_tlv first_tlv;
+	u32 reserved1;
+	u32 reserved2;
+};
+
 /* Setup Queue */
 struct vfpf_start_rxq_tlv {
 	struct vfpf_first_tlv first_tlv;
@@ -557,6 +564,7 @@ union vfpf_tlvs {
 	struct vfpf_read_coal_req_tlv read_coal_req;
 	struct vfpf_bulletin_update_mac_tlv bulletin_update_mac;
 	struct vfpf_update_mtu_tlv update_mtu;
+	struct vfpf_soft_flr_tlv soft_flr;
 	struct tlv_buffer_size tlv_buf_size;
 };
 
@@ -689,6 +697,39 @@ enum {
 	CHANNEL_TLV_COALESCE_READ,
 	CHANNEL_TLV_BULLETIN_UPDATE_MAC,
 	CHANNEL_TLV_UPDATE_MTU,
+	CHANNEL_TLV_RDMA_ACQUIRE,
+	CHANNEL_TLV_RDMA_START,
+	CHANNEL_TLV_RDMA_STOP,
+	CHANNEL_TLV_RDMA_ADD_USER,
+	CHANNEL_TLV_RDMA_REMOVE_USER,
+	CHANNEL_TLV_RDMA_QUERY_COUNTERS,
+	CHANNEL_TLV_RDMA_ALLOC_TID,
+	CHANNEL_TLV_RDMA_REGISTER_TID,
+	CHANNEL_TLV_RDMA_DEREGISTER_TID,
+	CHANNEL_TLV_RDMA_FREE_TID,
+	CHANNEL_TLV_RDMA_CREATE_CQ,
+	CHANNEL_TLV_RDMA_RESIZE_CQ,
+	CHANNEL_TLV_RDMA_DESTROY_CQ,
+	CHANNEL_TLV_RDMA_CREATE_QP,
+	CHANNEL_TLV_RDMA_MODIFY_QP,
+	CHANNEL_TLV_RDMA_QUERY_QP,
+	CHANNEL_TLV_RDMA_DESTROY_QP,
+	CHANNEL_TLV_RDMA_QUERY_PORT,
+	CHANNEL_TLV_RDMA_QUERY_DEVICE,
+	CHANNEL_TLV_RDMA_IWARP_CONNECT,
+	CHANNEL_TLV_RDMA_IWARP_ACCEPT,
+	CHANNEL_TLV_RDMA_IWARP_CREATE_LISTEN,
+	CHANNEL_TLV_RDMA_IWARP_DESTROY_LISTEN,
+	CHANNEL_TLV_RDMA_IWARP_PAUSE_LISTEN,
+	CHANNEL_TLV_RDMA_IWARP_REJECT,
+	CHANNEL_TLV_RDMA_IWARP_SEND_RTR,
+	CHANNEL_TLV_ESTABLISH_LL2_CONN,
+	CHANNEL_TLV_TERMINATE_LL2_CONN,
+	CHANNEL_TLV_ASYNC_EVENT,
+	CHANNEL_TLV_RDMA_CREATE_SRQ,
+	CHANNEL_TLV_RDMA_MODIFY_SRQ,
+	CHANNEL_TLV_RDMA_DESTROY_SRQ,
+	CHANNEL_TLV_SOFT_FLR,
 	CHANNEL_TLV_MAX,
 
 	/* Required for iterating over vport-update tlvs.
@@ -1290,6 +1290,7 @@ struct public_drv_mb {
 /*deprecated don't use*/
 #define DRV_MSG_CODE_INITIATE_FLR_DEPRECATED 0x02000000
 #define DRV_MSG_CODE_INITIATE_PF_FLR 0x02010000
+#define DRV_MSG_CODE_INITIATE_VF_FLR 0x02020000
 #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
 #define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
 #define DRV_MSG_CODE_CFG_PF_VFS_MSIX 0xc0020000
@@ -1749,6 +1750,7 @@ struct public_drv_mb {
 #define FW_MSG_CODE_NIG_DRAIN_DONE 0x30000000
 #define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000
 #define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000
+#define FW_MSG_CODE_INITIATE_VF_FLR_OK 0xb0030000
 #define FW_MSG_CODE_ERR_RESOURCE_TEMPORARY_UNAVAILABLE 0x008b0000
 #define FW_MSG_CODE_ERR_RESOURCE_ALREADY_ALLOCATED 0x008c0000
 #define FW_MSG_CODE_ERR_RESOURCE_NOT_ALLOCATED 0x008d0000
|
@ -56,6 +56,10 @@ qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev,
|
||||
qed_init_pci(edev, pci_dev);
|
||||
|
||||
memset(&hw_prepare_params, 0, sizeof(hw_prepare_params));
|
||||
|
||||
if (is_vf)
|
||||
hw_prepare_params.acquire_retry_cnt = ECORE_VF_ACQUIRE_THRESH;
|
||||
|
||||
hw_prepare_params.personality = ECORE_PCI_ETH;
|
||||
hw_prepare_params.drv_resc_alloc = false;
|
||||
hw_prepare_params.chk_reg_fifo = false;
|
||||
|