net/qede/base: avoid possible race condition
There is a possible race in multiple-VF scenarios for base driver users that
use the optional APIs ecore_iov_pf_get_and_clear_pending_events() and
ecore_iov_pf_add_pending_events(). If the client does not synchronize the two
calls, the PF can clear a VF's pending-message indication without ever seeing
it [as 'get & clear' is not atomic], leading to a VF timeout on the command.

The solution is to switch to a per-VF indication instead of keeping a bitfield
of VFs with pending events. As part of the solution, the setting and clearing
of the indications is done internally by the base driver. As a result,
ecore_iov_pf_add_pending_events() is no longer needed, and
ecore_iov_pf_get_and_clear_pending_events() loses the 'and_clear' from its
name, as it is now a proper getter.

A VF is considered 'pending' [i.e., get_pending_events() should have '1' for
it in its bitfield] beginning when the PF's base driver recognizes a message
sent by that VF [in SP_DPC] and ending only when that VF's message is
processed.

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
parent e4782d3089
commit 4be18885d1
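The race is easiest to see in isolation. Below is a minimal standalone sketch,
not ecore code: it models the old API pair with a single 64-bit bitfield (the
driver actually keeps an array of u64), with comments marking the interleaving
in which an indication is cleared without ever being observed.

/* Illustrative only; names and the single-u64 bitfield are
 * simplifications, not the ecore implementation.
 */
#include <stdint.h>

static volatile uint64_t pending;	/* one bit per VF */

/* Interrupt path: VF 'vfid' has sent a message. */
static void add_pending(uint8_t vfid)
{
	pending |= 1ULL << vfid;	/* (A) */
}

/* Client path: the old, non-atomic 'get & clear'. */
static uint64_t get_and_clear_pending(void)
{
	uint64_t events = pending;	/* (B) snapshot            */
	pending = 0;			/* (C) clears everything   */
	return events;
}

/* If (A) runs between (B) and (C), the bit set at (A) is wiped at (C)
 * yet absent from the snapshot returned at (B): the PF never handles
 * the message and the VF times out on its command.
 */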
@@ -345,21 +345,13 @@ ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
 			     u16 vfid, bool b_enabled_only);
 
-/**
- * @brief Set pending events bitmap for given @vfid
- *
- * @param p_hwfn
- * @param vfid
- */
-void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid);
-
 /**
- * @brief Copy pending events bitmap in @events and clear
- *	  original copy of events
+ * @brief fills a bitmask of all VFs which have pending unhandled
+ * messages.
  *
  * @param p_hwfn
  */
-void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,
-					       u64 *events);
+void ecore_iov_pf_get_pending_events(struct ecore_hwfn *p_hwfn,
+				     u64 *events);
 
 /**
  * @brief Copy VF's message to PF's buffer
@@ -3755,8 +3755,7 @@ ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
 		ack_vfs[vfid / 32] |= (1 << (vfid % 32));
 		p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
 		    ~(1ULL << (rel_vf_id % 64));
-		p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
-		    ~(1ULL << (rel_vf_id % 64));
+		p_vf->vf_mbx.b_pending_msg = false;
 	}
 
 	return rc;
@@ -3886,12 +3885,22 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
 	mbx = &p_vf->vf_mbx;
 
 	/* ecore_iov_process_mbx_request */
-	DP_VERBOSE(p_hwfn,
-		   ECORE_MSG_IOV,
-		   "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id);
+#ifndef CONFIG_ECORE_SW_CHANNEL
+	if (!mbx->b_pending_msg) {
+		DP_NOTICE(p_hwfn, true,
+			  "VF[%02x]: Trying to process mailbox message when none is pending\n",
+			  p_vf->abs_vf_id);
+		return;
+	}
+	mbx->b_pending_msg = false;
+#endif
 
 	mbx->first_tlv = mbx->req_virt->first_tlv;
 
+	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+		   "VF[%02x]: Processing mailbox message [type %04x]\n",
+		   p_vf->abs_vf_id, mbx->first_tlv.tl.type);
+
 	OSAL_IOV_VF_MSG_TYPE(p_hwfn,
 			     p_vf->relative_vf_id,
 			     mbx->first_tlv.tl.type);
@@ -4016,26 +4025,20 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
 #endif
 }
 
-void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid)
+void ecore_iov_pf_get_pending_events(struct ecore_hwfn *p_hwfn,
+				     u64 *events)
 {
-	u64 add_bit = 1ULL << (vfid % 64);
+	int i;
 
-	/* TODO - add locking mechanisms [no atomics in ecore, so we can't
-	 * add the lock inside the ecore_pf_iov struct].
-	 */
-	p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
-}
+	OSAL_MEM_ZERO(events, sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
 
-void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,
-					       u64 *events)
-{
-	u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
+	ecore_for_each_vf(p_hwfn, i) {
+		struct ecore_vf_info *p_vf;
 
-	/* TODO - Take a lock */
-	OSAL_MEMCPY(events, p_pending_events,
-		    sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
-	OSAL_MEMSET(p_pending_events, 0,
-		    sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
+		p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
+		if (p_vf->vf_mbx.b_pending_msg)
+			events[i / 64] |= 1ULL << (i % 64);
+	}
 }
 
 static struct ecore_vf_info *
@@ -4069,6 +4072,8 @@ static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
 	 */
 	p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
 
+	p_vf->vf_mbx.b_pending_msg = true;
+
 	return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);
 }
 
@@ -45,6 +45,9 @@ struct ecore_iov_vf_mbx {
 	/* Address in VF where a pending message is located */
 	dma_addr_t pending_req;
 
+	/* Message from VF awaits handling */
+	bool b_pending_msg;
+
 	u8 *offset;
 
 #ifdef CONFIG_ECORE_SW_CHANNEL
@@ -168,7 +171,6 @@ struct ecore_vf_info {
  */
 struct ecore_pf_iov {
 	struct ecore_vf_info vfs_array[E4_MAX_NUM_VFS];
-	u64 pending_events[ECORE_VF_ARRAY_LENGTH];
 	u64 pending_flr[ECORE_VF_ARRAY_LENGTH];
 
 #ifndef REMOVE_DBG
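For completeness, a hedged sketch of how a client's dispatch loop might look
against the renamed getter. Only ecore_iov_pf_get_pending_events(),
ecore_for_each_vf() and ECORE_VF_ARRAY_LENGTH come from the base driver above;
process_vf_msg() and the wrapper function are hypothetical stand-ins for the
client's own handler.

/* Hypothetical consumer loop; mirrors how the new getter builds its
 * bitmask from each VF's b_pending_msg flag.
 */
static void process_vf_msg(struct ecore_hwfn *p_hwfn, int rel_vf_id);

static void example_handle_vf_events(struct ecore_hwfn *p_hwfn)
{
	u64 events[ECORE_VF_ARRAY_LENGTH];
	int i;

	/* A proper getter now: reading does not clear the per-VF
	 * b_pending_msg flags, so an indication that races with this
	 * call is picked up by the next call instead of being lost.
	 */
	ecore_iov_pf_get_pending_events(p_hwfn, events);

	ecore_for_each_vf(p_hwfn, i) {
		if (events[i / 64] & (1ULL << (i % 64)))
			process_vf_msg(p_hwfn, i);
	}
}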