net/qede/base: read per queue coalescing from HW

Add base driver API to read per queue coalescing from hardware.
Move ecore_set_rxq|txq_coalesce() declarations to ecore_l2.h.

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
Author: Rasesh Mody, 2017-09-18 18:51:20 -07:00; committed by Ferruh Yigit
parent 3b212853e4
commit 823a84aa6b
7 changed files with 295 additions and 22 deletions

drivers/net/qede/base/ecore_dev_api.h

@@ -635,6 +635,20 @@ enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 id,
bool is_vf);
/**
* @brief ecore_get_queue_coalesce - Retrieve coalesce value for a given queue.
*
* @param p_hwfn
* @param p_coal - used to store the coalesce value read from the hardware.
* @param handle - opaque queue handle (internally a struct ecore_queue_cid).
*
* @return enum _ecore_status_t
**/
enum _ecore_status_t
ecore_get_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 *p_coal,
void *handle);
/**
* @brief ecore_set_queue_coalesce - Configure coalesce parameters for Rx and
* Tx queue. The fact that we can configure coalescing to up to 511, but on

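For illustration, a minimal caller sketch (hypothetical helper and handle names, not part of the patch; assumes the opaque void * handle returned by the queue-start path):

/* Hypothetical caller: read back the coalescing value for one queue.
 * p_rxq is the opaque handle (internally a struct ecore_queue_cid)
 * that the Rx queue-start path returned. */
static int example_read_queue_coal(struct ecore_hwfn *p_hwfn, void *p_rxq)
{
	u16 coal_us = 0;
	enum _ecore_status_t rc;

	rc = ecore_get_queue_coalesce(p_hwfn, &coal_us, p_rxq);
	if (rc != ECORE_SUCCESS)
		return -1; /* ECORE_INVAL, ECORE_AGAIN or a DMAE failure */

	return coal_us; /* coalescing interval in microseconds */
}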
drivers/net/qede/base/ecore_l2.c

@@ -196,6 +196,7 @@ static struct ecore_queue_cid *
_ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
u16 opaque_fid, u32 cid,
struct ecore_queue_start_common_params *p_params,
bool b_is_rx,
struct ecore_queue_cid_vf_params *p_vf_params)
{
struct ecore_queue_cid *p_cid;
@@ -214,6 +215,7 @@ _ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
p_cid->rel.queue_id = p_params->queue_id;
p_cid->rel.stats_id = p_params->stats_id;
p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
p_cid->b_is_rx = b_is_rx;
p_cid->sb_idx = p_params->sb_idx;
/* Fill-in bits related to VFs' queues if information was provided */
@@ -287,6 +289,7 @@ _ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
struct ecore_queue_cid *
ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
struct ecore_queue_start_common_params *p_params,
bool b_is_rx,
struct ecore_queue_cid_vf_params *p_vf_params)
{
struct ecore_queue_cid *p_cid;
@@ -321,7 +324,7 @@ ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
}
p_cid = _ecore_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
p_params, p_vf_params);
p_params, b_is_rx, p_vf_params);
if ((p_cid == OSAL_NULL) && IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
_ecore_cxt_release_cid(p_hwfn, cid, vfid);
@@ -330,9 +333,11 @@ ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
static struct ecore_queue_cid *
ecore_eth_queue_to_cid_pf(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
bool b_is_rx,
struct ecore_queue_start_common_params *p_params)
{
return ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, OSAL_NULL);
return ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx,
OSAL_NULL);
}
enum _ecore_status_t
@@ -984,7 +989,7 @@ ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t rc;
/* Allocate a CID for the queue */
p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params);
p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params);
if (p_cid == OSAL_NULL)
return ECORE_NOMEM;
@@ -1200,7 +1205,7 @@ ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
struct ecore_queue_cid *p_cid;
enum _ecore_status_t rc;
p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params);
p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params);
if (p_cid == OSAL_NULL)
return ECORE_INVAL;
@@ -2137,3 +2142,108 @@ ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
enum _ecore_status_t ecore_get_rxq_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_queue_cid *p_cid,
u16 *p_rx_coal)
{
u32 coalesce, address, is_valid;
struct cau_sb_entry sb_entry;
u8 timer_res;
enum _ecore_status_t rc;
rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
p_cid->sb_igu_id * sizeof(u64),
(u64)(osal_uintptr_t)&sb_entry, 2, 0);
if (rc != ECORE_SUCCESS) {
DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
return rc;
}
timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0);
address = BAR0_MAP_REG_USDM_RAM +
USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
coalesce = ecore_rd(p_hwfn, p_ptt, address);
is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
if (!is_valid)
return ECORE_INVAL;
coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
*p_rx_coal = (u16)(coalesce << timer_res);
return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_get_txq_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_queue_cid *p_cid,
u16 *p_tx_coal)
{
u32 coalesce, address, is_valid;
struct cau_sb_entry sb_entry;
u8 timer_res;
enum _ecore_status_t rc;
rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
p_cid->sb_igu_id * sizeof(u64),
(u64)(osal_uintptr_t)&sb_entry, 2, 0);
if (rc != ECORE_SUCCESS) {
DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
return rc;
}
timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1);
address = BAR0_MAP_REG_XSDM_RAM +
XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
coalesce = ecore_rd(p_hwfn, p_ptt, address);
is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
if (!is_valid)
return ECORE_INVAL;
coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
*p_tx_coal = (u16)(coalesce << timer_res);
return ECORE_SUCCESS;
}
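Both getters decode the value the corresponding set path programmed: the CAU status-block entry holds a per-direction timer resolution (TIMER_RES0 for Rx, TIMER_RES1 for Tx), the storm RAM queue zone holds a timeset plus a valid bit, and the microsecond value is timeset << timer_res. A sketch of just that decode step, reusing the driver's GET_FIELD helper (illustrative only, not part of the patch):

/* Decode sketch: with timer_res = 4 and timeset = 3, the reported
 * coalescing interval is 3 << 4 = 48 microseconds. The VALID bit
 * guards against reading a zone that was never programmed. */
static u16 example_decode_coal(u32 zone_reg, u8 timer_res)
{
	if (!GET_FIELD(zone_reg, COALESCING_TIMESET_VALID))
		return 0; /* caller maps this to ECORE_INVAL */
	return (u16)(GET_FIELD(zone_reg, COALESCING_TIMESET_TIMESET) <<
		     timer_res);
}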
enum _ecore_status_t
ecore_get_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 *p_coal,
void *handle)
{
struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)handle;
enum _ecore_status_t rc = ECORE_SUCCESS;
struct ecore_ptt *p_ptt;
if (IS_VF(p_hwfn->p_dev)) {
rc = ecore_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid);
if (rc != ECORE_SUCCESS)
DP_NOTICE(p_hwfn, false,
"Unable to read queue calescing\n");
return rc;
}
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return ECORE_AGAIN;
if (p_cid->b_is_rx) {
rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
if (rc != ECORE_SUCCESS)
goto out;
} else {
rc = ecore_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
if (rc != ECORE_SUCCESS)
goto out;
}
out:
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
}
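On the PF path, ECORE_AGAIN only means no free PTT window was available, so a caller may reasonably retry; a hypothetical wrapper (names assumed, not part of the patch):

/* Hypothetical bounded retry around the PF path's transient failure. */
static enum _ecore_status_t
example_get_coal_retry(struct ecore_hwfn *p_hwfn, u16 *p_coal, void *handle)
{
	enum _ecore_status_t rc = ECORE_AGAIN;
	int tries;

	for (tries = 0; tries < 3 && rc == ECORE_AGAIN; tries++)
		rc = ecore_get_queue_coalesce(p_hwfn, p_coal, handle);

	return rc;
}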

drivers/net/qede/base/ecore_l2.h

@@ -64,6 +64,8 @@ struct ecore_queue_cid {
u32 cid;
u16 opaque_fid;
bool b_is_rx;
/* VFs queues are mapped differently, so we need to know the
* relative queue associated with them [0-based].
* Notice this is relevant on the *PF* queue-cid of its VF's queues,
@@ -96,6 +98,7 @@ void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
struct ecore_queue_cid *
ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
struct ecore_queue_start_common_params *p_params,
bool b_is_rx,
struct ecore_queue_cid_vf_params *p_vf_params);
enum _ecore_status_t
@@ -140,4 +143,25 @@ ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
u16 pq_id);
u8 ecore_mcast_bin_from_mac(u8 *mac);
enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 coalesce,
struct ecore_queue_cid *p_cid);
enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 coalesce,
struct ecore_queue_cid *p_cid);
enum _ecore_status_t ecore_get_rxq_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_queue_cid *p_cid,
u16 *p_hw_coal);
enum _ecore_status_t ecore_get_txq_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_queue_cid *p_cid,
u16 *p_hw_coal);
#endif

drivers/net/qede/base/ecore_sriov.c

@@ -54,6 +54,7 @@ const char *ecore_channel_tlvs_string[] = {
"CHANNEL_TLV_UPDATE_TUNN_PARAM",
"CHANNEL_TLV_COALESCE_UPDATE",
"CHANNEL_TLV_QID",
"CHANNEL_TLV_COALESCE_READ",
"CHANNEL_TLV_MAX"
};
@@ -1392,6 +1393,8 @@ static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
REG_WR(p_hwfn,
GTT_BAR0_MAP_REG_USDM_RAM +
USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
OSAL_IOV_PF_RESP_TYPE(p_hwfn, p_vf->relative_vf_id, status);
}
static u16 ecore_iov_vport_to_tlv(enum ecore_iov_vport_update_flag flag)
@@ -1476,8 +1479,6 @@ static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,
sizeof(struct channel_list_end_tlv));
ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
OSAL_IOV_PF_RESP_TYPE(p_hwfn, vf_info->relative_vf_id, status);
}
struct ecore_public_vf_info
@@ -2258,7 +2259,7 @@ static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
vf_params.qid_usage_idx = qid_usage_idx;
p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
&params, &vf_params);
&params, true, &vf_params);
if (p_cid == OSAL_NULL)
goto out;
@@ -2532,7 +2533,7 @@ static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
vf_params.qid_usage_idx = qid_usage_idx;
p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
&params, &vf_params);
&params, false, &vf_params);
if (p_cid == OSAL_NULL)
goto out;
@@ -3452,6 +3453,76 @@ static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
length, status);
}
static void ecore_iov_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *p_vf)
{
struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
struct pfvf_read_coal_resp_tlv *p_resp;
struct vfpf_read_coal_req_tlv *req;
u8 status = PFVF_STATUS_FAILURE;
struct ecore_vf_queue *p_queue;
struct ecore_queue_cid *p_cid;
enum _ecore_status_t rc = ECORE_SUCCESS;
u16 coal = 0, qid, i;
bool b_is_rx;
mbx->offset = (u8 *)mbx->reply_virt;
req = &mbx->req_virt->read_coal_req;
qid = req->qid;
b_is_rx = req->is_rx ? true : false;
if (b_is_rx) {
if (!ecore_iov_validate_rxq(p_hwfn, p_vf, qid,
ECORE_IOV_VALIDATE_Q_ENABLE)) {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"VF[%d]: Invalid Rx queue_id = %d\n",
p_vf->abs_vf_id, qid);
goto send_resp;
}
p_cid = ecore_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);
rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
if (rc != ECORE_SUCCESS)
goto send_resp;
} else {
if (!ecore_iov_validate_txq(p_hwfn, p_vf, qid,
ECORE_IOV_VALIDATE_Q_ENABLE)) {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"VF[%d]: Invalid Tx queue_id = %d\n",
p_vf->abs_vf_id, qid);
goto send_resp;
}
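/* A VF Tx queue may own any cid within its queue zone, so scan for
 * the first cid configured for Tx and report its coalescing. */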
for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
p_queue = &p_vf->vf_queues[qid];
if ((p_queue->cids[i].p_cid == OSAL_NULL) ||
(!p_queue->cids[i].b_is_tx))
continue;
p_cid = p_queue->cids[i].p_cid;
rc = ecore_get_txq_coalesce(p_hwfn, p_ptt,
p_cid, &coal);
if (rc != ECORE_SUCCESS)
goto send_resp;
break;
}
}
status = PFVF_STATUS_SUCCESS;
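/* The handler always replies: on any failure above, the response still
 * goes out with status PFVF_STATUS_FAILURE and coal left at 0. */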
send_resp:
p_resp = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_COALESCE_READ,
sizeof(*p_resp));
p_resp->coal = coal;
ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
}
static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
@@ -3986,6 +4057,9 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
case CHANNEL_TLV_COALESCE_UPDATE:
ecore_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
break;
case CHANNEL_TLV_COALESCE_READ:
ecore_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
break;
}
} else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
/* If we've received a message from a VF we consider malicious

drivers/net/qede/base/ecore_vf.c

@@ -1454,6 +1454,39 @@ enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn)
return rc;
}
enum _ecore_status_t ecore_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
u16 *p_coal,
struct ecore_queue_cid *p_cid)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_read_coal_resp_tlv *resp;
struct vfpf_read_coal_req_tlv *req;
enum _ecore_status_t rc;
/* clear mailbox and prep header tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_READ,
sizeof(*req));
req->qid = p_cid->rel.queue_id;
req->is_rx = p_cid->b_is_rx ? 1 : 0;
ecore_add_tlv(&p_iov->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->read_coal_resp;
rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc != ECORE_SUCCESS)
goto exit;
if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
rc = ECORE_INVAL;
goto exit;
}
*p_coal = resp->coal;
exit:
ecore_vf_pf_req_end(p_hwfn, rc);
return rc;
}
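The round trip this function performs maps onto the two TLVs added in ecore_vfpf_if.h below; a wire-level view of one read (byte widths taken from the struct fields, overall framing assumed from the channel conventions):

/*
 * VF -> PF: vfpf_read_coal_req_tlv
 *           { first_tlv | qid (2B) | is_rx (1B) | padding (5B) }
 *           followed by a channel_list_end_tlv
 *
 * PF -> VF: pfvf_read_coal_resp_tlv
 *           { hdr | coal (2B) | padding (6B) }
 */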
enum _ecore_status_t
ecore_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal, u16 tx_coal,
struct ecore_queue_cid *p_cid)

drivers/net/qede/base/ecore_vf.h

@@ -51,23 +51,25 @@ struct ecore_vf_iov {
struct ecore_sb_info *sbs_info[PFVF_MAX_SBS_PER_VF];
};
enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 coalesce,
struct ecore_queue_cid *p_cid);
enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 coalesce,
struct ecore_queue_cid *p_cid);
/**
* @brief VF - Get coalesce per VF's relative queue.
*
* @param p_hwfn
* @param p_coal - coalesce value in microseconds for VF queues.
* @param p_cid - queue cid
*
**/
enum _ecore_status_t ecore_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
u16 *p_coal,
struct ecore_queue_cid *p_cid);
/**
* @brief VF - Set Rx/Tx coalesce per VF's relative queue.
* Coalesce value '0' will omit the configuration.
*
* @param p_hwfn
* @param rx_coal - coalesce value in microseconds for rx queue
* @param tx_coal - coalesce value in microseconds for tx queue
* @param p_cid - queue cid
*
**/
enum _ecore_status_t ecore_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,

drivers/net/qede/base/ecore_vfpf_if.h

@@ -503,6 +503,19 @@ struct vfpf_update_coalesce {
u8 padding[2];
};
struct vfpf_read_coal_req_tlv {
struct vfpf_first_tlv first_tlv;
u16 qid;
u8 is_rx;
u8 padding[5];
};
struct pfvf_read_coal_resp_tlv {
struct pfvf_tlv hdr;
u16 coal;
u8 padding[6];
};
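The explicit padding fields suggest the channel keeps TLV bodies at 8-byte multiples; a self-contained compile-time check of that reading (stub layouts and sizes are assumptions for illustration, not the driver's real definitions):

#include <stdint.h>

/* Stand-ins mirroring the new TLVs; vfpf_first_tlv and pfvf_tlv are
 * stubbed at assumed sizes of 16 and 8 bytes respectively. */
struct vfpf_first_tlv_stub { uint8_t bytes[16]; };
struct pfvf_tlv_stub { uint8_t bytes[8]; };

struct vfpf_read_coal_req_stub {
	struct vfpf_first_tlv_stub first_tlv;
	uint16_t qid;
	uint8_t  is_rx;
	uint8_t  padding[5];   /* pads the body to an 8-byte multiple */
};

struct pfvf_read_coal_resp_stub {
	struct pfvf_tlv_stub hdr;
	uint16_t coal;
	uint8_t  padding[6];   /* likewise an 8-byte multiple */
};

_Static_assert(sizeof(struct vfpf_read_coal_req_stub) % 8 == 0,
	       "read-coalesce request TLV should stay 8-byte aligned");
_Static_assert(sizeof(struct pfvf_read_coal_resp_stub) % 8 == 0,
	       "read-coalesce response TLV should stay 8-byte aligned");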
union vfpf_tlvs {
struct vfpf_first_tlv first_tlv;
struct vfpf_acquire_tlv acquire;
@@ -516,6 +529,7 @@ union vfpf_tlvs {
struct vfpf_ucast_filter_tlv ucast_filter;
struct vfpf_update_tunn_param_tlv tunn_param_update;
struct vfpf_update_coalesce update_coalesce;
struct vfpf_read_coal_req_tlv read_coal_req;
struct tlv_buffer_size tlv_buf_size;
};
@@ -525,6 +539,7 @@ union pfvf_tlvs {
struct tlv_buffer_size tlv_buf_size;
struct pfvf_start_queue_resp_tlv queue_start;
struct pfvf_update_tunn_param_tlv tunn_param_resp;
struct pfvf_read_coal_resp_tlv read_coal_resp;
};
/* This is a structure which is allocated in the VF, which the PF may update
@@ -644,6 +659,7 @@ enum {
CHANNEL_TLV_UPDATE_TUNN_PARAM,
CHANNEL_TLV_COALESCE_UPDATE,
CHANNEL_TLV_QID,
CHANNEL_TLV_COALESCE_READ,
CHANNEL_TLV_MAX,
/* Required for iterating over vport-update tlvs.