common/sfc_efx/base: move RxQ init/fini wrappers to generic
RxQ init/fini MCDI is similar on Riverhead, and these functions should be
reused to implement RxQ creation and destruction on Riverhead.

Signed-off-by: Andrew Rybchenko <arybchenko@solarflare.com>
Reviewed-by: Andy Moreton <amoreton@xilinx.com>
This commit is contained in:
parent
9edb8ee3e7
commit
09b59c7da7
@ -10,190 +10,6 @@
|
||||
|
||||
#if EFX_OPTS_EF10()
|
||||
|
||||
|
||||
/*
 * Issue MC_CMD_INIT_RXQ to the MC to initialise an RX queue.
 *
 * The request buffer is declared with the V4 input length; the EXT/V3/V4
 * field macros used below all address offsets within that same buffer.
 *
 * Returns 0 on success, or an efx_rc_t error code:
 *  - EINVAL if the descriptor ring memory is absent or too small, or if
 *    scatter is enabled together with no-continuation-event mode;
 *  - otherwise, the error reported by the firmware for the MCDI request.
 */
static	__checkReturn	efx_rc_t
efx_mcdi_init_rxq(
	__in		efx_nic_t *enp,
	__in		uint32_t ndescs,
	__in		efx_evq_t *eep,
	__in		uint32_t label,
	__in		uint32_t instance,
	__in		efsys_mem_t *esmp,
	__in		boolean_t disable_scatter,
	__in		boolean_t want_inner_classes,
	__in		uint32_t buf_size,
	__in		uint32_t ps_bufsize,
	__in		uint32_t es_bufs_per_desc,
	__in		uint32_t es_max_dma_len,
	__in		uint32_t es_buf_stride,
	__in		uint32_t hol_block_timeout)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_INIT_RXQ_V4_IN_LEN,
		MC_CMD_INIT_RXQ_V4_OUT_LEN);
	int npages = efx_rxq_nbufs(enp, ndescs);
	int i;
	efx_qword_t *dma_addr;
	uint64_t addr;
	efx_rc_t rc;
	uint32_t dma_mode;
	boolean_t want_outer_classes;
	boolean_t no_cont_ev;

	EFSYS_ASSERT3U(ndescs, <=, encp->enc_rxq_max_ndescs);

	/* The caller must supply memory large enough to back the ring. */
	if ((esmp == NULL) ||
	    (EFSYS_MEM_SIZE(esmp) < efx_rxq_size(enp, ndescs))) {
		rc = EINVAL;
		goto fail1;
	}

	no_cont_ev = (eep->ee_flags & EFX_EVQ_FLAGS_NO_CONT_EV);
	if ((no_cont_ev == B_TRUE) && (disable_scatter == B_FALSE)) {
		/* TODO: Support scatter in NO_CONT_EV mode */
		rc = EINVAL;
		goto fail2;
	}

	/* Packed stream takes precedence over equal stride super-buffer. */
	if (ps_bufsize > 0)
		dma_mode = MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM;
	else if (es_bufs_per_desc > 0)
		dma_mode = MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_SUPER_BUFFER;
	else
		dma_mode = MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET;

	if (encp->enc_tunnel_encapsulations_supported != 0 &&
	    !want_inner_classes) {
		/*
		 * WANT_OUTER_CLASSES can only be specified on hardware which
		 * supports tunnel encapsulation offloads, even though it is
		 * effectively the behaviour the hardware gives.
		 *
		 * Also, on hardware which does support such offloads, older
		 * firmware rejects the flag if the offloads are not supported
		 * by the current firmware variant, which means this may fail if
		 * the capabilities are not updated when the firmware variant
		 * changes. This is not an issue on newer firmware, as it was
		 * changed in bug 69842 (v6.4.2.1007) to permit this flag to be
		 * specified on all firmware variants.
		 */
		want_outer_classes = B_TRUE;
	} else {
		want_outer_classes = B_FALSE;
	}

	req.emr_cmd = MC_CMD_INIT_RXQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_RXQ_V4_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_RXQ_V4_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_SIZE, ndescs);
	MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_TARGET_EVQ, eep->ee_index);
	MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_LABEL, label);
	MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_INSTANCE, instance);
	MCDI_IN_POPULATE_DWORD_10(req, INIT_RXQ_EXT_IN_FLAGS,
	    INIT_RXQ_EXT_IN_FLAG_BUFF_MODE, 0,
	    INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT, 0,
	    INIT_RXQ_EXT_IN_FLAG_TIMESTAMP, 0,
	    INIT_RXQ_EXT_IN_CRC_MODE, 0,
	    INIT_RXQ_EXT_IN_FLAG_PREFIX, 1,
	    INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER, disable_scatter,
	    INIT_RXQ_EXT_IN_DMA_MODE,
	    dma_mode,
	    INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE, ps_bufsize,
	    INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES, want_outer_classes,
	    INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV, no_cont_ev);
	MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_OWNER_ID, 0);
	MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_PORT_ID, enp->en_vport_id);

	/* Equal stride super-buffer fields are meaningful only in that mode. */
	if (es_bufs_per_desc > 0) {
		MCDI_IN_SET_DWORD(req,
		    INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET,
		    es_bufs_per_desc);
		MCDI_IN_SET_DWORD(req,
		    INIT_RXQ_V3_IN_ES_MAX_DMA_LEN, es_max_dma_len);
		MCDI_IN_SET_DWORD(req,
		    INIT_RXQ_V3_IN_ES_PACKET_STRIDE, es_buf_stride);
		MCDI_IN_SET_DWORD(req,
		    INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT,
		    hol_block_timeout);
	}

	/* Pass an explicit buffer size only when the NIC config allows it. */
	if (encp->enc_init_rxq_with_buffer_size)
		MCDI_IN_SET_DWORD(req, INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES,
		    buf_size);

	/*
	 * Fill in the table of DMA addresses backing the descriptor ring:
	 * one 64-bit entry (split into two dwords) per EFX_BUF_SIZE page.
	 */
	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_RXQ_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail3;
	}

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
|
||||
|
||||
static __checkReturn efx_rc_t
|
||||
efx_mcdi_fini_rxq(
|
||||
__in efx_nic_t *enp,
|
||||
__in uint32_t instance)
|
||||
{
|
||||
efx_mcdi_req_t req;
|
||||
EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_RXQ_IN_LEN,
|
||||
MC_CMD_FINI_RXQ_OUT_LEN);
|
||||
efx_rc_t rc;
|
||||
|
||||
req.emr_cmd = MC_CMD_FINI_RXQ;
|
||||
req.emr_in_buf = payload;
|
||||
req.emr_in_length = MC_CMD_FINI_RXQ_IN_LEN;
|
||||
req.emr_out_buf = payload;
|
||||
req.emr_out_length = MC_CMD_FINI_RXQ_OUT_LEN;
|
||||
|
||||
MCDI_IN_SET_DWORD(req, FINI_RXQ_IN_INSTANCE, instance);
|
||||
|
||||
efx_mcdi_execute_quiet(enp, &req);
|
||||
|
||||
if (req.emr_rc != 0) {
|
||||
rc = req.emr_rc;
|
||||
goto fail1;
|
||||
}
|
||||
|
||||
return (0);
|
||||
|
||||
fail1:
|
||||
/*
|
||||
* EALREADY is not an error, but indicates that the MC has rebooted and
|
||||
* that the RXQ has already been destroyed.
|
||||
*/
|
||||
if (rc != EALREADY)
|
||||
EFSYS_PROBE1(fail1, efx_rc_t, rc);
|
||||
|
||||
return (rc);
|
||||
}
|
||||
|
||||
#if EFSYS_OPT_RX_SCALE
|
||||
static __checkReturn efx_rc_t
|
||||
efx_mcdi_rss_context_alloc(
|
||||
|
@ -1430,6 +1430,34 @@ efx_mcdi_fini_evq(
|
||||
|
||||
#endif /* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */
|
||||
|
||||
#if EFX_OPTS_EF10()
|
||||
|
||||
LIBEFX_INTERNAL
|
||||
extern __checkReturn efx_rc_t
|
||||
efx_mcdi_init_rxq(
|
||||
__in efx_nic_t *enp,
|
||||
__in uint32_t ndescs,
|
||||
__in efx_evq_t *eep,
|
||||
__in uint32_t label,
|
||||
__in uint32_t instance,
|
||||
__in efsys_mem_t *esmp,
|
||||
__in boolean_t disable_scatter,
|
||||
__in boolean_t want_inner_classes,
|
||||
__in uint32_t buf_size,
|
||||
__in uint32_t ps_bufsize,
|
||||
__in uint32_t es_bufs_per_desc,
|
||||
__in uint32_t es_max_dma_len,
|
||||
__in uint32_t es_buf_stride,
|
||||
__in uint32_t hol_block_timeout);
|
||||
|
||||
LIBEFX_INTERNAL
|
||||
extern __checkReturn efx_rc_t
|
||||
efx_mcdi_fini_rxq(
|
||||
__in efx_nic_t *enp,
|
||||
__in uint32_t instance);
|
||||
|
||||
#endif /* EFX_OPTS_EF10() */
|
||||
|
||||
#endif /* EFSYS_OPT_MCDI */
|
||||
|
||||
#if EFSYS_OPT_MAC_STATS
|
||||
|
@ -2682,4 +2682,191 @@ fail1:
|
||||
|
||||
#endif /* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */
|
||||
|
||||
#if EFX_OPTS_EF10()
|
||||
|
||||
/*
 * Issue MC_CMD_INIT_RXQ to the MC to initialise an RX queue.
 *
 * The request buffer is declared with the V4 input length; the EXT/V3/V4
 * field macros used below all address offsets within that same buffer.
 *
 * Returns 0 on success, or an efx_rc_t error code:
 *  - EINVAL if the descriptor ring memory is absent or too small, or if
 *    scatter is enabled together with no-continuation-event mode;
 *  - otherwise, the error reported by the firmware for the MCDI request.
 */
	__checkReturn	efx_rc_t
efx_mcdi_init_rxq(
	__in		efx_nic_t *enp,
	__in		uint32_t ndescs,
	__in		efx_evq_t *eep,
	__in		uint32_t label,
	__in		uint32_t instance,
	__in		efsys_mem_t *esmp,
	__in		boolean_t disable_scatter,
	__in		boolean_t want_inner_classes,
	__in		uint32_t buf_size,
	__in		uint32_t ps_bufsize,
	__in		uint32_t es_bufs_per_desc,
	__in		uint32_t es_max_dma_len,
	__in		uint32_t es_buf_stride,
	__in		uint32_t hol_block_timeout)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_INIT_RXQ_V4_IN_LEN,
		MC_CMD_INIT_RXQ_V4_OUT_LEN);
	int npages = efx_rxq_nbufs(enp, ndescs);
	int i;
	efx_qword_t *dma_addr;
	uint64_t addr;
	efx_rc_t rc;
	uint32_t dma_mode;
	boolean_t want_outer_classes;
	boolean_t no_cont_ev;

	EFSYS_ASSERT3U(ndescs, <=, encp->enc_rxq_max_ndescs);

	/* The caller must supply memory large enough to back the ring. */
	if ((esmp == NULL) ||
	    (EFSYS_MEM_SIZE(esmp) < efx_rxq_size(enp, ndescs))) {
		rc = EINVAL;
		goto fail1;
	}

	no_cont_ev = (eep->ee_flags & EFX_EVQ_FLAGS_NO_CONT_EV);
	if ((no_cont_ev == B_TRUE) && (disable_scatter == B_FALSE)) {
		/* TODO: Support scatter in NO_CONT_EV mode */
		rc = EINVAL;
		goto fail2;
	}

	/* Packed stream takes precedence over equal stride super-buffer. */
	if (ps_bufsize > 0)
		dma_mode = MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM;
	else if (es_bufs_per_desc > 0)
		dma_mode = MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_SUPER_BUFFER;
	else
		dma_mode = MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET;

	if (encp->enc_tunnel_encapsulations_supported != 0 &&
	    !want_inner_classes) {
		/*
		 * WANT_OUTER_CLASSES can only be specified on hardware which
		 * supports tunnel encapsulation offloads, even though it is
		 * effectively the behaviour the hardware gives.
		 *
		 * Also, on hardware which does support such offloads, older
		 * firmware rejects the flag if the offloads are not supported
		 * by the current firmware variant, which means this may fail if
		 * the capabilities are not updated when the firmware variant
		 * changes. This is not an issue on newer firmware, as it was
		 * changed in bug 69842 (v6.4.2.1007) to permit this flag to be
		 * specified on all firmware variants.
		 */
		want_outer_classes = B_TRUE;
	} else {
		want_outer_classes = B_FALSE;
	}

	req.emr_cmd = MC_CMD_INIT_RXQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_RXQ_V4_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_RXQ_V4_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_SIZE, ndescs);
	MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_TARGET_EVQ, eep->ee_index);
	MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_LABEL, label);
	MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_INSTANCE, instance);
	MCDI_IN_POPULATE_DWORD_10(req, INIT_RXQ_EXT_IN_FLAGS,
	    INIT_RXQ_EXT_IN_FLAG_BUFF_MODE, 0,
	    INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT, 0,
	    INIT_RXQ_EXT_IN_FLAG_TIMESTAMP, 0,
	    INIT_RXQ_EXT_IN_CRC_MODE, 0,
	    INIT_RXQ_EXT_IN_FLAG_PREFIX, 1,
	    INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER, disable_scatter,
	    INIT_RXQ_EXT_IN_DMA_MODE,
	    dma_mode,
	    INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE, ps_bufsize,
	    INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES, want_outer_classes,
	    INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV, no_cont_ev);
	MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_OWNER_ID, 0);
	MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_PORT_ID, enp->en_vport_id);

	/* Equal stride super-buffer fields are meaningful only in that mode. */
	if (es_bufs_per_desc > 0) {
		MCDI_IN_SET_DWORD(req,
		    INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET,
		    es_bufs_per_desc);
		MCDI_IN_SET_DWORD(req,
		    INIT_RXQ_V3_IN_ES_MAX_DMA_LEN, es_max_dma_len);
		MCDI_IN_SET_DWORD(req,
		    INIT_RXQ_V3_IN_ES_PACKET_STRIDE, es_buf_stride);
		MCDI_IN_SET_DWORD(req,
		    INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT,
		    hol_block_timeout);
	}

	/* Pass an explicit buffer size only when the NIC config allows it. */
	if (encp->enc_init_rxq_with_buffer_size)
		MCDI_IN_SET_DWORD(req, INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES,
		    buf_size);

	/*
	 * Fill in the table of DMA addresses backing the descriptor ring:
	 * one 64-bit entry (split into two dwords) per EFX_BUF_SIZE page.
	 */
	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_RXQ_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail3;
	}

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
|
||||
|
||||
__checkReturn efx_rc_t
|
||||
efx_mcdi_fini_rxq(
|
||||
__in efx_nic_t *enp,
|
||||
__in uint32_t instance)
|
||||
{
|
||||
efx_mcdi_req_t req;
|
||||
EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_RXQ_IN_LEN,
|
||||
MC_CMD_FINI_RXQ_OUT_LEN);
|
||||
efx_rc_t rc;
|
||||
|
||||
req.emr_cmd = MC_CMD_FINI_RXQ;
|
||||
req.emr_in_buf = payload;
|
||||
req.emr_in_length = MC_CMD_FINI_RXQ_IN_LEN;
|
||||
req.emr_out_buf = payload;
|
||||
req.emr_out_length = MC_CMD_FINI_RXQ_OUT_LEN;
|
||||
|
||||
MCDI_IN_SET_DWORD(req, FINI_RXQ_IN_INSTANCE, instance);
|
||||
|
||||
efx_mcdi_execute_quiet(enp, &req);
|
||||
|
||||
if (req.emr_rc != 0) {
|
||||
rc = req.emr_rc;
|
||||
goto fail1;
|
||||
}
|
||||
|
||||
return (0);
|
||||
|
||||
fail1:
|
||||
/*
|
||||
* EALREADY is not an error, but indicates that the MC has rebooted and
|
||||
* that the RXQ has already been destroyed.
|
||||
*/
|
||||
if (rc != EALREADY)
|
||||
EFSYS_PROBE1(fail1, efx_rc_t, rc);
|
||||
|
||||
return (rc);
|
||||
}
|
||||
|
||||
#endif /* EFX_OPTS_EF10() */
|
||||
|
||||
#endif /* EFSYS_OPT_MCDI */
|
||||
|
Loading…
x
Reference in New Issue
Block a user