common/sfc_efx/base: add base virtio support for vDPA

In vDPA mode, only the data path is offloaded to the hardware; the
control path still goes through the hypervisor, which configures the
virtqueues via the vDPA driver, so new virtqueue APIs are required.

Implement virtio init/fini and virtqueue create/destroy APIs.

Signed-off-by: Vijay Srivastava <vijays@solarflare.com>
Signed-off-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Vijay Srivastava 2021-03-16 11:58:25 +03:00 committed by Ferruh Yigit
parent 2382a60759
commit 4dda72dbde
8 changed files with 578 additions and 0 deletions
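The patch only adds the library plumbing; to make the intended call sequence concrete, here is a minimal usage sketch of the new control-path APIs as a vDPA backend might drive them. The helper name example_vq_lifecycle and the caller-supplied NIC handle enp and configuration vq_cfgp are assumptions for illustration, not part of this patch.

#include "efx.h"

/*
 * Hypothetical sketch: drive one virtqueue through its whole lifecycle.
 * The caller is assumed to have probed the NIC and filled the config.
 */
static efx_rc_t
example_vq_lifecycle(efx_nic_t *enp, efx_virtio_vq_cfg_t *vq_cfgp)
{
	efx_virtio_vq_t *vq;
	efx_virtio_vq_dyncfg_t final_state;
	efx_rc_t rc;

	if ((rc = efx_virtio_init(enp)) != 0)		/* select per-family virtio ops */
		return (rc);
	if ((rc = efx_virtio_qcreate(enp, &vq)) != 0)	/* allocate the queue object */
		goto fini;
	/* Program the descriptor table and rings via MCDI; no migration state. */
	if ((rc = efx_virtio_qstart(vq, vq_cfgp, NULL)) != 0)
		goto destroy;

	/* ... the datapath now runs entirely in hardware ... */

	rc = efx_virtio_qstop(vq, &final_state);	/* returns FINAL_PIDX/FINAL_CIDX */
destroy:
	efx_virtio_qdestroy(vq);	/* releases the object once it is back in the initialized state */
fini:
	efx_virtio_fini(enp);
	return (rc);
}

The per-queue state machine introduced further down (UNKNOWN, INITIALIZED, STARTED) is what makes this ordering mandatory: qstart only accepts an initialized queue, and qdestroy only releases a queue that is not started.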


@@ -4368,6 +4368,115 @@ efx_mae_action_rule_remove(
#endif /* EFSYS_OPT_MAE */
#if EFSYS_OPT_VIRTIO
/* A Virtio net device can have one or more pairs of Rx/Tx virtqueues
* while a virtio block device has a single virtqueue;
* for further details refer to section 4.2.3 of SF-120734.
*/
typedef enum efx_virtio_vq_type_e {
EFX_VIRTIO_VQ_TYPE_NET_RXQ,
EFX_VIRTIO_VQ_TYPE_NET_TXQ,
EFX_VIRTIO_VQ_TYPE_BLOCK,
EFX_VIRTIO_VQ_NTYPES
} efx_virtio_vq_type_t;
typedef struct efx_virtio_vq_dyncfg_s {
/*
* If the queue is being created to be migrated then this
* should be the FINAL_PIDX value returned by MC_CMD_VIRTIO_FINI_QUEUE
* of the queue being migrated from. Otherwise, it should be zero.
*/
uint32_t evvd_vq_pidx;
/*
* If this queue is being created to be migrated then this
* should be the FINAL_CIDX value returned by MC_CMD_VIRTIO_FINI_QUEUE
* of the queue being migrated from. Otherwise, it should be zero.
*/
uint32_t evvd_vq_cidx;
} efx_virtio_vq_dyncfg_t;
/*
* Virtqueue size must be a power of 2; the maximum size is 32768
* (see VIRTIO v1.1 section 2.6)
*/
#define EFX_VIRTIO_MAX_VQ_SIZE 0x8000
typedef struct efx_virtio_vq_cfg_s {
unsigned int evvc_vq_num;
efx_virtio_vq_type_t evvc_type;
/*
* vDPA as VF: the target VF number if the queue is being created on a VF.
* vDPA as PF: if the queue is to be created on the PF, this should be
* EFX_PCI_VF_INVALID.
*/
uint16_t evvc_target_vf;
/*
* Maximum virtqueue size is EFX_VIRTIO_MAX_VQ_SIZE and
* virtqueue size 0 means the queue is unavailable.
*/
uint32_t evvc_vq_size;
efsys_dma_addr_t evvc_desc_tbl_addr;
efsys_dma_addr_t evvc_avail_ring_addr;
efsys_dma_addr_t evvc_used_ring_addr;
/* MSIX vector number for the virtqueue or 0xFFFF if MSIX is not used */
uint16_t evvc_msix_vector;
/*
* evvc_pas_id contains a PCIe address space identifier if the queue
* uses PASID.
*/
boolean_t evvc_use_pasid;
uint32_t evvc_pas_id;
/* Negotiated virtio features to be applied to this virtqueue */
uint64_t evcc_features;
} efx_virtio_vq_cfg_t;
typedef struct efx_virtio_vq_s efx_virtio_vq_t;
LIBEFX_API
extern __checkReturn efx_rc_t
efx_virtio_init(
__in efx_nic_t *enp);
LIBEFX_API
extern void
efx_virtio_fini(
__in efx_nic_t *enp);
/*
* When the virtio net driver in the guest sets the VIRTIO_CONFIG_STATUS_DRIVER_OK
* bit, the hypervisor starts configuring all the virtqueues in the device. Once
* vhost_user has received VHOST_USER_SET_VRING_ENABLE for all the virtqueues, it
* invokes the vDPA driver callback dev_conf. The qcreate and qstart APIs are
* invoked from the dev_conf callback to create the virtqueues. For further
* details refer to SF-122427.
*/
LIBEFX_API
extern __checkReturn efx_rc_t
efx_virtio_qcreate(
__in efx_nic_t *enp,
__deref_out efx_virtio_vq_t **evvpp);
LIBEFX_API
extern __checkReturn efx_rc_t
efx_virtio_qstart(
__in efx_virtio_vq_t *evvp,
__in efx_virtio_vq_cfg_t *evvcp,
__in_opt efx_virtio_vq_dyncfg_t *evvdp);
LIBEFX_API
extern __checkReturn efx_rc_t
efx_virtio_qstop(
__in efx_virtio_vq_t *evvp,
__out_opt efx_virtio_vq_dyncfg_t *evvdp);
LIBEFX_API
extern void
efx_virtio_qdestroy(
__in efx_virtio_vq_t *evvp);
#endif /* EFSYS_OPT_VIRTIO */
#ifdef __cplusplus
}
#endif
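As a companion to the declarations above, the following sketch shows one plausible way to fill efx_virtio_vq_cfg_t for the Rx queue of virtqueue pair 0 on a VF. The helper name example_fill_rxq_cfg and the ring-address and feature parameters are illustrative assumptions, not part of libefx; the chosen ring size and MSI-X setting are placeholders.

#include <string.h>
#include "efx.h"

/*
 * Illustrative helper (not part of this commit): the IOVAs of the split
 * virtqueue rings and the negotiated feature bits are passed in by the
 * caller, e.g. from the vDPA framework's guest memory mapping.
 */
static void
example_fill_rxq_cfg(efx_virtio_vq_cfg_t *cfgp, efsys_dma_addr_t desc_iova,
	efsys_dma_addr_t avail_iova, efsys_dma_addr_t used_iova,
	uint64_t features)
{
	memset(cfgp, 0, sizeof (*cfgp));
	cfgp->evvc_vq_num = 0;				/* Rx queue of pair 0 */
	cfgp->evvc_type = EFX_VIRTIO_VQ_TYPE_NET_RXQ;
	cfgp->evvc_target_vf = 0;			/* VF 0; EFX_PCI_VF_INVALID on a PF */
	cfgp->evvc_vq_size = 256;			/* power of 2, at most EFX_VIRTIO_MAX_VQ_SIZE */
	cfgp->evvc_desc_tbl_addr = desc_iova;
	cfgp->evvc_avail_ring_addr = avail_iova;
	cfgp->evvc_used_ring_addr = used_iova;
	cfgp->evvc_msix_vector = 0xFFFF;		/* MSI-X not used */
	cfgp->evvc_use_pasid = B_FALSE;
	cfgp->evcc_features = features;			/* note the 'evcc' spelling above */
}

Passing the resulting structure to efx_virtio_qstart() ends up issuing MC_CMD_VIRTIO_INIT_QUEUE on Riverhead, as shown in rhead_virtio.c further down.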


@@ -407,4 +407,10 @@
# endif
#endif /* EFSYS_OPT_MAE */
#if EFSYS_OPT_VIRTIO
# if !EFSYS_OPT_RIVERHEAD
# error "VIRTIO requires RIVERHEAD"
# endif
#endif /* EFSYS_OPT_VIRTIO */
#endif /* _SYS_EFX_CHECK_H */


@@ -65,6 +65,7 @@ extern "C" {
#define EFX_MOD_TUNNEL 0x00004000
#define EFX_MOD_EVB 0x00008000
#define EFX_MOD_PROXY 0x00010000
#define EFX_MOD_VIRTIO 0x00020000
#define EFX_RESET_PHY 0x00000001
#define EFX_RESET_RXQ_ERR 0x00000002
@@ -308,6 +309,16 @@ typedef struct efx_tunnel_ops_s {
} efx_tunnel_ops_t;
#endif /* EFSYS_OPT_TUNNEL */
#if EFSYS_OPT_VIRTIO
typedef struct efx_virtio_ops_s {
efx_rc_t (*evo_virtio_qstart)(efx_virtio_vq_t *,
efx_virtio_vq_cfg_t *,
efx_virtio_vq_dyncfg_t *);
efx_rc_t (*evo_virtio_qstop)(efx_virtio_vq_t *,
efx_virtio_vq_dyncfg_t *);
} efx_virtio_ops_t;
#endif /* EFSYS_OPT_VIRTIO */
typedef struct efx_port_s {
efx_mac_type_t ep_mac_type;
uint32_t ep_phy_type;
@@ -858,6 +869,9 @@ struct efx_nic_s {
#if EFSYS_OPT_VPD
const efx_vpd_ops_t *en_evpdop;
#endif /* EFSYS_OPT_VPD */
#if EFSYS_OPT_VIRTIO
const efx_virtio_ops_t *en_evop;
#endif /* EFSYS_OPT_VIRTIO */
#if EFSYS_OPT_RX_SCALE
efx_rx_hash_support_t en_hash_support;
efx_rx_scale_context_type_t en_rss_context_type;
@@ -1750,6 +1764,28 @@ struct efx_mae_actions_s {
#endif /* EFSYS_OPT_MAE */
#if EFSYS_OPT_VIRTIO
#define EFX_VQ_MAGIC 0x026011950
typedef enum efx_virtio_vq_state_e {
EFX_VIRTIO_VQ_STATE_UNKNOWN = 0,
EFX_VIRTIO_VQ_STATE_INITIALIZED,
EFX_VIRTIO_VQ_STATE_STARTED,
EFX_VIRTIO_VQ_NSTATES
} efx_virtio_vq_state_t;
struct efx_virtio_vq_s {
uint32_t evv_magic;
efx_nic_t *evv_enp;
efx_virtio_vq_state_t evv_state;
uint32_t evv_vi_index;
efx_virtio_vq_type_t evv_type;
uint16_t evv_target_vf;
};
#endif /* EFSYS_OPT_VIRTIO */
#ifdef __cplusplus
}
#endif


@@ -0,0 +1,216 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright(c) 2020-2021 Xilinx, Inc.
*/
#include "efx.h"
#include "efx_impl.h"
#if EFSYS_OPT_VIRTIO
#if EFSYS_OPT_RIVERHEAD
static const efx_virtio_ops_t __efx_virtio_rhead_ops = {
rhead_virtio_qstart, /* evo_virtio_qstart */
rhead_virtio_qstop, /* evo_virtio_qstop */
};
#endif /* EFSYS_OPT_RIVERHEAD */
__checkReturn efx_rc_t
efx_virtio_init(
__in efx_nic_t *enp)
{
const efx_virtio_ops_t *evop;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_VIRTIO));
switch (enp->en_family) {
#if EFSYS_OPT_RIVERHEAD
case EFX_FAMILY_RIVERHEAD:
evop = &__efx_virtio_rhead_ops;
break;
#endif /* EFSYS_OPT_RIVERHEAD */
default:
EFSYS_ASSERT(0);
rc = ENOTSUP;
goto fail1;
}
enp->en_evop = evop;
enp->en_mod_flags |= EFX_MOD_VIRTIO;
return (0);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
enp->en_evop = NULL;
enp->en_mod_flags &= ~EFX_MOD_VIRTIO;
return (rc);
}
void
efx_virtio_fini(
__in efx_nic_t *enp)
{
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VIRTIO);
enp->en_evop = NULL;
enp->en_mod_flags &= ~EFX_MOD_VIRTIO;
}
__checkReturn efx_rc_t
efx_virtio_qcreate(
__in efx_nic_t *enp,
__deref_out efx_virtio_vq_t **evvpp)
{
const efx_virtio_ops_t *evop = enp->en_evop;
efx_virtio_vq_t *evvp;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VIRTIO);
/* Allocate a virtqueue object */
EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_virtio_vq_t), evvp);
if (evvp == NULL) {
rc = ENOMEM;
goto fail1;
}
evvp->evv_magic = EFX_VQ_MAGIC;
evvp->evv_enp = enp;
evvp->evv_state = EFX_VIRTIO_VQ_STATE_INITIALIZED;
*evvpp = evvp;
return (0);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
return (rc);
}
__checkReturn efx_rc_t
efx_virtio_qstart(
__in efx_virtio_vq_t *evvp,
__in efx_virtio_vq_cfg_t *evvcp,
__in_opt efx_virtio_vq_dyncfg_t *evvdp)
{
const efx_virtio_ops_t *evop;
efx_rc_t rc;
if ((evvcp == NULL) || (evvp == NULL)) {
rc = EINVAL;
goto fail1;
}
if (evvp->evv_state != EFX_VIRTIO_VQ_STATE_INITIALIZED) {
rc = EINVAL;
goto fail2;
}
evop = evvp->evv_enp->en_evop;
if (evop == NULL) {
rc = ENOTSUP;
goto fail3;
}
if ((rc = evop->evo_virtio_qstart(evvp, evvcp, evvdp)) != 0)
goto fail4;
evvp->evv_type = evvcp->evvc_type;
evvp->evv_target_vf = evvcp->evvc_target_vf;
evvp->evv_state = EFX_VIRTIO_VQ_STATE_STARTED;
return (0);
fail4:
EFSYS_PROBE(fail4);
fail3:
EFSYS_PROBE(fail3);
fail2:
EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
return (rc);
}
__checkReturn efx_rc_t
efx_virtio_qstop(
__in efx_virtio_vq_t *evvp,
__out_opt efx_virtio_vq_dyncfg_t *evvdp)
{
efx_nic_t *enp;
const efx_virtio_ops_t *evop;
efx_rc_t rc;
if (evvp == NULL) {
rc = EINVAL;
goto fail1;
}
enp = evvp->evv_enp;
evop = enp->en_evop;
EFSYS_ASSERT3U(evvp->evv_magic, ==, EFX_VQ_MAGIC);
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VIRTIO);
if (evop == NULL) {
rc = ENOTSUP;
goto fail2;
}
if (evvp->evv_state != EFX_VIRTIO_VQ_STATE_STARTED) {
rc = EINVAL;
goto fail3;
}
if ((rc = evop->evo_virtio_qstop(evvp, evvdp)) != 0)
goto fail4;
evvp->evv_state = EFX_VIRTIO_VQ_STATE_INITIALIZED;
return (0);
fail4:
EFSYS_PROBE(fail4);
fail3:
EFSYS_PROBE(fail3);
fail2:
EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
return (rc);
}
void
efx_virtio_qdestroy(
__in efx_virtio_vq_t *evvp)
{
efx_nic_t *enp;
if (evvp == NULL)
return;
enp = evvp->evv_enp;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
if (evvp->evv_state == EFX_VIRTIO_VQ_STATE_INITIALIZED) {
/* Free the virtqueue object */
EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_virtio_vq_t), evvp);
}
}
#endif /* EFSYS_OPT_VIRTIO */
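The dynamic configuration argument is what ties qstop and qstart together for migration. A minimal sketch, assuming two already-created queue objects src_vq and dst_vq and a configuration cfgp prepared by the caller (all hypothetical names):

#include "efx.h"

/*
 * Hypothetical migration handover: capture the final producer/consumer
 * indexes from the source queue and replay them when starting the
 * destination queue, per the efx_virtio_vq_dyncfg_t contract.
 */
static efx_rc_t
example_migrate_vq(efx_virtio_vq_t *src_vq, efx_virtio_vq_t *dst_vq,
	efx_virtio_vq_cfg_t *cfgp)
{
	efx_virtio_vq_dyncfg_t state;
	efx_rc_t rc;

	/* Quiesce the source queue; FINAL_PIDX/FINAL_CIDX land in 'state'. */
	if ((rc = efx_virtio_qstop(src_vq, &state)) != 0)
		return (rc);

	/* Resume on the destination from the captured indexes. */
	return (efx_virtio_qstart(dst_vq, cfgp, &state));
}

For a queue that is not being migrated, the dyncfg pointer can be left NULL, which matches the "should be zero" guidance in the struct comments above.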


@@ -29,6 +29,7 @@ sources = [
'efx_tunnel.c',
'efx_tx.c',
'efx_vpd.c',
'efx_virtio.c',
'mcdi_mon.c',
'siena_mac.c',
'siena_mcdi.c',
@@ -61,6 +62,7 @@ sources = [
'rhead_rx.c',
'rhead_tunnel.c',
'rhead_tx.c',
'rhead_virtio.c',
]
extra_flags = [


@@ -477,6 +477,23 @@ rhead_nic_xilinx_cap_tbl_read_ef100_locator(
__in efsys_dma_addr_t offset,
__out efx_bar_region_t *ebrp);
#if EFSYS_OPT_VIRTIO
LIBEFX_INTERNAL
extern __checkReturn efx_rc_t
rhead_virtio_qstart(
__in efx_virtio_vq_t *evvp,
__in efx_virtio_vq_cfg_t *evvcp,
__in_opt efx_virtio_vq_dyncfg_t *evvdp);
LIBEFX_INTERNAL
extern __checkReturn efx_rc_t
rhead_virtio_qstop(
__in efx_virtio_vq_t *evvp,
__out_opt efx_virtio_vq_dyncfg_t *evvdp);
#endif /* EFSYS_OPT_VIRTIO */
#ifdef __cplusplus
}
#endif


@@ -0,0 +1,190 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright(c) 2020-2021 Xilinx, Inc.
*/
#include "efx.h"
#include "efx_impl.h"
#if EFSYS_OPT_RIVERHEAD && EFSYS_OPT_VIRTIO
/*
* Get the function-local index of the associated VI from the
* virtqueue number; queue 0 is reserved for MCDI.
*/
#define EFX_VIRTIO_GET_VI_INDEX(vq_num) (((vq_num) / 2) + 1)
__checkReturn efx_rc_t
rhead_virtio_qstart(
__in efx_virtio_vq_t *evvp,
__in efx_virtio_vq_cfg_t *evvcp,
__in_opt efx_virtio_vq_dyncfg_t *evvdp)
{
efx_nic_t *enp = evvp->evv_enp;
efx_mcdi_req_t req;
uint32_t vi_index;
EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VIRTIO_INIT_QUEUE_REQ_LEN,
MC_CMD_VIRTIO_INIT_QUEUE_RESP_LEN);
efx_rc_t rc;
EFX_STATIC_ASSERT(EFX_VIRTIO_VQ_TYPE_NET_RXQ ==
MC_CMD_VIRTIO_INIT_QUEUE_REQ_NET_RXQ);
EFX_STATIC_ASSERT(EFX_VIRTIO_VQ_TYPE_NET_TXQ ==
MC_CMD_VIRTIO_INIT_QUEUE_REQ_NET_TXQ);
EFX_STATIC_ASSERT(EFX_VIRTIO_VQ_TYPE_BLOCK ==
MC_CMD_VIRTIO_INIT_QUEUE_REQ_BLOCK);
if (evvcp->evvc_type >= EFX_VIRTIO_VQ_NTYPES) {
rc = EINVAL;
goto fail1;
}
/* Virtqueue size must be a power of 2 */
if ((!ISP2(evvcp->evvc_vq_size)) ||
(evvcp->evvc_vq_size > EFX_VIRTIO_MAX_VQ_SIZE)) {
rc = EINVAL;
goto fail2;
}
if (evvdp != NULL) {
if ((evvdp->evvd_vq_cidx > evvcp->evvc_vq_size) ||
(evvdp->evvd_vq_pidx > evvcp->evvc_vq_size)) {
rc = EINVAL;
goto fail3;
}
}
req.emr_cmd = MC_CMD_VIRTIO_INIT_QUEUE;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_VIRTIO_INIT_QUEUE_REQ_LEN;
req.emr_out_buf = payload;
req.emr_out_length = MC_CMD_VIRTIO_INIT_QUEUE_RESP_LEN;
MCDI_IN_SET_BYTE(req, VIRTIO_INIT_QUEUE_REQ_QUEUE_TYPE,
evvcp->evvc_type);
MCDI_IN_SET_WORD(req, VIRTIO_INIT_QUEUE_REQ_TARGET_VF,
evvcp->evvc_target_vf);
vi_index = EFX_VIRTIO_GET_VI_INDEX(evvcp->evvc_vq_num);
MCDI_IN_SET_DWORD(req, VIRTIO_INIT_QUEUE_REQ_INSTANCE, vi_index);
MCDI_IN_SET_DWORD(req, VIRTIO_INIT_QUEUE_REQ_SIZE,
evvcp->evvc_vq_size);
MCDI_IN_SET_DWORD(req, VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_LO,
evvcp->evvc_desc_tbl_addr & 0xFFFFFFFF);
MCDI_IN_SET_DWORD(req, VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_HI,
evvcp->evvc_desc_tbl_addr >> 32);
MCDI_IN_SET_DWORD(req, VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_LO,
evvcp->evvc_avail_ring_addr & 0xFFFFFFFF);
MCDI_IN_SET_DWORD(req, VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_HI,
evvcp->evvc_avail_ring_addr >> 32);
MCDI_IN_SET_DWORD(req, VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_LO,
evvcp->evvc_used_ring_addr & 0xFFFFFFFF);
MCDI_IN_SET_DWORD(req, VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_HI,
evvcp->evvc_used_ring_addr >> 32);
if (evvcp->evvc_use_pasid) {
MCDI_IN_POPULATE_DWORD_1(req, VIRTIO_INIT_QUEUE_REQ_FLAGS,
VIRTIO_INIT_QUEUE_REQ_USE_PASID, 1);
MCDI_IN_SET_DWORD(req, VIRTIO_INIT_QUEUE_REQ_PASID,
evvcp->evvc_pas_id);
}
MCDI_IN_SET_WORD(req, VIRTIO_INIT_QUEUE_REQ_MSIX_VECTOR,
evvcp->evvc_msix_vector);
MCDI_IN_SET_DWORD(req, VIRTIO_INIT_QUEUE_REQ_FEATURES_LO,
evvcp->evcc_features & 0xFFFFFFFF);
MCDI_IN_SET_DWORD(req, VIRTIO_INIT_QUEUE_REQ_FEATURES_HI,
evvcp->evcc_features >> 32);
if (evvdp != NULL) {
MCDI_IN_SET_DWORD(req, VIRTIO_INIT_QUEUE_REQ_INITIAL_PIDX,
evvdp->evvd_vq_pidx);
MCDI_IN_SET_DWORD(req, VIRTIO_INIT_QUEUE_REQ_INITIAL_CIDX,
evvdp->evvd_vq_cidx);
}
MCDI_IN_SET_DWORD(req, VIRTIO_INIT_QUEUE_REQ_MPORT_SELECTOR,
MAE_MPORT_SELECTOR_ASSIGNED);
efx_mcdi_execute(enp, &req);
if (req.emr_rc != 0) {
rc = req.emr_rc;
goto fail4;
}
evvp->evv_vi_index = vi_index;
return (0);
fail4:
EFSYS_PROBE(fail4);
fail3:
EFSYS_PROBE(fail3);
fail2:
EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
return (rc);
}
__checkReturn efx_rc_t
rhead_virtio_qstop(
__in efx_virtio_vq_t *evvp,
__out_opt efx_virtio_vq_dyncfg_t *evvdp)
{
efx_mcdi_req_t req;
efx_nic_t *enp = evvp->evv_enp;
EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VIRTIO_FINI_QUEUE_REQ_LEN,
MC_CMD_VIRTIO_FINI_QUEUE_RESP_LEN);
efx_rc_t rc;
req.emr_cmd = MC_CMD_VIRTIO_FINI_QUEUE;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_VIRTIO_FINI_QUEUE_REQ_LEN;
req.emr_out_buf = payload;
req.emr_out_length = MC_CMD_VIRTIO_FINI_QUEUE_RESP_LEN;
MCDI_IN_SET_BYTE(req, VIRTIO_FINI_QUEUE_REQ_QUEUE_TYPE, evvp->evv_type);
MCDI_IN_SET_WORD(req, VIRTIO_INIT_QUEUE_REQ_TARGET_VF,
evvp->evv_target_vf);
MCDI_IN_SET_DWORD(req, VIRTIO_INIT_QUEUE_REQ_INSTANCE,
evvp->evv_vi_index);
efx_mcdi_execute(enp, &req);
if (req.emr_rc != 0) {
rc = req.emr_rc;
goto fail1;
}
if (req.emr_out_length_used < MC_CMD_VIRTIO_FINI_QUEUE_RESP_LEN) {
rc = EMSGSIZE;
goto fail2;
}
if (evvdp != NULL) {
evvdp->evvd_vq_pidx =
MCDI_OUT_DWORD(req, VIRTIO_FINI_QUEUE_RESP_FINAL_PIDX);
evvdp->evvd_vq_cidx =
MCDI_OUT_DWORD(req, VIRTIO_FINI_QUEUE_RESP_FINAL_CIDX);
}
return (0);
fail2:
EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
return (rc);
}
#endif /* EFSYS_OPT_RIVERHEAD && EFSYS_OPT_VIRTIO */
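One detail worth spelling out from the file above is the virtqueue-number-to-VI mapping: since VI 0 carries MCDI, each Rx/Tx pair n (virtqueues 2n and 2n + 1) lands on VI n + 1. A small self-check sketch restating the arithmetic, because EFX_VIRTIO_GET_VI_INDEX() is local to rhead_virtio.c; EXAMPLE_VI_INDEX and example_check_vi_mapping are illustrative names, not libefx symbols.

#include <assert.h>

/* Same arithmetic as EFX_VIRTIO_GET_VI_INDEX() in rhead_virtio.c. */
#define EXAMPLE_VI_INDEX(vq_num) (((vq_num) / 2) + 1)

static void
example_check_vi_mapping(void)
{
	assert(EXAMPLE_VI_INDEX(0) == 1);	/* pair 0, Rx -> VI 1 (VI 0 is MCDI) */
	assert(EXAMPLE_VI_INDEX(1) == 1);	/* pair 0, Tx -> VI 1 */
	assert(EXAMPLE_VI_INDEX(2) == 2);	/* pair 1, Rx -> VI 2 */
	assert(EXAMPLE_VI_INDEX(3) == 2);	/* pair 1, Tx -> VI 2 */
}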


@@ -187,6 +187,8 @@ prefetch_read_once(const volatile void *addr)
#define EFSYS_OPT_MAE 1
#define EFSYS_OPT_VIRTIO 0
/* ID */
typedef struct __efsys_identifier_s efsys_identifier_t;