net/sfc: advertise offload capabilities by Rx datapaths

Rx datapath feature bits were useful during the migration from the old
offload API to the new one. However, right now they just add indirection
which complicates reading and understanding the code. Also, adding a new
offload requires adding a new feature bit, which makes patches longer and
harder to understand. So, remove the feature bits which correspond to Rx
offloads and simply advertise device-level and per-queue offloads
directly. Generic code can still mask out an offload if the running HW or
FW does not support it.
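
For illustration, a minimal sketch of the new model (simplified stand-ins,
not the driver code: the struct, helper and flag values below are
placeholders, while the two field names match the patch):

/* Simplified stand-in; the real driver uses struct sfc_dp_rx and the
 * DEV_RX_OFFLOAD_* flags from rte_ethdev.h. */
#include <stdint.h>

#define RX_OFFLOAD_CHECKSUM  0x1   /* placeholder flag values */
#define RX_OFFLOAD_SCATTER   0x2

struct rx_datapath_sketch {
	uint64_t dev_offload_capa;   /* offloads supported on device level only */
	uint64_t queue_offload_capa; /* offloads supported per Rx queue */
};

/* A datapath advertises its Rx offloads directly... */
static const struct rx_datapath_sketch example_dp = {
	.dev_offload_capa = RX_OFFLOAD_CHECKSUM,
	.queue_offload_capa = RX_OFFLOAD_SCATTER,
};

/* ...and generic code masks out whatever the running HW/FW cannot do. */
static uint64_t
rx_offload_caps(const struct rx_datapath_sketch *dp, uint64_t hw_fw_mask)
{
	return (dp->dev_offload_capa | dp->queue_offload_capa) & hw_fw_mask;
}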

Signed-off-by: Andrew Rybchenko <arybchenko@solarflare.com>
Reviewed-by: Igor Romanov <igor.romanov@oktetlabs.ru>
Reviewed-by: Ivan Malov <ivan.malov@oktetlabs.ru>
Andrew Rybchenko authored on 2019-06-01 09:42:43 +01:00; committed by Ferruh Yigit
parent cd7bb0d442
commit f08d113d55
4 changed files with 46 additions and 34 deletions

File: drivers/net/sfc/sfc_dp_rx.h

@@ -199,12 +199,19 @@ struct sfc_dp_rx {
struct sfc_dp dp;
unsigned int features;
#define SFC_DP_RX_FEAT_SCATTER 0x1
#define SFC_DP_RX_FEAT_MULTI_PROCESS 0x2
#define SFC_DP_RX_FEAT_TUNNELS 0x4
#define SFC_DP_RX_FEAT_FLOW_FLAG 0x8
#define SFC_DP_RX_FEAT_FLOW_MARK 0x10
#define SFC_DP_RX_FEAT_CHECKSUM 0x20
#define SFC_DP_RX_FEAT_MULTI_PROCESS 0x1
#define SFC_DP_RX_FEAT_FLOW_FLAG 0x2
#define SFC_DP_RX_FEAT_FLOW_MARK 0x4
/**
* Rx offload capabilities supported by the datapath on device
* level only if HW/FW supports it.
*/
uint64_t dev_offload_capa;
/**
* Rx offload capabilities supported by the datapath per-queue
* if HW/FW supports it.
*/
uint64_t queue_offload_capa;
sfc_dp_rx_get_dev_info_t *get_dev_info;
sfc_dp_rx_pool_ops_supported_t *pool_ops_supported;
sfc_dp_rx_qsize_up_rings_t *qsize_up_rings;
@@ -237,6 +244,12 @@ sfc_dp_find_rx_by_caps(struct sfc_dp_list *head, unsigned int avail_caps)
return (p == NULL) ? NULL : container_of(p, struct sfc_dp_rx, dp);
}
static inline uint64_t
sfc_dp_rx_offload_capa(const struct sfc_dp_rx *dp_rx)
{
return dp_rx->dev_offload_capa | dp_rx->queue_offload_capa;
}
/** Get Rx datapath ops by the datapath RxQ handle */
const struct sfc_dp_rx *sfc_dp_rx_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq);
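
The split between the two new fields matters mostly for capability
reporting. The reporting code lives in sfc_ethdev.c and is not part of
this diff, so the lines below are only an assumed sketch of how the
fields would typically surface (per-queue offloads are also advertised
at device level, since the ethdev API expects the device capa to be a
superset of the per-queue capa):

/* Assumed sketch, not part of this diff: dev_capa and queue_capa would come
 * from sfc_rx_get_dev_offload_caps() and sfc_rx_get_queue_offload_caps(). */
#include <stdint.h>

struct dev_info_sketch {              /* stand-in for struct rte_eth_dev_info */
	uint64_t rx_offload_capa;
	uint64_t rx_queue_offload_capa;
};

static void
report_rx_caps(struct dev_info_sketch *di, uint64_t dev_capa, uint64_t queue_capa)
{
	di->rx_queue_offload_capa = queue_capa;
	di->rx_offload_capa = dev_capa | queue_capa;
}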

File: drivers/net/sfc/sfc_ef10_essb_rx.c

@@ -714,8 +714,9 @@ struct sfc_dp_rx sfc_ef10_essb_rx = {
SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER,
},
.features = SFC_DP_RX_FEAT_FLOW_FLAG |
SFC_DP_RX_FEAT_FLOW_MARK |
SFC_DP_RX_FEAT_CHECKSUM,
SFC_DP_RX_FEAT_FLOW_MARK,
.dev_offload_capa = DEV_RX_OFFLOAD_CHECKSUM,
.queue_offload_capa = 0,
.get_dev_info = sfc_ef10_essb_rx_get_dev_info,
.pool_ops_supported = sfc_ef10_essb_rx_pool_ops_supported,
.qsize_up_rings = sfc_ef10_essb_rx_qsize_up_rings,

File: drivers/net/sfc/sfc_ef10_rx.c

@@ -750,10 +750,10 @@ struct sfc_dp_rx sfc_ef10_rx = {
.type = SFC_DP_RX,
.hw_fw_caps = SFC_DP_HW_FW_CAP_EF10,
},
.features = SFC_DP_RX_FEAT_SCATTER |
SFC_DP_RX_FEAT_MULTI_PROCESS |
SFC_DP_RX_FEAT_TUNNELS |
SFC_DP_RX_FEAT_CHECKSUM,
.features = SFC_DP_RX_FEAT_MULTI_PROCESS,
.dev_offload_capa = DEV_RX_OFFLOAD_CHECKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM,
.queue_offload_capa = DEV_RX_OFFLOAD_SCATTER,
.get_dev_info = sfc_ef10_rx_get_dev_info,
.qsize_up_rings = sfc_ef10_rx_qsize_up_rings,
.qcreate = sfc_ef10_rx_qcreate,

File: drivers/net/sfc/sfc_rx.c

@@ -557,8 +557,9 @@ struct sfc_dp_rx sfc_efx_rx = {
.type = SFC_DP_RX,
.hw_fw_caps = 0,
},
.features = SFC_DP_RX_FEAT_SCATTER |
SFC_DP_RX_FEAT_CHECKSUM,
.features = 0,
.dev_offload_capa = DEV_RX_OFFLOAD_CHECKSUM,
.queue_offload_capa = DEV_RX_OFFLOAD_SCATTER,
.qsize_up_rings = sfc_efx_rx_qsize_up_rings,
.qcreate = sfc_efx_rx_qcreate,
.qdestroy = sfc_efx_rx_qdestroy,
@@ -806,36 +807,32 @@ sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
sfc_ev_qstop(rxq->evq);
}
static uint64_t
sfc_rx_get_offload_mask(struct sfc_adapter *sa)
{
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
uint64_t no_caps = 0;
if (encp->enc_tunnel_encapsulations_supported == 0)
no_caps |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
return ~no_caps;
}
uint64_t
sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa)
{
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
uint64_t caps = 0;
uint64_t caps = sa->priv.dp_rx->dev_offload_capa;
caps |= DEV_RX_OFFLOAD_JUMBO_FRAME;
if (sa->priv.dp_rx->features & SFC_DP_RX_FEAT_CHECKSUM) {
caps |= DEV_RX_OFFLOAD_IPV4_CKSUM;
caps |= DEV_RX_OFFLOAD_UDP_CKSUM;
caps |= DEV_RX_OFFLOAD_TCP_CKSUM;
}
if (encp->enc_tunnel_encapsulations_supported &&
(sa->priv.dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
caps |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
return caps;
return caps & sfc_rx_get_offload_mask(sa);
}
uint64_t
sfc_rx_get_queue_offload_caps(struct sfc_adapter *sa)
{
uint64_t caps = 0;
if (sa->priv.dp_rx->features & SFC_DP_RX_FEAT_SCATTER)
caps |= DEV_RX_OFFLOAD_SCATTER;
return caps;
return sa->priv.dp_rx->queue_offload_capa & sfc_rx_get_offload_mask(sa);
}
static int
@@ -1047,7 +1044,8 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
if ((encp->enc_tunnel_encapsulations_supported != 0) &&
(sa->priv.dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
(sfc_dp_rx_offload_capa(sa->priv.dp_rx) &
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0)
rxq_info->type_flags |= EFX_RXQ_FLAG_INNER_CLASSES;
rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index,
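
As a worked example of the masking logic in sfc_rx_get_dev_offload_caps()
above (the offload values are taken from the sfc_ef10_rx entry earlier in
this diff; the flag values and the small harness below are placeholders,
not DPDK code): on firmware without tunnel encapsulation support,
sfc_rx_get_offload_mask() clears DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, so only
the checksum and jumbo-frame offloads remain at device level.

/* Placeholder flag values; the real DEV_RX_OFFLOAD_* bits come from
 * rte_ethdev.h. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define RX_OFFLOAD_CHECKSUM         0x1
#define RX_OFFLOAD_OUTER_IPV4_CKSUM 0x2
#define RX_OFFLOAD_JUMBO_FRAME      0x4

int main(void)
{
	/* sfc_ef10_rx: checksum and outer IPv4 checksum on device level */
	uint64_t dev_offload_capa =
	    RX_OFFLOAD_CHECKSUM | RX_OFFLOAD_OUTER_IPV4_CKSUM;
	/* FW without tunnel encapsulations: mask out outer IPv4 checksum */
	int tunnels_supported = 0;
	uint64_t mask = tunnels_supported ?
	    UINT64_MAX : ~(uint64_t)RX_OFFLOAD_OUTER_IPV4_CKSUM;
	uint64_t caps = (dev_offload_capa | RX_OFFLOAD_JUMBO_FRAME) & mask;

	/* Prints 0x5: checksum and jumbo frame remain */
	printf("device-level Rx offloads: %#" PRIx64 "\n", caps);
	return 0;
}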