Mechanically convert mlx5en(4) to IfAPI

Reviewed by:	zlei
Sponsored by:	Juniper Networks, Inc.
Differential Revision: https://reviews.freebsd.org/D38595
commit 5dc00f00b7
parent 6444662a56
Author:	Justin Hibbits
Date:	2022-09-19 16:58:00 -04:00

11 changed files with 220 additions and 212 deletions
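
Every hunk below follows the same pattern: struct ifnet * becomes the
opaque if_t handle, and direct ifnet member accesses become calls to the
IfAPI if_get*()/if_set*()/if_toggle*() accessors. A minimal sketch of
that pattern, using a hypothetical "mydrv" driver (the names are
illustrative, not part of this change):

#include <net/if.h>
#include <net/if_var.h>			/* if_t and the IfAPI accessors */

struct mydrv_softc {			/* hypothetical driver state */
	if_t ifp;			/* was: struct ifnet *ifp; */
};

static void
mydrv_check_lro(if_t ifp)		/* was: struct ifnet *ifp */
{
	struct mydrv_softc *sc;

	sc = if_getsoftc(ifp);		/* was: sc = ifp->if_softc; */
	if (if_getcapenable(ifp) & IFCAP_LRO) {
		/* was: ifp->if_capenable & IFCAP_LRO */
		(void)sc;		/* LRO enabled; sc unused in this sketch */
	}
}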


@@ -770,7 +770,7 @@ struct mlx5e_rq {
 u32 wqe_sz;
 u32 nsegs;
 struct mlx5e_rq_mbuf *mbuf;
-struct ifnet *ifp;
+if_t ifp;
 struct mlx5e_cq cq;
 struct lro_ctrl lro;
 volatile int enabled;
@@ -1114,7 +1114,7 @@ struct mlx5e_priv {
 struct work_struct set_rx_mode_work;
 MLX5_DECLARE_DOORBELL_LOCK(doorbell_lock)
-struct ifnet *ifp;
+if_t ifp;
 struct sysctl_ctx_list sysctl_ctx;
 struct sysctl_oid *sysctl_ifnet;
 struct sysctl_oid *sysctl_hw;
@@ -1200,10 +1200,10 @@ struct mlx5e_eeprom {
 bool mlx5e_do_send_cqe(struct mlx5e_sq *);
 int mlx5e_get_full_header_size(const struct mbuf *, const struct tcphdr **);
-int mlx5e_xmit(struct ifnet *, struct mbuf *);
+int mlx5e_xmit(if_t, struct mbuf *);
-int mlx5e_open_locked(struct ifnet *);
-int mlx5e_close_locked(struct ifnet *);
+int mlx5e_open_locked(if_t);
+int mlx5e_close_locked(if_t);
 void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, int event);
 void mlx5e_dump_err_cqe(struct mlx5e_cq *, u32, const struct mlx5_err_cqe *);
@@ -1221,14 +1221,14 @@ int mlx5e_open_flow_rules(struct mlx5e_priv *priv);
 void mlx5e_close_flow_rules(struct mlx5e_priv *priv);
 void mlx5e_set_rx_mode_work(struct work_struct *work);
-void mlx5e_vlan_rx_add_vid(void *, struct ifnet *, u16);
-void mlx5e_vlan_rx_kill_vid(void *, struct ifnet *, u16);
+void mlx5e_vlan_rx_add_vid(void *, if_t, u16);
+void mlx5e_vlan_rx_kill_vid(void *, if_t, u16);
 void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
 void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
-void mlx5e_vxlan_start(void *arg, struct ifnet *ifp, sa_family_t family,
+void mlx5e_vxlan_start(void *arg, if_t ifp, sa_family_t family,
 u_int port);
-void mlx5e_vxlan_stop(void *arg, struct ifnet *ifp, sa_family_t family,
+void mlx5e_vxlan_stop(void *arg, if_t ifp, sa_family_t family,
 u_int port);
 int mlx5e_add_all_vxlan_rules(struct mlx5e_priv *priv);
 void mlx5e_del_all_vxlan_rules(struct mlx5e_priv *priv);
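
These prototype changes are source-level only: at the time of this
change, if_t is still a typedef for a pointer to struct ifnet, declared
alongside the accessors (paraphrased below), so the driver's ABI does
not change; the point is that the driver stops dereferencing the
structure directly.

/* Paraphrased from the IfAPI declarations: an interface handle that
 * drivers treat as opaque, even though it is still an ifnet pointer. */
typedef struct ifnet *if_t;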


@@ -1133,7 +1133,7 @@ mlx5e_ethtool_handler(SYSCTL_HANDLER_ARGS)
 MLX5_CAP_ETH(priv->mdev, lro_cap)) {
 priv->params_ethtool.hw_lro = 1;
 /* check if feature should actually be enabled */
-if (priv->ifp->if_capenable & IFCAP_LRO) {
+if (if_getcapenable(priv->ifp) & IFCAP_LRO) {
 priv->params.hw_lro_en = true;
 } else {
 priv->params.hw_lro_en = false;


@@ -722,7 +722,7 @@ mlx5e_add_main_vxlan_rules(struct mlx5e_priv *priv)
 static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
 {
-struct ifnet *ifp = priv->ifp;
+if_t ifp = priv->ifp;
 int max_list_size;
 int list_size;
 u16 *vlans;
@@ -914,7 +914,7 @@ mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
 {
 if (priv->vlan.filter_disabled) {
 priv->vlan.filter_disabled = false;
-if (priv->ifp->if_flags & IFF_PROMISC)
+if (if_getflags(priv->ifp) & IFF_PROMISC)
 return;
 if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
 mlx5e_del_any_vid_rules(priv);
@@ -926,7 +926,7 @@ mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
 {
 if (!priv->vlan.filter_disabled) {
 priv->vlan.filter_disabled = true;
-if (priv->ifp->if_flags & IFF_PROMISC)
+if (if_getflags(priv->ifp) & IFF_PROMISC)
 return;
 if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
 mlx5e_add_any_vid_rules(priv);
@@ -934,7 +934,7 @@ mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
 }
 void
-mlx5e_vlan_rx_add_vid(void *arg, struct ifnet *ifp, u16 vid)
+mlx5e_vlan_rx_add_vid(void *arg, if_t ifp, u16 vid)
 {
 struct mlx5e_priv *priv = arg;
@@ -949,7 +949,7 @@ mlx5e_vlan_rx_add_vid(void *arg, struct ifnet *ifp, u16 vid)
 }
 void
-mlx5e_vlan_rx_kill_vid(void *arg, struct ifnet *ifp, u16 vid)
+mlx5e_vlan_rx_kill_vid(void *arg, if_t ifp, u16 vid)
 {
 struct mlx5e_priv *priv = arg;
@@ -1087,7 +1087,7 @@ mlx5e_sync_ifp_addr(struct mlx5e_priv *priv)
 struct mlx5e_eth_addr_hash_head head_uc;
 struct mlx5e_eth_addr_hash_head head_mc;
 struct mlx5e_eth_addr_hash_node *hn;
-struct ifnet *ifp = priv->ifp;
+if_t ifp = priv->ifp;
 size_t x;
 size_t num;
@@ -1110,8 +1110,7 @@ mlx5e_sync_ifp_addr(struct mlx5e_priv *priv)
 hn = mlx5e_move_hn(&head_free, &head_uc);
 MPASS(hn != NULL);
-ether_addr_copy(hn->ai.addr,
-LLADDR((struct sockaddr_dl *)(ifp->if_addr->ifa_addr)));
+ether_addr_copy(hn->ai.addr, if_getlladdr(ifp));
 ctx.free = &head_free;
 ctx.fill = &head_uc;
@@ -1158,7 +1157,7 @@ static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
 u8 addr_array[][ETH_ALEN], int size)
 {
 bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
-struct ifnet *ifp = priv->ifp;
+if_t ifp = priv->ifp;
 struct mlx5e_eth_addr_hash_node *hn;
 struct mlx5e_eth_addr_hash_head *addr_list;
 struct mlx5e_eth_addr_hash_node *tmp;
@@ -1168,12 +1167,12 @@ static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
 addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;
 if (is_uc) /* Make sure our own address is pushed first */
-ether_addr_copy(addr_array[i++], IF_LLADDR(ifp));
+ether_addr_copy(addr_array[i++], if_getlladdr(ifp));
 else if (priv->eth_addr.broadcast_enabled)
-ether_addr_copy(addr_array[i++], ifp->if_broadcastaddr);
+ether_addr_copy(addr_array[i++], if_getbroadcastaddr(ifp));
 mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
-if (ether_addr_equal(IF_LLADDR(ifp), hn->ai.addr))
+if (ether_addr_equal(if_getlladdr(ifp), hn->ai.addr))
 continue;
 if (i >= size)
 break;
@@ -1275,10 +1274,11 @@ static void
 mlx5e_set_rx_mode_core(struct mlx5e_priv *priv, bool rx_mode_enable)
 {
 struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
-struct ifnet *ndev = priv->ifp;
+if_t ndev = priv->ifp;
+int ndev_flags = if_getflags(ndev);
-bool promisc_enabled = rx_mode_enable && (ndev->if_flags & IFF_PROMISC);
-bool allmulti_enabled = rx_mode_enable && (ndev->if_flags & IFF_ALLMULTI);
+bool promisc_enabled = rx_mode_enable && (ndev_flags & IFF_PROMISC);
+bool allmulti_enabled = rx_mode_enable && (ndev_flags & IFF_ALLMULTI);
 bool broadcast_enabled = rx_mode_enable;
 bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
@@ -1290,7 +1290,7 @@ mlx5e_set_rx_mode_core(struct mlx5e_priv *priv, bool rx_mode_enable)
 /* update broadcast address */
 ether_addr_copy(priv->eth_addr.broadcast.addr,
-priv->ifp->if_broadcastaddr);
+if_getbroadcastaddr(priv->ifp));
 if (enable_promisc) {
 mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
@@ -1894,7 +1894,7 @@ mlx5e_add_vxlan_rule(struct mlx5e_priv *priv, sa_family_t family, u_int port)
 }
 el = mlx5e_vxlan_alloc_db_el(priv, proto, port);
-if ((priv->ifp->if_capenable & IFCAP_VXLAN_HWCSUM) != 0) {
+if ((if_getcapenable(priv->ifp) & IFCAP_VXLAN_HWCSUM) != 0) {
 err = mlx5e_add_vxlan_rule_from_db(priv, el);
 if (err == 0)
 el->installed = true;
@@ -2023,7 +2023,7 @@ mlx5e_del_vxlan_catchall_rule(struct mlx5e_priv *priv)
 }
 void
-mlx5e_vxlan_start(void *arg, struct ifnet *ifp __unused, sa_family_t family,
+mlx5e_vxlan_start(void *arg, if_t ifp __unused, sa_family_t family,
 u_int port)
 {
 struct mlx5e_priv *priv = arg;
@@ -2037,7 +2037,7 @@ mlx5e_vxlan_start(void *arg, struct ifnet *ifp __unused, sa_family_t family,
 }
 void
-mlx5e_vxlan_stop(void *arg, struct ifnet *ifp __unused, sa_family_t family,
+mlx5e_vxlan_stop(void *arg, if_t ifp __unused, sa_family_t family,
 u_int port)
 {
 struct mlx5e_priv *priv = arg;
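
One detail in mlx5e_set_rx_mode_core() above goes slightly beyond the
mechanical substitution: each field read is now a function call, so the
interface flags are fetched once into a local instead of being read
twice. A minimal sketch of that idiom:

/* Fetch the flags once; ifp is whichever interface is being examined. */
int flags = if_getflags(ifp);
bool promisc = (flags & IFF_PROMISC) != 0;
bool allmulti = (flags & IFF_ALLMULTI) != 0;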


@@ -309,7 +309,7 @@ mlx5e_tls_set_params(void *ctx, const struct tls_session_params *en)
 CTASSERT(MLX5E_TLS_ST_INIT == 0);
 int
-mlx5e_tls_snd_tag_alloc(struct ifnet *ifp,
+mlx5e_tls_snd_tag_alloc(if_t ifp,
 union if_snd_tag_alloc_params *params,
 struct m_snd_tag **ppmt)
 {
@@ -320,7 +320,7 @@ mlx5e_tls_snd_tag_alloc(struct ifnet *ifp,
 const struct tls_session_params *en;
 int error;
-priv = ifp->if_softc;
+priv = if_getsoftc(ifp);
 if (priv->gone != 0 || priv->tls.init == 0)
 return (EOPNOTSUPP);
@@ -489,7 +489,7 @@ mlx5e_tls_snd_tag_free(struct m_snd_tag *pmt)
 ptag->state = MLX5E_TLS_ST_RELEASE;
 MLX5E_TLS_TAG_UNLOCK(ptag);
-priv = ptag->tag.ifp->if_softc;
+priv = if_getsoftc(ptag->tag.ifp);
 queue_work(priv->tls.wq, &ptag->work);
 }


@@ -654,7 +654,7 @@ CTASSERT(MLX5E_TLS_RX_ST_INIT == 0);
 * Returns zero on success else an error happened.
 */
 int
-mlx5e_tls_rx_snd_tag_alloc(struct ifnet *ifp,
+mlx5e_tls_rx_snd_tag_alloc(if_t ifp,
 union if_snd_tag_alloc_params *params,
 struct m_snd_tag **ppmt)
 {
@@ -666,7 +666,7 @@ mlx5e_tls_rx_snd_tag_alloc(struct ifnet *ifp,
 uint32_t value;
 int error;
-priv = ifp->if_softc;
+priv = if_getsoftc(ifp);
 if (unlikely(priv->gone != 0 || priv->tls_rx.init == 0 ||
 params->hdr.flowtype == M_HASHTYPE_NONE))
@@ -801,7 +801,7 @@ mlx5e_tls_rx_snd_tag_alloc(struct ifnet *ifp,
 goto cleanup;
 }
-if (ifp->if_pcp != IFNET_PCP_NONE || params->tls_rx.vlan_id != 0) {
+if (if_getpcp(ifp) != IFNET_PCP_NONE || params->tls_rx.vlan_id != 0) {
 /* create flow rule for TLS RX traffic (tagged) */
 flow_rule = mlx5e_accel_fs_add_inpcb(priv, params->tls_rx.inp,
 ptag->tirn, MLX5_FS_DEFAULT_FLOW_TAG, params->tls_rx.vlan_id);
@@ -996,7 +996,7 @@ mlx5e_tls_rx_snd_tag_free(struct m_snd_tag *pmt)
 ptag->state = MLX5E_TLS_RX_ST_RELEASE;
 MLX5E_TLS_RX_TAG_UNLOCK(ptag);
-priv = ptag->tag.ifp->if_softc;
+priv = if_getsoftc(ptag->tag.ifp);
 queue_work(priv->tls_rx.wq, &ptag->work);
 }
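
The TLS send-tag paths above share one shape: the tag records the if_t
it was allocated on, and the driver private data is recovered through
if_getsoftc() instead of reading ifp->if_softc. A hedged sketch of that
shape, with hypothetical "mydrv" names:

static void
mydrv_snd_tag_free(struct m_snd_tag *pmt)
{
	struct mydrv_softc *sc;

	/* was: sc = pmt->ifp->if_softc; */
	sc = if_getsoftc(pmt->ifp);
	/* ... hand the tag to a worker thread owned by sc ... */
}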


@@ -391,7 +391,7 @@ mlx5e_update_carrier(struct mlx5e_priv *priv)
 MLX5_PTYS_EN, 1);
 if (error) {
 priv->media_active_last = IFM_ETHER;
-priv->ifp->if_baudrate = 1;
+if_setbaudrate(priv->ifp, 1);
 mlx5_en_err(priv->ifp, "query port ptys failed: 0x%x\n",
 error);
 return;
@@ -449,15 +449,15 @@ mlx5e_update_carrier(struct mlx5e_priv *priv)
 break;
 }
 priv->media_active_last = media_entry.subtype | IFM_ETHER | IFM_FDX;
-priv->ifp->if_baudrate = media_entry.baudrate;
+if_setbaudrate(priv->ifp, media_entry.baudrate);
 if_link_state_change(priv->ifp, LINK_STATE_UP);
 }
 static void
-mlx5e_media_status(struct ifnet *dev, struct ifmediareq *ifmr)
+mlx5e_media_status(if_t dev, struct ifmediareq *ifmr)
 {
-struct mlx5e_priv *priv = dev->if_softc;
+struct mlx5e_priv *priv = if_getsoftc(dev);
 ifmr->ifm_status = priv->media_status_last;
 ifmr->ifm_current = ifmr->ifm_active = priv->media_active_last |
@@ -530,9 +530,9 @@ mlx5e_set_port_pfc(struct mlx5e_priv *priv)
 }
 static int
-mlx5e_media_change(struct ifnet *dev)
+mlx5e_media_change(if_t dev)
 {
-struct mlx5e_priv *priv = dev->if_softc;
+struct mlx5e_priv *priv = if_getsoftc(dev);
 struct mlx5_core_dev *mdev = priv->mdev;
 u32 eth_proto_cap;
 u32 link_mode;
@@ -2332,7 +2332,7 @@ mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs)
 u32 r, n;
 r = priv->params.hw_lro_en ? priv->params.lro_wqe_sz :
-MLX5E_SW2MB_MTU(priv->ifp->if_mtu);
+MLX5E_SW2MB_MTU(if_getmtu(priv->ifp));
 if (r > MJUM16BYTES)
 return (-ENOMEM);
@@ -3207,9 +3207,9 @@ mlx5e_close_tirs(struct mlx5e_priv *priv)
 * HW MTU includes all headers and checksums.
 */
 static int
-mlx5e_set_dev_port_mtu(struct ifnet *ifp, int sw_mtu)
+mlx5e_set_dev_port_mtu(if_t ifp, int sw_mtu)
 {
-struct mlx5e_priv *priv = ifp->if_softc;
+struct mlx5e_priv *priv = if_getsoftc(ifp);
 struct mlx5_core_dev *mdev = priv->mdev;
 int hw_mtu;
 int err;
@@ -3231,7 +3231,7 @@ mlx5e_set_dev_port_mtu(struct ifnet *ifp, int sw_mtu)
 err);
 }
-ifp->if_mtu = sw_mtu;
+if_setmtu(ifp, sw_mtu);
 err = mlx5_query_vport_mtu(mdev, &hw_mtu);
 if (err || !hw_mtu) {
@@ -3264,9 +3264,9 @@ mlx5e_set_dev_port_mtu(struct ifnet *ifp, int sw_mtu)
 }
 int
-mlx5e_open_locked(struct ifnet *ifp)
+mlx5e_open_locked(if_t ifp)
 {
-struct mlx5e_priv *priv = ifp->if_softc;
+struct mlx5e_priv *priv = if_getsoftc(ifp);
 int err;
 u16 set_id;
@@ -3338,14 +3338,14 @@ mlx5e_open(void *arg)
 "Setting port status to up failed\n");
 mlx5e_open_locked(priv->ifp);
-priv->ifp->if_drv_flags |= IFF_DRV_RUNNING;
+if_setdrvflagbits(priv->ifp, IFF_DRV_RUNNING, 0);
 PRIV_UNLOCK(priv);
 }
 int
-mlx5e_close_locked(struct ifnet *ifp)
+mlx5e_close_locked(if_t ifp)
 {
-struct mlx5e_priv *priv = ifp->if_softc;
+struct mlx5e_priv *priv = if_getsoftc(ifp);
 /* check if already closed */
 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
@@ -3365,9 +3365,9 @@ mlx5e_close_locked(struct ifnet *ifp)
 }
 static uint64_t
-mlx5e_get_counter(struct ifnet *ifp, ift_counter cnt)
+mlx5e_get_counter(if_t ifp, ift_counter cnt)
 {
-struct mlx5e_priv *priv = ifp->if_softc;
+struct mlx5e_priv *priv = if_getsoftc(ifp);
 u64 retval;
 /* PRIV_LOCK(priv); XXX not allowed */
@@ -3418,15 +3418,15 @@ mlx5e_get_counter(struct ifnet *ifp, ift_counter cnt)
 }
 static void
-mlx5e_set_rx_mode(struct ifnet *ifp)
+mlx5e_set_rx_mode(if_t ifp)
 {
-struct mlx5e_priv *priv = ifp->if_softc;
+struct mlx5e_priv *priv = if_getsoftc(ifp);
 queue_work(priv->wq, &priv->set_rx_mode_work);
 }
 static int
-mlx5e_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
+mlx5e_ioctl(if_t ifp, u_long command, caddr_t data)
 {
 struct mlx5e_priv *priv;
 struct ifreq *ifr;
@@ -3443,7 +3443,7 @@ mlx5e_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
 int max_mtu;
 uint8_t read_addr;
-priv = ifp->if_softc;
+priv = if_getsoftc(ifp);
 /* check if detaching */
 if (priv == NULL || priv->gone != 0)
@@ -3478,27 +3478,27 @@ mlx5e_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
 PRIV_UNLOCK(priv);
 break;
 case SIOCSIFFLAGS:
-if ((ifp->if_flags & IFF_UP) &&
-(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+if ((if_getflags(ifp) & IFF_UP) &&
+(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
 mlx5e_set_rx_mode(ifp);
 break;
 }
 PRIV_LOCK(priv);
-if (ifp->if_flags & IFF_UP) {
-if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+if (if_getflags(ifp) & IFF_UP) {
+if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
 mlx5e_open_locked(ifp);
-ifp->if_drv_flags |= IFF_DRV_RUNNING;
+if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
 mlx5_set_port_status(priv->mdev, MLX5_PORT_UP);
 }
 } else {
-if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
 mlx5_set_port_status(priv->mdev,
 MLX5_PORT_DOWN);
 if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
 mlx5e_close_locked(ifp);
 mlx5e_update_carrier(priv);
-ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
 }
 }
 PRIV_UNLOCK(priv);
@@ -3521,98 +3521,98 @@ mlx5e_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
 drv_ioctl_data = &drv_ioctl_data_d;
 drv_ioctl_data->reqcap = ifr->ifr_reqcap;
 PRIV_LOCK(priv);
-drv_ioctl_data->reqcap2 = ifp->if_capenable2;
+drv_ioctl_data->reqcap2 = if_getcapenable2(ifp);
 drv_ioctl_data->nvcap = NULL;
 goto siocsifcap_driver;
 case SIOCSIFCAPNV:
 drv_ioctl_data = (struct siocsifcapnv_driver_data *)data;
 PRIV_LOCK(priv);
 siocsifcap_driver:
-mask = drv_ioctl_data->reqcap ^ ifp->if_capenable;
+mask = drv_ioctl_data->reqcap ^ if_getcapenable(ifp);
 if (mask & IFCAP_TXCSUM) {
-ifp->if_capenable ^= IFCAP_TXCSUM;
-ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
+if_togglecapenable(ifp, IFCAP_TXCSUM);
+if_togglehwassist(ifp, (CSUM_TCP | CSUM_UDP | CSUM_IP));
-if (IFCAP_TSO4 & ifp->if_capenable &&
-!(IFCAP_TXCSUM & ifp->if_capenable)) {
+if (IFCAP_TSO4 & if_getcapenable(ifp) &&
+!(IFCAP_TXCSUM & if_getcapenable(ifp))) {
 mask &= ~IFCAP_TSO4;
-ifp->if_capenable &= ~IFCAP_TSO4;
-ifp->if_hwassist &= ~CSUM_IP_TSO;
+if_setcapenablebit(ifp, 0, IFCAP_TSO4);
+if_sethwassistbits(ifp, 0, CSUM_IP_TSO);
 mlx5_en_err(ifp,
 "tso4 disabled due to -txcsum.\n");
 }
 }
 if (mask & IFCAP_TXCSUM_IPV6) {
-ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
-ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
+if_togglecapenable(ifp, IFCAP_TXCSUM_IPV6);
+if_togglehwassist(ifp, (CSUM_UDP_IPV6 | CSUM_TCP_IPV6));
-if (IFCAP_TSO6 & ifp->if_capenable &&
-!(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
+if (IFCAP_TSO6 & if_getcapenable(ifp) &&
+!(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) {
 mask &= ~IFCAP_TSO6;
-ifp->if_capenable &= ~IFCAP_TSO6;
-ifp->if_hwassist &= ~CSUM_IP6_TSO;
+if_setcapenablebit(ifp, 0, IFCAP_TSO6);
+if_sethwassistbits(ifp, 0, CSUM_IP6_TSO);
 mlx5_en_err(ifp,
 "tso6 disabled due to -txcsum6.\n");
 }
 }
 if (mask & IFCAP_MEXTPG)
-ifp->if_capenable ^= IFCAP_MEXTPG;
+if_togglecapenable(ifp, IFCAP_MEXTPG);
 if (mask & IFCAP_TXTLS4)
-ifp->if_capenable ^= IFCAP_TXTLS4;
+if_togglecapenable(ifp, IFCAP_TXTLS4);
 if (mask & IFCAP_TXTLS6)
-ifp->if_capenable ^= IFCAP_TXTLS6;
+if_togglecapenable(ifp, IFCAP_TXTLS6);
 #ifdef RATELIMIT
 if (mask & IFCAP_TXTLS_RTLMT)
-ifp->if_capenable ^= IFCAP_TXTLS_RTLMT;
+if_togglecapenable(ifp, IFCAP_TXTLS_RTLMT);
 #endif
 if (mask & IFCAP_RXCSUM)
-ifp->if_capenable ^= IFCAP_RXCSUM;
+if_togglecapenable(ifp, IFCAP_RXCSUM);
 if (mask & IFCAP_RXCSUM_IPV6)
-ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
+if_togglecapenable(ifp, IFCAP_RXCSUM_IPV6);
 if (mask & IFCAP_TSO4) {
-if (!(IFCAP_TSO4 & ifp->if_capenable) &&
-!(IFCAP_TXCSUM & ifp->if_capenable)) {
+if (!(IFCAP_TSO4 & if_getcapenable(ifp)) &&
+!(IFCAP_TXCSUM & if_getcapenable(ifp))) {
 mlx5_en_err(ifp, "enable txcsum first.\n");
 error = EAGAIN;
 goto out;
 }
-ifp->if_capenable ^= IFCAP_TSO4;
-ifp->if_hwassist ^= CSUM_IP_TSO;
+if_togglecapenable(ifp, IFCAP_TSO4);
+if_togglehwassist(ifp, CSUM_IP_TSO);
 }
 if (mask & IFCAP_TSO6) {
-if (!(IFCAP_TSO6 & ifp->if_capenable) &&
-!(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
+if (!(IFCAP_TSO6 & if_getcapenable(ifp)) &&
+!(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) {
 mlx5_en_err(ifp, "enable txcsum6 first.\n");
 error = EAGAIN;
 goto out;
 }
-ifp->if_capenable ^= IFCAP_TSO6;
-ifp->if_hwassist ^= CSUM_IP6_TSO;
+if_togglecapenable(ifp, IFCAP_TSO6);
+if_togglehwassist(ifp, CSUM_IP6_TSO);
 }
 if (mask & IFCAP_VLAN_HWTSO)
-ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
+if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
 if (mask & IFCAP_VLAN_HWFILTER) {
-if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
+if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
 mlx5e_disable_vlan_filter(priv);
 else
 mlx5e_enable_vlan_filter(priv);
-ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
+if_togglecapenable(ifp, IFCAP_VLAN_HWFILTER);
 }
 if (mask & IFCAP_VLAN_HWTAGGING)
-ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
+if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
 if (mask & IFCAP_WOL_MAGIC)
-ifp->if_capenable ^= IFCAP_WOL_MAGIC;
+if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
 if (mask & IFCAP_VXLAN_HWCSUM) {
 const bool was_enabled =
-(ifp->if_capenable & IFCAP_VXLAN_HWCSUM) != 0;
+(if_getcapenable(ifp) & IFCAP_VXLAN_HWCSUM) != 0;
 if (was_enabled)
 mlx5e_del_all_vxlan_rules(priv);
-ifp->if_capenable ^= IFCAP_VXLAN_HWCSUM;
-ifp->if_hwassist ^= CSUM_INNER_IP | CSUM_INNER_IP_UDP |
+if_togglecapenable(ifp, IFCAP_VXLAN_HWCSUM);
+if_togglehwassist(ifp, CSUM_INNER_IP | CSUM_INNER_IP_UDP |
 CSUM_INNER_IP_TCP | CSUM_INNER_IP6_UDP |
-CSUM_INNER_IP6_TCP;
+CSUM_INNER_IP6_TCP);
 if (!was_enabled) {
 int err = mlx5e_add_all_vxlan_rules(priv);
 if (err != 0) {
@@ -3622,9 +3622,9 @@ mlx5e_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
 }
 }
 if (mask & IFCAP_VXLAN_HWTSO) {
-ifp->if_capenable ^= IFCAP_VXLAN_HWTSO;
-ifp->if_hwassist ^= CSUM_INNER_IP_TSO |
-CSUM_INNER_IP6_TSO;
+if_togglecapenable(ifp, IFCAP_VXLAN_HWTSO);
+if_togglehwassist(ifp, CSUM_INNER_IP_TSO |
+CSUM_INNER_IP6_TSO);
 }
 VLAN_CAPABILITIES(ifp);
@@ -3633,10 +3633,10 @@ mlx5e_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
 int was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
 bool need_restart = false;
-ifp->if_capenable ^= IFCAP_LRO;
+if_togglecapenable(ifp, IFCAP_LRO);
 /* figure out if updating HW LRO is needed */
-if (!(ifp->if_capenable & IFCAP_LRO)) {
+if (!(if_getcapenable(ifp) & IFCAP_LRO)) {
 if (priv->params.hw_lro_en) {
 priv->params.hw_lro_en = false;
 need_restart = true;
@@ -3654,8 +3654,8 @@ mlx5e_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
 }
 }
 if (mask & IFCAP_HWRXTSTMP) {
-ifp->if_capenable ^= IFCAP_HWRXTSTMP;
-if (ifp->if_capenable & IFCAP_HWRXTSTMP) {
+if_togglecapenable(ifp, IFCAP_HWRXTSTMP);
+if (if_getcapenable(ifp) & IFCAP_HWRXTSTMP) {
 if (priv->clbr_done == 0)
 mlx5e_reset_calibration_callout(priv);
 } else {
@@ -3663,11 +3663,11 @@ mlx5e_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
 priv->clbr_done = 0;
 }
 }
-mask = drv_ioctl_data->reqcap2 ^ ifp->if_capenable2;
+mask = drv_ioctl_data->reqcap2 ^ if_getcapenable2(ifp);
 if ((mask & IFCAP2_BIT(IFCAP2_RXTLS4)) != 0)
-ifp->if_capenable2 ^= IFCAP2_BIT(IFCAP2_RXTLS4);
+if_togglecapenable2(ifp, IFCAP2_BIT(IFCAP2_RXTLS4));
 if ((mask & IFCAP2_BIT(IFCAP2_RXTLS6)) != 0)
-ifp->if_capenable2 ^= IFCAP2_BIT(IFCAP2_RXTLS6);
+if_togglecapenable2(ifp, IFCAP2_BIT(IFCAP2_RXTLS6));
 out:
 PRIV_UNLOCK(priv);
 break;
@@ -3911,7 +3911,7 @@ static int
 mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
 struct mlx5_core_mkey *mkey)
 {
-struct ifnet *ifp = priv->ifp;
+if_t ifp = priv->ifp;
 struct mlx5_core_dev *mdev = priv->mdev;
 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
 void *mkc;
@@ -4332,14 +4332,14 @@ mlx5e_setup_pauseframes(struct mlx5e_priv *priv)
 }
 static int
-mlx5e_ul_snd_tag_alloc(struct ifnet *ifp,
+mlx5e_ul_snd_tag_alloc(if_t ifp,
 union if_snd_tag_alloc_params *params,
 struct m_snd_tag **ppmt)
 {
 struct mlx5e_priv *priv;
 struct mlx5e_channel *pch;
-priv = ifp->if_softc;
+priv = if_getsoftc(ifp);
 if (unlikely(priv->gone || params->hdr.flowtype == M_HASHTYPE_NONE)) {
 return (EOPNOTSUPP);
@@ -4394,7 +4394,7 @@ mlx5e_ul_snd_tag_free(struct m_snd_tag *pmt)
 }
 static int
-mlx5e_snd_tag_alloc(struct ifnet *ifp,
+mlx5e_snd_tag_alloc(if_t ifp,
 union if_snd_tag_alloc_params *params,
 struct m_snd_tag **ppmt)
 {
@@ -4440,7 +4440,7 @@ static const uint64_t adapter_rates_mlx[NUM_HDWR_RATES_MLX] = {
 };
 static void
-mlx5e_ratelimit_query(struct ifnet *ifp __unused, struct if_ratelimit_query_results *q)
+mlx5e_ratelimit_query(if_t ifp __unused, struct if_ratelimit_query_results *q)
 {
 /*
 * This function needs updating by the driver maintainer!
@@ -4485,7 +4485,7 @@ mlx5e_ifm_add(struct mlx5e_priv *priv, int type)
 static void *
 mlx5e_create_ifp(struct mlx5_core_dev *mdev)
 {
-struct ifnet *ifp;
+if_t ifp;
 struct mlx5e_priv *priv;
 u8 dev_addr[ETHER_ADDR_LEN] __aligned(4);
 struct sysctl_oid_list *child;
@@ -4522,71 +4522,71 @@ mlx5e_create_ifp(struct mlx5_core_dev *mdev)
 goto err_free_ifp;
 }
-ifp->if_softc = priv;
+if_setsoftc(ifp, priv);
 if_initname(ifp, "mce", device_get_unit(mdev->pdev->dev.bsddev));
-ifp->if_mtu = ETHERMTU;
-ifp->if_init = mlx5e_open;
-ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST |
-IFF_KNOWSEPOCH;
-ifp->if_ioctl = mlx5e_ioctl;
-ifp->if_transmit = mlx5e_xmit;
-ifp->if_qflush = if_qflush;
-ifp->if_get_counter = mlx5e_get_counter;
-ifp->if_snd.ifq_maxlen = ifqmaxlen;
+if_setmtu(ifp, ETHERMTU);
+if_setinitfn(ifp, mlx5e_open);
+if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST |
+IFF_KNOWSEPOCH);
+if_setioctlfn(ifp, mlx5e_ioctl);
+if_settransmitfn(ifp, mlx5e_xmit);
+if_setqflushfn(ifp, if_qflush);
+if_setgetcounterfn(ifp, mlx5e_get_counter);
+if_setsendqlen(ifp, ifqmaxlen);
 /*
 * Set driver features
 */
-ifp->if_capabilities |= IFCAP_NV;
-ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6;
-ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
-ifp->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER;
-ifp->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU;
-ifp->if_capabilities |= IFCAP_LRO;
-ifp->if_capabilities |= IFCAP_TSO | IFCAP_VLAN_HWTSO;
-ifp->if_capabilities |= IFCAP_HWSTATS | IFCAP_HWRXTSTMP;
-ifp->if_capabilities |= IFCAP_MEXTPG;
-ifp->if_capabilities |= IFCAP_TXTLS4 | IFCAP_TXTLS6;
+if_setcapabilities(ifp, IFCAP_NV);
+if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6, 0);
+if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING, 0);
+if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER, 0);
+if_setcapabilitiesbit(ifp, IFCAP_LINKSTATE | IFCAP_JUMBO_MTU, 0);
+if_setcapabilitiesbit(ifp, IFCAP_LRO, 0);
+if_setcapabilitiesbit(ifp, IFCAP_TSO | IFCAP_VLAN_HWTSO, 0);
+if_setcapabilitiesbit(ifp, IFCAP_HWSTATS | IFCAP_HWRXTSTMP, 0);
+if_setcapabilitiesbit(ifp, IFCAP_MEXTPG, 0);
+if_setcapabilitiesbit(ifp, IFCAP_TXTLS4 | IFCAP_TXTLS6, 0);
 #ifdef RATELIMIT
-ifp->if_capabilities |= IFCAP_TXRTLMT | IFCAP_TXTLS_RTLMT;
+if_setcapabilitiesbit(ifp, IFCAP_TXRTLMT | IFCAP_TXTLS_RTLMT, 0);
 #endif
-ifp->if_capabilities |= IFCAP_VXLAN_HWCSUM | IFCAP_VXLAN_HWTSO;
-ifp->if_capabilities2 |= IFCAP2_BIT(IFCAP2_RXTLS4) |
-IFCAP2_BIT(IFCAP2_RXTLS6);
-ifp->if_snd_tag_alloc = mlx5e_snd_tag_alloc;
+if_setcapabilitiesbit(ifp, IFCAP_VXLAN_HWCSUM | IFCAP_VXLAN_HWTSO, 0);
+if_setcapabilities2bit(ifp, IFCAP2_BIT(IFCAP2_RXTLS4) |
+IFCAP2_BIT(IFCAP2_RXTLS6), 0);
+if_setsndtagallocfn(ifp, mlx5e_snd_tag_alloc);
 #ifdef RATELIMIT
-ifp->if_ratelimit_query = mlx5e_ratelimit_query;
+if_setratelimitqueryfn(ifp, mlx5e_ratelimit_query);
 #endif
 /* set TSO limits so that we don't have to drop TX packets */
-ifp->if_hw_tsomax = MLX5E_MAX_TX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
-ifp->if_hw_tsomaxsegcount = MLX5E_MAX_TX_MBUF_FRAGS - 1 /* hdr */;
-ifp->if_hw_tsomaxsegsize = MLX5E_MAX_TX_MBUF_SIZE;
+if_sethwtsomax(ifp, MLX5E_MAX_TX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
+if_sethwtsomaxsegcount(ifp, MLX5E_MAX_TX_MBUF_FRAGS - 1 /* hdr */);
+if_sethwtsomaxsegsize(ifp, MLX5E_MAX_TX_MBUF_SIZE);
-ifp->if_capenable = ifp->if_capabilities;
-ifp->if_capenable2 = ifp->if_capabilities2;
-ifp->if_hwassist = 0;
-if (ifp->if_capenable & IFCAP_TSO)
-ifp->if_hwassist |= CSUM_TSO;
-if (ifp->if_capenable & IFCAP_TXCSUM)
-ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP);
-if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
-ifp->if_hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
-if (ifp->if_capabilities & IFCAP_VXLAN_HWCSUM)
-ifp->if_hwassist |= CSUM_INNER_IP6_UDP | CSUM_INNER_IP6_TCP |
+if_setcapenable(ifp, if_getcapabilities(ifp));
+if_setcapenable2(ifp, if_getcapabilities2(ifp));
+if_sethwassist(ifp, 0);
+if (if_getcapenable(ifp) & IFCAP_TSO)
+if_sethwassistbits(ifp, CSUM_TSO, 0);
+if (if_getcapenable(ifp) & IFCAP_TXCSUM)
+if_sethwassistbits(ifp, (CSUM_TCP | CSUM_UDP | CSUM_IP), 0);
+if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
+if_sethwassistbits(ifp, (CSUM_UDP_IPV6 | CSUM_TCP_IPV6), 0);
+if (if_getcapabilities(ifp) & IFCAP_VXLAN_HWCSUM)
+if_sethwassistbits(ifp, CSUM_INNER_IP6_UDP | CSUM_INNER_IP6_TCP |
 CSUM_INNER_IP | CSUM_INNER_IP_UDP | CSUM_INNER_IP_TCP |
-CSUM_ENCAP_VXLAN;
-if (ifp->if_capabilities & IFCAP_VXLAN_HWTSO)
-ifp->if_hwassist |= CSUM_INNER_IP6_TSO | CSUM_INNER_IP_TSO;
+CSUM_ENCAP_VXLAN, 0);
+if (if_getcapabilities(ifp) & IFCAP_VXLAN_HWTSO)
+if_sethwassistbits(ifp, CSUM_INNER_IP6_TSO | CSUM_INNER_IP_TSO, 0);
 /* ifnet sysctl tree */
 sysctl_ctx_init(&priv->sysctl_ctx);
 priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_dev),
-OID_AUTO, ifp->if_dname, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
+OID_AUTO, if_getdname(ifp), CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
 "MLX5 ethernet - interface name");
 if (priv->sysctl_ifnet == NULL) {
 mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
 goto err_free_sysctl;
 }
-snprintf(unit, sizeof(unit), "%d", ifp->if_dunit);
+snprintf(unit, sizeof(unit), "%d", if_getdunit(ifp));
 priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
 OID_AUTO, unit, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
 "MLX5 ethernet - interface unit");
@@ -4682,7 +4682,7 @@ mlx5e_create_ifp(struct mlx5_core_dev *mdev)
 }
 /* set default MTU */
-mlx5e_set_dev_port_mtu(ifp, ifp->if_mtu);
+mlx5e_set_dev_port_mtu(ifp, if_getmtu(ifp));
 /* Set default media status */
 priv->media_status_last = IFM_AVALID;
@@ -4805,7 +4805,7 @@ mlx5e_create_ifp(struct mlx5_core_dev *mdev)
 pa.pa_version = PFIL_VERSION;
 pa.pa_flags = PFIL_IN;
 pa.pa_type = PFIL_TYPE_ETHERNET;
-pa.pa_headname = ifp->if_xname;
+pa.pa_headname = if_name(ifp);
 priv->pfil = pfil_head_register(&pa);
 PRIV_LOCK(priv);
@@ -4866,7 +4866,7 @@ static void
 mlx5e_destroy_ifp(struct mlx5_core_dev *mdev, void *vpriv)
 {
 struct mlx5e_priv *priv = vpriv;
-struct ifnet *ifp = priv->ifp;
+if_t ifp = priv->ifp;
 /* don't allow more IOCTLs */
 priv->gone = 1;
@@ -4964,7 +4964,7 @@ mlx5e_destroy_ifp(struct mlx5_core_dev *mdev, void *vpriv)
 #ifdef DEBUGNET
 static void
-mlx5_en_debugnet_init(struct ifnet *dev, int *nrxr, int *ncl, int *clsize)
+mlx5_en_debugnet_init(if_t dev, int *nrxr, int *ncl, int *clsize)
 {
 struct mlx5e_priv *priv = if_getsoftc(dev);
@@ -4976,12 +4976,12 @@ mlx5_en_debugnet_init(struct ifnet *dev, int *nrxr, int *ncl, int *clsize)
 }
 static void
-mlx5_en_debugnet_event(struct ifnet *dev, enum debugnet_ev event)
+mlx5_en_debugnet_event(if_t dev, enum debugnet_ev event)
 {
 }
 static int
-mlx5_en_debugnet_transmit(struct ifnet *dev, struct mbuf *m)
+mlx5_en_debugnet_transmit(if_t dev, struct mbuf *m)
 {
 struct mlx5e_priv *priv = if_getsoftc(dev);
 struct mlx5e_sq *sq;
@@ -5011,7 +5011,7 @@ mlx5_en_debugnet_transmit(struct ifnet *dev, struct mbuf *m)
 }
 static int
-mlx5_en_debugnet_poll(struct ifnet *dev, int count)
+mlx5_en_debugnet_poll(if_t dev, int count)
 {
 struct mlx5e_priv *priv = if_getsoftc(dev);
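
The bulk of the churn above is in mlx5e_ioctl()'s SIOCSIFCAP handler,
which repeats one idiom per capability bit: XOR the requested
capabilities against the currently enabled set to find the bits that
must change, then flip each changed bit and its matching checksum
offload flags through the toggle accessors. Condensed from the hunks
above (reqcap stands in for the set requested by the caller):

int mask = reqcap ^ if_getcapenable(ifp);	/* bits that must change */

if (mask & IFCAP_TXCSUM) {
	/* was: ifp->if_capenable ^= IFCAP_TXCSUM;
	 *      ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP); */
	if_togglecapenable(ifp, IFCAP_TXCSUM);
	if_togglehwassist(ifp, CSUM_TCP | CSUM_UDP | CSUM_IP);
}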


@@ -515,7 +515,7 @@ mlx5e_rlw_channel_set_rate_locked(struct mlx5e_rl_worker *rlw,
 /* get current burst size in bytes */
 temp = rl->param.tx_burst_size *
-MLX5E_SW2HW_MTU(rlw->priv->ifp->if_mtu);
+MLX5E_SW2HW_MTU(if_getmtu(rlw->priv->ifp));
 /* limit burst size to 64K currently */
 if (temp > 65535)
@@ -1202,7 +1202,7 @@ mlx5e_find_available_tx_ring_index(struct mlx5e_rl_worker *rlw,
 }
 int
-mlx5e_rl_snd_tag_alloc(struct ifnet *ifp,
+mlx5e_rl_snd_tag_alloc(if_t ifp,
 union if_snd_tag_alloc_params *params,
 struct m_snd_tag **ppmt)
 {
@@ -1211,7 +1211,7 @@ mlx5e_rl_snd_tag_alloc(struct ifnet *ifp,
 struct mlx5e_priv *priv;
 int error;
-priv = ifp->if_softc;
+priv = if_getsoftc(ifp);
 /* check if there is support for packet pacing or if device is going away */
 if (!MLX5_CAP_GEN(priv->mdev, qos) ||


@@ -279,7 +279,7 @@ mlx5e_build_rx_mbuf(struct mlx5_cqe64 *cqe,
 struct mlx5e_rq *rq, struct mbuf *mb,
 u32 cqe_bcnt)
 {
-struct ifnet *ifp = rq->ifp;
+if_t ifp = rq->ifp;
 struct mlx5e_channel *c;
 struct mbuf *mb_head;
 int lro_num_seg; /* HW LRO session aggregated packets counter */
@@ -375,7 +375,7 @@ mlx5e_build_rx_mbuf(struct mlx5_cqe64 *cqe,
 } else {
 rq->stats.csum_none++;
 }
-} else if (likely((ifp->if_capenable & (IFCAP_RXCSUM |
+} else if (likely((if_getcapenable(ifp) & (IFCAP_RXCSUM |
 IFCAP_RXCSUM_IPV6)) != 0) &&
 ((cqe->hds_ip_ext & (CQE_L2_OK | CQE_L3_OK | CQE_L4_OK)) ==
 (CQE_L2_OK | CQE_L3_OK | CQE_L4_OK))) {
@@ -501,7 +501,7 @@ mlx5e_poll_rx_cq(struct mlx5e_rq *rq, int budget)
 struct pfil_head *pfil;
 int i, rv;
-CURVNET_SET_QUIET(rq->ifp->if_vnet);
+CURVNET_SET_QUIET(if_getvnet(rq->ifp));
 pfil = rq->channel->priv->pfil;
 for (i = 0; i < budget; i++) {
 struct mlx5e_rx_wqe *wqe;
@@ -586,17 +586,17 @@ mlx5e_poll_rx_cq(struct mlx5e_rq *rq, int budget)
 rq->stats.bytes += byte_cnt;
 rq->stats.packets++;
 #ifdef NUMA
-mb->m_pkthdr.numa_domain = rq->ifp->if_numa_domain;
+mb->m_pkthdr.numa_domain = if_getnumadomain(rq->ifp);
 #endif
 #if !defined(HAVE_TCP_LRO_RX)
 tcp_lro_queue_mbuf(&rq->lro, mb);
 #else
 if (mb->m_pkthdr.csum_flags == 0 ||
-(rq->ifp->if_capenable & IFCAP_LRO) == 0 ||
+(if_getcapenable(rq->ifp) & IFCAP_LRO) == 0 ||
 rq->lro.lro_cnt == 0 ||
 tcp_lro_rx(&rq->lro, mb, 0) != 0) {
-rq->ifp->if_input(rq->ifp, mb);
+if_input(rq->ifp, mb);
 }
 #endif
 wq_ll_pop:
@@ -632,7 +632,7 @@ mlx5e_rx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe __unused)
 mb->m_data[14] = rq->ix;
 mb->m_pkthdr.rcvif = rq->ifp;
 mb->m_pkthdr.leaf_rcvif = rq->ifp;
-rq->ifp->if_input(rq->ifp, mb);
+if_input(rq->ifp, mb);
 }
 #endif
 for (int j = 0; j != MLX5E_MAX_TX_NUM_TC; j++) {
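
On the receive side the interesting substitutions are if_getvnet(),
replacing the direct if_vnet read when entering the interface's VNET,
and if_input(), replacing the indirect call through ifp->if_input when
handing mbufs to the stack. Condensed from the hunks above:

CURVNET_SET_QUIET(if_getvnet(ifp));	/* was: ifp->if_vnet */
/* ... receive processing builds the mbuf mb ... */
if_input(ifp, mb);			/* was: ifp->if_input(ifp, mb) */
CURVNET_RESTORE();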


@@ -88,7 +88,7 @@ mlx5e_hash_init(void *arg)
 SYSINIT(mlx5e_hash_init, SI_SUB_RANDOM, SI_ORDER_ANY, &mlx5e_hash_init, NULL);
 static struct mlx5e_sq *
-mlx5e_select_queue_by_send_tag(struct ifnet *ifp, struct mbuf *mb)
+mlx5e_select_queue_by_send_tag(if_t ifp, struct mbuf *mb)
 {
 struct m_snd_tag *mb_tag;
 struct mlx5e_sq *sq;
@@ -135,9 +135,9 @@ mlx5e_select_queue_by_send_tag(struct ifnet *ifp, struct mbuf *mb)
 }
 static struct mlx5e_sq *
-mlx5e_select_queue(struct ifnet *ifp, struct mbuf *mb)
+mlx5e_select_queue(if_t ifp, struct mbuf *mb)
 {
-struct mlx5e_priv *priv = ifp->if_softc;
+struct mlx5e_priv *priv = if_getsoftc(ifp);
 struct mlx5e_sq *sq;
 u32 ch;
 u32 tc;
@@ -689,7 +689,7 @@ mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
 struct mlx5e_xmit_args args = {};
 struct mlx5_wqe_data_seg *dseg;
 struct mlx5e_tx_wqe *wqe;
-struct ifnet *ifp;
+if_t ifp;
 int nsegs;
 int err;
 int x;
@@ -747,7 +747,7 @@ mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
 mb = *mbp;
 /* Send a copy of the frame to the BPF listener, if any */
-if (ifp != NULL && ifp->if_bpf != NULL)
+if (ifp != NULL && if_getbpf(ifp) != NULL)
 ETHER_BPF_MTAP(ifp, mb);
 if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)) {
@@ -1101,11 +1101,11 @@ mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)
 }
 static int
-mlx5e_xmit_locked(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)
+mlx5e_xmit_locked(if_t ifp, struct mlx5e_sq *sq, struct mbuf *mb)
 {
 int err = 0;
-if (unlikely((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
+if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 ||
 READ_ONCE(sq->running) == 0)) {
 m_freem(mb);
 return (ENETDOWN);
@@ -1137,7 +1137,7 @@ mlx5e_xmit_locked(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)
 }
 int
-mlx5e_xmit(struct ifnet *ifp, struct mbuf *mb)
+mlx5e_xmit(if_t ifp, struct mbuf *mb)
 {
 struct mlx5e_sq *sq;
 int ret;


@@ -650,7 +650,7 @@ struct mlx5_roce {
 * netdev pointer
 */
 rwlock_t netdev_lock;
-struct ifnet *netdev;
+if_t netdev;
 struct notifier_block nb;
 atomic_t next_port;
 };


@@ -87,21 +87,21 @@ mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
 return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
 }
-static bool mlx5_netdev_match(struct ifnet *ndev,
+static bool mlx5_netdev_match(if_t ndev,
 struct mlx5_core_dev *mdev,
 const char *dname)
 {
-return ndev->if_type == IFT_ETHER &&
-ndev->if_dname != NULL &&
-strcmp(ndev->if_dname, dname) == 0 &&
-ndev->if_softc != NULL &&
-*(struct mlx5_core_dev **)ndev->if_softc == mdev;
+return if_gettype(ndev) == IFT_ETHER &&
+if_getdname(ndev) != NULL &&
+strcmp(if_getdname(ndev), dname) == 0 &&
+if_getsoftc(ndev) != NULL &&
+*(struct mlx5_core_dev **)if_getsoftc(ndev) == mdev;
 }
 static int mlx5_netdev_event(struct notifier_block *this,
 unsigned long event, void *ptr)
 {
-struct ifnet *ndev = netdev_notifier_info_to_ifp(ptr);
+if_t ndev = netdev_notifier_info_to_ifp(ptr);
 struct mlx5_ib_dev *ibdev = container_of(this, struct mlx5_ib_dev,
 roce.nb);
@@ -118,7 +118,7 @@ static int mlx5_netdev_event(struct notifier_block *this,
 case NETDEV_UP:
 case NETDEV_DOWN: {
-struct ifnet *upper = NULL;
+if_t upper = NULL;
 if ((upper == ndev || (!upper && ndev == ibdev->roce.netdev))
 && ibdev->ib_active) {
@@ -140,11 +140,11 @@ static int mlx5_netdev_event(struct notifier_block *this,
 return NOTIFY_DONE;
 }
-static struct ifnet *mlx5_ib_get_netdev(struct ib_device *device,
+static if_t mlx5_ib_get_netdev(struct ib_device *device,
 u8 port_num)
 {
 struct mlx5_ib_dev *ibdev = to_mdev(device);
-struct ifnet *ndev;
+if_t ndev;
 /* Ensure ndev does not disappear before we invoke if_ref()
 */
@@ -289,7 +289,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
 {
 struct mlx5_ib_dev *dev = to_mdev(device);
 u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {};
-struct ifnet *ndev;
+if_t ndev;
 enum ib_mtu ndev_ib_mtu;
 u16 qkey_viol_cntr;
 u32 eth_prot_oper;
@@ -334,13 +334,13 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
 if (!ndev)
 return 0;
-if (ndev->if_drv_flags & IFF_DRV_RUNNING &&
-ndev->if_link_state == LINK_STATE_UP) {
+if (if_getdrvflags(ndev) & IFF_DRV_RUNNING &&
+if_getlinkstate(ndev) == LINK_STATE_UP) {
 props->state = IB_PORT_ACTIVE;
 props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
 }
-ndev_ib_mtu = iboe_get_mtu(ndev->if_mtu);
+ndev_ib_mtu = iboe_get_mtu(if_getmtu(ndev));
 if_rele(ndev);
@@ -361,7 +361,7 @@ static void ib_gid_to_mlx5_roce_addr(const union ib_gid *gid,
 if (!gid)
 return;
-ether_addr_copy(mlx5_addr_mac, IF_LLADDR(attr->ndev));
+ether_addr_copy(mlx5_addr_mac, if_getlladdr(attr->ndev));
 vlan_id = rdma_vlan_dev_vlan_id(attr->ndev);
 if (vlan_id != 0xffff) {
@@ -3132,10 +3132,25 @@ static void mlx5_remove_roce_notifier(struct mlx5_ib_dev *dev)
 }
 }
+static int
+mlx5_enable_roce_if_cb(if_t ifp, void *arg)
+{
+struct mlx5_ib_dev *dev = arg;
+/* check if network interface belongs to mlx5en */
+if (!mlx5_netdev_match(ifp, dev->mdev, "mce"))
+return (0);
+write_lock(&dev->roce.netdev_lock);
+dev->roce.netdev = ifp;
+write_unlock(&dev->roce.netdev_lock);
+return (0);
+}
 static int mlx5_enable_roce(struct mlx5_ib_dev *dev)
 {
 VNET_ITERATOR_DECL(vnet_iter);
-struct ifnet *idev;
 int err;
 /* Check if mlx5en net device already exists */
@@ -3143,14 +3158,7 @@ static int mlx5_enable_roce(struct mlx5_ib_dev *dev)
 VNET_FOREACH(vnet_iter) {
 IFNET_RLOCK();
 CURVNET_SET_QUIET(vnet_iter);
-CK_STAILQ_FOREACH(idev, &V_ifnet, if_link) {
-/* check if network interface belongs to mlx5en */
-if (!mlx5_netdev_match(idev, dev->mdev, "mce"))
-continue;
-write_lock(&dev->roce.netdev_lock);
-dev->roce.netdev = idev;
-write_unlock(&dev->roce.netdev_lock);
-}
+if_foreach(mlx5_enable_roce_if_cb, dev);
 CURVNET_RESTORE();
 IFNET_RUNLOCK();
 }
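
The final hunk is the one non-mechanical change in the commit: the
open-coded CK_STAILQ_FOREACH walk over V_ifnet moves behind
if_foreach(), which runs a callback for every interface in the current
VNET. A minimal sketch of the callback idiom, with a hypothetical
counting callback (a non-zero return from the callback is understood to
stop the walk early):

static int
count_ether_cb(if_t ifp, void *arg)	/* hypothetical example callback */
{
	int *count = arg;

	if (if_gettype(ifp) == IFT_ETHER)
		(*count)++;
	return (0);			/* zero: keep iterating */
}

/* caller, in the current VNET: */
int n = 0;
if_foreach(count_ether_cb, &n);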