Updated the mlx4 and mlxen drivers to the latest version, v2.1.6:

- Added support for dumping the SFP EEPROM content to dmesg.
- Fixed handling of network interface capability IOCTLs.
- Fixed race when loading and unloading the mlxen driver by applying
  appropriate locking.
- Removed two unused C files.

MFC after:	1 week
Submitted by:	Mark Bloch <markb@mellanox.com>
Sponsored by:	Mellanox Technologies
Differential Revision:	https://reviews.freebsd.org/D4283
This commit is contained in:
Hans Petter Selasky 2015-12-03 13:29:20 +00:00
parent f837e46d16
commit 6111807106
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=291694
15 changed files with 418 additions and 1886 deletions

File diff suppressed because it is too large Load Diff

View File

@ -42,16 +42,7 @@
#include "mlx4_en.h"
MODULE_AUTHOR("Liran Liss, Yevgeny Petrilin");
MODULE_DESCRIPTION("Mellanox ConnectX HCA Ethernet driver");
MODULE_LICENSE("Dual BSD/GPL");
#ifdef __linux__
MODULE_VERSION(DRV_VERSION " ("DRV_RELDATE")");
#endif
static const char mlx4_en_version[] =
DRV_NAME ": Mellanox ConnectX HCA Ethernet driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
/* Mellanox ConnectX HCA Ethernet driver */
#define MLX4_EN_PARM_INT(X, def_val, desc) \
static unsigned int X = def_val;\
@ -176,8 +167,6 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
int i;
int err;
printk_once(KERN_INFO "%s", mlx4_en_version);
mdev = kzalloc(sizeof *mdev, GFP_KERNEL);
if (!mdev) {
dev_err(&dev->pdev->dev, "Device struct alloc failed, "

View File

@ -658,8 +658,10 @@ static void mlx4_en_cache_mclist(struct net_device *dev)
continue;
/* Make sure the list didn't grow. */
tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
if (tmp == NULL)
if (tmp == NULL) {
en_err(priv, "Failed to allocate multicast list\n");
break;
}
memcpy(tmp->addr,
LLADDR((struct sockaddr_dl *)ifma->ifma_addr), ETH_ALEN);
list_add_tail(&tmp->list, &priv->mc_list);
@ -970,12 +972,12 @@ static void mlx4_en_do_set_rx_mode(struct work_struct *work)
if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
if (priv->port_state.link_state) {
priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
/* Important note: the following call for if_link_state_change
* is needed for interface up scenario (start port, link state
* change) */
/* update netif baudrate */
priv->dev->if_baudrate =
IF_Mbps(priv->port_state.link_speed);
/* Important note: the following call for if_link_state_change
* is needed for interface up scenario (start port, link state
* change) */
if_link_state_change(priv->dev, LINK_STATE_UP);
en_dbg(HW, priv, "Link Up\n");
}
@ -1195,8 +1197,8 @@ static void mlx4_en_linkstate(struct work_struct *work)
/* update netif baudrate */
priv->dev->if_baudrate = 0;
/* make sure the port is up before notifying the OS.
* This is tricky since we get here on INIT_PORT and
/* make sure the port is up before notifying the OS.
* This is tricky since we get here on INIT_PORT and
* in such case we can't tell the OS the port is up.
* To solve this there is a call to if_link_state_change
* in set_rx_mode.
@ -1574,6 +1576,7 @@ static void mlx4_en_clear_stats(struct net_device *dev)
priv->tx_ring[i]->bytes = 0;
priv->tx_ring[i]->packets = 0;
priv->tx_ring[i]->tx_csum = 0;
priv->tx_ring[i]->oversized_packets = 0;
}
for (i = 0; i < priv->rx_ring_num; i++) {
priv->rx_ring[i]->bytes = 0;
@ -1643,8 +1646,6 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
if (priv->sysctl)
sysctl_ctx_free(&priv->stat_ctx);
}
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
@ -1729,8 +1730,11 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach);
/* Unregister device - this will close the port if it was up */
if (priv->registered)
if (priv->registered) {
mutex_lock(&mdev->state_lock);
ether_ifdetach(dev);
mutex_unlock(&mdev->state_lock);
}
if (priv->allocated)
mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
@ -1808,13 +1812,6 @@ static int mlx4_en_calc_media(struct mlx4_en_priv *priv)
active = IFM_ETHER;
if (priv->last_link_state == MLX4_DEV_EVENT_PORT_DOWN)
return (active);
/*
* [ShaharK] mlx4_en_QUERY_PORT sleeps and cannot be called under a
* non-sleepable lock.
* I moved it to the periodic mlx4_en_do_get_stats.
if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
return (active);
*/
active |= IFM_FDX;
trans_type = priv->port_state.transciver;
/* XXX I don't know all of the transceiver values. */
@ -1947,12 +1944,55 @@ static int mlx4_en_ioctl(struct ifnet *dev, u_long command, caddr_t data)
case SIOCSIFCAP:
mutex_lock(&mdev->state_lock);
mask = ifr->ifr_reqcap ^ dev->if_capenable;
if (mask & IFCAP_HWCSUM)
dev->if_capenable ^= IFCAP_HWCSUM;
if (mask & IFCAP_TSO4)
if (mask & IFCAP_TXCSUM) {
dev->if_capenable ^= IFCAP_TXCSUM;
dev->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
if (IFCAP_TSO4 & dev->if_capenable &&
!(IFCAP_TXCSUM & dev->if_capenable)) {
dev->if_capenable &= ~IFCAP_TSO4;
dev->if_hwassist &= ~CSUM_IP_TSO;
if_printf(dev,
"tso4 disabled due to -txcsum.\n");
}
}
if (mask & IFCAP_TXCSUM_IPV6) {
dev->if_capenable ^= IFCAP_TXCSUM_IPV6;
dev->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
if (IFCAP_TSO6 & dev->if_capenable &&
!(IFCAP_TXCSUM_IPV6 & dev->if_capenable)) {
dev->if_capenable &= ~IFCAP_TSO6;
dev->if_hwassist &= ~CSUM_IP6_TSO;
if_printf(dev,
"tso6 disabled due to -txcsum6.\n");
}
}
if (mask & IFCAP_RXCSUM)
dev->if_capenable ^= IFCAP_RXCSUM;
if (mask & IFCAP_RXCSUM_IPV6)
dev->if_capenable ^= IFCAP_RXCSUM_IPV6;
if (mask & IFCAP_TSO4) {
if (!(IFCAP_TSO4 & dev->if_capenable) &&
!(IFCAP_TXCSUM & dev->if_capenable)) {
if_printf(dev, "enable txcsum first.\n");
error = EAGAIN;
goto out;
}
dev->if_capenable ^= IFCAP_TSO4;
if (mask & IFCAP_TSO6)
dev->if_hwassist ^= CSUM_IP_TSO;
}
if (mask & IFCAP_TSO6) {
if (!(IFCAP_TSO6 & dev->if_capenable) &&
!(IFCAP_TXCSUM_IPV6 & dev->if_capenable)) {
if_printf(dev, "enable txcsum6 first.\n");
error = EAGAIN;
goto out;
}
dev->if_capenable ^= IFCAP_TSO6;
dev->if_hwassist ^= CSUM_IP6_TSO;
}
if (mask & IFCAP_LRO)
dev->if_capenable ^= IFCAP_LRO;
if (mask & IFCAP_VLAN_HWTAGGING)
@ -1963,9 +2003,11 @@ static int mlx4_en_ioctl(struct ifnet *dev, u_long command, caddr_t data)
dev->if_capenable ^= IFCAP_WOL_MAGIC;
if (dev->if_drv_flags & IFF_DRV_RUNNING)
mlx4_en_start_port(dev);
out:
mutex_unlock(&mdev->state_lock);
VLAN_CAPABILITIES(dev);
break;
#if __FreeBSD_version >= 1100036
case SIOCGI2C: {
struct ifi2creq i2c;
@ -1989,6 +2031,7 @@ static int mlx4_en_ioctl(struct ifnet *dev, u_long command, caddr_t data)
error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
break;
}
#endif
default:
error = ether_ioctl(dev, command, data);
break;
@ -2088,7 +2131,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
INIT_HLIST_HEAD(&priv->mac_hash[i]);
/* Query for default mac and max mtu */
priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
priv->mac = mdev->dev->caps.def_mac[priv->port];
@ -2104,8 +2146,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
goto out;
}
priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
DS_SIZE);
@ -2127,7 +2167,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
/*
* Set driver features
*/
dev->if_capabilities |= IFCAP_RXCSUM | IFCAP_TXCSUM;
dev->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6;
dev->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
dev->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER;
dev->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU;
@ -2136,10 +2176,12 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
if (mdev->LSO_support)
dev->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTSO;
#if __FreeBSD_version >= 1100000
/* set TSO limits so that we don't have to drop TX packets */
dev->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
dev->if_hw_tsomaxsegcount = 16;
dev->if_hw_tsomaxsegsize = 65536; /* XXX can do up to 4GByte */
dev->if_hw_tsomax = MLX4_EN_TX_MAX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) /* hdr */;
dev->if_hw_tsomaxsegcount = MLX4_EN_TX_MAX_MBUF_FRAGS - 1 /* hdr */;
dev->if_hw_tsomaxsegsize = MLX4_EN_TX_MAX_MBUF_SIZE;
#endif
dev->if_capenable = dev->if_capabilities;
@ -2148,6 +2190,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->if_hwassist |= CSUM_TSO;
if (dev->if_capenable & IFCAP_TXCSUM)
dev->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP);
if (dev->if_capenable & IFCAP_TXCSUM_IPV6)
dev->if_hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
/* Register for VLAN events */
@ -2210,8 +2254,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
queue_delayed_work(mdev->workqueue, &priv->service_task, SERVICE_TASK_DELAY);
return 0;
out:
@ -2293,6 +2335,162 @@ static int mlx4_en_set_tx_ring_size(SYSCTL_HANDLER_ARGS)
return (error);
}
/*
 * mlx4_en_get_module_info() - classify the plugged cable/module EEPROM.
 *
 * Reads the first two EEPROM bytes (identifier and revision ID) through
 * mlx4_get_module_info() and fills in *modinfo with the matching SFF
 * specification type and total EEPROM length.
 *
 * Returns 0 on success, -EIO when the initial two-byte read fails or is
 * short, or -EINVAL when the module identifier is not recognized.
 */
static int mlx4_en_get_module_info(struct net_device *dev,
struct ethtool_modinfo *modinfo)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int ret;
u8 data[4];
/* Read first 2 bytes to get Module & REV ID */
ret = mlx4_get_module_info(mdev->dev, priv->port,
0/*offset*/, 2/*size*/, data);
if (ret < 2) {
/* Covers both a negative error code and a short (<2 byte) read. */
en_err(priv, "Failed to read eeprom module first two bytes, error: 0x%x\n", -ret);
return -EIO;
}
switch (data[0] /* identifier */) {
case MLX4_MODULE_ID_QSFP:
modinfo->type = ETH_MODULE_SFF_8436;
modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
break;
case MLX4_MODULE_ID_QSFP_PLUS:
/* QSFP+ revision 3 and later use the newer SFF-8636 layout. */
if (data[1] >= 0x3) { /* revision id */
modinfo->type = ETH_MODULE_SFF_8636;
modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
} else {
modinfo->type = ETH_MODULE_SFF_8436;
modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
}
break;
case MLX4_MODULE_ID_QSFP28:
modinfo->type = ETH_MODULE_SFF_8636;
modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
break;
case MLX4_MODULE_ID_SFP:
modinfo->type = ETH_MODULE_SFF_8472;
modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
break;
default:
en_err(priv, "mlx4_en_get_module_info : Not recognized cable type\n");
return -EINVAL;
}
return 0;
}
/*
 * mlx4_en_get_module_eeprom() - read ee->len bytes of module EEPROM.
 *
 * Zeroes @data and then reads the EEPROM in as many chunks as the
 * firmware returns, starting at ee->offset, until ee->len bytes have
 * been gathered.
 *
 * Returns 0 on success (including an early "done reading" zero return
 * from the firmware), -EINVAL for a zero-length request, or -1 when a
 * chunk read fails.
 */
static int mlx4_en_get_module_eeprom(struct net_device *dev,
struct ethtool_eeprom *ee,
u8 *data)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int offset = ee->offset;
int i = 0, ret;
if (ee->len == 0)
return -EINVAL;
/* Start from a clean buffer so a partial read leaves zeroes behind. */
memset(data, 0, ee->len);
while (i < ee->len) {
en_dbg(DRV, priv,
"mlx4_get_module_info i(%d) offset(%d) len(%d)\n",
i, offset, ee->len - i);
/* Each call may return fewer bytes than requested. */
ret = mlx4_get_module_info(mdev->dev, priv->port,
offset, ee->len - i, data + i);
if (!ret) /* Done reading */
return 0;
if (ret < 0) {
en_err(priv,
"mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n",
i, offset, ee->len - i, ret);
return -1;
}
i += ret;
offset += ret;
}
return 0;
}
/*
 * mlx4_en_print_eeprom() - hex-dump @len bytes of EEPROM content to the
 * console, 16 bytes per row, each row prefixed with its byte offset.
 *
 * The previous loop always printed full 16-byte rows and therefore read
 * up to 15 bytes past @len whenever @len was not a multiple of 16; this
 * version stops exactly at @len. Using an unsigned index also avoids
 * the signed/unsigned comparison against the __u32 length.
 */
static void mlx4_en_print_eeprom(u8 *data, __u32 len)
{
	const __u32 bytes_per_row = 16;
	__u32 i;

	printf("\nOffset\t\tValues\n");
	printf("------\t\t------\n");
	for (i = 0; i < len; i++) {
		if ((i % bytes_per_row) == 0)
			printf("0x%04x\t\t", i);
		printf("%02x ", data[i]);
		if ((i % bytes_per_row) == (bytes_per_row - 1))
			printf("\n");
	}
	/* Terminate a trailing partial row. */
	if ((len % bytes_per_row) != 0)
		printf("\n");
}
}
/* Read cable EEPROM module information by first inspecting the first
* two bytes to get the length and then read the rest of the information.
* The information is printed to dmesg. */
static int mlx4_en_read_eeprom(SYSCTL_HANDLER_ARGS)
{
u8* data;
int error;
int result = 0;
struct mlx4_en_priv *priv;
struct net_device *dev;
struct ethtool_modinfo modinfo;
struct ethtool_eeprom ee;
error = sysctl_handle_int(oidp, &result, 0, req);
if (error || !req->newptr)
return (error);
if (result == 1) {
priv = arg1;
dev = priv->dev;
data = kmalloc(PAGE_SIZE, GFP_KERNEL);
error = mlx4_en_get_module_info(dev, &modinfo);
if (error) {
en_err(priv,
"mlx4_en_get_module_info returned with error - FAILED (0x%x)\n",
-error);
goto out;
}
ee.len = modinfo.eeprom_len;
ee.offset = 0;
error = mlx4_en_get_module_eeprom(dev, &ee, data);
if (error) {
en_err(priv,
"mlx4_en_get_module_eeprom returned with error - FAILED (0x%x)\n",
-error);
/* Continue printing partial information in case of an error */
}
/* EEPROM information will be printed in dmesg */
mlx4_en_print_eeprom(data, ee.len);
out:
kfree(data);
}
/* Return zero to prevent sysctl failure. */
return (0);
}
static int mlx4_en_set_tx_ppp(SYSCTL_HANDLER_ARGS)
{
struct mlx4_en_priv *priv;
@ -2418,7 +2616,7 @@ static void mlx4_en_sysctl_conf(struct mlx4_en_priv *priv)
/* Add coalescer configuration. */
coal = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO,
"coalesce", CTLFLAG_RD, NULL, "Interrupt coalesce configuration");
coal_list = SYSCTL_CHILDREN(node);
coal_list = SYSCTL_CHILDREN(coal);
SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "pkt_rate_low",
CTLFLAG_RW, &priv->pkt_rate_low, 0,
"Packets per-second for minimum delay");
@ -2437,11 +2635,14 @@ static void mlx4_en_sysctl_conf(struct mlx4_en_priv *priv)
SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "adaptive_rx_coal",
CTLFLAG_RW, &priv->adaptive_rx_coal, 0,
"Enable adaptive rx coalescing");
/* EEPROM support */
SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "eeprom_info",
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
mlx4_en_read_eeprom, "I", "EEPROM information");
}
static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv)
{
struct net_device *dev;
struct sysctl_ctx_list *ctx;
struct sysctl_oid *node;
struct sysctl_oid_list *node_list;
@ -2452,8 +2653,6 @@ static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv)
char namebuf[128];
int i;
dev = priv->dev;
ctx = &priv->stat_ctx;
sysctl_ctx_init(ctx);
node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(priv->sysctl), OID_AUTO,
@ -2481,6 +2680,8 @@ static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv)
&priv->port_stats.wake_queue, "Queue resumed after full");
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_timeout", CTLFLAG_RD,
&priv->port_stats.tx_timeout, "Transmit timeouts");
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_oversized_packets", CTLFLAG_RD,
&priv->port_stats.oversized_packets, "TX oversized packets, m_defrag failed");
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_alloc_failed", CTLFLAG_RD,
&priv->port_stats.rx_alloc_failed, "RX failed to allocate mbuf");
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_chksum_good", CTLFLAG_RD,
@ -2564,7 +2765,7 @@ struct mlx4_en_pkt_stats {
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_packets", CTLFLAG_RD,
&priv->pkstats.tx_packets, "TX packets");
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_bytes", CTLFLAG_RD,
&priv->pkstats.tx_packets, "TX Bytes");
&priv->pkstats.tx_bytes, "TX Bytes");
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_multicast_packets", CTLFLAG_RD,
&priv->pkstats.tx_multicast_packets, "TX Multicast Packets");
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_broadcast_packets", CTLFLAG_RD,
@ -2605,8 +2806,8 @@ struct mlx4_en_pkt_stats {
CTLFLAG_RD, &tx_ring->packets, "TX packets");
SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "bytes",
CTLFLAG_RD, &tx_ring->bytes, "TX bytes");
}
for (i = 0; i < priv->rx_ring_num; i++) {
rx_ring = priv->rx_ring[i];
snprintf(namebuf, sizeof(namebuf), "rx_ring%d", i);

View File

@ -194,6 +194,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
priv->port_stats.tx_chksum_offload += priv->tx_ring[i]->tx_csum;
priv->port_stats.queue_stopped += priv->tx_ring[i]->queue_stopped;
priv->port_stats.wake_queue += priv->tx_ring[i]->wake_queue;
priv->port_stats.oversized_packets += priv->tx_ring[i]->oversized_packets;
}
/* RX Statistics */
priv->pkstats.rx_packets = be64_to_cpu(mlx4_en_stats->RTOT_prio_0) +
@ -546,8 +547,9 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
}
if (!mlx4_is_mfunc(mdev->dev)) {
/* netdevice stats format */
#if __FreeBSD_version >= 1100000
if (reset == 0) {
/* netdevice stats format */
dev = mdev->pndev[port];
if_inc_counter(dev, IFCOUNTER_IPACKETS,
priv->pkstats.rx_packets - priv->pkstats_last.rx_packets);
@ -567,6 +569,18 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
priv->pkstats.tx_multicast_packets - priv->pkstats_last.tx_multicast_packets);
}
priv->pkstats_last = priv->pkstats;
#else
dev = mdev->pndev[port];
dev->if_ipackets = priv->pkstats.rx_packets;
dev->if_opackets = priv->pkstats.tx_packets;
dev->if_ibytes = priv->pkstats.rx_bytes;
dev->if_obytes = priv->pkstats.tx_bytes;
dev->if_ierrors = priv->pkstats.rx_errors;
dev->if_iqdrops = priv->pkstats.rx_dropped;
dev->if_imcasts = priv->pkstats.rx_multicast_packets;
dev->if_omcasts = priv->pkstats.tx_multicast_packets;
dev->if_collisions = 0;
#endif
}
spin_unlock(&priv->stats_lock);

View File

@ -49,7 +49,8 @@ static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring,
int index)
{
struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
struct mlx4_en_rx_desc *rx_desc = (struct mlx4_en_rx_desc *)
(ring->buf + (ring->stride * index));
int possible_frags;
int i;
@ -102,7 +103,8 @@ static int mlx4_en_alloc_buf(struct mlx4_en_priv *priv,
static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring, int index)
{
struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
struct mlx4_en_rx_desc *rx_desc = (struct mlx4_en_rx_desc *)
(ring->buf + (index * ring->stride));
struct mbuf **mb_list = ring->rx_info + (index << priv->log_rx_info);
int i;
@ -130,7 +132,8 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_frag_info *frag_info;
struct mlx4_en_dev *mdev = priv->mdev;
struct mbuf **mb_list;
struct mlx4_en_rx_desc *rx_desc = ring->buf + (index << ring->log_stride);
struct mlx4_en_rx_desc *rx_desc = (struct mlx4_en_rx_desc *)
(ring->buf + (index << ring->log_stride));
dma_addr_t dma;
int nr;
@ -574,7 +577,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
cons_index & size)) {
mb_list = ring->rx_info + (index << priv->log_rx_info);
rx_desc = ring->buf + (index << ring->log_stride);
rx_desc = (struct mlx4_en_rx_desc *)
(ring->buf + (index << ring->log_stride));
/*
* make sure we read the CQE after we read the ownership bit
@ -611,7 +615,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
mb->m_pkthdr.ether_vtag = be16_to_cpu(cqe->sl_vid);
mb->m_flags |= M_VLANTAG;
}
if (likely(dev->if_capabilities & IFCAP_RXCSUM) &&
if (likely(dev->if_capenable &
(IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) &&
(cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
(cqe->checksum == cpu_to_be16(0xffff))) {
priv->port_stats.rx_chksum_good++;
@ -692,6 +697,7 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
// Because there is no NAPI in freeBSD
done = mlx4_en_poll_rx_cq(cq, MLX4_EN_RX_BUDGET);
if (priv->port_up && (done == MLX4_EN_RX_BUDGET) ) {
cq->curr_poll_rx_cpu_id = curcpu;
taskqueue_enqueue(cq->tq, &cq->cq_task);
}
else {
@ -702,8 +708,15 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
void mlx4_en_rx_que(void *context, int pending)
{
struct mlx4_en_cq *cq;
struct thread *td;
cq = context;
td = curthread;
thread_lock(td);
sched_bind(td, cq->curr_poll_rx_cpu_id);
thread_unlock(td);
while (mlx4_en_poll_rx_cq(cq, MLX4_EN_RX_BUDGET)
== MLX4_EN_RX_BUDGET);
mlx4_en_arm_cq(cq->dev->if_softc, cq);
@ -841,8 +854,8 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
else
rss_rings = priv->prof->rss_rings;
ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path)
+ MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
ptr = ((u8 *)&context) + offsetof(struct mlx4_qp_context, pri_path) +
MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
rss_context = ptr;
rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
(rss_map->base_qpn));

View File

@ -1,178 +0,0 @@
/*
* Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/mlx4/driver.h>
#include "mlx4_en.h"
/* Self-test: ask the firmware to run its hardware health check.
 * Returns the mlx4_cmd() status (0 on success). */
static int mlx4_en_test_registers(struct mlx4_en_priv *priv)
{
return mlx4_cmd(priv->mdev->dev, 0, 0, 0, MLX4_CMD_HW_HEALTH_CHECK,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
/*
 * Build a self-addressed test frame and transmit it, to be caught by
 * the loopback receive path. Returns 0 on successful transmit, -ENOMEM
 * when the skb allocation fails, or the mlx4_en_xmit() error.
 */
static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
{
struct sk_buff *skb;
struct ethhdr *ethh;
unsigned char *packet;
unsigned int packet_size = MLX4_LOOPBACK_TEST_PAYLOAD;
unsigned int i;
int err;
/* build the pkt before xmit */
skb = netdev_alloc_skb(priv->dev, MLX4_LOOPBACK_TEST_PAYLOAD + ETH_HLEN + NET_IP_ALIGN);
if (!skb) {
en_err(priv, "-LOOPBACK_TEST_XMIT- failed to create skb for xmit\n");
return -ENOMEM;
}
skb_reserve(skb, NET_IP_ALIGN);
ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr));
packet = (unsigned char *)skb_put(skb, packet_size);
/* Destination is our own MAC so the frame loops back to us. */
memcpy(ethh->h_dest, priv->dev->dev_addr, ETH_ALEN);
memset(ethh->h_source, 0, ETH_ALEN);
ethh->h_proto = htons(ETH_P_ARP);
skb_set_mac_header(skb, 0);
/* Payload is a recognizable 0x00..0xff ramp pattern. */
for (i = 0; i < packet_size; ++i) /* fill our packet */
packet[i] = (unsigned char)(i & 0xff);
/* xmit the pkt */
err = mlx4_en_xmit(skb, priv->dev);
return err;
}
/*
 * Self-test: enable loopback validation, transmit a test frame and
 * poll for its arrival (the RX path sets priv->loopback_ok when the
 * frame is recognized). Returns 0 when the packet made it back,
 * nonzero otherwise.
 */
static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
{
u32 loopback_ok = 0;
int i;
priv->loopback_ok = 0;
priv->validate_loopback = 1;
mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
/* xmit */
if (mlx4_en_test_loopback_xmit(priv)) {
en_err(priv, "Transmitting loopback packet failed\n");
goto mlx4_en_test_loopback_exit;
}
/* polling for result */
for (i = 0; i < MLX4_EN_LOOPBACK_RETRIES; ++i) {
msleep(MLX4_EN_LOOPBACK_TIMEOUT);
if (priv->loopback_ok) {
loopback_ok = 1;
break;
}
}
if (!loopback_ok)
en_err(priv, "Loopback packet didn't arrive\n");
mlx4_en_test_loopback_exit:
/* Always restore the normal (non-validating) loopback state. */
priv->validate_loopback = 0;
mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
return !loopback_ok;
}
/*
 * Self-test: report whether the physical link is up.
 * Returns 0 when the link is up, 1 when it is down, or -ENOMEM when
 * querying the port state fails.
 */
static int mlx4_en_test_link(struct mlx4_en_priv *priv)
{
	if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
		return -ENOMEM;

	return (priv->port_state.link_state == 1) ? 0 : 1;
}
/*
 * Self-test: verify the negotiated link speed is one the device
 * supports. Returns 0 for a supported speed, the unexpected speed
 * value otherwise, or -ENOMEM when the port query fails.
 */
static int mlx4_en_test_speed(struct mlx4_en_priv *priv)
{
	if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
		return -ENOMEM;

	switch (priv->port_state.link_speed) {
	case MLX4_EN_LINK_SPEED_1G:
	case MLX4_EN_LINK_SPEED_10G:
	case MLX4_EN_LINK_SPEED_40G:
		/* The device supports 1G, 10G and 40G speed */
		return 0;
	default:
		return priv->port_state.link_speed;
	}
}
/*
 * Ethtool self-test entry point. When ETH_TEST_FL_OFFLINE is requested,
 * runs the disruptive tests (register health check and loopback) with
 * the carrier forced off, then restores it. The online interrupt, link
 * and speed tests always run. Per-test results land in buf[]; any
 * nonzero result sets ETH_TEST_FL_FAILED in *flags.
 */
void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int i, carrier_ok;
memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST);
if (*flags & ETH_TEST_FL_OFFLINE) {
/* disable the interface */
carrier_ok = netif_carrier_ok(dev);
netif_carrier_off(dev);
/* Wait until all tx queues are empty.
* there should not be any additional incoming traffic
* since we turned the carrier off */
msleep(200);
if (priv->mdev->dev->caps.flags &
MLX4_DEV_CAP_FLAG_UC_LOOPBACK) {
buf[3] = mlx4_en_test_registers(priv);
if (priv->port_up)
buf[4] = mlx4_en_test_loopback(priv);
}
/* Restore the carrier only if it was up when we started. */
if (carrier_ok)
netif_carrier_on(dev);
}
buf[0] = mlx4_test_interrupts(mdev->dev);
buf[1] = mlx4_en_test_link(priv);
buf[2] = mlx4_en_test_speed(priv);
for (i = 0; i < MLX4_EN_NUM_SELF_TEST; i++) {
if (buf[i])
*flags |= ETH_TEST_FL_FAILED;
}
}

View File

@ -249,7 +249,8 @@ static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
int index, u8 owner)
{
struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
struct mlx4_en_tx_desc *tx_desc = (struct mlx4_en_tx_desc *)
(ring->buf + index * TXBB_SIZE);
void *end = ring->buf + ring->buf_size;
__be32 *ptr = (__be32 *)tx_desc;
__be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
@ -268,7 +269,7 @@ static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
*ptr = stamp;
ptr += STAMP_DWORDS;
if ((void *)ptr >= end) {
ptr = ring->buf;
ptr = (__be32 *)ring->buf;
stamp ^= cpu_to_be32(0x80000000);
}
}
@ -280,7 +281,8 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
struct mlx4_en_tx_desc *tx_desc = (struct mlx4_en_tx_desc *)
(ring->buf + index * TXBB_SIZE);
struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
struct mbuf *mb = tx_info->mb;
void *end = ring->buf + ring->buf_size;
@ -307,7 +309,8 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
} else {
if (!tx_info->inl) {
if ((void *) data >= end) {
data = ring->buf + ((void *)data - end);
data = (struct mlx4_wqe_data_seg *)
(ring->buf + ((void *)data - end));
}
if (tx_info->linear) {
@ -321,7 +324,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
for (i = 0; i < frags; i++) {
/* Check for wraparound before unmapping */
if ((void *) data >= end)
data = ring->buf;
data = (struct mlx4_wqe_data_seg *)ring->buf;
pci_unmap_single(mdev->pdev,
(dma_addr_t) be64_to_cpu(data->addr),
data->byte_count, PCI_DMA_TODEVICE);
@ -522,7 +525,7 @@ static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
}
/* Return real descriptor location */
return ring->buf + index * TXBB_SIZE;
return (struct mlx4_en_tx_desc *)(ring->buf + index * TXBB_SIZE);
}
static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
@ -723,18 +726,14 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct mbuf *mb)
up = (vlan_tag >> 13) % MLX4_EN_NUM_UP;
}
#endif
/* check if flowid is set */
if (M_HASHTYPE_GET(mb) != M_HASHTYPE_NONE)
queue_index = mb->m_pkthdr.flowid;
else
queue_index = m_ether_tcpip_hash(MBUF_HASHFLAG_L3 | MBUF_HASHFLAG_L4, mb, hashrandom);
queue_index = m_ether_tcpip_hash(MBUF_HASHFLAG_L3 | MBUF_HASHFLAG_L4, mb, hashrandom);
return ((queue_index % rings_p_up) + (up * rings_p_up));
}
static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt)
static void mlx4_bf_copy(void __iomem *dst, volatile unsigned long *src, unsigned bytecnt)
{
__iowrite64_copy(dst, src, bytecnt / 8);
__iowrite64_copy(dst, __DEVOLATILE(void *, src), bytecnt / 8);
}
static u64 mlx4_en_mac_to_u64(u8 *addr)
@ -843,7 +842,7 @@ static int mlx4_en_xmit(struct net_device *dev, int tx_ind, struct mbuf **mbp)
/* See if we have enough space for whole descriptor TXBB for setting
* SW ownership on next descriptor; if not, use a bounce buffer. */
if (likely(index + nr_txbb <= ring_size))
tx_desc = ring->buf + index * TXBB_SIZE;
tx_desc = (struct mlx4_en_tx_desc *)(ring->buf + index * TXBB_SIZE);
else {
tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
bounce = true;
@ -1018,10 +1017,13 @@ mlx4_en_transmit_locked(struct ifnet *dev, int tx_ind, struct mbuf *m)
}
enqueued = 0;
if (m != NULL) {
if ((err = drbr_enqueue(dev, ring->br, m)) != 0)
return (err);
}
if (m != NULL)
/*
* If we can't insert mbuf into drbr, try to xmit anyway.
* We keep the error we got so we could return that after xmit.
*/
err = drbr_enqueue(dev, ring->br, m);
/* Process the queue */
while ((next = drbr_peek(dev, ring->br)) != NULL) {
if ((err = mlx4_en_xmit(dev, tx_ind, &next)) != 0) {
@ -1075,10 +1077,14 @@ mlx4_en_transmit(struct ifnet *dev, struct mbuf *m)
int i, err = 0;
/* Compute which queue to use */
i = mlx4_en_select_queue(dev, m);
if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
i = m->m_pkthdr.flowid % priv->tx_ring_num;
}
else {
i = mlx4_en_select_queue(dev, m);
}
ring = priv->tx_ring[i];
if (spin_trylock(&ring->tx_lock)) {
err = mlx4_en_transmit_locked(dev, i, m);
spin_unlock(&ring->tx_lock);

View File

@ -33,7 +33,7 @@
* SOFTWARE.
*/
#include <linux/kmod.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
@ -53,9 +53,7 @@
#include "icm.h"
#include "mlx4_stats.h"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
/* Mellanox ConnectX HCA low-level driver */
struct workqueue_struct *mlx4_wq;
@ -173,7 +171,7 @@ MODULE_PARM_DESC(enable_64b_cqe_eqe,
#define PF_CONTEXT_BEHAVIOUR_MASK MLX4_FUNC_CAP_64B_EQE_CQE
static char mlx4_version[] __devinitdata =
DRV_NAME ": Mellanox ConnectX core driver v"
DRV_NAME ": Mellanox ConnectX VPI driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
static int log_num_mac = 7;
@ -1295,6 +1293,43 @@ static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
}
}
/*
 * sysfs "board_id" attribute: print the adapter board identifier
 * (up to MLX4_BOARD_ID_LEN characters) followed by a newline.
 */
static ssize_t
show_board(struct device *device, struct device_attribute *attr,
char *buf)
{
struct mlx4_hca_info *info = container_of(attr, struct mlx4_hca_info,
board_attr);
struct mlx4_dev *mdev = info->dev;
return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
mdev->board_id);
}
/*
 * sysfs "hca" attribute: print the HCA type as "MT<pci-device-id>".
 */
static ssize_t
show_hca(struct device *device, struct device_attribute *attr,
char *buf)
{
struct mlx4_hca_info *info = container_of(attr, struct mlx4_hca_info,
hca_attr);
struct mlx4_dev *mdev = info->dev;
return sprintf(buf, "MT%d\n", mdev->pdev->device);
}
/*
 * sysfs "fw_ver" attribute: print the firmware version as
 * "major.minor.subminor", decoded from the packed 64-bit fw_ver field.
 */
static ssize_t
show_firmware_version(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct mlx4_hca_info *info = container_of(attr, struct mlx4_hca_info,
firmware_attr);
struct mlx4_dev *mdev = info->dev;
return sprintf(buf, "%d.%d.%d\n", (int)(mdev->caps.fw_ver >> 32),
(int)(mdev->caps.fw_ver >> 16) & 0xffff,
(int)mdev->caps.fw_ver & 0xffff);
}
static ssize_t show_port_ib_mtu(struct device *dev,
struct device_attribute *attr,
char *buf)
@ -2937,6 +2972,30 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
no_irq:
dev->caps.num_comp_vectors = 0;
dev->caps.comp_pool = 0;
return;
}
/*
 * mlx4_init_hca_info() - publish per-HCA sysfs attributes.
 *
 * Creates the read-only fw_ver, hca and board_id device files backed by
 * the show_* handlers above. A failure to create any one file is logged
 * but not fatal. Log messages now carry the trailing newline used by
 * every other message in this driver.
 */
static void
mlx4_init_hca_info(struct mlx4_dev *dev)
{
	struct mlx4_hca_info *info = &mlx4_priv(dev)->hca_info;

	info->dev = dev;

	info->firmware_attr = (struct device_attribute)__ATTR(fw_ver, S_IRUGO,
						show_firmware_version, NULL);
	if (device_create_file(&dev->pdev->dev, &info->firmware_attr))
		mlx4_err(dev, "Failed to add file firmware version\n");

	info->hca_attr = (struct device_attribute)__ATTR(hca, S_IRUGO, show_hca,
						NULL);
	if (device_create_file(&dev->pdev->dev, &info->hca_attr))
		mlx4_err(dev, "Failed to add file hca type\n");

	info->board_attr = (struct device_attribute)__ATTR(board_id, S_IRUGO,
						show_board, NULL);
	if (device_create_file(&dev->pdev->dev, &info->board_attr))
		mlx4_err(dev, "Failed to add file board id type\n");
}
static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
@ -2990,6 +3049,14 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
return err;
}
/*
 * mlx4_cleanup_hca_info() - remove the sysfs files created by
 * mlx4_init_hca_info().
 */
static void
mlx4_cleanup_hca_info(struct mlx4_hca_info *info)
{
device_remove_file(&info->dev->pdev->dev, &info->firmware_attr);
device_remove_file(&info->dev->pdev->dev, &info->board_attr);
device_remove_file(&info->dev->pdev->dev, &info->hca_attr);
}
static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
{
if (info->port < 0)
@ -3347,6 +3414,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
goto err_steer;
mlx4_init_quotas(dev);
mlx4_init_hca_info(dev);
for (port = 1; port <= dev->caps.num_ports; port++) {
err = mlx4_init_port_info(dev, port);
@ -3439,8 +3507,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
/*
 * PCI probe entry point: record the driver description on the device and
 * run the common initialization path with the per-device flags taken from
 * the PCI ID table.
 *
 * NOTE(review): both the printk_once() and device_set_desc() lines appear
 * here; this looks like a pre-/post-change pair from a diff rendering —
 * confirm which single line is intended to remain.
 */
static int __devinit mlx4_init_one(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	printk_once(KERN_INFO "%s", mlx4_version);
	device_set_desc(pdev->dev.bsddev, mlx4_version);
	return __mlx4_init_one(pdev, id->driver_data);
}
@ -3460,6 +3527,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
mlx4_stop_sense(dev);
mlx4_unregister_device(dev);
mlx4_cleanup_hca_info(&priv->hca_info);
for (p = 1; p <= dev->caps.num_ports; p++) {
mlx4_cleanup_port_info(&priv->port[p]);
mlx4_CLOSE_PORT(dev, p);

View File

@ -51,7 +51,7 @@
#define DRV_NAME "mlx4_core"
#define PFX DRV_NAME ": "
#define DRV_VERSION "2.1"
#define DRV_VERSION "2.1.6"
#define DRV_RELDATE __DATE__
#define DRV_STACK_NAME "Linux-MLNX_OFED"
@ -755,6 +755,13 @@ struct mlx4_set_port_rqp_calc_context {
__be32 mcast;
};
/*
 * Per-device sysfs state: a backpointer to the owning device plus the three
 * read-only attributes (fw_ver, hca, board_id) that mlx4_init_hca_info()
 * registers and mlx4_cleanup_hca_info() removes.
 */
struct mlx4_hca_info {
	struct mlx4_dev *dev;			/* owning device */
	struct device_attribute firmware_attr;	/* "fw_ver" file */
	struct device_attribute hca_attr;	/* "hca" file */
	struct device_attribute board_attr;	/* "board_id" file */
};
struct mlx4_port_info {
struct mlx4_dev *dev;
int port;
@ -845,6 +852,7 @@ struct mlx4_priv {
struct mlx4_uar driver_uar;
void __iomem *kar;
struct mlx4_port_info port[MLX4_MAX_PORTS + 1];
struct mlx4_hca_info hca_info;
struct mlx4_sense sense;
struct mutex port_mutex;
struct mlx4_msix_ctl msix_ctl;

View File

@ -59,8 +59,6 @@
#include "mlx4_stats.h"
#define DRV_NAME "mlx4_en"
#define DRV_VERSION "2.1"
#define DRV_RELDATE __DATE__
#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
@ -154,7 +152,7 @@ enum {
#define MLX4_EN_NUM_UP 1
#define MAX_TX_RINGS (MLX4_EN_MAX_TX_RING_P_UP * \
MLX4_EN_NUM_UP)
MLX4_EN_NUM_UP)
#define MLX4_EN_DEF_TX_RING_SIZE 1024
#define MLX4_EN_DEF_RX_RING_SIZE 1024
@ -265,9 +263,16 @@ struct mlx4_en_tx_desc {
#define MLX4_EN_USE_SRQ 0x01000000
#define MLX4_EN_TX_BUDGET 64*4 //Compensate for no NAPI in freeBSD - might need some fine tunning in the future.
#define MLX4_EN_RX_BUDGET 64
/* TX descriptor sizing limits. */
#define MLX4_EN_TX_MAX_DESC_SIZE 512 /* bytes */
#define MLX4_EN_TX_MAX_MBUF_SIZE 65536 /* bytes */
#define MLX4_EN_TX_MAX_PAYLOAD_SIZE 65536 /* bytes */
/*
 * Maximum scatter/gather fragments per TX descriptor: descriptor space
 * minus 128 bytes (presumably reserved for the control/inline segments —
 * confirm against the WQE layout), divided by one data-segment size.
 */
#define MLX4_EN_TX_MAX_MBUF_FRAGS \
    ((MLX4_EN_TX_MAX_DESC_SIZE - 128) / DS_SIZE_ALIGNMENT) /* units */
/* Maximum basic blocks (WQEBBs) one TX WQE may occupy. */
#define MLX4_EN_TX_WQE_MAX_WQEBBS \
    (MLX4_EN_TX_MAX_DESC_SIZE / TXBB_SIZE) /* units */
#define MLX4_EN_CX3_LOW_ID 0x1000
#define MLX4_EN_CX3_HIGH_ID 0x1005
@ -282,7 +287,7 @@ struct mlx4_en_tx_ring {
u32 cons;
u32 buf_size;
u32 doorbell_qpn;
void *buf;
u8 *buf;
u16 poll_cnt;
int blocked;
struct mlx4_en_tx_info *tx_info;
@ -300,6 +305,7 @@ struct mlx4_en_tx_ring {
unsigned long packets;
unsigned long tx_csum;
unsigned long queue_stopped;
unsigned long oversized_packets;
unsigned long wake_queue;
struct mlx4_bf bf;
bool bf_enabled;
@ -339,7 +345,7 @@ struct mlx4_en_rx_ring {
u32 rx_buf_size;
u32 rx_mb_size;
int qpn;
void *buf;
u8 *buf;
void *rx_info;
unsigned long errors;
unsigned long bytes;
@ -400,6 +406,7 @@ struct mlx4_en_cq {
#define MLX4_EN_OPCODE_ERROR 0x1e
u32 tot_rx;
u32 tot_tx;
u32 curr_poll_rx_cpu_id;
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int state;
@ -641,7 +648,6 @@ struct mlx4_en_priv {
unsigned long last_ifq_jiffies;
u64 if_counters_rx_errors;
u64 if_counters_rx_no_buffer;
};
enum mlx4_en_wol {

View File

@ -124,6 +124,7 @@ struct mlx4_en_port_stats {
unsigned long queue_stopped;
unsigned long wake_queue;
unsigned long tx_timeout;
unsigned long oversized_packets;
unsigned long rx_alloc_failed;
unsigned long rx_chksum_good;
unsigned long rx_chksum_none;

View File

@ -1145,12 +1145,17 @@ int mlx4_get_module_info(struct mlx4_dev *dev, u8 port, u16 offset,
size = MODULE_INFO_MAX_READ;
inbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(inbox))
if (IS_ERR(inbox)) {
mlx4_err(dev,
"mlx4_alloc_cmd_mailbox returned with error(%lx)", PTR_ERR(inbox));
return PTR_ERR(inbox);
}
outbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(outbox)) {
mlx4_free_cmd_mailbox(dev, inbox);
mlx4_err(dev,
"mlx4_alloc_cmd_mailbox returned with error(%lx)", PTR_ERR(outbox));
return PTR_ERR(outbox);
}

View File

@ -139,7 +139,7 @@ enum {
};
static inline void mlx4_cq_arm(struct mlx4_cq *cq, u32 cmd,
void __iomem *uar_page,
u8 __iomem *uar_page,
spinlock_t *doorbell_lock)
{
__be32 doorbell[2];

View File

@ -413,6 +413,13 @@ enum {
#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK)
/*
 * Cable/module identifier byte (byte 0 of the module EEPROM), per the
 * SFF-8024 identifier table; used when reading/dumping SFP/QSFP EEPROMs.
 */
enum mlx4_module_id {
	MLX4_MODULE_ID_SFP = 0x3,	/* SFP/SFP+/SFP28 */
	MLX4_MODULE_ID_QSFP = 0xC,	/* QSFP */
	MLX4_MODULE_ID_QSFP_PLUS = 0xD,	/* QSFP+ */
	MLX4_MODULE_ID_QSFP28 = 0x11,	/* QSFP28 */
};
static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
{
return (major << 32) | (minor << 16) | subminor;

View File

@ -39,6 +39,12 @@
#define MLX4_INVALID_LKEY 0x100
#define DS_SIZE_ALIGNMENT 16
#define SET_BYTE_COUNT(byte_count) cpu_to_be32(byte_count)
#define SET_LSO_MSS(mss_hdr_size) cpu_to_be32(mss_hdr_size)
#define DS_BYTE_COUNT_MASK cpu_to_be32(0x7fffffff)
enum ib_m_qp_attr_mask {
IB_M_EXT_CLASS_1 = 1 << 28,
IB_M_EXT_CLASS_2 = 1 << 29,
@ -266,7 +272,9 @@ enum { /* param3 */
#define MLX4_FW_VER_WQE_CTRL_NEC mlx4_fw_ver(2, 2, 232)
enum {
MLX4_WQE_CTRL_OWN = 1 << 31,
MLX4_WQE_CTRL_NEC = 1 << 29,
MLX4_WQE_CTRL_RR = 1 << 6,
MLX4_WQE_CTRL_FENCE = 1 << 6,
MLX4_WQE_CTRL_CQ_UPDATE = 3 << 2,
MLX4_WQE_CTRL_SOLICITED = 1 << 1,