Hardware driver update from Mellanox Technologies, including:

- improved performance
- better stability
- new features
- bugfixes

Supported HCAs:
- ConnectX-2
- ConnectX-3
- ConnectX-3 Pro

Sponsored by:	Mellanox Technologies
MFC after:	1 week
hselasky 2014-09-23 12:37:01 +00:00
parent 6ca5e81a16
commit 512a43f91c
55 changed files with 9219 additions and 3651 deletions

View File

@ -35,8 +35,6 @@
#endif /* HAVE_CONFIG_H */
#include <stdio.h>
#include <endian.h>
#include <byteswap.h>
#include <infiniband/verbs.h>

View File

@ -36,9 +36,6 @@
#include <stdio.h>
#include <endian.h>
#include <byteswap.h>
#include <infiniband/verbs.h>
#include <infiniband/arch.h>

View File

@ -41,8 +41,6 @@
#include <string.h>
#include <getopt.h>
#include <netinet/in.h>
#include <endian.h>
#include <byteswap.h>
#include <infiniband/verbs.h>
#include <infiniband/driver.h>

View File

@ -36,7 +36,7 @@
#include <infiniband/kern-abi.h>
#define MLX4_UVERBS_MIN_ABI_VERSION 2
#define MLX4_UVERBS_MAX_ABI_VERSION 3
#define MLX4_UVERBS_MAX_ABI_VERSION 4
struct mlx4_alloc_ucontext_resp {
struct ibv_get_context_resp ibv_resp;

View File

@ -3781,7 +3781,7 @@ ofed/drivers/net/mlx4/sys_tune.c optional mlx4ib | mlxen \
ofed/drivers/net/mlx4/en_cq.c optional mlxen \
no-depend obj-prefix "mlx4_" \
compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/net/mlx4/"
ofed/drivers/net/mlx4/en_frag.c optional mlxen \
ofed/drivers/net/mlx4/utils.c optional mlxen \
no-depend obj-prefix "mlx4_" \
compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/net/mlx4/"
ofed/drivers/net/mlx4/en_main.c optional mlxen \

View File

@ -2,7 +2,7 @@
.PATH: ${.CURDIR}/../../ofed/drivers/net/mlx4
.PATH: ${.CURDIR}/../../ofed/include/linux
KMOD = mlx4
SRCS = device_if.h bus_if.h pci_if.h vnode_if.h
SRCS = device_if.h bus_if.h pci_if.h vnode_if.h opt_inet.h opt_inet6.h
SRCS+= alloc.c catas.c cmd.c cq.c eq.c fw.c icm.c intf.c main.c mcg.c mr.c linux_compat.c linux_radix.c linux_idr.c
SRCS+= pd.c port.c profile.c qp.c reset.c sense.c srq.c resource_tracker.c sys_tune.c

View File

@ -3,8 +3,8 @@
KMOD = mlxen
SRCS = device_if.h bus_if.h pci_if.h vnode_if.h
SRCS += en_cq.c en_frag.c en_main.c en_netdev.c en_port.c en_resources.c
SRCS += en_rx.c en_tx.c
SRCS += en_cq.c en_main.c en_netdev.c en_port.c en_resources.c
SRCS += en_rx.c en_tx.c utils.c
SRCS += opt_inet.h opt_inet6.h
CFLAGS+= -I${.CURDIR}/../../ofed/drivers/net/mlx4
CFLAGS+= -I${.CURDIR}/../../ofed/include/

View File

@ -1081,7 +1081,7 @@ static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num)
if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
MLX4_EQ_PORT_INFO_LID_CHANGE_MASK);
MLX4_EQ_PORT_INFO_LID_CHANGE_MASK, 0, 0);
}
static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
@ -1093,7 +1093,7 @@ static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
if (!dev->sriov.is_going_down) {
mlx4_ib_mcg_port_cleanup(&dev->sriov.demux[port_num - 1], 0);
mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK);
MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK, 0, 0);
}
}
mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_CLIENT_REREGISTER);
@ -1191,7 +1191,7 @@ void handle_port_mgmt_change_event(struct work_struct *work)
/*if master, notify all slaves*/
if (mlx4_is_master(dev->dev))
mlx4_gen_slaves_port_mgt_ev(dev->dev, port,
MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK);
MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK, 0, 0);
}
if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK)

View File

@ -1005,7 +1005,7 @@ static int flow_spec_to_net_rule(struct ib_device *dev, struct ib_flow_spec *flo
case IB_FLOW_IB_UC:
spec_l2->id = MLX4_NET_TRANS_RULE_ID_IB;
if(flow_spec->l2_id.ib_uc.qpn) {
spec_l2->ib.r_u_qpn = cpu_to_be32(flow_spec->l2_id.ib_uc.qpn);
spec_l2->ib.l3_qpn = cpu_to_be32(flow_spec->l2_id.ib_uc.qpn);
spec_l2->ib.qpn_msk = cpu_to_be32(0xffffff);
}
break;
@ -2013,7 +2013,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
for (i = 0; i < ibdev->num_ports; ++i) {
if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
IB_LINK_LAYER_ETHERNET) {
err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
err = mlx4_counter_alloc(ibdev->dev, i + 1, &ibdev->counters[i]);
if (err)
ibdev->counters[i] = -1;
} else
@ -2112,7 +2112,7 @@ err_steer_qp_release:
err_counter:
for (; i; --i)
if (ibdev->counters[i - 1] != -1)
mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
mlx4_counter_free(ibdev->dev, i, ibdev->counters[i - 1]);
err_map:
iounmap(ibdev->priv_uar.map);
@ -2200,7 +2200,7 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
iounmap(ibdev->priv_uar.map);
for (p = 0; p < ibdev->num_ports; ++p)
if (ibdev->counters[p] != -1)
mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
mlx4_counter_free(ibdev->dev, p + 1, ibdev->counters[p]);
mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
mlx4_CLOSE_PORT(dev, p);

View File

@ -2679,10 +2679,10 @@ static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq
static __be32 convert_access(int acc)
{
return (acc & IB_ACCESS_REMOTE_ATOMIC ? cpu_to_be32(MLX4_WQE_FMR_PERM_ATOMIC) : 0) |
(acc & IB_ACCESS_REMOTE_WRITE ? cpu_to_be32(MLX4_WQE_FMR_PERM_REMOTE_WRITE) : 0) |
(acc & IB_ACCESS_REMOTE_READ ? cpu_to_be32(MLX4_WQE_FMR_PERM_REMOTE_READ) : 0) |
(acc & IB_ACCESS_LOCAL_WRITE ? cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE) : 0) |
return (acc & IB_ACCESS_REMOTE_ATOMIC ? cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC) : 0) |
(acc & IB_ACCESS_REMOTE_WRITE ? cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE) : 0) |
(acc & IB_ACCESS_REMOTE_READ ? cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ) : 0) |
(acc & IB_ACCESS_LOCAL_WRITE ? cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE) : 0) |
cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ);
}
@ -2709,10 +2709,12 @@ static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg, struct ib_send_wr *wr)
static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey)
{
iseg->flags = 0;
iseg->mem_key = cpu_to_be32(rkey);
iseg->guest_id = 0;
iseg->pa = 0;
iseg->mem_key = cpu_to_be32(rkey);
iseg->reserved1 = 0;
iseg->reserved2 = 0;
iseg->reserved3[0] = 0;
iseg->reserved3[1] = 0;
}
static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,

View File

@ -240,7 +240,7 @@ ipoib_ib_handle_rx_wc(struct ipoib_dev_priv *priv, struct ib_wc *wc)
*/
if (unlikely(!ipoib_alloc_rx_mb(priv, wr_id))) {
memcpy(&priv->rx_ring[wr_id], &saverx, sizeof(saverx));
dev->if_iqdrops++;
if_inc_counter(dev, IFCOUNTER_IQDROPS, 1);
goto repost;
}

View File

@ -745,7 +745,7 @@ ipoib_vlan_start(struct ifnet *dev)
if (mb == NULL)
break;
m_freem(mb);
dev->if_oerrors++;
if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
}
}
@ -1452,7 +1452,7 @@ ipoib_input(struct ifnet *ifp, struct mbuf *m)
m->m_flags |= M_BCAST;
else
m->m_flags |= M_MCAST;
ifp->if_imcasts++;
if_inc_counter(ifp, IFCOUNTER_IMCASTS, 1);
}
ipoib_demux(ifp, m, ntohs(eh->proto));

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -34,7 +34,7 @@
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
@ -70,9 +70,9 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
return obj;
}
void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr)
{
mlx4_bitmap_free_range(bitmap, obj, 1);
mlx4_bitmap_free_range(bitmap, obj, 1, use_rr);
}
static unsigned long find_aligned_range(unsigned long *bitmap,
@ -148,11 +148,17 @@ u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap)
return bitmap->avail;
}
void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt,
int use_rr)
{
obj &= bitmap->max + bitmap->reserved_top - 1;
spin_lock(&bitmap->lock);
if (!use_rr) {
bitmap->last = min(bitmap->last, obj);
bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
& bitmap->mask;
}
bitmap_clear(bitmap->table, obj, cnt);
bitmap->avail += cnt;
spin_unlock(&bitmap->lock);

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -34,10 +34,11 @@
#include <linux/workqueue.h>
#include <linux/module.h>
#include <asm/byteorder.h>
#include "mlx4.h"
#define MLX4_CATAS_POLL_INTERVAL (5 * HZ)
#define MLX4_CATAS_POLL_INTERVAL (5 * HZ)
static DEFINE_SPINLOCK(catas_lock);
@ -156,11 +157,13 @@ void mlx4_stop_catas_poll(struct mlx4_dev *dev)
del_timer_sync(&priv->catas_err.timer);
if (priv->catas_err.map)
if (priv->catas_err.map) {
iounmap(priv->catas_err.map);
priv->catas_err.map = NULL;
}
spin_lock_irq(&catas_lock);
list_del(&priv->catas_err.list);
list_del_init(&priv->catas_err.list);
spin_unlock_irq(&catas_lock);
}

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@ -34,14 +34,17 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/device.h>
#include <linux/semaphore.h>
#include <rdma/ib_smi.h>
#include <asm/io.h>
#include <linux/ktime.h>
#include "mlx4.h"
#include "fw.h"
@ -110,6 +113,14 @@ enum {
GO_BIT_TIMEOUT_MSECS = 10000
};
enum mlx4_vlan_transition {
MLX4_VLAN_TRANSITION_VST_VST = 0,
MLX4_VLAN_TRANSITION_VST_VGT = 1,
MLX4_VLAN_TRANSITION_VGT_VST = 2,
MLX4_VLAN_TRANSITION_VGT_VGT = 3,
};
struct mlx4_cmd_context {
struct completion done;
int result;
@ -152,6 +163,131 @@ static int mlx4_status_to_errno(u8 status)
return trans_table[status];
}
static const char *cmd_to_str(u16 cmd)
{
switch (cmd) {
case MLX4_CMD_SYS_EN: return "SYS_EN";
case MLX4_CMD_SYS_DIS: return "SYS_DIS";
case MLX4_CMD_MAP_FA: return "MAP_FA";
case MLX4_CMD_UNMAP_FA: return "UNMAP_FA";
case MLX4_CMD_RUN_FW: return "RUN_FW";
case MLX4_CMD_MOD_STAT_CFG: return "MOD_STAT_CFG";
case MLX4_CMD_QUERY_DEV_CAP: return "QUERY_DEV_CAP";
case MLX4_CMD_QUERY_FW: return "QUERY_FW";
case MLX4_CMD_ENABLE_LAM: return "ENABLE_LAM";
case MLX4_CMD_DISABLE_LAM: return "DISABLE_LAM";
case MLX4_CMD_QUERY_DDR: return "QUERY_DDR";
case MLX4_CMD_QUERY_ADAPTER: return "QUERY_ADAPTER";
case MLX4_CMD_INIT_HCA: return "INIT_HCA";
case MLX4_CMD_CLOSE_HCA: return "CLOSE_HCA";
case MLX4_CMD_INIT_PORT: return "INIT_PORT";
case MLX4_CMD_CLOSE_PORT: return "CLOSE_PORT";
case MLX4_CMD_QUERY_HCA: return "QUERY_HCA";
case MLX4_CMD_QUERY_PORT: return "QUERY_PORT";
case MLX4_CMD_SENSE_PORT: return "SENSE_PORT";
case MLX4_CMD_HW_HEALTH_CHECK: return "HW_HEALTH_CHECK";
case MLX4_CMD_SET_PORT: return "SET_PORT";
case MLX4_CMD_SET_NODE: return "SET_NODE";
case MLX4_CMD_QUERY_FUNC: return "QUERY_FUNC";
case MLX4_CMD_MAP_ICM: return "MAP_ICM";
case MLX4_CMD_UNMAP_ICM: return "UNMAP_ICM";
case MLX4_CMD_MAP_ICM_AUX: return "MAP_ICM_AUX";
case MLX4_CMD_UNMAP_ICM_AUX: return "UNMAP_ICM_AUX";
case MLX4_CMD_SET_ICM_SIZE: return "SET_ICM_SIZE";
/*master notify fw on finish for slave's flr*/
case MLX4_CMD_INFORM_FLR_DONE: return "INFORM_FLR_DONE";
case MLX4_CMD_GET_OP_REQ: return "GET_OP_REQ";
/* TPT commands */
case MLX4_CMD_SW2HW_MPT: return "SW2HW_MPT";
case MLX4_CMD_QUERY_MPT: return "QUERY_MPT";
case MLX4_CMD_HW2SW_MPT: return "HW2SW_MPT";
case MLX4_CMD_READ_MTT: return "READ_MTT";
case MLX4_CMD_WRITE_MTT: return "WRITE_MTT";
case MLX4_CMD_SYNC_TPT: return "SYNC_TPT";
/* EQ commands */
case MLX4_CMD_MAP_EQ: return "MAP_EQ";
case MLX4_CMD_SW2HW_EQ: return "SW2HW_EQ";
case MLX4_CMD_HW2SW_EQ: return "HW2SW_EQ";
case MLX4_CMD_QUERY_EQ: return "QUERY_EQ";
/* CQ commands */
case MLX4_CMD_SW2HW_CQ: return "SW2HW_CQ";
case MLX4_CMD_HW2SW_CQ: return "HW2SW_CQ";
case MLX4_CMD_QUERY_CQ: return "QUERY_CQ";
case MLX4_CMD_MODIFY_CQ: return "MODIFY_CQ";
/* SRQ commands */
case MLX4_CMD_SW2HW_SRQ: return "SW2HW_SRQ";
case MLX4_CMD_HW2SW_SRQ: return "HW2SW_SRQ";
case MLX4_CMD_QUERY_SRQ: return "QUERY_SRQ";
case MLX4_CMD_ARM_SRQ: return "ARM_SRQ";
/* QP/EE commands */
case MLX4_CMD_RST2INIT_QP: return "RST2INIT_QP";
case MLX4_CMD_INIT2RTR_QP: return "INIT2RTR_QP";
case MLX4_CMD_RTR2RTS_QP: return "RTR2RTS_QP";
case MLX4_CMD_RTS2RTS_QP: return "RTS2RTS_QP";
case MLX4_CMD_SQERR2RTS_QP: return "SQERR2RTS_QP";
case MLX4_CMD_2ERR_QP: return "2ERR_QP";
case MLX4_CMD_RTS2SQD_QP: return "RTS2SQD_QP";
case MLX4_CMD_SQD2SQD_QP: return "SQD2SQD_QP";
case MLX4_CMD_SQD2RTS_QP: return "SQD2RTS_QP";
case MLX4_CMD_2RST_QP: return "2RST_QP";
case MLX4_CMD_QUERY_QP: return "QUERY_QP";
case MLX4_CMD_INIT2INIT_QP: return "INIT2INIT_QP";
case MLX4_CMD_SUSPEND_QP: return "SUSPEND_QP";
case MLX4_CMD_UNSUSPEND_QP: return "UNSUSPEND_QP";
/* special QP and management commands */
case MLX4_CMD_CONF_SPECIAL_QP: return "CONF_SPECIAL_QP";
case MLX4_CMD_MAD_IFC: return "MAD_IFC";
/* multicast commands */
case MLX4_CMD_READ_MCG: return "READ_MCG";
case MLX4_CMD_WRITE_MCG: return "WRITE_MCG";
case MLX4_CMD_MGID_HASH: return "MGID_HASH";
/* miscellaneous commands */
case MLX4_CMD_DIAG_RPRT: return "DIAG_RPRT";
case MLX4_CMD_NOP: return "NOP";
case MLX4_CMD_ACCESS_MEM: return "ACCESS_MEM";
case MLX4_CMD_SET_VEP: return "SET_VEP";
/* Ethernet specific commands */
case MLX4_CMD_SET_VLAN_FLTR: return "SET_VLAN_FLTR";
case MLX4_CMD_SET_MCAST_FLTR: return "SET_MCAST_FLTR";
case MLX4_CMD_DUMP_ETH_STATS: return "DUMP_ETH_STATS";
/* Communication channel commands */
case MLX4_CMD_ARM_COMM_CHANNEL: return "ARM_COMM_CHANNEL";
case MLX4_CMD_GEN_EQE: return "GEN_EQE";
/* virtual commands */
case MLX4_CMD_ALLOC_RES: return "ALLOC_RES";
case MLX4_CMD_FREE_RES: return "FREE_RES";
case MLX4_CMD_MCAST_ATTACH: return "MCAST_ATTACH";
case MLX4_CMD_UCAST_ATTACH: return "UCAST_ATTACH";
case MLX4_CMD_PROMISC: return "PROMISC";
case MLX4_CMD_QUERY_FUNC_CAP: return "QUERY_FUNC_CAP";
case MLX4_CMD_QP_ATTACH: return "QP_ATTACH";
/* debug commands */
case MLX4_CMD_QUERY_DEBUG_MSG: return "QUERY_DEBUG_MSG";
case MLX4_CMD_SET_DEBUG_MSG: return "SET_DEBUG_MSG";
/* statistics commands */
case MLX4_CMD_QUERY_IF_STAT: return "QUERY_IF_STAT";
case MLX4_CMD_SET_IF_STAT: return "SET_IF_STAT";
/* register/delete flow steering network rules */
case MLX4_QP_FLOW_STEERING_ATTACH: return "QP_FLOW_STEERING_ATTACH";
case MLX4_QP_FLOW_STEERING_DETACH: return "QP_FLOW_STEERING_DETACH";
case MLX4_FLOW_STEERING_IB_UC_QP_RANGE: return "FLOW_STEERING_IB_UC_QP_RANGE";
default: return "OTHER";
}
}
static u8 mlx4_errno_to_status(int errno)
{
switch (errno) {
@ -244,6 +380,17 @@ static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
down(&cmd->event_sem);
end = msecs_to_jiffies(timeout) + jiffies;
while (comm_pending(dev) && time_before(jiffies, end))
cond_resched();
if (comm_pending(dev)) {
mlx4_warn(dev, "mlx4_comm_cmd_wait: Comm channel "
"is not idle. My toggle is %d (op: 0x%x)\n",
mlx4_priv(dev)->cmd.comm_toggle, op);
up(&cmd->event_sem);
return -EAGAIN;
}
spin_lock(&cmd->context_lock);
BUG_ON(cmd->free_head < 0);
context = &cmd->context[cmd->free_head];
@ -255,12 +402,8 @@ static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
mlx4_comm_cmd_post(dev, op, param);
if (!wait_for_completion_timeout(&context->done,
msecs_to_jiffies(timeout))) {
mlx4_warn(dev, "communication channel command 0x%x timed out\n", op);
err = -EBUSY;
goto out;
}
/* In slave, wait unconditionally for completion */
wait_for_completion(&context->done);
err = context->result;
if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
@ -309,14 +452,29 @@ static int cmd_pending(struct mlx4_dev *dev)
!!(status & swab32(1 << HCR_T_BIT)));
}
static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
u32 in_modifier, u8 op_modifier, u16 op, u16 token,
int event)
static int get_status(struct mlx4_dev *dev, u32 *status, int *go_bit,
int *t_bit)
{
if (pci_channel_offline(dev->pdev))
return -EIO;
*status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
*t_bit = !!(*status & swab32(1 << HCR_T_BIT));
*go_bit = !!(*status & swab32(1 << HCR_GO_BIT));
return 0;
}
static int mlx4_cmd_post(struct mlx4_dev *dev, struct timespec *ts1,
u64 in_param, u64 out_param, u32 in_modifier,
u8 op_modifier, u16 op, u16 token, int event)
{
struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
u32 __iomem *hcr = cmd->hcr;
int ret = -EAGAIN;
unsigned long end;
int err, go_bit = 0, t_bit = 0;
u32 status = 0;
mutex_lock(&cmd->hcr_mutex);
@ -363,6 +521,9 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
__raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
__raw_writel((__force u32) cpu_to_be32(token << 16), hcr + 5);
if (ts1)
ktime_get_ts(ts1);
/* __raw_writel may not order writes. */
wmb();
@ -383,6 +544,15 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
ret = 0;
out:
if (ret) {
err = get_status(dev, &status, &go_bit, &t_bit);
mlx4_warn(dev, "Could not post command %s (0x%x): ret=%d, "
"in_param=0x%llx, in_mod=0x%x, op_mod=0x%x, "
"get_status err=%d, status_reg=0x%x, go_bit=%d, "
"t_bit=%d, toggle=0x%x\n", cmd_to_str(op), op, ret,
(unsigned long long) in_param, in_modifier, op_modifier, err, status,
go_bit, t_bit, cmd->toggle);
}
mutex_unlock(&cmd->hcr_mutex);
return ret;
}
@ -439,7 +609,7 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
ret = mlx4_status_to_errno(vhcr->status);
} else
mlx4_err(dev, "failed execution of VHCR_POST command"
"opcode 0x%x\n", op);
"opcode %s (0x%x)\n", cmd_to_str(op), op);
}
mutex_unlock(&priv->cmd.slave_cmd_mutex);
@ -467,7 +637,7 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
goto out;
}
err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
err = mlx4_cmd_post(dev, NULL, in_param, out_param ? *out_param : 0,
in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
if (err)
goto out;
@ -487,7 +657,8 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
}
if (cmd_pending(dev)) {
mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n", op);
mlx4_warn(dev, "command %s (0x%x) timed out (go bit not cleared)\n",
cmd_to_str(op), op);
err = -ETIMEDOUT;
goto out;
}
@ -502,8 +673,8 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
__raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
err = mlx4_status_to_errno(stat);
if (err)
mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
op, stat);
mlx4_err(dev, "command %s (0x%x) failed: fw status = 0x%x\n",
cmd_to_str(op), op, stat);
out:
up(&priv->cmd.poll_sem);
@ -527,19 +698,6 @@ void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
complete(&context->done);
}
static int get_status(struct mlx4_dev *dev, u32 *status, int *go_bit,
int *t_bit)
{
if (pci_channel_offline(dev->pdev))
return -EIO;
*status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
*t_bit = !!(*status & swab32(1 << HCR_T_BIT));
*go_bit = !!(*status & swab32(1 << HCR_GO_BIT));
return 0;
}
static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
int out_is_imm, u32 in_modifier, u8 op_modifier,
u16 op, unsigned long timeout)
@ -549,6 +707,12 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
int err = 0;
int go_bit = 0, t_bit = 0, stat_err;
u32 status = 0;
struct timespec ts1, ts2;
ktime_t t1, t2, delta;
s64 ds;
if (out_is_imm && !out_param)
return -EINVAL;
down(&cmd->event_sem);
@ -561,29 +725,38 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
init_completion(&context->done);
err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
err = mlx4_cmd_post(dev, &ts1, in_param, out_param ? *out_param : 0,
in_modifier, op_modifier, op, context->token, 1);
if (err) {
mlx4_warn(dev, "command 0x%x could not be posted (%d)\n",
op, err);
if (err)
goto out;
}
if (!wait_for_completion_timeout(&context->done,
msecs_to_jiffies(timeout))) {
stat_err = get_status(dev, &status, &go_bit, &t_bit);
mlx4_warn(dev, "command 0x%x timed out: "
"get_status err=%d, status=0x%x, go_bit=%d, "
"t_bit=%d, toggle=0x%x\n", op, stat_err, status,
go_bit, t_bit, mlx4_priv(dev)->cmd.toggle);
mlx4_warn(dev, "command %s (0x%x) timed out: in_param=0x%llx, "
"in_mod=0x%x, op_mod=0x%x, get_status err=%d, "
"status_reg=0x%x, go_bit=%d, t_bit=%d, toggle=0x%x\n"
, cmd_to_str(op), op, (unsigned long long) in_param, in_modifier,
op_modifier, stat_err, status, go_bit, t_bit,
mlx4_priv(dev)->cmd.toggle);
err = -EBUSY;
goto out;
}
if (mlx4_debug_level & MLX4_DEBUG_MASK_CMD_TIME) {
ktime_get_ts(&ts2);
t1 = timespec_to_ktime(ts1);
t2 = timespec_to_ktime(ts2);
delta = ktime_sub(t2, t1);
ds = ktime_to_ns(delta);
pr_info("mlx4: fw exec time for %s is %lld nsec\n", cmd_to_str(op), (long long) ds);
}
err = context->result;
if (err) {
mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
op, context->fw_status);
mlx4_err(dev, "command %s (0x%x) failed: in_param=0x%llx, "
"in_mod=0x%x, op_mod=0x%x, fw status = 0x%x\n",
cmd_to_str(op), op, (unsigned long long) in_param, in_modifier,
op_modifier, context->fw_status);
goto out;
}
@ -640,7 +813,7 @@ static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
(slave & ~0x7f) | (size & 0xff)) {
mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx "
"master_addr:0x%llx slave_id:%d size:%d\n",
(long long)slave_addr, (long long)master_addr, slave, size);
(unsigned long long) slave_addr, (unsigned long long) master_addr, slave, size);
return -EINVAL;
}
@ -813,6 +986,24 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
}
static int MLX4_CMD_DIAG_RPRT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
return -EPERM;
}
static int MLX4_CMD_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
return -EPERM;
}
int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@ -949,6 +1140,16 @@ static struct mlx4_cmd_info cmd_info[] = {
.verify = NULL,
.wrapper = NULL
},
{
.opcode = MLX4_CMD_DIAG_RPRT,
.has_inbox = false,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.skip_err_print = true,
.verify = NULL,
.wrapper = MLX4_CMD_DIAG_RPRT_wrapper
},
{
.opcode = MLX4_CMD_NOP,
.has_inbox = false,
@ -1246,6 +1447,16 @@ static struct mlx4_cmd_info cmd_info[] = {
.verify = NULL,
.wrapper = mlx4_GEN_QP_wrapper
},
{
.opcode = MLX4_CMD_UPDATE_QP,
.has_inbox = false,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.skip_err_print = true,
.verify = NULL,
.wrapper = MLX4_CMD_UPDATE_QP_wrapper
},
{
.opcode = MLX4_CMD_CONF_SPECIAL_QP,
.has_inbox = false,
@ -1348,6 +1559,17 @@ static struct mlx4_cmd_info cmd_info[] = {
.verify = NULL,
.wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
},
/* wol commands */
{
.opcode = MLX4_CMD_MOD_STAT_CFG,
.has_inbox = false,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.skip_err_print = true,
.verify = NULL,
.wrapper = mlx4_MOD_STAT_CFG_wrapper
},
};
static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
@ -1401,8 +1623,8 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
}
}
if (!cmd) {
mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
vhcr->op, slave);
mlx4_err(dev, "unparavirt command: %s (0x%x) accepted from slave:%d\n",
cmd_to_str(vhcr->op), vhcr->op, slave);
vhcr_cmd->status = CMD_STAT_BAD_PARAM;
goto out_status;
}
@ -1420,8 +1642,8 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
if (mlx4_ACCESS_MEM(dev, inbox->dma, slave,
vhcr->in_param,
MLX4_MAILBOX_SIZE, 1)) {
mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
__func__, cmd->opcode);
mlx4_err(dev, "%s: Failed reading inbox for cmd %s (0x%x)\n",
__func__, cmd_to_str(cmd->opcode), cmd->opcode);
vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
goto out_status;
}
@ -1429,9 +1651,9 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
/* Apply permission and bound checks if applicable */
if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
mlx4_warn(dev, "Command:0x%x from slave: %d failed protection "
"checks for resource_id:%d\n", vhcr->op, slave,
vhcr->in_modifier);
mlx4_warn(dev, "Command %s (0x%x) from slave: %d failed protection "
"checks for resource_id: %d\n", cmd_to_str(vhcr->op),
vhcr->op, slave, vhcr->in_modifier);
vhcr_cmd->status = CMD_STAT_BAD_OP;
goto out_status;
}
@ -1470,9 +1692,13 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
}
if (err) {
mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with"
" error:%d, status %d\n",
vhcr->op, slave, vhcr->errno, err);
if (!cmd->skip_err_print)
mlx4_warn(dev, "vhcr command %s (0x%x) slave:%d "
"in_param 0x%llx in_mod=0x%x, op_mod=0x%x "
"failed with error:%d, status %d\n",
cmd_to_str(vhcr->op), vhcr->op, slave,
(unsigned long long) vhcr->in_param, vhcr->in_modifier,
vhcr->op_modifier, vhcr->errno, err);
vhcr_cmd->status = mlx4_errno_to_status(err);
goto out_status;
}
@ -1487,7 +1713,7 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
/* If we failed to write back the outbox after the
*command was successfully executed, we must fail this
* slave, as it is now in undefined state */
mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
mlx4_err(dev, "%s: Failed writing outbox\n", __func__);
goto out;
}
}
@ -1516,6 +1742,75 @@ out:
return ret;
}
static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
int slave, int port)
{
struct mlx4_vport_oper_state *vp_oper;
struct mlx4_vport_state *vp_admin;
struct mlx4_vf_immed_vlan_work *work;
int err;
int admin_vlan_ix = NO_INDX;
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
if (vp_oper->state.default_vlan == vp_admin->default_vlan &&
vp_oper->state.default_qos == vp_admin->default_qos)
return 0;
work = kzalloc(sizeof(*work), GFP_KERNEL);
if (!work)
return -ENOMEM;
if (vp_oper->state.default_vlan != vp_admin->default_vlan) {
if (MLX4_VGT != vp_admin->default_vlan) {
err = __mlx4_register_vlan(&priv->dev, port,
vp_admin->default_vlan,
&admin_vlan_ix);
if (err) {
mlx4_warn((&priv->dev),
"No vlan resources slave %d, port %d\n",
slave, port);
return err;
}
} else {
admin_vlan_ix = NO_INDX;
}
work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
mlx4_dbg((&(priv->dev)),
"alloc vlan %d idx %d slave %d port %d\n",
(int)(vp_admin->default_vlan),
admin_vlan_ix, slave, port);
}
/* save original vlan ix and vlan id */
work->orig_vlan_id = vp_oper->state.default_vlan;
work->orig_vlan_ix = vp_oper->vlan_idx;
/* handle new qos */
if (vp_oper->state.default_qos != vp_admin->default_qos)
work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS;
if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN)
vp_oper->vlan_idx = admin_vlan_ix;
vp_oper->state.default_vlan = vp_admin->default_vlan;
vp_oper->state.default_qos = vp_admin->default_qos;
/* iterate over QPs owned by this slave, using UPDATE_QP */
work->port = port;
work->slave = slave;
work->qos = vp_oper->state.default_qos;
work->vlan_id = vp_oper->state.default_vlan;
work->vlan_ix = vp_oper->vlan_idx;
work->priv = priv;
INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler);
queue_work(priv->mfunc.master.comm_wq, &work->work);
return 0;
}
static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
{
int port, err;
@ -1527,7 +1822,7 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
vp_oper->state = *vp_admin;
if (MLX4_VGT != vp_admin->default_vlan) {
err = mlx4_register_vlan(&priv->dev, port,
err = __mlx4_register_vlan(&priv->dev, port,
vp_admin->default_vlan, &(vp_oper->vlan_idx));
if (err) {
vp_oper->vlan_idx = NO_INDX;
@ -1548,12 +1843,12 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
err = vp_oper->mac_idx;
vp_oper->mac_idx = NO_INDX;
mlx4_warn((&priv->dev),
"No mac resorces slave %d, port %d\n",
"No mac resources slave %d, port %d\n",
slave, port);
return err;
}
mlx4_dbg((&(priv->dev)), "alloc mac %llx idx %d slave %d port %d\n",
(long long)vp_oper->state.mac, vp_oper->mac_idx, slave, port);
(unsigned long long) vp_oper->state.mac, vp_oper->mac_idx, slave, port);
}
}
return 0;
@ -1599,6 +1894,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
if (cmd == MLX4_COMM_CMD_RESET) {
mlx4_warn(dev, "Received reset from slave:%d\n", slave);
slave_state[slave].active = false;
slave_state[slave].old_vlan_api = false;
mlx4_master_deactivate_admin_state(priv, slave);
for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
slave_state[slave].event_eq[i].eqn = -1;
@ -1619,7 +1915,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
/*command from slave in the middle of FLR*/
if (cmd != MLX4_COMM_CMD_RESET &&
MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) "
mlx4_warn(dev, "slave:%d is Trying to run cmd (0x%x) "
"in the middle of FLR\n", slave, cmd);
return;
}
@ -1630,7 +1926,6 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
goto reset_slave;
slave_state[slave].vhcr_dma = ((u64) param) << 48;
priv->mfunc.master.slave_state[slave].cookie = 0;
mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]);
break;
case MLX4_COMM_CMD_VHCR1:
if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
@ -1658,7 +1953,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
mutex_lock(&priv->cmd.slave_cmd_mutex);
if (mlx4_master_process_vhcr(dev, slave, NULL)) {
mlx4_err(dev, "Failed processing vhcr for slave:%d,"
mlx4_err(dev, "Failed processing vhcr for slave: %d,"
" resetting slave.\n", slave);
mutex_unlock(&priv->cmd.slave_cmd_mutex);
goto reset_slave;
@ -1666,7 +1961,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
mutex_unlock(&priv->cmd.slave_cmd_mutex);
break;
default:
mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
mlx4_warn(dev, "Bad comm cmd: %d from slave: %d\n", cmd, slave);
goto reset_slave;
}
spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
@ -1676,8 +1971,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
is_going_down = 1;
spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
if (is_going_down) {
mlx4_warn(dev, "Slave is going down aborting command(%d)"
" executing from slave:%d\n",
mlx4_warn(dev, "Slave is going down aborting command (%d)"
" executing from slave: %d\n",
cmd, slave);
return;
}
@ -1696,8 +1991,6 @@ reset_slave:
spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
/*with slave in the middle of flr, no need to clean resources again.*/
inform_slave_state:
memset(&slave_state[slave].event_eq, 0,
sizeof(struct mlx4_slave_event_eq_info));
__raw_writel((__force u32) cpu_to_be32(reply),
&priv->mfunc.comm[slave].slave_read);
wmb();
@ -1751,7 +2044,10 @@ void mlx4_master_comm_channel(struct work_struct *work)
comm_cmd >> 16 & 0xff,
comm_cmd & 0xffff, toggle);
++served;
}
} else
mlx4_err(dev, "slave %d out of sync."
" read toggle %d, write toggle %d.\n", slave, slt,
toggle);
}
}
@ -1759,6 +2055,19 @@ void mlx4_master_comm_channel(struct work_struct *work)
mlx4_warn(dev, "Got command event with bitmask from %d slaves"
" but %d were served\n",
reported, served);
}
/* master command processing */
void mlx4_master_arm_comm_channel(struct work_struct *work)
{
struct mlx4_mfunc_master_ctx *master =
container_of(work,
struct mlx4_mfunc_master_ctx,
arm_comm_work);
struct mlx4_mfunc *mfunc =
container_of(master, struct mlx4_mfunc, master);
struct mlx4_priv *priv =
container_of(mfunc, struct mlx4_priv, mfunc);
struct mlx4_dev *dev = &priv->dev;
if (mlx4_ARM_COMM_CHANNEL(dev))
mlx4_warn(dev, "Failed to arm comm channel events\n");
@ -1839,6 +2148,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
for (i = 0; i < dev->num_slaves; ++i) {
s_state = &priv->mfunc.master.slave_state[i];
s_state->last_cmd = MLX4_COMM_CMD_RESET;
mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
s_state->event_eq[j].eqn = -1;
__raw_writel((__force u32) 0,
@ -1868,6 +2178,8 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
INIT_WORK(&priv->mfunc.master.comm_work,
mlx4_master_comm_channel);
INIT_WORK(&priv->mfunc.master.arm_comm_work,
mlx4_master_arm_comm_channel);
INIT_WORK(&priv->mfunc.master.slave_event_work,
mlx4_gen_slave_eqe);
INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
@ -2081,6 +2393,8 @@ struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
return ERR_PTR(-ENOMEM);
}
memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
return mailbox;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
@ -2101,23 +2415,32 @@ u32 mlx4_comm_get_version(void)
return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
}
static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
{
if ((vf < 0) || (vf >= dev->num_vfs)) {
mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n", vf, dev->num_vfs);
return -EINVAL;
}
return (vf+1);
}
int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u8 *mac)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_vport_state *s_info;
int slave;
if (!mlx4_is_master(dev))
return -EPROTONOSUPPORT;
if ((vf <= 0) || (vf > dev->num_vfs)) {
mlx4_err(dev, "Bad vf number:%d (max vf activated: %d)\n", vf, dev->num_vfs);
slave = mlx4_get_slave_indx(dev, vf);
if (slave < 0)
return -EINVAL;
}
s_info = &priv->mfunc.master.vf_admin[vf].vport[port];
s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
s_info->mac = mlx4_mac_to_u64(mac);
mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n",
vf, port, (long long)s_info->mac);
vf, port, (unsigned long long) s_info->mac);
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
@ -2125,40 +2448,145 @@ EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_vport_state *s_info;
struct mlx4_vport_oper_state *vf_oper;
struct mlx4_vport_state *vf_admin;
int slave;
if ((!mlx4_is_master(dev)) ||
!(dev->caps.flags & MLX4_DEV_CAP_FLAG_ESWITCH_SUPPORT))
!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL))
return -EPROTONOSUPPORT;
if ((vf <= 0) || (vf > dev->num_vfs) || (vlan > 4095) || (qos > 7))
if ((vlan > 4095) || (qos > 7))
return -EINVAL;
s_info = &priv->mfunc.master.vf_admin[vf].vport[port];
slave = mlx4_get_slave_indx(dev, vf);
if (slave < 0)
return -EINVAL;
vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
vf_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
if ((0 == vlan) && (0 == qos))
s_info->default_vlan = MLX4_VGT;
vf_admin->default_vlan = MLX4_VGT;
else
s_info->default_vlan = vlan;
s_info->default_qos = qos;
vf_admin->default_vlan = vlan;
vf_admin->default_qos = qos;
if (priv->mfunc.master.slave_state[slave].active &&
dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
mlx4_info(dev, "updating vf %d port %d config params immediately\n",
vf, port);
mlx4_master_immediate_activate_vlan_qos(priv, slave, port);
}
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
/* mlx4_get_slave_default_vlan -
* return true if VST (default vlan)
* if VST will fill vlan & qos (if not NULL) */
bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave, u16 *vlan, u8 *qos)
{
struct mlx4_vport_oper_state *vp_oper;
struct mlx4_priv *priv;
priv = mlx4_priv(dev);
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
if (MLX4_VGT != vp_oper->state.default_vlan) {
if (vlan)
*vlan = vp_oper->state.default_vlan;
if (qos)
*qos = vp_oper->state.default_qos;
return true;
}
return false;
}
EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan);
int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_vport_state *s_info;
int slave;
if ((!mlx4_is_master(dev)) ||
!(dev->caps.flags & MLX4_DEV_CAP_FLAG_ESWITCH_SUPPORT))
!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM))
return -EPROTONOSUPPORT;
if ((vf <= 0) || (vf > dev->num_vfs))
slave = mlx4_get_slave_indx(dev, vf);
if (slave < 0)
return -EINVAL;
s_info = &priv->mfunc.master.vf_admin[vf].vport[port];
s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
s_info->spoofchk = setting;
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk);
int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_vport_state *s_info;
struct mlx4_vport_oper_state *vp_oper;
int slave;
u8 link_stat_event;
slave = mlx4_get_slave_indx(dev, vf);
if (slave < 0)
return -EINVAL;
switch (link_state) {
case IFLA_VF_LINK_STATE_AUTO:
/* get link current state */
if (!priv->sense.do_sense_port[port])
link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
else
link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
break;
case IFLA_VF_LINK_STATE_ENABLE:
link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
break;
case IFLA_VF_LINK_STATE_DISABLE:
link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
break;
default:
mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n",
link_state, slave, port);
return -EINVAL;
};
/* update the admin & oper state on the link state */
s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
s_info->link_state = link_state;
vp_oper->state.link_state = link_state;
/* send event */
mlx4_gen_port_state_change_eqe(dev, slave, port, link_stat_event);
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state);
int mlx4_get_vf_link_state(struct mlx4_dev *dev, int port, int vf)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_vport_state *s_info;
int slave;
if (!mlx4_is_master(dev))
return -EPROTONOSUPPORT;
slave = mlx4_get_slave_indx(dev, vf);
if (slave < 0)
return -EINVAL;
s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
return s_info->link_state;
}
EXPORT_SYMBOL_GPL(mlx4_get_vf_link_state);

View File

@ -2,7 +2,7 @@
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
* Copyright (c) 2004 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@ -35,7 +35,7 @@
*/
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>
@ -56,12 +56,14 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
struct mlx4_cq *cq;
spin_lock(&cq_table->lock);
read_lock(&cq_table->cq_table_lock);
cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
cqn & (dev->caps.num_cqs - 1));
if (cq)
atomic_inc(&cq->refcount);
spin_unlock(&cq_table->lock);
read_unlock(&cq_table->cq_table_lock);
if (!cq) {
mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
@ -81,13 +83,13 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
struct mlx4_cq *cq;
spin_lock(&cq_table->lock);
read_lock(&cq_table->cq_table_lock);
cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
if (cq)
atomic_inc(&cq->refcount);
spin_unlock(&cq_table->lock);
read_unlock(&cq_table->cq_table_lock);
if (!cq) {
mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
@ -220,7 +222,7 @@ err_put:
mlx4_table_put(dev, &cq_table->table, *cqn);
err_out:
mlx4_bitmap_free(&cq_table->bitmap, *cqn);
mlx4_bitmap_free(&cq_table->bitmap, *cqn, MLX4_NO_RR);
return err;
}
@ -250,7 +252,7 @@ void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
mlx4_table_put(dev, &cq_table->cmpt_table, cqn);
mlx4_table_put(dev, &cq_table->table, cqn);
mlx4_bitmap_free(&cq_table->bitmap, cqn);
mlx4_bitmap_free(&cq_table->bitmap, cqn, MLX4_NO_RR);
}
static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
@ -269,23 +271,6 @@ static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
__mlx4_cq_free_icm(dev, cqn);
}
static int mlx4_find_least_loaded_vector(struct mlx4_priv *priv)
{
int i;
int index = 0;
int min = priv->eq_table.eq[0].load;
for (i = 1; i < priv->dev.caps.num_comp_vectors; i++) {
if (priv->eq_table.eq[i].load < min) {
index = i;
min = priv->eq_table.eq[i].load;
}
}
return index;
}
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec,
struct mlx4_cq *cq, unsigned vector, int collapsed,
@ -298,24 +283,20 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
u64 mtt_addr;
int err;
cq->vector = (vector == MLX4_LEAST_ATTACHED_VECTOR) ?
mlx4_find_least_loaded_vector(priv) : vector;
if (cq->vector > dev->caps.num_comp_vectors + dev->caps.comp_pool) {
if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool)
return -EINVAL;
}
cq->vector = vector;
err = mlx4_cq_alloc_icm(dev, &cq->cqn);
if (err) {
if (err)
return err;
}
spin_lock_irq(&cq_table->lock);
err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
spin_unlock_irq(&cq_table->lock);
if (err){
if (err)
goto err_icm;
}
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
@ -331,7 +312,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
cq_context->flags |= cpu_to_be32(1 << 19);
cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
cq_context->comp_eqn = priv->eq_table.eq[cq->vector].eqn;
cq_context->comp_eqn = priv->eq_table.eq[vector].eqn;
cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
mtt_addr = mlx4_mtt_addr(dev, mtt);
@ -344,7 +325,6 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
if (err)
goto err_radix;
priv->eq_table.eq[cq->vector].load++;
cq->cons_index = 0;
cq->arm_sn = 1;
cq->uar = uar;
@ -378,8 +358,6 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
if (err)
mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
priv->eq_table.eq[cq->vector].load--;
synchronize_irq(priv->eq_table.eq[cq->vector].irq);
spin_lock_irq(&cq_table->lock);
@ -400,6 +378,7 @@ int mlx4_init_cq_table(struct mlx4_dev *dev)
int err;
spin_lock_init(&cq_table->lock);
rwlock_init(&cq_table->cq_table_lock);
INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
if (mlx4_is_slave(dev))
return 0;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
* Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -31,12 +31,13 @@
*
*/
#include "mlx4_en.h"
#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/mlx4/cmd.h>
#include "mlx4_en.h"
static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event)
{
return;
@ -44,52 +45,72 @@ static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event)
int mlx4_en_create_cq(struct mlx4_en_priv *priv,
struct mlx4_en_cq *cq,
int entries, int ring, enum cq_type mode)
struct mlx4_en_cq **pcq,
int entries, int ring, enum cq_type mode,
int node)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_cq *cq;
int err;
cq = kzalloc_node(sizeof(struct mlx4_en_cq), GFP_KERNEL, node);
if (!cq) {
cq = kzalloc(sizeof(struct mlx4_en_cq), GFP_KERNEL);
if (!cq) {
en_err(priv, "Failed to allocate CQ structure\n");
return -ENOMEM;
}
}
cq->size = entries;
cq->buf_size = cq->size * mdev->dev->caps.cqe_size;
cq->tq = taskqueue_create_fast("mlx4_en_que", M_NOWAIT,
taskqueue_thread_enqueue, &cq->tq);
if (mode == RX) {
cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
cq->vector = (ring + priv->port) %
mdev->dev->caps.num_comp_vectors;
taskqueue_thread_enqueue, &cq->tq);
if (mode == RX) {
TASK_INIT(&cq->cq_task, 0, mlx4_en_rx_que, cq);
taskqueue_start_threads(&cq->tq, 1, PI_NET, "%s rx cq",
if_name(priv->dev));
if_name(priv->dev));
} else {
cq->buf_size = sizeof(struct mlx4_cqe);
cq->vector = MLX4_LEAST_ATTACHED_VECTOR;
TASK_INIT(&cq->cq_task, 0, mlx4_en_tx_que, cq);
taskqueue_start_threads(&cq->tq, 1, PI_NET, "%s tx cq",
if_name(priv->dev));
if_name(priv->dev));
}
cq->ring = ring;
cq->is_tx = mode;
mtx_init(&cq->lock.m, "mlx4 cq", NULL, MTX_DEF);
spin_lock_init(&cq->lock);
err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
cq->buf_size, 2 * PAGE_SIZE);
if (err)
return err;
goto err_cq;
err = mlx4_en_map_buffer(&cq->wqres.buf);
if (err)
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
else
cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf;
goto err_res;
cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf;
*pcq = cq;
return 0;
err_res:
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
err_cq:
kfree(cq);
return err;
}
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
int cq_idx)
{
struct mlx4_en_dev *mdev = priv->mdev;
int err;
int err = 0;
char name[25];
int timestamp_en = 0;
cq->dev = mdev->pndev[priv->port];
cq->mcq.set_ci_db = cq->wqres.db.db;
@ -98,52 +119,83 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
*cq->mcq.arm_db = 0;
memset(cq->buf, 0, cq->buf_size);
if (cq->is_tx == RX) {
if (mdev->dev->caps.comp_pool) {
if (!cq->vector) {
sprintf(name, "%s-%d", if_name(priv->dev),
cq->ring);
/* Set IRQ for specific name (per ring) */
if (mlx4_assign_eq(mdev->dev, name, &cq->vector)) {
cq->vector = (cq->ring + 1 + priv->port)
% mdev->dev->caps.num_comp_vectors;
mlx4_warn(mdev, "Failed Assigning an EQ to "
"%s ,Falling back to legacy EQ's\n",
name);
}
}
} else {
cq->vector = (cq->ring + 1 + priv->port) %
mdev->dev->caps.num_comp_vectors;
}
} else {
struct mlx4_en_cq *rx_cq;
/*
* For TX we use the same irq per
* ring we assigned for the RX
*/
cq_idx = cq_idx % priv->rx_ring_num;
rx_cq = priv->rx_cq[cq_idx];
cq->vector = rx_cq->vector;
}
if (!cq->is_tx)
cq->size = priv->rx_ring[cq->ring].actual_size;
err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,
cq->wqres.db.dma, &cq->mcq, cq->vector, cq->is_tx, 0);
if (err) {
cq->size = priv->rx_ring[cq->ring]->actual_size;
err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt,
&mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
cq->vector, 0, timestamp_en);
if (err)
return err;
}
cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
cq->mcq.event = mlx4_en_cq_event;
if (cq->is_tx) {
init_timer(&cq->timer);
cq->timer.function = mlx4_en_poll_tx_cq;
cq->timer.data = (unsigned long) cq;
}
if (cq->is_tx) {
init_timer(&cq->timer);
cq->timer.function = mlx4_en_poll_tx_cq;
cq->timer.data = (unsigned long) cq;
}
return 0;
}
void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_cq *cq = *pcq;
taskqueue_drain(cq->tq, &cq->cq_task);
taskqueue_free(cq->tq);
mlx4_en_unmap_buffer(&cq->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
cq->buf_size = 0;
cq->buf = NULL;
mtx_destroy(&cq->lock.m);
if (priv->mdev->dev->caps.comp_pool && cq->vector)
mlx4_release_eq(priv->mdev->dev, cq->vector);
kfree(cq);
*pcq = NULL;
}
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_dev *mdev = priv->mdev;
taskqueue_drain(cq->tq, &cq->cq_task);
if (cq->is_tx)
del_timer(&cq->timer);
taskqueue_drain(cq->tq, &cq->cq_task);
if (cq->is_tx)
del_timer(&cq->timer);
mlx4_cq_free(mdev->dev, &cq->mcq);
mlx4_cq_free(mdev->dev, &cq->mcq);
}
/* Set rx cq moderation parameters */
int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{

File diff suppressed because it is too large.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
* Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -34,6 +34,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
@ -60,20 +61,9 @@ static const char mlx4_en_version[] =
* Device scope module parameters
*/
/* Enable RSS TCP traffic */
MLX4_EN_PARM_INT(tcp_rss, 1,
"Enable RSS for incomming TCP traffic or disabled (0)");
/* Enable RSS UDP traffic */
MLX4_EN_PARM_INT(udp_rss, 1,
"Enable RSS for incomming UDP traffic or disabled (0)");
/* Number of LRO sessions per Rx ring (rounded up to a power of two) */
MLX4_EN_PARM_INT(num_lro, MLX4_EN_MAX_LRO_DESCRIPTORS,
"Number of LRO sessions per ring or disabled (0)");
/* Allow reassembly of fragmented IP packets */
MLX4_EN_PARM_INT(ip_reasm, 1, "Allow reassembly of fragmented IP packets (!0)");
"Enable RSS for incoming UDP traffic");
/* Priority pausing */
MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
@ -81,20 +71,23 @@ MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
" Per priority bit mask");
#define MAX_PFC_TX 0xff
#define MAX_PFC_RX 0xff
static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
{
struct mlx4_en_profile *params = &mdev->profile;
int i;
params->tcp_rss = tcp_rss;
params->udp_rss = udp_rss;
if (params->udp_rss && !(mdev->dev->caps.flags
& MLX4_DEV_CAP_FLAG_UDP_RSS)) {
params->num_tx_rings_p_up = min_t(int, mp_ncpus,
MLX4_EN_MAX_TX_RING_P_UP);
if (params->udp_rss && !(mdev->dev->caps.flags
& MLX4_DEV_CAP_FLAG_UDP_RSS)) {
mlx4_warn(mdev, "UDP RSS is not supported on this device.\n");
params->udp_rss = 0;
}
params->num_lro = min_t(int, num_lro , MLX4_EN_MAX_LRO_DESCRIPTORS);
params->ip_reasm = ip_reasm;
for (i = 1; i <= MLX4_MAX_PORTS; i++) {
params->prof[i].rx_pause = 1;
params->prof[i].rx_ppp = pfcrx;
@ -102,14 +95,15 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
params->prof[i].tx_ppp = pfctx;
params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
params->prof[i].tx_ring_num = MLX4_EN_NUM_HASH_RINGS + 1 +
(!!pfcrx) * MLX4_EN_NUM_PPP_RINGS;
params->prof[i].tx_ring_num = params->num_tx_rings_p_up *
MLX4_EN_NUM_UP;
params->prof[i].rss_rings = 0;
}
return 0;
}
static void *get_netdev(struct mlx4_dev *dev, void *ctx, u8 port)
static void *mlx4_en_get_netdev(struct mlx4_dev *dev, void *ctx, u8 port)
{
struct mlx4_en_dev *endev = ctx;
@ -138,6 +132,9 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
mlx4_err(mdev, "Internal error detected, restarting device\n");
break;
case MLX4_DEV_EVENT_SLAVE_INIT:
case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
break;
default:
if (port < 1 || port > dev->caps.num_ports ||
!mdev->pndev[port])
@ -150,7 +147,7 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
{
struct mlx4_en_dev *mdev = endev_ptr;
int i;
int i, ret;
mutex_lock(&mdev->state_lock);
mdev->device_up = false;
@ -162,25 +159,22 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
flush_workqueue(mdev->workqueue);
destroy_workqueue(mdev->workqueue);
mlx4_mr_free(dev, &mdev->mr);
ret = mlx4_mr_free(dev, &mdev->mr);
if (ret)
mlx4_err(mdev, "Error deregistering MR. The system may have become unstable.");
iounmap(mdev->uar_map);
mlx4_uar_free(dev, &mdev->priv_uar);
mlx4_pd_free(dev, mdev->priv_pdn);
sx_destroy(&mdev->state_lock.sx);
mtx_destroy(&mdev->uar_lock.m);
kfree(mdev);
}
static void *mlx4_en_add(struct mlx4_dev *dev)
{
static int mlx4_en_version_printed;
struct mlx4_en_dev *mdev;
int i;
int err;
if (!mlx4_en_version_printed) {
printk(KERN_INFO "%s", mlx4_en_version);
mlx4_en_version_printed++;
}
printk_once(KERN_INFO "%s", mlx4_en_version);
mdev = kzalloc(sizeof *mdev, GFP_KERNEL);
if (!mdev) {
@ -196,10 +190,11 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
if (mlx4_uar_alloc(dev, &mdev->priv_uar))
goto err_pd;
mtx_init(&mdev->uar_lock.m, "mlx4 uar", NULL, MTX_DEF);
mdev->uar_map = ioremap(mdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
mdev->uar_map = ioremap((phys_addr_t) mdev->priv_uar.pfn << PAGE_SHIFT,
PAGE_SIZE);
if (!mdev->uar_map)
goto err_uar;
spin_lock_init(&mdev->uar_lock);
mdev->dev = dev;
mdev->dma_device = &(dev->pdev->dev);
@ -215,7 +210,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ,
0, 0, &mdev->mr)) {
mlx4_err(mdev, "Failed allocating memory region\n");
goto err_uar;
goto err_map;
}
if (mlx4_mr_enable(mdev->dev, &mdev->mr)) {
mlx4_err(mdev, "Failed enabling memory region\n");
@ -229,21 +224,24 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
goto err_mr;
}
/* Configure wich ports to start according to module parameters */
/* Configure which ports to start according to module parameters */
mdev->port_cnt = 0;
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
mdev->port_cnt++;
/* If we did not receive an explicit number of Rx rings, default to
* the number of completion vectors populated by the mlx4_core */
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
mlx4_info(mdev, "Using %d tx rings for port:%d\n",
mdev->profile.prof[i].tx_ring_num, i);
mdev->profile.prof[i].rx_ring_num = rounddown_pow_of_two(
min_t(int, dev->caps.num_comp_vectors, MAX_RX_RINGS));
mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n",
mdev->profile.prof[i].rx_ring_num, i);
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
if (!dev->caps.comp_pool) {
mdev->profile.prof[i].rx_ring_num =
rounddown_pow_of_two(max_t(int, MIN_RX_RINGS,
min_t(int,
dev->caps.num_comp_vectors,
DEF_RX_RINGS)));
} else {
mdev->profile.prof[i].rx_ring_num = rounddown_pow_of_two(
min_t(int, dev->caps.comp_pool/
dev->caps.num_ports - 1 , MAX_MSIX_P_PORT - 1));
}
}
/* Create our own workqueue for reset/multicast tasks
@ -257,7 +255,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
/* At this stage all non-port specific tasks are complete:
* mark the card state as up */
sx_init(&mdev->state_lock.sx, "mlxen state");
mutex_init(&mdev->state_lock);
mdev->device_up = true;
/* Setup ports */
@ -265,32 +263,20 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
/* Create a netdev for each port */
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
mlx4_info(mdev, "Activating port:%d\n", i);
if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i])) {
if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
mdev->pndev[i] = NULL;
goto err_free_netdev;
}
}
return mdev;
err_free_netdev:
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
if (mdev->pndev[i])
mlx4_en_destroy_netdev(mdev->pndev[i]);
}
mutex_lock(&mdev->state_lock);
mdev->device_up = false;
mutex_unlock(&mdev->state_lock);
flush_workqueue(mdev->workqueue);
/* Stop event queue before we drop down to release shared SW state */
destroy_workqueue(mdev->workqueue);
err_mr:
mlx4_mr_free(dev, &mdev->mr);
err = mlx4_mr_free(dev, &mdev->mr);
if (err)
mlx4_err(mdev, "Error deregistering MR. The system may have become unstable.");
err_map:
if (mdev->uar_map)
iounmap(mdev->uar_map);
err_uar:
mtx_destroy(&mdev->uar_lock.m);
mlx4_uar_free(dev, &mdev->priv_uar);
err_pd:
mlx4_pd_free(dev, mdev->priv_pdn);
@ -300,73 +286,51 @@ err_free_res:
return NULL;
}
enum mlx4_query_reply mlx4_en_query(void *endev_ptr, void *int_dev)
{
struct mlx4_en_dev *mdev = endev_ptr;
struct net_device *netdev = int_dev;
int p;
for (p = 1; p <= MLX4_MAX_PORTS; ++p)
if (mdev->pndev[p] == netdev)
return p;
return MLX4_QUERY_NOT_MINE;
}
#if 0
static struct pci_device_id mlx4_en_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
{ PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
{ PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
{ PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */
{ PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
{ PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */
{ PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
{ PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */
{ PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
{ PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2 */
{ PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX VPI PCIe 2.0 5GT/s - IB QDR / 10GigE Virt+ */
{ PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX EN 40GigE PCIe 2.0 5GT/s */
{ PCI_VDEVICE(MELLANOX, 0x6778) }, /* MT26488 ConnectX VPI PCIe 2.0 5GT/s - IB DDR / 10GigE Virt+ */
{ PCI_VDEVICE(MELLANOX, 0x1000) },
{ PCI_VDEVICE(MELLANOX, 0x1001) },
{ PCI_VDEVICE(MELLANOX, 0x1002) },
{ PCI_VDEVICE(MELLANOX, 0x1003) },
{ PCI_VDEVICE(MELLANOX, 0x1004) },
{ PCI_VDEVICE(MELLANOX, 0x1005) },
{ PCI_VDEVICE(MELLANOX, 0x1006) },
{ PCI_VDEVICE(MELLANOX, 0x1007) },
{ PCI_VDEVICE(MELLANOX, 0x1008) },
{ PCI_VDEVICE(MELLANOX, 0x1009) },
{ PCI_VDEVICE(MELLANOX, 0x100a) },
{ PCI_VDEVICE(MELLANOX, 0x100b) },
{ PCI_VDEVICE(MELLANOX, 0x100c) },
{ PCI_VDEVICE(MELLANOX, 0x100d) },
{ PCI_VDEVICE(MELLANOX, 0x100e) },
{ PCI_VDEVICE(MELLANOX, 0x100f) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, mlx4_en_pci_table);
#endif
static struct mlx4_interface mlx4_en_interface = {
.add = mlx4_en_add,
.remove = mlx4_en_remove,
.event = mlx4_en_event,
.query = mlx4_en_query,
.get_dev = get_netdev,
.add = mlx4_en_add,
.remove = mlx4_en_remove,
.event = mlx4_en_event,
.get_dev = mlx4_en_get_netdev,
.protocol = MLX4_PROT_ETH,
};
static void mlx4_en_verify_params(void)
{
if (pfctx > MAX_PFC_TX) {
pr_warn("mlx4_en: WARNING: illegal module parameter pfctx 0x%x - "
"should be in range 0-0x%x, will be changed to default (0)\n",
pfctx, MAX_PFC_TX);
pfctx = 0;
}
if (pfcrx > MAX_PFC_RX) {
pr_warn("mlx4_en: WARNING: illegal module parameter pfcrx 0x%x - "
"should be in range 0-0x%x, will be changed to default (0)\n",
pfcrx, MAX_PFC_RX);
pfcrx = 0;
}
}
static int __init mlx4_en_init(void)
{
mlx4_en_verify_params();
#ifdef CONFIG_DEBUG_FS
int err = 0;
err = mlx4_en_register_debugfs();
if (err)
pr_err("Failed to register debugfs\n");
#endif
return mlx4_register_interface(&mlx4_en_interface);
}
static void __exit mlx4_en_cleanup(void)
{
mlx4_unregister_interface(&mlx4_en_interface);
#ifdef CONFIG_DEBUG_FS
mlx4_en_unregister_debugfs();
#endif
}
module_init(mlx4_en_init);

File diff suppressed because it is too large.


@ -1,5 +1,5 @@
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
* Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -31,28 +31,25 @@
*
*/
#include "mlx4_en.h"
#include <sys/types.h>
#include <linux/if_vlan.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#if 0 // moved to port.c
int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
u64 mac, u64 clear, u8 mode)
{
return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
#endif
#include "en_port.h"
#include "mlx4_en.h"
#define EN_IFQ_MIN_INTERVAL 3000
int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, u8 port, u32 *vlans)
int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_set_vlan_fltr_mbox *filter;
int i, j;
int i;
int j;
int index = 0;
u32 entry;
int err = 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
@ -60,86 +57,21 @@ int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, u8 port, u32 *vlans)
return PTR_ERR(mailbox);
filter = mailbox->buf;
memset(filter, 0, sizeof *filter);
if (vlans)
for (i = 0, j = VLAN_FLTR_SIZE - 1; i < VLAN_FLTR_SIZE;
i++, j--)
filter->entry[j] = cpu_to_be32(vlans[i]);
err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_VLAN_FLTR,
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
memset(filter, 0, sizeof(*filter));
for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) {
entry = 0;
for (j = 0; j < 32; j++) {
	if (test_bit(index, priv->active_vlans))
		entry |= 1 << j;
	index++;
}
filter->entry[i] = cpu_to_be32(entry);
}
err = mlx4_cmd(dev, mailbox->dma, priv->port, 0, MLX4_CMD_SET_VLAN_FLTR,
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
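/*
 * Standalone sketch of the VLAN-filter packing above: 4096 VLAN IDs
 * (VLAN_FLTR_SIZE * 32 bits) are folded into 32-bit words, written in
 * reverse entry order and byte-swapped to big endian for the firmware.
 * test_bit() and cpu_to_be32() are modeled here so the snippet is
 * self-contained; it is illustrative, not driver code.
 */
#include <stdint.h>

#define VLAN_FLTR_SIZE 128

static int test_bit(int nr, const uint32_t *map)
{
	return (map[nr / 32] >> (nr % 32)) & 1;
}

static uint32_t cpu_to_be32(uint32_t x)
{
	return __builtin_bswap32(x);	/* assuming a little-endian host */
}

static void pack_vlan_filter(const uint32_t *active_vlans,
    uint32_t entry_out[VLAN_FLTR_SIZE])
{
	int index = 0;
	int i, j;
	uint32_t entry;

	for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) {
		entry = 0;
		for (j = 0; j < 32; j++) {
			if (test_bit(index, active_vlans))
				entry |= 1u << j;
			index++;
		}
		entry_out[i] = cpu_to_be32(entry);
	}
}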
#if 0 //moved to port.c - shahark
int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_set_port_general_context *context;
int err;
u32 in_mod;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
memset(context, 0, sizeof *context);
context->flags = SET_PORT_GEN_ALL_VALID;
context->mtu = cpu_to_be16(mtu);
context->pptx = (pptx * (!pfctx)) << 7;
context->pfctx = pfctx;
context->pprx = (pprx * (!pfcrx)) << 7;
context->pfcrx = pfcrx;
in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
u8 promisc)
{
printf("%s %s:%d\n", __func__, __FILE__, __LINE__);
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_set_port_rqp_calc_context *context;
int err;
u32 in_mod;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
memset(context, 0, sizeof *context);
context->base_qpn = cpu_to_be32(base_qpn);
context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_EN_SHIFT | base_qpn);
/*
context->mcast = cpu_to_be32((dev->caps.mc_promisc_mode <<
SET_PORT_PROMISC_MODE_SHIFT) | base_qpn);
*/
context->intra_no_vlan = 0;
context->no_vlan = MLX4_NO_VLAN_IDX;
context->intra_vlan_miss = 0;
context->vlan_miss = MLX4_VLAN_MISS_IDX;
in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
#endif
int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
{
struct mlx4_en_query_port_context *qport_context;
@ -153,7 +85,8 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
return PTR_ERR(mailbox);
memset(mailbox->buf, 0, sizeof(*qport_context));
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_WRAPPED);
if (err)
goto out;
qport_context = mailbox->buf;
@ -169,95 +102,77 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
case MLX4_EN_10G_SPEED_XFI:
state->link_speed = 10000;
break;
case MLX4_EN_20G_SPEED:
state->link_speed = 20000;
break;
case MLX4_EN_40G_SPEED:
state->link_speed = 40000;
break;
case MLX4_EN_56G_SPEED:
state->link_speed = 56000;
break;
default:
state->link_speed = -1;
break;
}
state->transciver = qport_context->transceiver;
if (be32_to_cpu(qport_context->transceiver_code_hi) & 0x400)
state->transciver = 0x80;
state->autoneg = !!(qport_context->autoneg & MLX4_EN_AUTONEG_MASK);
out:
mlx4_free_cmd_mailbox(mdev->dev, mailbox);
return err;
}
#if 0
static int read_iboe_counters(struct mlx4_dev *dev, int index, u64 counters[])
{
struct mlx4_cmd_mailbox *mailbox;
int err;
int mode;
struct mlx4_counters_ext *ext;
struct mlx4_counters *reg;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return -ENOMEM;
err = mlx4_cmd_box(dev, 0, mailbox->dma, index, 0,
MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_WRAPPED);
if (err)
goto out;
mode = be32_to_cpu(((struct mlx4_counters *)mailbox->buf)->counter_mode) & 0xf;
switch (mode) {
case 0:
reg = mailbox->buf;
counters[0] = be64_to_cpu(reg->rx_frames);
counters[1] = be64_to_cpu(reg->tx_frames);
counters[2] = be64_to_cpu(reg->rx_bytes);
counters[3] = be64_to_cpu(reg->tx_bytes);
break;
case 1:
ext = mailbox->buf;
counters[0] = be64_to_cpu(ext->rx_uni_frames);
counters[1] = be64_to_cpu(ext->tx_uni_frames);
counters[2] = be64_to_cpu(ext->rx_uni_bytes);
counters[3] = be64_to_cpu(ext->tx_uni_bytes);
break;
default:
err = -EINVAL;
}
out:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
#endif
int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
{
struct mlx4_en_stat_out_mbox *mlx4_en_stats;
struct net_device *dev;
struct mlx4_en_priv *priv;
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_en_stat_out_flow_control_mbox *flowstats;
struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
struct mlx4_en_vport_stats *vport_stats = &priv->vport_stats;
struct mlx4_cmd_mailbox *mailbox = NULL;
struct mlx4_cmd_mailbox *mailbox_flow = NULL;
u64 in_mod = reset << 8 | port;
unsigned long oerror;
unsigned long ierror;
int err;
int i;
//int counter;
u64 counters[4];
int do_if_stat = 1;
unsigned long period = (unsigned long) (jiffies - priv->last_ifq_jiffies);
struct mlx4_en_vport_stats tmp_vport_stats;
struct net_device *dev;
dev = mdev->pndev[port];
priv = netdev_priv(dev);
memset(counters, 0, sizeof counters);
/*
counter = mlx4_get_iboe_counter(priv->mdev->dev, port);
if (counter >= 0)
err = read_iboe_counters(priv->mdev->dev, counter, counters);
*/
if (jiffies_to_msecs(period) < EN_IFQ_MIN_INTERVAL ||
priv->counter_index == 0xff)
do_if_stat = 0;
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
memset(mailbox->buf, 0, sizeof(*mlx4_en_stats));
if (IS_ERR(mailbox)) {
err = PTR_ERR(mailbox);
goto mailbox_out;
}
mailbox_flow = mlx4_alloc_cmd_mailbox(mdev->dev);
if (IS_ERR(mailbox_flow)) {
mlx4_free_cmd_mailbox(mdev->dev, mailbox);
err = PTR_ERR(mailbox_flow);
goto mailbox_out;
}
/* 0xff bytes indicate an invalid value */
memset(mailbox_flow->buf, 0xff, sizeof(*flowstats) *
MLX4_NUM_PRIORITIES);
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) {
memset(mailbox_flow->buf, 0, sizeof(*flowstats));
err = mlx4_cmd_box(mdev->dev, 0, mailbox_flow->dma,
in_mod | 1<<12, 0, MLX4_CMD_DUMP_ETH_STATS,
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
if (err)
goto out;
}
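/*
 * Sketch of the DUMP_ETH_STATS in_mod encoding read off the calls
 * above: port number in the low byte, the reset flag at bit 8, and
 * bit 12 selecting the flow-control statistics page. Standalone and
 * illustrative only.
 */
#include <stdint.h>

static uint32_t dump_eth_stats_in_mod(uint8_t port, uint8_t reset,
    int want_flowstats)
{
	uint32_t in_mod = ((uint32_t)reset << 8) | port;

	if (want_flowstats)
		in_mod |= 1u << 12;
	return in_mod;
}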
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_NATIVE);
if (err)
goto out;
@ -265,74 +180,394 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
spin_lock(&priv->stats_lock);
oerror = ierror = 0;
dev->if_ipackets = counters[0];
dev->if_ibytes = counters[2];
priv->port_stats.rx_chksum_good = 0;
priv->port_stats.rx_chksum_none = 0;
for (i = 0; i < priv->rx_ring_num; i++) {
dev->if_ipackets += priv->rx_ring[i].packets;
dev->if_ibytes += priv->rx_ring[i].bytes;
ierror += priv->rx_ring[i].errors;
}
dev->if_opackets = counters[1];
dev->if_obytes = counters[3];
for (i = 0; i <= priv->tx_ring_num; i++) {
dev->if_opackets += priv->tx_ring[i].packets;
dev->if_obytes += priv->tx_ring[i].bytes;
oerror += priv->tx_ring[i].errors;
priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
}
dev->if_ierrors = be32_to_cpu(mlx4_en_stats->RDROP) + ierror;
dev->if_oerrors = be32_to_cpu(mlx4_en_stats->TDROP) + oerror;
dev->if_imcasts = be64_to_cpu(mlx4_en_stats->MCAST_prio_0) +
be64_to_cpu(mlx4_en_stats->MCAST_prio_1) +
be64_to_cpu(mlx4_en_stats->MCAST_prio_2) +
be64_to_cpu(mlx4_en_stats->MCAST_prio_3) +
be64_to_cpu(mlx4_en_stats->MCAST_prio_4) +
be64_to_cpu(mlx4_en_stats->MCAST_prio_5) +
be64_to_cpu(mlx4_en_stats->MCAST_prio_6) +
be64_to_cpu(mlx4_en_stats->MCAST_prio_7) +
be64_to_cpu(mlx4_en_stats->MCAST_novlan);
dev->if_omcasts = be64_to_cpu(mlx4_en_stats->TMCAST_prio_0) +
be64_to_cpu(mlx4_en_stats->TMCAST_prio_1) +
be64_to_cpu(mlx4_en_stats->TMCAST_prio_2) +
be64_to_cpu(mlx4_en_stats->TMCAST_prio_3) +
be64_to_cpu(mlx4_en_stats->TMCAST_prio_4) +
be64_to_cpu(mlx4_en_stats->TMCAST_prio_5) +
be64_to_cpu(mlx4_en_stats->TMCAST_prio_6) +
be64_to_cpu(mlx4_en_stats->TMCAST_prio_7) +
be64_to_cpu(mlx4_en_stats->TMCAST_novlan);
dev->if_collisions = 0;
priv->port_stats.tx_chksum_offload = 0;
priv->port_stats.queue_stopped = 0;
priv->port_stats.wake_queue = 0;
for (i = 0; i < priv->tx_ring_num; i++) {
priv->port_stats.tx_chksum_offload += priv->tx_ring[i]->tx_csum;
priv->port_stats.queue_stopped += priv->tx_ring[i]->queue_stopped;
priv->port_stats.wake_queue += priv->tx_ring[i]->wake_queue;
}
/* RX Statistics */
priv->pkstats.rx_packets = be64_to_cpu(mlx4_en_stats->RTOT_prio_0) +
be64_to_cpu(mlx4_en_stats->RTOT_prio_1) +
be64_to_cpu(mlx4_en_stats->RTOT_prio_2) +
be64_to_cpu(mlx4_en_stats->RTOT_prio_3) +
be64_to_cpu(mlx4_en_stats->RTOT_prio_4) +
be64_to_cpu(mlx4_en_stats->RTOT_prio_5) +
be64_to_cpu(mlx4_en_stats->RTOT_prio_6) +
be64_to_cpu(mlx4_en_stats->RTOT_prio_7) +
be64_to_cpu(mlx4_en_stats->RTOT_novlan);
priv->pkstats.rx_bytes = be64_to_cpu(mlx4_en_stats->ROCT_prio_0) +
be64_to_cpu(mlx4_en_stats->ROCT_prio_1) +
be64_to_cpu(mlx4_en_stats->ROCT_prio_2) +
be64_to_cpu(mlx4_en_stats->ROCT_prio_3) +
be64_to_cpu(mlx4_en_stats->ROCT_prio_4) +
be64_to_cpu(mlx4_en_stats->ROCT_prio_5) +
be64_to_cpu(mlx4_en_stats->ROCT_prio_6) +
be64_to_cpu(mlx4_en_stats->ROCT_prio_7) +
be64_to_cpu(mlx4_en_stats->ROCT_novlan);
priv->pkstats.rx_multicast_packets = be64_to_cpu(mlx4_en_stats->MCAST_prio_0) +
be64_to_cpu(mlx4_en_stats->MCAST_prio_1) +
be64_to_cpu(mlx4_en_stats->MCAST_prio_2) +
be64_to_cpu(mlx4_en_stats->MCAST_prio_3) +
be64_to_cpu(mlx4_en_stats->MCAST_prio_4) +
be64_to_cpu(mlx4_en_stats->MCAST_prio_5) +
be64_to_cpu(mlx4_en_stats->MCAST_prio_6) +
be64_to_cpu(mlx4_en_stats->MCAST_prio_7) +
be64_to_cpu(mlx4_en_stats->MCAST_novlan);
priv->pkstats.rx_broadcast_packets = be64_to_cpu(mlx4_en_stats->RBCAST_prio_0) +
be64_to_cpu(mlx4_en_stats->RBCAST_prio_1) +
be64_to_cpu(mlx4_en_stats->RBCAST_prio_2) +
be64_to_cpu(mlx4_en_stats->RBCAST_prio_3) +
be64_to_cpu(mlx4_en_stats->RBCAST_prio_4) +
be64_to_cpu(mlx4_en_stats->RBCAST_prio_5) +
be64_to_cpu(mlx4_en_stats->RBCAST_prio_6) +
be64_to_cpu(mlx4_en_stats->RBCAST_prio_7) +
be64_to_cpu(mlx4_en_stats->RBCAST_novlan);
priv->pkstats.rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
be32_to_cpu(mlx4_en_stats->RJBBR) +
be32_to_cpu(mlx4_en_stats->RCRC) +
be32_to_cpu(mlx4_en_stats->RRUNT) +
be64_to_cpu(mlx4_en_stats->RInRangeLengthErr) +
be64_to_cpu(mlx4_en_stats->ROutRangeLengthErr) +
be32_to_cpu(mlx4_en_stats->RSHORT) +
be64_to_cpu(mlx4_en_stats->RGIANT_prio_0) +
be64_to_cpu(mlx4_en_stats->RGIANT_prio_1) +
be64_to_cpu(mlx4_en_stats->RGIANT_prio_2) +
be64_to_cpu(mlx4_en_stats->RGIANT_prio_3) +
be64_to_cpu(mlx4_en_stats->RGIANT_prio_4) +
be64_to_cpu(mlx4_en_stats->RGIANT_prio_5) +
be64_to_cpu(mlx4_en_stats->RGIANT_prio_6) +
be64_to_cpu(mlx4_en_stats->RGIANT_prio_7) +
be64_to_cpu(mlx4_en_stats->RGIANT_novlan);
priv->pkstats.rx_dropped = be32_to_cpu(mlx4_en_stats->RdropOvflw);
priv->pkstats.rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
priv->pkstats.rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
priv->pkstats.rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
priv->pkstats.rx_jabbers = be32_to_cpu(mlx4_en_stats->RJBBR);
priv->pkstats.rx_in_range_length_error = be64_to_cpu(mlx4_en_stats->RInRangeLengthErr);
priv->pkstats.rx_out_range_length_error = be64_to_cpu(mlx4_en_stats->ROutRangeLengthErr);
priv->pkstats.rx_lt_64_bytes_packets = be64_to_cpu(mlx4_en_stats->R64_prio_0) +
be64_to_cpu(mlx4_en_stats->R64_prio_1) +
be64_to_cpu(mlx4_en_stats->R64_prio_2) +
be64_to_cpu(mlx4_en_stats->R64_prio_3) +
be64_to_cpu(mlx4_en_stats->R64_prio_4) +
be64_to_cpu(mlx4_en_stats->R64_prio_5) +
be64_to_cpu(mlx4_en_stats->R64_prio_6) +
be64_to_cpu(mlx4_en_stats->R64_prio_7) +
be64_to_cpu(mlx4_en_stats->R64_novlan);
priv->pkstats.rx_127_bytes_packets = be64_to_cpu(mlx4_en_stats->R127_prio_0) +
be64_to_cpu(mlx4_en_stats->R127_prio_1) +
be64_to_cpu(mlx4_en_stats->R127_prio_2) +
be64_to_cpu(mlx4_en_stats->R127_prio_3) +
be64_to_cpu(mlx4_en_stats->R127_prio_4) +
be64_to_cpu(mlx4_en_stats->R127_prio_5) +
be64_to_cpu(mlx4_en_stats->R127_prio_6) +
be64_to_cpu(mlx4_en_stats->R127_prio_7) +
be64_to_cpu(mlx4_en_stats->R127_novlan);
priv->pkstats.rx_255_bytes_packets = be64_to_cpu(mlx4_en_stats->R255_prio_0) +
be64_to_cpu(mlx4_en_stats->R255_prio_1) +
be64_to_cpu(mlx4_en_stats->R255_prio_2) +
be64_to_cpu(mlx4_en_stats->R255_prio_3) +
be64_to_cpu(mlx4_en_stats->R255_prio_4) +
be64_to_cpu(mlx4_en_stats->R255_prio_5) +
be64_to_cpu(mlx4_en_stats->R255_prio_6) +
be64_to_cpu(mlx4_en_stats->R255_prio_7) +
be64_to_cpu(mlx4_en_stats->R255_novlan);
priv->pkstats.rx_511_bytes_packets = be64_to_cpu(mlx4_en_stats->R511_prio_0) +
be64_to_cpu(mlx4_en_stats->R511_prio_1) +
be64_to_cpu(mlx4_en_stats->R511_prio_2) +
be64_to_cpu(mlx4_en_stats->R511_prio_3) +
be64_to_cpu(mlx4_en_stats->R511_prio_4) +
be64_to_cpu(mlx4_en_stats->R511_prio_5) +
be64_to_cpu(mlx4_en_stats->R511_prio_6) +
be64_to_cpu(mlx4_en_stats->R511_prio_7) +
be64_to_cpu(mlx4_en_stats->R511_novlan);
priv->pkstats.rx_1023_bytes_packets = be64_to_cpu(mlx4_en_stats->R1023_prio_0) +
be64_to_cpu(mlx4_en_stats->R1023_prio_1) +
be64_to_cpu(mlx4_en_stats->R1023_prio_2) +
be64_to_cpu(mlx4_en_stats->R1023_prio_3) +
be64_to_cpu(mlx4_en_stats->R1023_prio_4) +
be64_to_cpu(mlx4_en_stats->R1023_prio_5) +
be64_to_cpu(mlx4_en_stats->R1023_prio_6) +
be64_to_cpu(mlx4_en_stats->R1023_prio_7) +
be64_to_cpu(mlx4_en_stats->R1023_novlan);
priv->pkstats.rx_1518_bytes_packets = be64_to_cpu(mlx4_en_stats->R1518_prio_0) +
be64_to_cpu(mlx4_en_stats->R1518_prio_1) +
be64_to_cpu(mlx4_en_stats->R1518_prio_2) +
be64_to_cpu(mlx4_en_stats->R1518_prio_3) +
be64_to_cpu(mlx4_en_stats->R1518_prio_4) +
be64_to_cpu(mlx4_en_stats->R1518_prio_5) +
be64_to_cpu(mlx4_en_stats->R1518_prio_6) +
be64_to_cpu(mlx4_en_stats->R1518_prio_7) +
be64_to_cpu(mlx4_en_stats->R1518_novlan);
priv->pkstats.rx_1522_bytes_packets = be64_to_cpu(mlx4_en_stats->R1522_prio_0) +
be64_to_cpu(mlx4_en_stats->R1522_prio_1) +
be64_to_cpu(mlx4_en_stats->R1522_prio_2) +
be64_to_cpu(mlx4_en_stats->R1522_prio_3) +
be64_to_cpu(mlx4_en_stats->R1522_prio_4) +
be64_to_cpu(mlx4_en_stats->R1522_prio_5) +
be64_to_cpu(mlx4_en_stats->R1522_prio_6) +
be64_to_cpu(mlx4_en_stats->R1522_prio_7) +
be64_to_cpu(mlx4_en_stats->R1522_novlan);
priv->pkstats.rx_1548_bytes_packets = be64_to_cpu(mlx4_en_stats->R1548_prio_0) +
be64_to_cpu(mlx4_en_stats->R1548_prio_1) +
be64_to_cpu(mlx4_en_stats->R1548_prio_2) +
be64_to_cpu(mlx4_en_stats->R1548_prio_3) +
be64_to_cpu(mlx4_en_stats->R1548_prio_4) +
be64_to_cpu(mlx4_en_stats->R1548_prio_5) +
be64_to_cpu(mlx4_en_stats->R1548_prio_6) +
be64_to_cpu(mlx4_en_stats->R1548_prio_7) +
be64_to_cpu(mlx4_en_stats->R1548_novlan);
priv->pkstats.rx_gt_1548_bytes_packets = be64_to_cpu(mlx4_en_stats->R2MTU_prio_0) +
be64_to_cpu(mlx4_en_stats->R2MTU_prio_1) +
be64_to_cpu(mlx4_en_stats->R2MTU_prio_2) +
be64_to_cpu(mlx4_en_stats->R2MTU_prio_3) +
be64_to_cpu(mlx4_en_stats->R2MTU_prio_4) +
be64_to_cpu(mlx4_en_stats->R2MTU_prio_5) +
be64_to_cpu(mlx4_en_stats->R2MTU_prio_6) +
be64_to_cpu(mlx4_en_stats->R2MTU_prio_7) +
be64_to_cpu(mlx4_en_stats->R2MTU_novlan);
/* Tx Stats */
priv->pkstats.tx_packets = be64_to_cpu(mlx4_en_stats->TTOT_prio_0) +
be64_to_cpu(mlx4_en_stats->TTOT_prio_1) +
be64_to_cpu(mlx4_en_stats->TTOT_prio_2) +
be64_to_cpu(mlx4_en_stats->TTOT_prio_3) +
be64_to_cpu(mlx4_en_stats->TTOT_prio_4) +
be64_to_cpu(mlx4_en_stats->TTOT_prio_5) +
be64_to_cpu(mlx4_en_stats->TTOT_prio_6) +
be64_to_cpu(mlx4_en_stats->TTOT_prio_7) +
be64_to_cpu(mlx4_en_stats->TTOT_novlan);
priv->pkstats.tx_bytes = be64_to_cpu(mlx4_en_stats->TOCT_prio_0) +
be64_to_cpu(mlx4_en_stats->TOCT_prio_1) +
be64_to_cpu(mlx4_en_stats->TOCT_prio_2) +
be64_to_cpu(mlx4_en_stats->TOCT_prio_3) +
be64_to_cpu(mlx4_en_stats->TOCT_prio_4) +
be64_to_cpu(mlx4_en_stats->TOCT_prio_5) +
be64_to_cpu(mlx4_en_stats->TOCT_prio_6) +
be64_to_cpu(mlx4_en_stats->TOCT_prio_7) +
be64_to_cpu(mlx4_en_stats->TOCT_novlan);
priv->pkstats.tx_multicast_packets = be64_to_cpu(mlx4_en_stats->TMCAST_prio_0) +
be64_to_cpu(mlx4_en_stats->TMCAST_prio_1) +
be64_to_cpu(mlx4_en_stats->TMCAST_prio_2) +
be64_to_cpu(mlx4_en_stats->TMCAST_prio_3) +
be64_to_cpu(mlx4_en_stats->TMCAST_prio_4) +
be64_to_cpu(mlx4_en_stats->TMCAST_prio_5) +
be64_to_cpu(mlx4_en_stats->TMCAST_prio_6) +
be64_to_cpu(mlx4_en_stats->TMCAST_prio_7) +
be64_to_cpu(mlx4_en_stats->TMCAST_novlan);
priv->pkstats.tx_broadcast_packets = be64_to_cpu(mlx4_en_stats->TBCAST_prio_0) +
be64_to_cpu(mlx4_en_stats->TBCAST_prio_1) +
be64_to_cpu(mlx4_en_stats->TBCAST_prio_2) +
be64_to_cpu(mlx4_en_stats->TBCAST_prio_3) +
be64_to_cpu(mlx4_en_stats->TBCAST_prio_4) +
be64_to_cpu(mlx4_en_stats->TBCAST_prio_5) +
be64_to_cpu(mlx4_en_stats->TBCAST_prio_6) +
be64_to_cpu(mlx4_en_stats->TBCAST_prio_7) +
be64_to_cpu(mlx4_en_stats->TBCAST_novlan);
priv->pkstats.tx_errors = be64_to_cpu(mlx4_en_stats->TGIANT_prio_0) +
be64_to_cpu(mlx4_en_stats->TGIANT_prio_1) +
be64_to_cpu(mlx4_en_stats->TGIANT_prio_2) +
be64_to_cpu(mlx4_en_stats->TGIANT_prio_3) +
be64_to_cpu(mlx4_en_stats->TGIANT_prio_4) +
be64_to_cpu(mlx4_en_stats->TGIANT_prio_5) +
be64_to_cpu(mlx4_en_stats->TGIANT_prio_6) +
be64_to_cpu(mlx4_en_stats->TGIANT_prio_7) +
be64_to_cpu(mlx4_en_stats->TGIANT_novlan);
priv->pkstats.tx_dropped = be32_to_cpu(mlx4_en_stats->TDROP) -
priv->pkstats.tx_errors;
priv->pkstats.tx_lt_64_bytes_packets = be64_to_cpu(mlx4_en_stats->T64_prio_0) +
be64_to_cpu(mlx4_en_stats->T64_prio_1) +
be64_to_cpu(mlx4_en_stats->T64_prio_2) +
be64_to_cpu(mlx4_en_stats->T64_prio_3) +
be64_to_cpu(mlx4_en_stats->T64_prio_4) +
be64_to_cpu(mlx4_en_stats->T64_prio_5) +
be64_to_cpu(mlx4_en_stats->T64_prio_6) +
be64_to_cpu(mlx4_en_stats->T64_prio_7) +
be64_to_cpu(mlx4_en_stats->T64_novlan);
priv->pkstats.tx_127_bytes_packets = be64_to_cpu(mlx4_en_stats->T127_prio_0) +
be64_to_cpu(mlx4_en_stats->T127_prio_1) +
be64_to_cpu(mlx4_en_stats->T127_prio_2) +
be64_to_cpu(mlx4_en_stats->T127_prio_3) +
be64_to_cpu(mlx4_en_stats->T127_prio_4) +
be64_to_cpu(mlx4_en_stats->T127_prio_5) +
be64_to_cpu(mlx4_en_stats->T127_prio_6) +
be64_to_cpu(mlx4_en_stats->T127_prio_7) +
be64_to_cpu(mlx4_en_stats->T127_novlan);
priv->pkstats.tx_255_bytes_packets = be64_to_cpu(mlx4_en_stats->T255_prio_0) +
be64_to_cpu(mlx4_en_stats->T255_prio_1) +
be64_to_cpu(mlx4_en_stats->T255_prio_2) +
be64_to_cpu(mlx4_en_stats->T255_prio_3) +
be64_to_cpu(mlx4_en_stats->T255_prio_4) +
be64_to_cpu(mlx4_en_stats->T255_prio_5) +
be64_to_cpu(mlx4_en_stats->T255_prio_6) +
be64_to_cpu(mlx4_en_stats->T255_prio_7) +
be64_to_cpu(mlx4_en_stats->T255_novlan);
priv->pkstats.tx_511_bytes_packets = be64_to_cpu(mlx4_en_stats->T511_prio_0) +
be64_to_cpu(mlx4_en_stats->T511_prio_1) +
be64_to_cpu(mlx4_en_stats->T511_prio_2) +
be64_to_cpu(mlx4_en_stats->T511_prio_3) +
be64_to_cpu(mlx4_en_stats->T511_prio_4) +
be64_to_cpu(mlx4_en_stats->T511_prio_5) +
be64_to_cpu(mlx4_en_stats->T511_prio_6) +
be64_to_cpu(mlx4_en_stats->T511_prio_7) +
be64_to_cpu(mlx4_en_stats->T511_novlan);
priv->pkstats.tx_1023_bytes_packets = be64_to_cpu(mlx4_en_stats->T1023_prio_0) +
be64_to_cpu(mlx4_en_stats->T1023_prio_1) +
be64_to_cpu(mlx4_en_stats->T1023_prio_2) +
be64_to_cpu(mlx4_en_stats->T1023_prio_3) +
be64_to_cpu(mlx4_en_stats->T1023_prio_4) +
be64_to_cpu(mlx4_en_stats->T1023_prio_5) +
be64_to_cpu(mlx4_en_stats->T1023_prio_6) +
be64_to_cpu(mlx4_en_stats->T1023_prio_7) +
be64_to_cpu(mlx4_en_stats->T1023_novlan);
priv->pkstats.tx_1518_bytes_packets = be64_to_cpu(mlx4_en_stats->T1518_prio_0) +
be64_to_cpu(mlx4_en_stats->T1518_prio_1) +
be64_to_cpu(mlx4_en_stats->T1518_prio_2) +
be64_to_cpu(mlx4_en_stats->T1518_prio_3) +
be64_to_cpu(mlx4_en_stats->T1518_prio_4) +
be64_to_cpu(mlx4_en_stats->T1518_prio_5) +
be64_to_cpu(mlx4_en_stats->T1518_prio_6) +
be64_to_cpu(mlx4_en_stats->T1518_prio_7) +
be64_to_cpu(mlx4_en_stats->T1518_novlan);
priv->pkstats.tx_1522_bytes_packets = be64_to_cpu(mlx4_en_stats->T1522_prio_0) +
be64_to_cpu(mlx4_en_stats->T1522_prio_1) +
be64_to_cpu(mlx4_en_stats->T1522_prio_2) +
be64_to_cpu(mlx4_en_stats->T1522_prio_3) +
be64_to_cpu(mlx4_en_stats->T1522_prio_4) +
be64_to_cpu(mlx4_en_stats->T1522_prio_5) +
be64_to_cpu(mlx4_en_stats->T1522_prio_6) +
be64_to_cpu(mlx4_en_stats->T1522_prio_7) +
be64_to_cpu(mlx4_en_stats->T1522_novlan);
priv->pkstats.tx_1548_bytes_packets = be64_to_cpu(mlx4_en_stats->T1548_prio_0) +
be64_to_cpu(mlx4_en_stats->T1548_prio_1) +
be64_to_cpu(mlx4_en_stats->T1548_prio_2) +
be64_to_cpu(mlx4_en_stats->T1548_prio_3) +
be64_to_cpu(mlx4_en_stats->T1548_prio_4) +
be64_to_cpu(mlx4_en_stats->T1548_prio_5) +
be64_to_cpu(mlx4_en_stats->T1548_prio_6) +
be64_to_cpu(mlx4_en_stats->T1548_prio_7) +
be64_to_cpu(mlx4_en_stats->T1548_novlan);
priv->pkstats.tx_gt_1548_bytes_packets = be64_to_cpu(mlx4_en_stats->T2MTU_prio_0) +
be64_to_cpu(mlx4_en_stats->T2MTU_prio_1) +
be64_to_cpu(mlx4_en_stats->T2MTU_prio_2) +
be64_to_cpu(mlx4_en_stats->T2MTU_prio_3) +
be64_to_cpu(mlx4_en_stats->T2MTU_prio_4) +
be64_to_cpu(mlx4_en_stats->T2MTU_prio_5) +
be64_to_cpu(mlx4_en_stats->T2MTU_prio_6) +
be64_to_cpu(mlx4_en_stats->T2MTU_prio_7) +
be64_to_cpu(mlx4_en_stats->T2MTU_novlan);
priv->pkstats.rx_prio[0][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_0);
priv->pkstats.rx_prio[0][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_0);
priv->pkstats.rx_prio[1][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_1);
priv->pkstats.rx_prio[1][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_1);
priv->pkstats.rx_prio[2][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_2);
priv->pkstats.rx_prio[2][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_2);
priv->pkstats.rx_prio[3][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_3);
priv->pkstats.rx_prio[3][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_3);
priv->pkstats.rx_prio[4][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_4);
priv->pkstats.rx_prio[4][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_4);
priv->pkstats.rx_prio[5][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_5);
priv->pkstats.rx_prio[5][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_5);
priv->pkstats.rx_prio[6][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_6);
priv->pkstats.rx_prio[6][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_6);
priv->pkstats.rx_prio[7][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_7);
priv->pkstats.rx_prio[7][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_7);
priv->pkstats.rx_prio[8][0] = be64_to_cpu(mlx4_en_stats->RTOT_novlan);
priv->pkstats.rx_prio[8][1] = be64_to_cpu(mlx4_en_stats->ROCT_novlan);
priv->pkstats.tx_prio[0][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_0);
priv->pkstats.tx_prio[0][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_0);
priv->pkstats.tx_prio[1][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_1);
priv->pkstats.tx_prio[1][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_1);
priv->pkstats.tx_prio[2][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_2);
priv->pkstats.tx_prio[2][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_2);
priv->pkstats.tx_prio[3][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_3);
priv->pkstats.tx_prio[3][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_3);
priv->pkstats.tx_prio[4][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_4);
priv->pkstats.tx_prio[4][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_4);
priv->pkstats.tx_prio[5][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_5);
priv->pkstats.tx_prio[5][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_5);
priv->pkstats.tx_prio[6][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_6);
priv->pkstats.tx_prio[6][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_6);
priv->pkstats.tx_prio[7][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_7);
priv->pkstats.tx_prio[7][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_7);
priv->pkstats.tx_prio[8][0] = be64_to_cpu(mlx4_en_stats->TTOT_novlan);
priv->pkstats.tx_prio[8][1] = be64_to_cpu(mlx4_en_stats->TOCT_novlan);
flowstats = mailbox_flow->buf;
for (i = 0; i < MLX4_NUM_PRIORITIES; i++) {
priv->flowstats[i].rx_pause =
be64_to_cpu(flowstats[i].rx_pause);
priv->flowstats[i].rx_pause_duration =
be64_to_cpu(flowstats[i].rx_pause_duration);
priv->flowstats[i].rx_pause_transition =
be64_to_cpu(flowstats[i].rx_pause_transition);
priv->flowstats[i].tx_pause =
be64_to_cpu(flowstats[i].tx_pause);
priv->flowstats[i].tx_pause_duration =
be64_to_cpu(flowstats[i].tx_pause_duration);
priv->flowstats[i].tx_pause_transition =
be64_to_cpu(flowstats[i].tx_pause_transition);
}
memset(&tmp_vport_stats, 0, sizeof(tmp_vport_stats));
spin_unlock(&priv->stats_lock);
err = mlx4_get_vport_ethtool_stats(mdev->dev, port,
&tmp_vport_stats, reset);
spin_lock(&priv->stats_lock);
if (!err) {
/* ethtool stats format */
vport_stats->rx_unicast_packets = tmp_vport_stats.rx_unicast_packets;
vport_stats->rx_unicast_bytes = tmp_vport_stats.rx_unicast_bytes;
vport_stats->rx_multicast_packets = tmp_vport_stats.rx_multicast_packets;
vport_stats->rx_multicast_bytes = tmp_vport_stats.rx_multicast_bytes;
vport_stats->rx_broadcast_packets = tmp_vport_stats.rx_broadcast_packets;
vport_stats->rx_broadcast_bytes = tmp_vport_stats.rx_broadcast_bytes;
vport_stats->rx_dropped = tmp_vport_stats.rx_dropped;
vport_stats->rx_errors = tmp_vport_stats.rx_errors;
vport_stats->tx_unicast_packets = tmp_vport_stats.tx_unicast_packets;
vport_stats->tx_unicast_bytes = tmp_vport_stats.tx_unicast_bytes;
vport_stats->tx_multicast_packets = tmp_vport_stats.tx_multicast_packets;
vport_stats->tx_multicast_bytes = tmp_vport_stats.tx_multicast_bytes;
vport_stats->tx_broadcast_packets = tmp_vport_stats.tx_broadcast_packets;
vport_stats->tx_broadcast_bytes = tmp_vport_stats.tx_broadcast_bytes;
vport_stats->tx_errors = tmp_vport_stats.tx_errors;
}
if (!mlx4_is_mfunc(mdev->dev)) {
/* netdevice stats format */
dev = mdev->pndev[port];
dev->if_ipackets = priv->pkstats.rx_packets;
dev->if_opackets = priv->pkstats.tx_packets;
dev->if_ibytes = priv->pkstats.rx_bytes;
dev->if_obytes = priv->pkstats.tx_bytes;
dev->if_ierrors = priv->pkstats.rx_errors;
dev->if_iqdrops = priv->pkstats.rx_dropped;
dev->if_imcasts = priv->pkstats.rx_multicast_packets;
dev->if_omcasts = priv->pkstats.tx_multicast_packets;
dev->if_collisions = 0;
}
priv->pkstats.broadcast =
be64_to_cpu(mlx4_en_stats->RBCAST_prio_0) +
be64_to_cpu(mlx4_en_stats->RBCAST_prio_1) +
be64_to_cpu(mlx4_en_stats->RBCAST_prio_2) +
be64_to_cpu(mlx4_en_stats->RBCAST_prio_3) +
be64_to_cpu(mlx4_en_stats->RBCAST_prio_4) +
be64_to_cpu(mlx4_en_stats->RBCAST_prio_5) +
be64_to_cpu(mlx4_en_stats->RBCAST_prio_6) +
be64_to_cpu(mlx4_en_stats->RBCAST_prio_7) +
be64_to_cpu(mlx4_en_stats->RBCAST_novlan);
priv->pkstats.rx_prio[0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_0);
priv->pkstats.rx_prio[1] = be64_to_cpu(mlx4_en_stats->RTOT_prio_1);
priv->pkstats.rx_prio[2] = be64_to_cpu(mlx4_en_stats->RTOT_prio_2);
priv->pkstats.rx_prio[3] = be64_to_cpu(mlx4_en_stats->RTOT_prio_3);
priv->pkstats.rx_prio[4] = be64_to_cpu(mlx4_en_stats->RTOT_prio_4);
priv->pkstats.rx_prio[5] = be64_to_cpu(mlx4_en_stats->RTOT_prio_5);
priv->pkstats.rx_prio[6] = be64_to_cpu(mlx4_en_stats->RTOT_prio_6);
priv->pkstats.rx_prio[7] = be64_to_cpu(mlx4_en_stats->RTOT_prio_7);
priv->pkstats.tx_prio[0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_0);
priv->pkstats.tx_prio[1] = be64_to_cpu(mlx4_en_stats->TTOT_prio_1);
priv->pkstats.tx_prio[2] = be64_to_cpu(mlx4_en_stats->TTOT_prio_2);
priv->pkstats.tx_prio[3] = be64_to_cpu(mlx4_en_stats->TTOT_prio_3);
priv->pkstats.tx_prio[4] = be64_to_cpu(mlx4_en_stats->TTOT_prio_4);
priv->pkstats.tx_prio[5] = be64_to_cpu(mlx4_en_stats->TTOT_prio_5);
priv->pkstats.tx_prio[6] = be64_to_cpu(mlx4_en_stats->TTOT_prio_6);
priv->pkstats.tx_prio[7] = be64_to_cpu(mlx4_en_stats->TTOT_prio_7);
spin_unlock(&priv->stats_lock);
out:
mlx4_free_cmd_mailbox(mdev->dev, mailbox_flow);
mlx4_free_cmd_mailbox(mdev->dev, mailbox);
mailbox_out:
if (do_if_stat)
priv->last_ifq_jiffies = jiffies;
return err;
}
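/*
 * Sketch of the repeated pattern above: each logical counter is the
 * sum of eight per-priority big-endian counters plus a no-VLAN bucket.
 * The contiguous layout assumed here is an illustration; the real
 * mailbox struct names each field explicitly.
 */
#include <stdint.h>

static uint64_t be64_to_cpu(uint64_t x)
{
	return __builtin_bswap64(x);	/* assuming a little-endian host */
}

static uint64_t sum_prio_counters(const uint64_t *prio0)
{
	uint64_t total = 0;
	int i;

	for (i = 0; i < 9; i++)	/* prio 0..7 plus novlan */
		total += be64_to_cpu(prio0[i]);
	return total;
}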


@ -1,5 +1,5 @@
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
* Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -36,39 +36,10 @@
#define SET_PORT_GEN_ALL_VALID 0x7
#define SET_PORT_PROMISC_EN_SHIFT 31
#define SET_PORT_PROMISC_MODE_SHIFT 30
#define SET_PORT_PROMISC_SHIFT 31
#define SET_PORT_MC_PROMISC_SHIFT 30
#if 0 //moved to port.c - shahark
struct mlx4_set_port_general_context {
u8 reserved[3];
u8 flags;
u16 reserved2;
__be16 mtu;
u8 pptx;
u8 pfctx;
u16 reserved3;
u8 pprx;
u8 pfcrx;
u16 reserved4;
};
struct mlx4_set_port_rqp_calc_context {
__be32 base_qpn;
__be32 flags;
u8 reserved[3];
u8 mac_miss;
u8 intra_no_vlan;
u8 no_vlan;
u8 intra_vlan_miss;
u8 vlan_miss;
u8 reserved2[3];
u8 no_vlan_prio;
__be32 promisc;
__be32 mcast;
};
#endif
#define MLX4_EN_NUM_TC 8
#define VLAN_FLTR_SIZE 128
struct mlx4_set_vlan_fltr_mbox {
@ -83,29 +54,27 @@ enum {
};
enum {
MLX4_EN_1G_SPEED = 0x02,
MLX4_EN_10G_SPEED_XFI = 0x01,
MLX4_EN_10G_SPEED_XAUI = 0x00,
MLX4_EN_10G_SPEED_XFI = 0x01,
MLX4_EN_1G_SPEED = 0x02,
MLX4_EN_20G_SPEED = 0x08,
MLX4_EN_40G_SPEED = 0x40,
MLX4_EN_56G_SPEED = 0x20,
MLX4_EN_OTHER_SPEED = 0x0f,
};
struct mlx4_en_query_port_context {
u8 link_up;
#define MLX4_EN_LINK_UP_MASK 0x80
u8 reserved;
u8 autoneg;
#define MLX4_EN_AUTONEG_MASK 0x80
__be16 mtu;
u8 reserved2;
u8 link_speed;
#define MLX4_EN_SPEED_MASK 0x43
#define MLX4_EN_SPEED_MASK 0x6b
u16 reserved3[5];
__be64 mac;
u8 transceiver;
u8 reserved4[3];
__be32 wavelenth;
u32 reserved5;
__be32 transceiver_code_hi;
__be32 transceiver_code_low;
};
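/*
 * Why the mask changed: MLX4_EN_SPEED_MASK must cover every speed code
 * in the enum above, and with the 20G/40G/56G codes added the OR of
 * all codes is 0x6b rather than the old 0x43. A standalone check:
 */
#include <assert.h>

int main(void)
{
	assert((0x01 /* 10G XFI */ | 0x02 /* 1G */ | 0x08 /* 20G */ |
	    0x20 /* 56G */ | 0x40 /* 40G */) == 0x6b);
	return 0;
}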
@ -590,6 +559,5 @@ struct mlx4_en_stat_out_mbox {
__be32 TDROP;
};
enum mlx4_query_reply mlx4_en_query(void *endev_ptr, void *int_dev);
#endif


@ -1,5 +1,5 @@
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
* Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -31,24 +31,26 @@
*
*/
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mlx4/qp.h>
#include "mlx4_en.h"
void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
int is_tx, int rss, int qpn, int cqn,
struct mlx4_qp_context *context)
int user_prio, struct mlx4_qp_context *context)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct net_device *dev = priv->dev;
memset(context, 0, sizeof *context);
context->flags = cpu_to_be32(7 << 16 | rss << 13);
context->flags = cpu_to_be32(7 << 16 | rss << MLX4_RSS_QPC_FLAG_OFFSET);
context->pd = cpu_to_be32(mdev->priv_pdn);
context->mtu_msgmax = 0xff;
if (!is_tx && !rss) {
if (!is_tx && !rss)
context->rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
}
if (is_tx)
context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
else
@ -57,10 +59,25 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
context->local_qpn = cpu_to_be32(qpn);
context->pri_path.ackto = 1 & 0x07;
context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
context->pri_path.counter_index = 0xff;
if (user_prio >= 0) {
context->pri_path.sched_queue |= user_prio << 3;
context->pri_path.feup = 1 << 6;
}
context->pri_path.counter_index = (u8)(priv->counter_index);
if (!rss &&
(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_LB_SRC_CHK) &&
context->pri_path.counter_index != 0xFF) {
/* disable multicast loopback to qp with same counter */
context->pri_path.fl |= MLX4_FL_ETH_SRC_CHECK_MC_LB;
context->pri_path.vlan_control |=
MLX4_VLAN_CTRL_ETH_SRC_CHECK_IF_COUNTER;
}
context->cqn_send = cpu_to_be32(cqn);
context->cqn_recv = cpu_to_be32(cqn);
context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
if (!(dev->if_capabilities & IFCAP_VLAN_HWCSUM))
context->param3 |= cpu_to_be32(1 << 30);
}
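/*
 * Sketch of the sched_queue encoding used above: fixed base 0x83,
 * (port - 1) in bits 6..7, and the user priority in bits 3..5 when
 * one is requested. Standalone and illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t sched_queue_encode(int port, int user_prio)
{
	uint8_t sq = 0x83 | (uint8_t)((port - 1) << 6);

	if (user_prio >= 0)
		sq |= (uint8_t)(user_prio << 3);
	return sq;
}

int main(void)
{
	printf("0x%02x\n", sched_queue_encode(1, -1));	/* 0x83 */
	printf("0x%02x\n", sched_queue_encode(2, 3));	/* 0xdb */
	return 0;
}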
@ -69,6 +86,8 @@ int mlx4_en_map_buffer(struct mlx4_buf *buf)
struct page **pages;
int i;
// if nbufs == 1 - there is no need to vmap
// if buf->direct.buf is not NULL it means that vmap was already done by mlx4_alloc_buff
if (buf->direct.buf != NULL || buf->nbufs == 1)
return 0;
@ -89,11 +108,10 @@ int mlx4_en_map_buffer(struct mlx4_buf *buf)
void mlx4_en_unmap_buffer(struct mlx4_buf *buf)
{
if (buf->direct.buf != NULL || buf->nbufs == 1)
if (BITS_PER_LONG == 64 || buf->nbufs == 1)
return;
vunmap(buf->direct.buf);
buf->direct.buf = NULL;
}
void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event)

File diff suppressed because it is too large.


@ -1,5 +1,5 @@
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
* Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -31,24 +31,24 @@
*
*/
#include "mlx4_en.h"
#include <linux/kernel.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/mlx4/driver.h>
#include "mlx4_en.h"
static int mlx4_en_test_registers(struct mlx4_en_priv *priv)
{
return mlx4_cmd(priv->mdev->dev, 0, 0, 0, MLX4_CMD_HW_HEALTH_CHECK,
MLX4_CMD_TIME_CLASS_A);
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
{
struct mbuf *mb;
struct sk_buff *skb;
struct ethhdr *ethh;
unsigned char *packet;
unsigned int packet_size = MLX4_LOOPBACK_TEST_PAYLOAD;
@ -57,24 +57,24 @@ static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
/* build the pkt before xmit */
mb = netdev_alloc_mb(priv->dev, MLX4_LOOPBACK_TEST_PAYLOAD + ETH_HLEN + NET_IP_ALIGN);
if (!mb) {
en_err(priv, "-LOOPBACK_TEST_XMIT- failed to create mb for xmit\n");
skb = netdev_alloc_skb(priv->dev, MLX4_LOOPBACK_TEST_PAYLOAD + ETH_HLEN + NET_IP_ALIGN);
if (!skb) {
en_err(priv, "-LOOPBACK_TEST_XMIT- failed to create skb for xmit\n");
return -ENOMEM;
}
mb_reserve(mb, NET_IP_ALIGN);
skb_reserve(skb, NET_IP_ALIGN);
ethh = (struct ethhdr *)mb_put(mb, sizeof(struct ethhdr));
packet = (unsigned char *)mb_put(mb, packet_size);
ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr));
packet = (unsigned char *)skb_put(skb, packet_size);
memcpy(ethh->h_dest, priv->dev->dev_addr, ETH_ALEN);
memset(ethh->h_source, 0, ETH_ALEN);
ethh->h_proto = htons(ETH_P_ARP);
mb_set_mac_header(mb, 0);
skb_set_mac_header(skb, 0);
for (i = 0; i < packet_size; ++i) /* fill our packet */
packet[i] = (unsigned char)(i & 0xff);
/* xmit the pkt */
err = mlx4_en_xmit(mb, priv->dev);
err = mlx4_en_xmit(skb, priv->dev);
return err;
}
@ -87,6 +87,8 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
priv->loopback_ok = 0;
priv->validate_loopback = 1;
mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
/* xmit */
if (mlx4_en_test_loopback_xmit(priv)) {
en_err(priv, "Transmitting loopback packet failed\n");
@ -107,7 +109,8 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
mlx4_en_test_loopback_exit:
priv->validate_loopback = 0;
return (!loopback_ok);
mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
return !loopback_ok;
}
@ -127,8 +130,10 @@ static int mlx4_en_test_speed(struct mlx4_en_priv *priv)
if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
return -ENOMEM;
/* The device currently only supports 10G speed */
if (priv->port_state.link_speed != SPEED_10000)
/* The device supports 1G, 10G and 40G speed */
if (priv->port_state.link_speed != MLX4_EN_LINK_SPEED_1G &&
priv->port_state.link_speed != MLX4_EN_LINK_SPEED_10G &&
priv->port_state.link_speed != MLX4_EN_LINK_SPEED_40G)
return priv->port_state.link_speed;
return 0;
}
@ -138,7 +143,6 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_tx_ring *tx_ring;
int i, carrier_ok;
memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST);
@ -148,20 +152,16 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
carrier_ok = netif_carrier_ok(dev);
netif_carrier_off(dev);
retry_tx:
/* Wait untill all tx queues are empty.
/* Wait until all tx queues are empty.
* there should not be any additional incoming traffic
* since we turned the carrier off */
msleep(200);
for (i = 0; i < priv->tx_ring_num && carrier_ok; i++) {
tx_ring = &priv->tx_ring[i];
if (tx_ring->prod != (tx_ring->cons + tx_ring->last_nr_txbb))
goto retry_tx;
}
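/*
 * Sketch of the TX-drain test in the loop above: a ring is considered
 * empty once the producer index equals the consumer index plus the
 * descriptors of the last completed WQE. The struct here is a
 * stand-in for illustration, not the driver's ring layout.
 */
#include <stdint.h>

struct tx_ring_sketch {
	uint32_t prod;
	uint32_t cons;
	uint32_t last_nr_txbb;
};

static int tx_ring_drained(const struct tx_ring_sketch *ring)
{
	return ring->prod == ring->cons + ring->last_nr_txbb;
}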
if (priv->mdev->dev->caps.loopback_support){
if (priv->mdev->dev->caps.flags &
MLX4_DEV_CAP_FLAG_UC_LOOPBACK) {
buf[3] = mlx4_en_test_registers(priv);
buf[4] = mlx4_en_test_loopback(priv);
if (priv->port_up)
buf[4] = mlx4_en_test_loopback(priv);
}
if (carrier_ok)

File diff suppressed because it is too large.


@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@ -33,6 +33,7 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
@ -86,6 +87,8 @@ static u64 get_async_ev_mask(struct mlx4_dev *dev)
u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK;
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT);
if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
async_ev_mask |= (1ull << MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT);
return async_ev_mask;
}
@ -147,12 +150,9 @@ void mlx4_gen_slave_eqe(struct work_struct *work)
/* All active slaves need to receive the event */
if (slave == ALL_SLAVES) {
for (i = 0; i < dev->num_slaves; i++) {
if (i != dev->caps.function &&
master->slave_state[i].active)
if (mlx4_GEN_EQE(dev, i, eqe))
mlx4_warn(dev, "Failed to "
" generate event "
"for slave %d\n", i);
if (mlx4_GEN_EQE(dev, i, eqe))
mlx4_warn(dev, "Failed to generate "
"event for slave %d\n", i);
}
} else {
if (mlx4_GEN_EQE(dev, slave, eqe))
@ -197,13 +197,13 @@ static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
struct mlx4_eqe *eqe)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_slave_state *s_slave =
&priv->mfunc.master.slave_state[slave];
if (!s_slave->active) {
/*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/
if (slave < 0 || slave >= dev->num_slaves ||
slave == dev->caps.function)
return;
if (!priv->mfunc.master.slave_state[slave].active)
return;
}
slave_event(dev, slave, eqe);
}
@ -375,7 +375,7 @@ out:
EXPORT_SYMBOL(set_and_calc_slave_port_state);
int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr)
int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr, u16 sm_lid, u8 sm_sl)
{
struct mlx4_eqe eqe;
@ -386,6 +386,12 @@ int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr)
eqe.event.port_mgmt_change.port = port;
eqe.event.port_mgmt_change.params.port_info.changed_attr =
cpu_to_be32((u32) attr);
if (attr & MSTR_SM_CHANGE_MASK) {
eqe.event.port_mgmt_change.params.port_info.mstr_sm_lid =
cpu_to_be16(sm_lid);
eqe.event.port_mgmt_change.params.port_info.mstr_sm_sl =
sm_sl;
}
slave_event(dev, ALL_SLAVES, &eqe);
return 0;
@ -446,6 +452,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
int i;
enum slave_port_gen_event gen_event;
unsigned long flags;
struct mlx4_vport_state *s_info;
while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor))) {
/*
@ -495,8 +502,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
break;
case MLX4_EVENT_TYPE_SRQ_LIMIT:
mlx4_warn(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
__func__);
mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
__func__);
/* fall through */
case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
if (mlx4_is_master(dev)) {
/* forward only to slave owning the SRQ */
@ -513,17 +521,15 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
eq->eqn, eq->cons_index, ret);
break;
}
mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x,"
" event: %02x(%02x)\n", __func__,
slave,
be32_to_cpu(eqe->event.srq.srqn),
eqe->type, eqe->subtype);
mlx4_dbg(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
__func__, slave,
be32_to_cpu(eqe->event.srq.srqn),
eqe->type, eqe->subtype);
if (!ret && slave != dev->caps.function) {
mlx4_warn(dev, "%s: sending event "
"%02x(%02x) to slave:%d\n",
__func__, eqe->type,
eqe->subtype, slave);
mlx4_dbg(dev, "%s: sending event %02x(%02x) to slave:%d\n",
__func__, eqe->type,
eqe->subtype, slave);
mlx4_slave_event(dev, slave, eqe);
break;
}
@ -554,7 +560,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN"
" to slave: %d, port:%d\n",
__func__, i, port);
mlx4_slave_event(dev, i, eqe);
s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state)
mlx4_slave_event(dev, i, eqe);
} else { /* IB port */
set_and_calc_slave_port_state(dev, i, port,
MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
@ -578,7 +586,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
for (i = 0; i < dev->num_slaves; i++) {
if (i == mlx4_master_func_num(dev))
continue;
mlx4_slave_event(dev, i, eqe);
s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state)
mlx4_slave_event(dev, i, eqe);
}
else /* IB port */
/* port-up event will be sent to a slave when the
@ -635,11 +645,18 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
"for non master device\n");
break;
}
memcpy(&priv->mfunc.master.comm_arm_bit_vector,
eqe->event.comm_channel_arm.bit_vec,
sizeof eqe->event.comm_channel_arm.bit_vec);
queue_work(priv->mfunc.master.comm_wq,
&priv->mfunc.master.comm_work);
if (!queue_work(priv->mfunc.master.comm_wq,
&priv->mfunc.master.comm_work))
mlx4_warn(dev, "Failed to queue comm channel work\n");
if (!queue_work(priv->mfunc.master.comm_wq,
&priv->mfunc.master.arm_comm_work))
mlx4_warn(dev, "Failed to queue arm comm channel work\n");
break;
case MLX4_EVENT_TYPE_FLR_EVENT:
@ -704,6 +721,27 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
(unsigned long) eqe);
break;
case MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT:
switch (eqe->subtype) {
case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE:
mlx4_warn(dev, "Bad cable detected on port %u\n",
eqe->event.bad_cable.port);
break;
case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE:
mlx4_warn(dev, "Unsupported cable detected\n");
break;
default:
mlx4_dbg(dev, "Unhandled recoverable error event "
"detected: %02x(%02x) on EQ %d at index %u. "
"owner=%x, nent=0x%x, ownership=%s\n",
eqe->type, eqe->subtype, eq->eqn,
eq->cons_index, eqe->owner, eq->nent,
!!(eqe->owner & 0x80) ^
!!(eq->cons_index & eq->nent) ? "HW" : "SW");
break;
}
break;
case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
case MLX4_EVENT_TYPE_ECC_DETECT:
default:
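/*
 * Sketch of the HW/SW ownership test printed in the debug message
 * above: the EQE owner bit toggles each lap around the queue, so an
 * entry is hardware-owned when its owner bit matches the parity of the
 * consumer index within the ring. Illustrative helper only.
 */
#include <stdint.h>

static int eqe_owned_by_hw(uint8_t owner, uint32_t cons_index, uint32_t nent)
{
	/* nent is a power of two, so (cons_index & nent) flips per lap */
	return !!(owner & 0x80) ^ !!(cons_index & nent);
}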
@ -747,7 +785,6 @@ static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
int work = 0;
int i;
writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);
for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
@ -777,7 +814,7 @@ int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_slave_event_eq_info *event_eq =
priv->mfunc.master.slave_state[slave].event_eq;
u32 in_modifier = vhcr->in_modifier;
u32 eqn = in_modifier & 0x1FF;
u32 eqn = in_modifier & 0x3FF;
u64 in_param = vhcr->in_param;
int err = 0;
int i;
@ -956,7 +993,7 @@ err_out_free_mtt:
mlx4_mtt_cleanup(dev, &eq->mtt);
err_out_free_eq:
mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);
err_out_free_pages:
for (i = 0; i < npages; ++i)
@ -1011,7 +1048,7 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
eq->page_list[i].map);
kfree(eq->page_list);
mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);
mlx4_free_cmd_mailbox(dev, mailbox);
}
@ -1306,7 +1343,7 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
}
EXPORT_SYMBOL(mlx4_test_interrupts);
int mlx4_assign_eq(struct mlx4_dev *dev, char *name, int *vector)
int mlx4_assign_eq(struct mlx4_dev *dev, char* name, int * vector)
{
struct mlx4_priv *priv = mlx4_priv(dev);


@ -1,6 +1,6 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@ -32,8 +32,10 @@
* SOFTWARE.
*/
#include <linux/etherdevice.h>
#include <linux/mlx4/cmd.h>
#include <linux/module.h>
#include <linux/cache.h>
#include "fw.h"
#include "icm.h"
@ -106,6 +108,7 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
[40] = "UDP RSS support",
[41] = "Unicast VEP steering support",
[42] = "Multicast VEP steering support",
[44] = "Cross-channel (sync_qp) operations support",
[48] = "Counters support",
[59] = "Port management change event support",
[60] = "eSwitch support",
@ -126,7 +129,18 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[0] = "RSS support",
[1] = "RSS Toeplitz Hash Function support",
[2] = "RSS XOR Hash Function support",
[3] = "Device manage flow steering support"
[3] = "Device manage flow steering support",
[4] = "FSM (MAC unti-spoofing) support",
[5] = "VST (control vlan insertion/stripping) support",
[6] = "Dynamic QP updates support",
[7] = "Loopback source checks support",
[8] = "Device managed flow steering IPoIB support",
[9] = "ETS configuration support",
[10] = "ETH backplane autoneg report",
[11] = "Ethernet Flow control statistics support",
[12] = "Recoverable error events support",
[13] = "Time stamping support",
[14] = "Report driver version to FW support"
};
int i;
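/*
 * Sketch of how the capability strings above are reported: walk the
 * bit positions and print the name for each set flag. Standalone
 * model of the dump loop, not the driver's exact code.
 */
#include <stdint.h>
#include <stdio.h>

static void dump_cap_flags(uint64_t flags, const char * const *names, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (names[i] && (flags & (1ULL << i)))
			printf("    %s\n", names[i]);
}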
@ -170,7 +184,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_info *cmd)
{
struct mlx4_priv *priv = mlx4_priv(dev);
u8 field;
u8 field, port;
u32 size;
int err = 0;
@ -178,23 +192,32 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1
#define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4
#define QUERY_FUNC_CAP_FMR_OFFSET 0x8
#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x10
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x14
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x18
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x20
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x24
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x28
#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP 0x10
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP 0x14
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP 0x18
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP 0x20
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP 0x24
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP 0x28
#define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c
#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0x30
#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x50
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x54
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x58
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x60
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x64
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x68
#define QUERY_FUNC_CAP_FMR_FLAG 0x80
#define QUERY_FUNC_CAP_FLAG_RDMA 0x40
#define QUERY_FUNC_CAP_FLAG_ETH 0x80
#define QUERY_FUNC_CAP_FLAG_QUOTAS 0x10
/* when opcode modifier = 1 */
#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3
#define QUERY_FUNC_CAP_RDMA_PROPS_OFFSET 0x8
#define QUERY_FUNC_CAP_ETH_PROPS_OFFSET 0xc
#define QUERY_FUNC_CAP_FLAGS0_OFFSET 0x8
#define QUERY_FUNC_CAP_FLAGS1_OFFSET 0xc
#define QUERY_FUNC_CAP_COUNTER_INDEX_OFFSET 0xd
#define QUERY_FUNC_CAP_QP0_TUNNEL 0x10
#define QUERY_FUNC_CAP_QP0_PROXY 0x14
@ -203,35 +226,45 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC 0x40
#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN 0x80
#define QUERY_FUNC_CAP_PROPS_DEF_COUNTER 0x20
#define QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID 0x80
if (vhcr->op_modifier == 1) {
field = 0;
/* ensure force vlan and force mac bits are not set */
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
/* ensure that phy_wqe_gid bit is not set */
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET);
port = vhcr->in_modifier; /* phys-port = logical-port */
MLX4_PUT(outbox->buf, port, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
field = vhcr->in_modifier; /* phys-port = logical-port */
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
field = 0;
/* ensure that phy_wqe_gid bit is not set */
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS0_OFFSET);
/* ensure force vlan and force mac bits are not set
* and that default counter bit is set
*/
field = QUERY_FUNC_CAP_PROPS_DEF_COUNTER; /* def counter */
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);
/* There is always a legal default counter or the sink counter */
field = mlx4_get_default_counter_index(dev, slave, vhcr->in_modifier);
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_COUNTER_INDEX_OFFSET);
/* size is now the QP number */
size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + field - 1;
size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + port - 1;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);
size += 2;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL);
size = dev->phys_caps.base_proxy_sqpn + 8 * slave + field - 1;
size = dev->phys_caps.base_proxy_sqpn + 8 * slave + port - 1;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_PROXY);
size += 2;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_PROXY);
} else if (vhcr->op_modifier == 0) {
/* enable rdma and ethernet interfaces */
field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA);
/* enable rdma and ethernet interfaces, and new quota locations */
field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
QUERY_FUNC_CAP_FLAG_QUOTAS);
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
field = dev->caps.num_ports;
@ -245,12 +278,18 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave];
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
size = dev->caps.num_qps;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave];
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
size = dev->caps.num_srqs;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave];
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
size = dev->caps.num_cqs;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
size = dev->caps.num_eqs;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
@ -260,12 +299,17 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave];
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
size = dev->caps.num_mpts;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave];
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
size = dev->caps.num_mtts;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
size = dev->caps.num_mgms + dev->caps.num_amgms;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
} else
err = -EINVAL;
@ -280,7 +324,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
u32 *outbox;
u8 field, op_modifier;
u32 size;
int err = 0;
int err = 0, quotas = 0;
op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
@ -304,6 +348,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
goto out;
}
func_cap->flags = field;
quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS);
MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
func_cap->num_ports = field;
@ -311,29 +356,50 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
func_cap->pf_context_behaviour = size;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
func_cap->qp_quota = size & 0xFFFFFF;
if (quotas) {
MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
func_cap->qp_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
func_cap->srq_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
func_cap->srq_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
func_cap->cq_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
func_cap->cq_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
func_cap->mpt_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
func_cap->mtt_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
func_cap->mcg_quota = size & 0xFFFFFF;
} else {
MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
func_cap->qp_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
func_cap->srq_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
func_cap->cq_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
func_cap->mpt_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
func_cap->mtt_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
func_cap->mcg_quota = size & 0xFFFFFF;
}
MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
func_cap->max_eq = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
func_cap->reserved_eq = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
func_cap->mpt_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
func_cap->mtt_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
func_cap->mcg_quota = size & 0xFFFFFF;
goto out;
}
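
The parse above reads each quota from one of two places: the new offsets when
QUERY_FUNC_CAP_FLAG_QUOTAS is advertised, the deprecated _DEP offsets
otherwise, always masking the dword down to 24 bits. A minimal standalone
sketch of that pattern (the offset values here are placeholders, not the real
mailbox layout):

#include <stdint.h>

/* Placeholder offsets standing in for a quota field and its
 * deprecated twin in the QUERY_FUNC_CAP mailbox. */
#define QUOTA_OFFSET      0x10
#define QUOTA_OFFSET_DEP  0x14

/* Read a 24-bit quota, choosing the new or the deprecated location
 * based on the QUOTAS capability flag, as done above for QPs, SRQs,
 * CQs, MPTs, MTTs and MCGs. */
static uint32_t read_quota(const uint32_t *mailbox, int has_quotas_flag)
{
    uint32_t off = has_quotas_flag ? QUOTA_OFFSET : QUOTA_OFFSET_DEP;

    return mailbox[off / 4] & 0xFFFFFF;
}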
@ -344,7 +410,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
}
if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN) {
mlx4_err(dev, "VLAN is enforced on this port\n");
err = -EPROTONOSUPPORT;
@ -357,7 +423,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
goto out;
}
} else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
MLX4_GET(field, outbox, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET);
MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
if (field & QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID) {
mlx4_err(dev, "phy_wqe_gid is "
"enforced on this ib port\n");
@ -373,6 +439,14 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
goto out;
}
MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
if (field & QUERY_FUNC_CAP_PROPS_DEF_COUNTER) {
MLX4_GET(field, outbox, QUERY_FUNC_CAP_COUNTER_INDEX_OFFSET);
func_cap->def_counter_index = field;
} else {
func_cap->def_counter_index = MLX4_SINK_COUNTER_INDEX;
}
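
The default-counter choice is the usual capability-gated fallback: take the
firmware-supplied index when QUERY_FUNC_CAP_PROPS_DEF_COUNTER is set,
otherwise point the function at the sink counter. A standalone sketch (the
sink value below is an assumption, not taken from this diff):

#include <stdint.h>

#define SINK_COUNTER_INDEX 0xff    /* assumed sink value */

/* Pick the slave's default counter index as the parse above does. */
static uint8_t default_counter(int has_def_counter, uint8_t fw_index)
{
    return has_def_counter ? fw_index : SINK_COUNTER_INDEX;
}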
MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL);
func_cap->qp0_tunnel_qpn = size & 0xFFFFFF;
@ -466,7 +540,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67
#define QUERY_DEV_CAP_MAX_BASIC_COUNTERS_OFFSET 0x68
#define QUERY_DEV_CAP_MAX_EXTENDED_COUNTERS_OFFSET 0x6c
#define QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET 0x70
#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76
#define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET 0x70
#define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET 0x74
#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82
@ -480,6 +557,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92
#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
#define QUERY_DEV_CAP_ETS_CFG_OFFSET 0x9c
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
dev_cap->flags2 = 0;
@ -551,16 +629,23 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->num_ports = field & 0xf;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
dev_cap->max_msg_sz = 1 << (field & 0x1f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET);
if (field & 0x10)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN;
MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
if (field & 0x80)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
if (field & 0x80)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
dev_cap->fs_max_num_qp_per_entry = field;
MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
dev_cap->stat_rate_support = stat_rate;
MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
dev_cap->timestamp_support = field & 0x80;
if (field & 0x80)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_TS;
MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
dev_cap->flags = flags | (u64)ext_flags << 32;
@ -644,6 +729,16 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
MLX4_GET(dev_cap->reserved_lkey, outbox,
QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETS_CFG_OFFSET);
if (field32 & (1 << 0))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
if (field32 & (1 << 7))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT;
if (field32 & (1 << 8))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW;
if (field32 & (1 << 13))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
MLX4_GET(dev_cap->max_icm_sz, outbox,
QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
@ -655,6 +750,16 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(dev_cap->max_extended_counters, outbox,
QUERY_DEV_CAP_MAX_EXTENDED_COUNTERS_OFFSET);
MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
if (field32 & (1 << 16))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
if (field32 & (1 << 19))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_LB_SRC_CHK;
if (field32 & (1 << 20))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM;
if (field32 & (1 << 26))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL;
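
The EXT_2_FLAGS chain above maps individual firmware bits onto
MLX4_DEV_CAP_FLAG2_* values one if at a time. The same parse can be written
as a table walk; in this sketch the bit positions follow the code above while
the flag encodings are illustrative only:

#include <stdint.h>

struct cap_bit {
    unsigned bit;      /* bit position in the firmware dword */
    uint64_t flag;     /* capability flag to set */
};

static uint64_t parse_ext2_flags(uint32_t field32)
{
    static const struct cap_bit map[] = {
        { 16, 1ULL << 0 },    /* UPDATE_QP */
        { 19, 1ULL << 1 },    /* LB_SRC_CHK */
        { 20, 1ULL << 2 },    /* FSM */
        { 26, 1ULL << 3 },    /* VLAN_CONTROL */
    };
    uint64_t flags2 = 0;
    unsigned i;

    for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
        if (field32 & (1U << map[i].bit))
            flags2 |= map[i].flag;
    return flags2;
}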
if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
for (i = 1; i <= dev_cap->num_ports; ++i) {
MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
@ -786,6 +891,14 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
field &= 0x7f;
MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);
/* turn off device-managed steering capability if not enabled */
if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
MLX4_GET(field, outbox->buf,
QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
field &= 0x7f;
MLX4_PUT(outbox->buf, field,
QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
}
return 0;
}
@ -800,8 +913,10 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
u8 port_type;
u16 short_field;
int err;
int admin_link_state;
#define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0
#define MLX4_PORT_LINK_UP_MASK 0x80
#define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c
#define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e
@ -810,12 +925,8 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
MLX4_CMD_NATIVE);
if (!err && dev->caps.function != slave) {
/* set slave default_mac address */
MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET);
def_mac += slave << 8;
/* if config MAC in DB use it */
if (priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac)
def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
/* set slave default_mac address to be zero MAC */
def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);
/* get port type - currently only eth is enabled */
@ -827,6 +938,12 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
/* set port type to currently operating port type */
port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3);
admin_link_state = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.link_state;
if (IFLA_VF_LINK_STATE_ENABLE == admin_link_state)
port_type |= MLX4_PORT_LINK_UP_MASK;
else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state)
port_type &= ~MLX4_PORT_LINK_UP_MASK;
MLX4_PUT(outbox->buf, port_type,
QUERY_PORT_SUPPORTED_TYPE_OFFSET);
@ -1078,14 +1195,14 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
MLX4_GET(fw->comm_bar, outbox, QUERY_FW_COMM_BAR_OFFSET);
fw->comm_bar = (fw->comm_bar >> 6) * 2;
mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n",
fw->comm_bar, (long long)fw->comm_base);
fw->comm_bar, (unsigned long long)fw->comm_base);
mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);
MLX4_GET(fw->clock_offset, outbox, QUERY_FW_CLOCK_OFFSET);
MLX4_GET(fw->clock_bar, outbox, QUERY_FW_CLOCK_BAR);
fw->clock_bar = (fw->clock_bar >> 6) * 2;
mlx4_dbg(dev, "Internal clock bar:%d offset:0x%llx\n",
fw->comm_bar, (long long)fw->comm_base);
fw->comm_bar, (unsigned long long)fw->comm_base);
/*
* Round up number of system pages needed in case
@ -1127,7 +1244,7 @@ int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
return 0;
}
static void get_board_id(void *vsd, char *board_id)
static void get_board_id(void *vsd, char *board_id, char *vsdstr)
{
int i;
@ -1135,9 +1252,16 @@ static void get_board_id(void *vsd, char *board_id)
#define VSD_OFFSET_SIG2 0xde
#define VSD_OFFSET_MLX_BOARD_ID 0xd0
#define VSD_OFFSET_TS_BOARD_ID 0x20
#define VSD_LEN 0xd0
#define VSD_SIGNATURE_TOPSPIN 0x5ad
memset(vsdstr, 0, MLX4_VSD_LEN);
for (i = 0; i < VSD_LEN / 4; i++)
((u32 *)vsdstr)[i] =
swab32(*(u32 *)(vsd + i * 4));
memset(board_id, 0, MLX4_BOARD_ID_LEN);
if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
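
get_board_id() now copies the VSD through a per-word byte swap before
scanning it for a board-id signature. A self-contained illustration of that
swab32 step:

#include <stdint.h>
#include <string.h>

/* Portable equivalent of the kernel's swab32(). */
static uint32_t swab32_sketch(uint32_t x)
{
    return ((x & 0x000000ffU) << 24) | ((x & 0x0000ff00U) << 8) |
           ((x & 0x00ff0000U) >> 8)  | ((x & 0xff000000U) >> 24);
}

/* Copy len bytes (a multiple of 4) from the raw VSD, swapping each
 * 32-bit word, as the loop above fills vsdstr. */
static void copy_vsd(char *dst, const unsigned char *vsd, size_t len)
{
    size_t i;

    for (i = 0; i < len / 4; i++) {
        uint32_t w;

        memcpy(&w, vsd + i * 4, sizeof(w));
        w = swab32_sketch(w);
        memcpy(dst + i * 4, &w, sizeof(w));
    }
}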
@ -1164,6 +1288,7 @@ int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
#define QUERY_ADAPTER_OUT_SIZE 0x100
#define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10
#define QUERY_ADAPTER_VSD_OFFSET 0x20
#define QUERY_ADAPTER_VSD_VENDOR_ID_OFFSET 0x1e
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
@ -1177,8 +1302,11 @@ int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);
adapter->vsd_vendor_id = be16_to_cpup((u16 *)outbox +
QUERY_ADAPTER_VSD_VENDOR_ID_OFFSET / 2);
get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
adapter->board_id);
adapter->board_id, adapter->vsd);
out:
mlx4_free_cmd_mailbox(dev, mailbox);
@ -1189,13 +1317,16 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
{
struct mlx4_cmd_mailbox *mailbox;
__be32 *inbox;
u32 mw_enable;
int err;
#define INIT_HCA_IN_SIZE 0x200
#define INIT_HCA_DRV_NAME_FOR_FW_MAX_SIZE 64
#define INIT_HCA_VERSION_OFFSET 0x000
#define INIT_HCA_VERSION 2
#define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e
#define INIT_HCA_FLAGS_OFFSET 0x014
#define INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET 0x018
#define INIT_HCA_QPC_OFFSET 0x020
#define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10)
#define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17)
@ -1217,6 +1348,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18)
#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
#define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6
#define INIT_HCA_DRIVER_VERSION_OFFSET 0x140
#define INIT_HCA_FS_PARAM_OFFSET 0x1d0
#define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00)
#define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12)
@ -1227,6 +1359,8 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_FS_IB_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x26)
#define INIT_HCA_TPT_OFFSET 0x0f0
#define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00)
#define INIT_HCA_TPT_MW_OFFSET (INIT_HCA_TPT_OFFSET + 0x08)
#define INIT_HCA_TPT_MW_ENABLE (1 << 31)
#define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b)
#define INIT_HCA_MTT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x10)
#define INIT_HCA_CMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x18)
@ -1244,7 +1378,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
*((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
*((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
((ilog2(CACHE_LINE_SIZE) - 4) << 5) | (1 << 4);
((ilog2(cache_line_size()) - 4) << 5) | (1 << 4);
#if defined(__LITTLE_ENDIAN)
*(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
@ -1290,6 +1424,17 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
dev->caps.cqe_size = 32;
}
if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
*(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1 << 31);
if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW) {
strncpy((u8 *)mailbox->buf + INIT_HCA_DRIVER_VERSION_OFFSET,
DRV_NAME_FOR_FW,
INIT_HCA_DRV_NAME_FOR_FW_MAX_SIZE - 1);
mlx4_dbg(dev, "Reporting Driver Version to FW: %s\n",
(u8 *)mailbox->buf + INIT_HCA_DRIVER_VERSION_OFFSET);
}
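
The version report depends on two details: the mailbox buffer is pre-zeroed,
and strncpy() copies at most INIT_HCA_DRV_NAME_FOR_FW_MAX_SIZE - 1 bytes, so
a terminating NUL always survives. A sketch of that convention:

#include <string.h>

#define VERSION_FIELD_MAX 64    /* mirrors the MAX_SIZE define above */

/* Copy a version string into a zeroed fixed-size field while always
 * leaving a terminating NUL. */
static void put_version(char field[VERSION_FIELD_MAX], const char *ver)
{
    memset(field, 0, VERSION_FIELD_MAX);
    strncpy(field, ver, VERSION_FIELD_MAX - 1);
}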
/* QPC/EEC/CQC/EQC/RDMARC attributes */
MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
@ -1339,15 +1484,16 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
MLX4_PUT(inbox, param->log_mc_table_sz,
INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0) {
if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0)
MLX4_PUT(inbox, (u8) (1 << 3),
INIT_HCA_UC_STEERING_OFFSET);
}
}
/* TPT attributes */
MLX4_PUT(inbox, param->dmpt_base, INIT_HCA_DMPT_BASE_OFFSET);
mw_enable = param->mw_enable ? INIT_HCA_TPT_MW_ENABLE : 0;
MLX4_PUT(inbox, mw_enable, INIT_HCA_TPT_MW_OFFSET);
MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
MLX4_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET);
MLX4_PUT(inbox, param->cmpt_base, INIT_HCA_CMPT_BASE_OFFSET);
@ -1373,6 +1519,7 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
struct mlx4_cmd_mailbox *mailbox;
__be32 *outbox;
u32 dword_field;
u32 mw_enable;
int err;
u8 byte_field;
@ -1414,13 +1561,12 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
} else {
MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET);
if (byte_field & 0x8) {
if (byte_field & 0x8)
param->steering_mode = MLX4_STEERING_MODE_B0;
}
else {
else
param->steering_mode = MLX4_STEERING_MODE_A0;
}
}
/* steering attributes */
if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
MLX4_GET(param->log_mc_entry_sz, outbox,
@ -1447,6 +1593,9 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
/* TPT attributes */
MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
MLX4_GET(mw_enable, outbox, INIT_HCA_TPT_MW_OFFSET);
param->mw_enable = (mw_enable & INIT_HCA_TPT_MW_ENABLE) ==
INIT_HCA_TPT_MW_ENABLE;
MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET);
MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET);
@ -1682,6 +1831,15 @@ out:
}
EXPORT_SYMBOL_GPL(mlx4_query_diag_counters);
int mlx4_MOD_STAT_CFG_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
return -EPERM;
}
#define MLX4_WOL_SETUP_MODE (5 << 28)
int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
{

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
* Copyright (c) 2006, 2007 Cisco Systems. All rights reserved.
*
* This software is available to you under a choice of one of two
@ -143,10 +143,13 @@ struct mlx4_func_cap {
u32 qp1_proxy_qpn;
u8 physical_port;
u8 port_flags;
u8 def_counter_index;
};
struct mlx4_adapter {
u16 vsd_vendor_id;
char board_id[MLX4_BOARD_ID_LEN];
char vsd[MLX4_VSD_LEN];
u8 inta_pin;
};
@ -175,6 +178,8 @@ struct mlx4_init_hca_param {
u8 log_mpt_sz;
u8 log_uar_sz;
u8 uar_page_sz; /* log pg sz in 4k chunks */
u8 mw_enable; /* Enable memory windows */
u8 fs_hash_enable_bits;
u8 steering_mode; /* for QUERY_HCA */
u64 dev_cap_enabled;
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@ -35,6 +35,7 @@
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/mlx4/cmd.h>
@ -288,10 +289,14 @@ void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
if (--table->icm[i]->refcount == 0) {
offset = (u64) i * MLX4_TABLE_CHUNK_SIZE;
mlx4_UNMAP_ICM(dev, table->virt + offset,
MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
mlx4_free_icm(dev, table->icm[i], table->coherent);
table->icm[i] = NULL;
if (!mlx4_UNMAP_ICM(dev, table->virt + offset,
MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE)) {
mlx4_free_icm(dev, table->icm[i], table->coherent);
table->icm[i] = NULL;
} else {
pr_warn("mlx4_core: mlx4_UNMAP_ICM failed.\n");
}
}
mutex_unlock(&table->mutex);
@ -378,7 +383,7 @@ void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
}
int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
u64 virt, int obj_size, u32 nobj, int reserved,
u64 virt, int obj_size, u64 nobj, int reserved,
int use_lowmem, int use_coherent)
{
int obj_per_chunk;
@ -388,7 +393,7 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
u64 size;
obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;
num_icm = div_u64((nobj + obj_per_chunk - 1), obj_per_chunk);
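
div_u64() is used here as a ceiling division, so a final partially filled
chunk still gets its own allocation. A worked sketch:

#include <stdint.h>

/* Number of chunks of obj_per_chunk objects needed for nobj objects. */
static uint64_t icm_chunks(uint64_t nobj, uint32_t obj_per_chunk)
{
    return (nobj + obj_per_chunk - 1) / obj_per_chunk;
}

/* e.g. with 4096 objects per chunk, icm_chunks(10000, 4096) == 3:
 * two full chunks plus one partially filled chunk. */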
table->icm = kcalloc(num_icm, sizeof *table->icm, GFP_KERNEL);
if (!table->icm)
@ -431,11 +436,15 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
err:
for (i = 0; i < num_icm; ++i)
if (table->icm[i]) {
mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE,
MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
mlx4_free_icm(dev, table->icm[i], use_coherent);
if (!mlx4_UNMAP_ICM(dev,
virt + i * MLX4_TABLE_CHUNK_SIZE,
MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE)) {
mlx4_free_icm(dev, table->icm[i], use_coherent);
} else {
pr_warn("mlx4_core: mlx4_UNMAP_ICM failed.\n");
return -ENOMEM;
}
}
kfree(table->icm);
return -ENOMEM;
@ -443,14 +452,22 @@ err:
void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
{
int i;
int i, err = 0;
for (i = 0; i < table->num_icm; ++i)
if (table->icm[i]) {
mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
mlx4_free_icm(dev, table->icm[i], table->coherent);
err = mlx4_UNMAP_ICM(dev,
table->virt + i * MLX4_TABLE_CHUNK_SIZE,
MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
if (!err) {
mlx4_free_icm(dev, table->icm[i],
table->coherent);
} else {
pr_warn("mlx4_core: mlx4_UNMAP_ICM failed.\n");
break;
}
}
kfree(table->icm);
if (!err)
kfree(table->icm);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@ -37,6 +37,7 @@
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#define MLX4_ICM_CHUNK_LEN \
((256 - sizeof (struct list_head) - 2 * sizeof (int)) / \
@ -78,7 +79,7 @@ int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
u32 start, u32 end);
int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
u64 virt, int obj_size, u32 nobj, int reserved,
u64 virt, int obj_size, u64 nobj, int reserved,
int use_lowmem, int use_coherent);
void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table);
void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj, dma_addr_t *dma_handle);
@ -122,5 +123,7 @@ static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
}
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
#endif /* MLX4_ICM_H */

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -32,6 +32,7 @@
*/
#include <linux/slab.h>
#include <linux/module.h>
#include "mlx4.h"
@ -160,7 +161,7 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
list_for_each_entry(intf, &intf_list, list)
mlx4_remove_device(intf, priv);
list_del(&priv->dev_list);
list_del_init(&priv->dev_list);
mutex_unlock(&intf_mutex);
}

File diff suppressed because it is too large

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -32,8 +32,10 @@
*/
#include <linux/string.h>
#include <linux/etherdevice.h>
#include <linux/mlx4/cmd.h>
#include <linux/module.h>
#include "mlx4.h"
@ -124,9 +126,14 @@ static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer,
u32 qpn)
{
struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[port - 1];
struct mlx4_steer *s_steer;
struct mlx4_promisc_qp *pqp;
if (port < 1 || port > dev->caps.num_ports)
return NULL;
s_steer = &mlx4_priv(dev)->steer[port - 1];
list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
if (pqp->qpn == qpn)
return pqp;
@ -153,6 +160,9 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 port,
u32 prot;
int err;
if (port < 1 || port > dev->caps.num_ports)
return -EINVAL;
s_steer = &mlx4_priv(dev)->steer[port - 1];
new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
if (!new_entry)
@ -237,6 +247,9 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
struct mlx4_promisc_qp *pqp;
struct mlx4_promisc_qp *dqp;
if (port < 1 || port > dev->caps.num_ports)
return -EINVAL;
s_steer = &mlx4_priv(dev)->steer[port - 1];
pqp = get_promisc_qp(dev, port, steer, qpn);
@ -258,7 +271,7 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
* we need to add it as a duplicate to this entry
* for future references */
list_for_each_entry(dqp, &entry->duplicates, list) {
if (qpn == pqp->qpn)
if (qpn == dqp->qpn)
return 0; /* qp is already duplicated */
}
@ -282,6 +295,9 @@ static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
struct mlx4_steer_index *tmp_entry, *entry = NULL;
struct mlx4_promisc_qp *dqp, *tmp_dqp;
if (port < 1 || port > dev->caps.num_ports)
return false;
s_steer = &mlx4_priv(dev)->steer[port - 1];
/* if qp is not promisc, it cannot be duplicated */
@ -309,20 +325,24 @@ static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
return true;
}
/* If a steering entry contains only promisc QPs, it can be removed. */
static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
/*
 * Returns true if all the QPs != tqpn contained in this entry
 * are promisc QPs; returns false otherwise.
 */
static bool promisc_steering_entry(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer,
unsigned int index, u32 tqpn)
unsigned int index, u32 tqpn, u32 *members_count)
{
struct mlx4_steer *s_steer;
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_mgm *mgm;
struct mlx4_steer_index *entry = NULL, *tmp_entry;
u32 qpn;
u32 members_count;
u32 m_count;
bool ret = false;
int i;
if (port < 1 || port > dev->caps.num_ports)
return false;
s_steer = &mlx4_priv(dev)->steer[port - 1];
mailbox = mlx4_alloc_cmd_mailbox(dev);
@ -332,15 +352,42 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
if (mlx4_READ_ENTRY(dev, index, mailbox))
goto out;
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
for (i = 0; i < members_count; i++) {
qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
m_count = be32_to_cpu(mgm->members_count) & 0xffffff;
if (members_count)
*members_count = m_count;
for (i = 0; i < m_count; i++) {
u32 qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
if (!get_promisc_qp(dev, port, steer, qpn) && qpn != tqpn) {
/* the qp is not promisc, the entry can't be removed */
goto out;
}
}
ret = true;
out:
mlx4_free_cmd_mailbox(dev, mailbox);
return ret;
}
/* If a steering entry contains only promisc QPs, it can be removed. */
static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer,
unsigned int index, u32 tqpn)
{
struct mlx4_steer *s_steer;
struct mlx4_steer_index *entry = NULL, *tmp_entry;
u32 members_count;
bool ret = false;
if (port < 1 || port > dev->caps.num_ports)
return false;
s_steer = &mlx4_priv(dev)->steer[port - 1];
if (!promisc_steering_entry(dev, port, steer, index, tqpn, &members_count))
goto out;
/* All the QPs currently registered for this entry are promiscuous;
 * check for duplicates */
ret = true;
list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
@ -369,7 +416,6 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
}
out:
mlx4_free_cmd_mailbox(dev, mailbox);
return ret;
}
@ -389,6 +435,9 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
int err;
struct mlx4_priv *priv = mlx4_priv(dev);
if (port < 1 || port > dev->caps.num_ports)
return -EINVAL;
s_steer = &mlx4_priv(dev)->steer[port - 1];
mutex_lock(&priv->mcg_table.mutex);
@ -412,43 +461,45 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
}
mgm = mailbox->buf;
/* the promisc qp needs to be added for each one of the steering
* entries, if it already exists, needs to be added as a duplicate
* for this entry */
list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
if (err)
goto out_mailbox;
if (!(mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)) {
/* the promisc QP needs to be added for each one of the steering
 * entries; if it already exists, it needs to be added as a duplicate
 * for this entry */
list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
if (err)
goto out_mailbox;
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
prot = be32_to_cpu(mgm->members_count) >> 30;
found = false;
for (i = 0; i < members_count; i++) {
if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
/* Entry already exists, add to duplicates */
dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
if (!dqp) {
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
prot = be32_to_cpu(mgm->members_count) >> 30;
found = false;
for (i = 0; i < members_count; i++) {
if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
/* Entry already exists, add to duplicates */
dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
if (!dqp) {
err = -ENOMEM;
goto out_mailbox;
}
dqp->qpn = qpn;
list_add_tail(&dqp->list, &entry->duplicates);
found = true;
}
}
if (!found) {
/* Need to add the qpn to mgm */
if (members_count == dev->caps.num_qp_per_mgm) {
/* entry is full */
err = -ENOMEM;
goto out_mailbox;
}
dqp->qpn = qpn;
list_add_tail(&dqp->list, &entry->duplicates);
found = true;
mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK);
mgm->members_count = cpu_to_be32(members_count | (prot << 30));
err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
if (err)
goto out_mailbox;
}
}
if (!found) {
/* Need to add the qpn to mgm */
if (members_count == dev->caps.num_qp_per_mgm) {
/* entry is full */
err = -ENOMEM;
goto out_mailbox;
}
mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK);
mgm->members_count = cpu_to_be32(members_count | (prot << 30));
err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
if (err)
goto out_mailbox;
}
}
/* add the new qpn to list of promisc qps */
@ -492,7 +543,7 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
struct mlx4_steer *s_steer;
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_mgm *mgm;
struct mlx4_steer_index *entry;
struct mlx4_steer_index *entry, *tmp_entry;
struct mlx4_promisc_qp *pqp;
struct mlx4_promisc_qp *dqp;
u32 members_count;
@ -501,6 +552,9 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
int i, loc = -1;
int err;
if (port < 1 || port > dev->caps.num_ports)
return -EINVAL;
s_steer = &mlx4_priv(dev)->steer[port - 1];
mutex_lock(&priv->mcg_table.mutex);
@ -533,49 +587,58 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
if (err)
goto out_mailbox;
/* remove the qp from all the steering entries*/
list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
found = false;
list_for_each_entry(dqp, &entry->duplicates, list) {
if (dqp->qpn == qpn) {
found = true;
break;
}
}
if (found) {
/* a duplicate, no need to change the mgm,
* only update the duplicates list */
list_del(&dqp->list);
kfree(dqp);
} else {
err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
if (err)
goto out_mailbox;
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
for (i = 0; i < members_count; ++i)
if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
loc = i;
if (!(mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)) {
/* remove the qp from all the steering entries */
list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
found = false;
list_for_each_entry(dqp, &entry->duplicates, list) {
if (dqp->qpn == qpn) {
found = true;
break;
}
if (loc < 0) {
mlx4_err(dev, "QP %06x wasn't found in entry %d\n",
qpn, entry->index);
err = -EINVAL;
goto out_mailbox;
}
if (found) {
/* a duplicate, no need to change the mgm,
* only update the duplicates list */
list_del(&dqp->list);
kfree(dqp);
} else {
err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
if (err)
goto out_mailbox;
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
if (!members_count) {
mlx4_warn(dev, "QP %06x wasn't found in entry %x mcount=0."
" deleting entry...\n", qpn, entry->index);
list_del(&entry->list);
kfree(entry);
continue;
}
/* copy the last QP in this MGM over removed QP */
mgm->qp[loc] = mgm->qp[members_count - 1];
mgm->qp[members_count - 1] = 0;
mgm->members_count = cpu_to_be32(--members_count |
(MLX4_PROT_ETH << 30));
for (i = 0; i < members_count; ++i)
if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
loc = i;
break;
}
err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
if (err)
if (loc < 0) {
mlx4_err(dev, "QP %06x wasn't found in entry %d\n",
qpn, entry->index);
err = -EINVAL;
goto out_mailbox;
}
}
/* copy the last QP in this MGM over removed QP */
mgm->qp[loc] = mgm->qp[members_count - 1];
mgm->qp[members_count - 1] = 0;
mgm->members_count = cpu_to_be32(--members_count |
(MLX4_PROT_ETH << 30));
err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
if (err)
goto out_mailbox;
}
}
}
out_mailbox:
@ -661,26 +724,37 @@ static int find_entry(struct mlx4_dev *dev, u8 port,
return err;
}
static const u8 __promisc_mode[] = {
[MLX4_FS_REGULAR] = 0x0,
[MLX4_FS_ALL_DEFAULT] = 0x1,
[MLX4_FS_MC_DEFAULT] = 0x3,
[MLX4_FS_UC_SNIFFER] = 0x4,
[MLX4_FS_MC_SNIFFER] = 0x5,
};
int map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
enum mlx4_net_trans_promisc_mode flow_type)
{
if (flow_type >= MLX4_FS_MODE_NUM || flow_type < 0) {
mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type);
return -EINVAL;
}
return __promisc_mode[flow_type];
}
EXPORT_SYMBOL_GPL(map_sw_to_hw_steering_mode);
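
Callers of the newly exported helper must treat a negative return as an
invalid mode. The lookup-plus-bounds-check shape, reduced to a standalone
form with the table values shown above:

enum promisc_mode { FS_REGULAR, FS_ALL_DEFAULT, FS_MC_DEFAULT,
                    FS_UC_SNIFFER, FS_MC_SNIFFER, FS_MODE_NUM };

static const unsigned char promisc_mode_map[FS_MODE_NUM] = {
    [FS_REGULAR]     = 0x0,
    [FS_ALL_DEFAULT] = 0x1,
    [FS_MC_DEFAULT]  = 0x3,
    [FS_UC_SNIFFER]  = 0x4,
    [FS_MC_SNIFFER]  = 0x5,
};

static int map_mode(int flow_type)
{
    if (flow_type < 0 || flow_type >= FS_MODE_NUM)
        return -1;    /* the driver returns -EINVAL here */
    return promisc_mode_map[flow_type];
}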
static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
struct mlx4_net_trans_rule_hw_ctrl *hw)
{
static const u8 __promisc_mode[] = {
[MLX4_FS_REGULAR] = 0x0,
[MLX4_FS_ALL_DEFAULT] = 0x1,
[MLX4_FS_MC_DEFAULT] = 0x3,
[MLX4_FS_UC_SNIFFER] = 0x4,
[MLX4_FS_MC_SNIFFER] = 0x5,
};
u8 flags = 0;
u32 dw = 0;
flags = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
flags |= ctrl->exclusive ? (1 << 2) : 0;
flags |= ctrl->allow_loopback ? (1 << 3) : 0;
dw = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
dw |= ctrl->exclusive ? (1 << 2) : 0;
dw |= ctrl->allow_loopback ? (1 << 3) : 0;
dw |= __promisc_mode[ctrl->promisc_mode] << 8;
dw |= ctrl->priority << 16;
hw->ctrl = cpu_to_be32(dw);
hw->flags = flags;
hw->type = __promisc_mode[ctrl->promisc_mode];
hw->prio = cpu_to_be16(ctrl->priority);
hw->port = ctrl->port;
hw->qpn = cpu_to_be32(ctrl->qpn);
}
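
The rewrite folds what were separate flags, type and prio fields into a
single 32-bit control word. A sketch of that packing, with the field
positions taken from the code above:

#include <stdint.h>

/* bit 0: queue mode (1 = LIFO); bit 2: exclusive; bit 3: allow
 * loopback; bits 8..15: promisc mode; bits 16..31: priority. */
static uint32_t pack_rule_ctrl(int lifo, int exclusive, int loopback,
                               uint8_t promisc_mode, uint16_t priority)
{
    uint32_t dw = 0;

    dw |= lifo ? 1 : 0;
    dw |= exclusive ? (1u << 2) : 0;
    dw |= loopback ? (1u << 3) : 0;
    dw |= (uint32_t)promisc_mode << 8;
    dw |= (uint32_t)priority << 16;
    return dw;
}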
@ -694,29 +768,51 @@ const u16 __sw_id_hw[] = {
[MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006
};
int map_sw_to_hw_steering_id(struct mlx4_dev *dev,
enum mlx4_net_trans_rule_id id)
{
if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
return -EINVAL;
}
return __sw_id_hw[id];
}
EXPORT_SYMBOL_GPL(map_sw_to_hw_steering_id);
static const int __rule_hw_sz[] = {
[MLX4_NET_TRANS_RULE_ID_ETH] =
sizeof(struct mlx4_net_trans_rule_hw_eth),
[MLX4_NET_TRANS_RULE_ID_IB] =
sizeof(struct mlx4_net_trans_rule_hw_ib),
[MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
[MLX4_NET_TRANS_RULE_ID_IPV4] =
sizeof(struct mlx4_net_trans_rule_hw_ipv4),
[MLX4_NET_TRANS_RULE_ID_TCP] =
sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
[MLX4_NET_TRANS_RULE_ID_UDP] =
sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
};
int hw_rule_sz(struct mlx4_dev *dev,
enum mlx4_net_trans_rule_id id)
{
if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
return -EINVAL;
}
return __rule_hw_sz[id];
}
EXPORT_SYMBOL_GPL(hw_rule_sz);
static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
struct _rule_hw *rule_hw)
{
static const size_t __rule_hw_sz[] = {
[MLX4_NET_TRANS_RULE_ID_ETH] =
sizeof(struct mlx4_net_trans_rule_hw_eth),
[MLX4_NET_TRANS_RULE_ID_IB] =
sizeof(struct mlx4_net_trans_rule_hw_ib),
[MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
[MLX4_NET_TRANS_RULE_ID_IPV4] =
sizeof(struct mlx4_net_trans_rule_hw_ipv4),
[MLX4_NET_TRANS_RULE_ID_TCP] =
sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
[MLX4_NET_TRANS_RULE_ID_UDP] =
sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
};
if (spec->id >= MLX4_NET_TRANS_RULE_NUM) {
mlx4_err(dev, "Invalid network rule id. id = %d\n", spec->id);
if (hw_rule_sz(dev, spec->id) < 0)
return -EINVAL;
}
memset(rule_hw, 0, __rule_hw_sz[spec->id]);
memset(rule_hw, 0, hw_rule_sz(dev, spec->id));
rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]);
rule_hw->size = __rule_hw_sz[spec->id] >> 2;
rule_hw->size = hw_rule_sz(dev, spec->id) >> 2;
switch (spec->id) {
case MLX4_NET_TRANS_RULE_ID_ETH:
@ -730,12 +826,12 @@ static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
rule_hw->eth.ether_type_enable = 1;
rule_hw->eth.ether_type = spec->eth.ether_type;
}
rule_hw->eth.vlan_id = spec->eth.vlan_id;
rule_hw->eth.vlan_id_msk = spec->eth.vlan_id_msk;
rule_hw->eth.vlan_tag = spec->eth.vlan_id;
rule_hw->eth.vlan_tag_msk = spec->eth.vlan_id_msk;
break;
case MLX4_NET_TRANS_RULE_ID_IB:
rule_hw->ib.r_u_qpn = spec->ib.r_u_qpn;
rule_hw->ib.l3_qpn = spec->ib.l3_qpn;
rule_hw->ib.qpn_mask = spec->ib.qpn_msk;
memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16);
memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16);
@ -886,7 +982,7 @@ int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
err = mlx4_QP_FLOW_STEERING_DETACH(dev, reg_id);
if (err)
mlx4_err(dev, "Fail to detach network rule. registration id = 0x%llx\n",
(long long)reg_id);
(unsigned long long)reg_id);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_flow_detach);
@ -977,8 +1073,9 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
if (err)
goto out;
/* if !link, still add the new entry. */
if (!link)
goto out;
goto skip_link;
err = mlx4_READ_ENTRY(dev, prev, mailbox);
if (err)
@ -990,6 +1087,7 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
if (err)
goto out;
skip_link:
if (prot == MLX4_PROT_ETH) {
/* manage the steering entry for promisc mode */
if (new_entry)
@ -1006,7 +1104,7 @@ out:
index, dev->caps.num_mgms);
else
mlx4_bitmap_free(&priv->mcg_table.bitmap,
index - dev->caps.num_mgms);
index - dev->caps.num_mgms, MLX4_USE_RR);
}
mutex_unlock(&priv->mcg_table.mutex);
@ -1045,10 +1143,14 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
goto out;
}
/* if this qp is also a promisc qp, it shouldn't be removed */
/*
 * If this QP is also a promisc QP, it shouldn't be removed as long as
 * at least one non-promisc QP is also attached to this MCG.
 */
if (prot == MLX4_PROT_ETH &&
check_duplicate_entry(dev, port, steer, index, qp->qpn))
goto out;
check_duplicate_entry(dev, port, steer, index, qp->qpn) &&
!promisc_steering_entry(dev, port, steer, index, qp->qpn, NULL))
goto out;
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
for (i = 0; i < members_count; ++i)
@ -1099,7 +1201,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
index, amgm_index, dev->caps.num_mgms);
else
mlx4_bitmap_free(&priv->mcg_table.bitmap,
amgm_index - dev->caps.num_mgms);
amgm_index - dev->caps.num_mgms, MLX4_USE_RR);
}
} else {
/* Remove entry from AMGM */
@ -1119,7 +1221,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
prev, index, dev->caps.num_mgms);
else
mlx4_bitmap_free(&priv->mcg_table.bitmap,
index - dev->caps.num_mgms);
index - dev->caps.num_mgms, MLX4_USE_RR);
}
out:
@ -1148,7 +1250,7 @@ static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
qpn = qp->qpn;
qpn |= (prot << 28);
if (attach && block_loopback)
qpn |= (1U << 31);
qpn |= (1 << 31);
err = mlx4_cmd(dev, mailbox->dma, qpn, attach,
MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A,
@ -1158,28 +1260,11 @@ static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
return err;
}
int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
u8 port, int block_mcast_loopback,
enum mlx4_protocol prot, u64 *reg_id)
int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp,
u8 gid[16], u8 port,
int block_mcast_loopback,
enum mlx4_protocol prot, u64 *reg_id)
{
switch (dev->caps.steering_mode) {
case MLX4_STEERING_MODE_A0:
if (prot == MLX4_PROT_ETH)
return 0;
case MLX4_STEERING_MODE_B0:
if (prot == MLX4_PROT_ETH)
gid[7] |= (MLX4_MC_STEER << 1);
if (mlx4_is_mfunc(dev))
return mlx4_QP_ATTACH(dev, qp, gid, 1,
block_mcast_loopback, prot);
return mlx4_qp_attach_common(dev, qp, gid,
block_mcast_loopback, prot,
MLX4_MC_STEER);
case MLX4_STEERING_MODE_DEVICE_MANAGED: {
struct mlx4_spec_list spec = { {NULL} };
__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
@ -1213,8 +1298,35 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
list_add_tail(&spec.list, &rule.list);
return mlx4_flow_attach(dev, &rule, reg_id);
}
}
int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
u8 port, int block_mcast_loopback,
enum mlx4_protocol prot, u64 *reg_id)
{
enum mlx4_steer_type steer;
steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
switch (dev->caps.steering_mode) {
case MLX4_STEERING_MODE_A0:
if (prot == MLX4_PROT_ETH)
return 0;
case MLX4_STEERING_MODE_B0:
if (prot == MLX4_PROT_ETH)
gid[7] |= (steer << 1);
if (mlx4_is_mfunc(dev))
return mlx4_QP_ATTACH(dev, qp, gid, 1,
block_mcast_loopback, prot);
return mlx4_qp_attach_common(dev, qp, gid,
block_mcast_loopback, prot,
MLX4_MC_STEER);
case MLX4_STEERING_MODE_DEVICE_MANAGED:
return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
block_mcast_loopback,
prot, reg_id);
default:
return -EINVAL;
}
@ -1224,6 +1336,9 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
enum mlx4_protocol prot, u64 reg_id)
{
enum mlx4_steer_type steer;
steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
switch (dev->caps.steering_mode) {
case MLX4_STEERING_MODE_A0:
if (prot == MLX4_PROT_ETH)
@ -1231,7 +1346,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
case MLX4_STEERING_MODE_B0:
if (prot == MLX4_PROT_ETH)
gid[7] |= (MLX4_MC_STEER << 1);
gid[7] |= (steer << 1);
if (mlx4_is_mfunc(dev))
return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
@ -1345,8 +1460,8 @@ int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
u8 port = vhcr->in_param >> 62;
enum mlx4_steer_type steer = vhcr->in_modifier;
/* Promiscuous unicast is not allowed in mfunc */
if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)
/* Promiscuous unicast is not allowed in mfunc for VFs */
if ((slave != dev->caps.function) && (steer == MLX4_UC_STEER))
return 0;
if (vhcr->op_modifier)

View File

@ -2,7 +2,7 @@
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
* Copyright (c) 2004 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@ -43,7 +43,7 @@
#include <linux/timer.h>
#include <linux/semaphore.h>
#include <linux/workqueue.h>
#include <linux/device.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/doorbell.h>
@ -51,8 +51,12 @@
#define DRV_NAME "mlx4_core"
#define PFX DRV_NAME ": "
#define DRV_VERSION "1.1"
#define DRV_RELDATE "Dec, 2011"
#define DRV_VERSION "2.1"
#define DRV_RELDATE __DATE__
#define DRV_STACK_NAME "Linux-MLNX_OFED"
#define DRV_STACK_VERSION "2.1"
#define DRV_NAME_FOR_FW DRV_STACK_NAME","DRV_STACK_VERSION
#define MLX4_FS_UDP_UC_EN (1 << 1)
#define MLX4_FS_TCP_UC_EN (1 << 2)
@ -108,10 +112,10 @@ enum {
MLX4_NUM_CMPTS = MLX4_CMPT_NUM_TYPE << MLX4_CMPT_SHIFT
};
enum mlx4_mr_state {
MLX4_MR_DISABLED = 0,
MLX4_MR_EN_HW,
MLX4_MR_EN_SW
enum mlx4_mpt_state {
MLX4_MPT_DISABLED = 0,
MLX4_MPT_EN_HW,
MLX4_MPT_EN_SW
};
#define MLX4_COMM_TIME 10000
@ -139,9 +143,10 @@ enum mlx4_resource {
RES_MTT,
RES_MAC,
RES_VLAN,
RES_EQ,
RES_NPORT_ID,
RES_COUNTER,
RES_FS_RULE,
RES_EQ,
MLX4_NUM_OF_RESOURCE_TYPE
};
@ -180,13 +185,14 @@ struct mlx4_vhcr {
struct mlx4_vhcr_cmd {
__be64 in_param;
__be32 in_modifier;
u32 reserved1;
__be64 out_param;
__be16 token;
u16 reserved;
u8 status;
u8 flags;
__be16 opcode;
};
} __packed;
struct mlx4_cmd_info {
u16 opcode;
@ -194,6 +200,7 @@ struct mlx4_cmd_info {
bool has_outbox;
bool out_is_imm;
bool encode_slave_id;
bool skip_err_print;
int (*verify)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox);
int (*wrapper)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
@ -202,6 +209,10 @@ struct mlx4_cmd_info {
struct mlx4_cmd_info *cmd);
};
enum {
MLX4_DEBUG_MASK_CMD_TIME = 0x100,
};
#ifdef CONFIG_MLX4_DEBUG
extern int mlx4_debug_level;
#else /* CONFIG_MLX4_DEBUG */
@ -260,6 +271,22 @@ struct mlx4_icm_table {
struct mlx4_icm **icm;
};
#define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28)
#define MLX4_MPT_FLAG_FREE (0x3UL << 28)
#define MLX4_MPT_FLAG_MIO (1 << 17)
#define MLX4_MPT_FLAG_BIND_ENABLE (1 << 15)
#define MLX4_MPT_FLAG_PHYSICAL (1 << 9)
#define MLX4_MPT_FLAG_REGION (1 << 8)
#define MLX4_MPT_PD_FLAG_FAST_REG (1 << 27)
#define MLX4_MPT_PD_FLAG_RAE (1 << 28)
#define MLX4_MPT_PD_FLAG_EN_INV (3 << 24)
#define MLX4_MPT_QP_FLAG_BOUND_QP (1 << 7)
#define MLX4_MPT_STATUS_SW 0xF0
#define MLX4_MPT_STATUS_HW 0x00
/*
* Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
*/
@ -353,7 +380,6 @@ struct mlx4_eq {
u16 irq;
u16 have_irq;
int nent;
int load;
struct mlx4_buf_list *page_list;
struct mlx4_mtt mtt;
};
@ -376,7 +402,7 @@ struct mlx4_profile {
int num_cq;
int num_mcg;
int num_mpt;
unsigned num_mtt;
unsigned num_mtt_segs;
};
struct mlx4_fw {
@ -434,6 +460,7 @@ struct mlx4_slave_state {
u8 last_cmd;
u8 init_port_mask;
bool active;
bool old_vlan_api;
u8 function;
dma_addr_t vhcr_dma;
u16 mtu[MLX4_MAX_PORTS + 1];
@ -455,12 +482,14 @@ struct mlx4_slave_state {
#define MLX4_VGT 4095
#define NO_INDX (-1)
struct mlx4_vport_state {
u64 mac;
u16 default_vlan;
u8 default_qos;
u32 tx_rate;
bool spoofchk;
u32 link_state;
};
struct mlx4_vf_admin_state {
@ -531,6 +560,7 @@ struct mlx4_mfunc_master_ctx {
struct mlx4_resource_tracker res_tracker;
struct workqueue_struct *comm_wq;
struct work_struct comm_work;
struct work_struct arm_comm_work;
struct work_struct slave_event_work;
struct work_struct slave_flr_event_work;
spinlock_t slave_state_lock;
@ -576,6 +606,24 @@ struct mlx4_cmd {
u8 comm_toggle;
};
enum {
MLX4_VF_IMMED_VLAN_FLAG_VLAN = 1 << 0,
MLX4_VF_IMMED_VLAN_FLAG_QOS = 1 << 1,
};
struct mlx4_vf_immed_vlan_work {
struct work_struct work;
struct mlx4_priv *priv;
int flags;
int slave;
int vlan_ix;
int orig_vlan_ix;
u8 port;
u8 qos;
u16 vlan_id;
u16 orig_vlan_id;
};
struct mlx4_uar_table {
struct mlx4_bitmap bitmap;
};
@ -592,6 +640,7 @@ struct mlx4_mr_table {
struct mlx4_cq_table {
struct mlx4_bitmap bitmap;
spinlock_t lock;
rwlock_t cq_table_lock;
struct radix_tree_root tree;
struct mlx4_icm_table table;
struct mlx4_icm_table cmpt_table;
@ -724,8 +773,6 @@ struct mlx4_sense {
u8 do_sense_port[MLX4_MAX_PORTS + 1];
u8 sense_allowed[MLX4_MAX_PORTS + 1];
struct delayed_work sense_poll;
struct workqueue_struct *sense_wq;
u32 resched;
};
struct mlx4_msix_ctl {
@ -738,85 +785,6 @@ struct mlx4_steer {
struct list_head steer_entries[MLX4_NUM_STEERS];
};
struct mlx4_net_trans_rule_hw_ctrl {
__be32 ctrl;
u8 rsvd1;
u8 funcid;
u8 vep;
u8 port;
__be32 qpn;
__be32 rsvd2;
};
struct mlx4_net_trans_rule_hw_ib {
u8 size;
u8 rsvd1;
__be16 id;
u32 rsvd2;
__be32 r_u_qpn;
__be32 qpn_mask;
u8 dst_gid[16];
u8 dst_gid_msk[16];
} __packed;
struct mlx4_net_trans_rule_hw_eth {
u8 size;
u8 rsvd;
__be16 id;
u8 rsvd1[6];
u8 dst_mac[6];
u16 rsvd2;
u8 dst_mac_msk[6];
u16 rsvd3;
u8 src_mac[6];
u16 rsvd4;
u8 src_mac_msk[6];
u8 rsvd5;
u8 ether_type_enable;
__be16 ether_type;
__be16 vlan_id_msk;
__be16 vlan_id;
} __packed;
struct mlx4_net_trans_rule_hw_tcp_udp {
u8 size;
u8 rsvd;
__be16 id;
__be16 rsvd1[3];
__be16 dst_port;
__be16 rsvd2;
__be16 dst_port_msk;
__be16 rsvd3;
__be16 src_port;
__be16 rsvd4;
__be16 src_port_msk;
} __packed;
struct mlx4_net_trans_rule_hw_ipv4 {
u8 size;
u8 rsvd;
__be16 id;
__be32 rsvd1;
__be32 dst_ip;
__be32 dst_ip_msk;
__be32 src_ip;
__be32 src_ip_msk;
} __packed;
struct _rule_hw {
union {
struct {
u8 size;
u8 rsvd;
__be16 id;
};
struct mlx4_net_trans_rule_hw_eth eth;
struct mlx4_net_trans_rule_hw_ib ib;
struct mlx4_net_trans_rule_hw_ipv4 ipv4;
struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp;
};
};
enum {
MLX4_PCI_DEV_IS_VF = 1 << 0,
MLX4_PCI_DEV_FORCE_SENSE_PORT = 1 << 1,
@ -826,6 +794,23 @@ struct mlx4_roce_gid_entry {
u8 raw[16];
};
struct counter_index {
struct list_head list;
u32 index;
};
struct mlx4_counters {
struct mlx4_bitmap bitmap;
struct list_head global_port_list[MLX4_MAX_PORTS];
struct list_head vf_list[MLX4_MAX_NUM_VF][MLX4_MAX_PORTS];
struct mutex mutex;
};
enum {
MLX4_NO_RR = 0,
MLX4_USE_RR = 1,
};
struct mlx4_priv {
struct mlx4_dev dev;
@ -851,7 +836,7 @@ struct mlx4_priv {
struct mlx4_srq_table srq_table;
struct mlx4_qp_table qp_table;
struct mlx4_mcg_table mcg_table;
struct mlx4_bitmap counters_bitmap;
struct mlx4_counters counters_table;
struct mlx4_catas_err catas_err;
@ -887,10 +872,11 @@ static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
extern struct workqueue_struct *mlx4_wq;
u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj);
void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr);
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt,
int align, u32 skip_mask);
void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt);
void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt,
int use_rr);
u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap);
int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
u32 reserved_bot, u32 reserved_top);
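
mlx4_bitmap_free() and mlx4_bitmap_free_range() now take a use_rr argument
selecting round-robin reuse of freed entries. The driver's exact policy is
not reproduced here; as a toy model, round-robin allocation over a bitmap
scans forward from the last allocation so freed entries are not immediately
recycled:

#include <stdint.h>

struct rr_bitmap {
    uint64_t bits;     /* 64-entry toy bitmap */
    unsigned last;     /* index of the last allocation */
};

static int rr_alloc(struct rr_bitmap *bm)
{
    unsigned i;

    for (i = 0; i < 64; i++) {
        unsigned idx = (bm->last + 1 + i) % 64;

        if (!(bm->bits & (1ULL << idx))) {
            bm->bits |= 1ULL << idx;
            bm->last = idx;
            return (int)idx;
        }
    }
    return -1;    /* full */
}

static void rr_free(struct rr_bitmap *bm, unsigned idx)
{
    bm->bits &= ~(1ULL << idx);
}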
@ -926,10 +912,10 @@ int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn);
void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn);
int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn);
void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn);
int __mlx4_mr_reserve(struct mlx4_dev *dev);
void __mlx4_mr_release(struct mlx4_dev *dev, u32 index);
int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index);
void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index);
int __mlx4_mpt_reserve(struct mlx4_dev *dev);
void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index);
int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index);
void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index);
u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order);
void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order);
@ -969,14 +955,20 @@ int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd);
int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
int *base, u8 bf_qp);
int *base, u8 flags);
void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
int start_index, int npages, u64 *page_list);
int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
int __mlx4_counter_alloc(struct mlx4_dev *dev, int slave, int port, u32 *idx);
void __mlx4_counter_free(struct mlx4_dev *dev, int slave, int port, u32 idx);
int __mlx4_slave_counters_free(struct mlx4_dev *dev, int slave);
int __mlx4_clear_if_stat(struct mlx4_dev *dev,
u8 counter_index);
u8 mlx4_get_default_counter_index(struct mlx4_dev *dev, int slave, int port);
int __mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn);
void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn);
@ -997,6 +989,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
struct mlx4_dev_cap *dev_cap,
struct mlx4_init_hca_param *init_hca);
void mlx4_master_comm_channel(struct work_struct *work);
void mlx4_master_arm_comm_channel(struct work_struct *work);
void mlx4_gen_slave_eqe(struct work_struct *work);
void mlx4_master_handle_slave_flr(struct work_struct *work);
@ -1164,8 +1157,7 @@ void mlx4_do_sense_ports(struct mlx4_dev *dev,
enum mlx4_port_type *defaults);
void mlx4_start_sense(struct mlx4_dev *dev);
void mlx4_stop_sense(struct mlx4_dev *dev);
void mlx4_sense_cleanup(struct mlx4_dev *dev);
int mlx4_sense_init(struct mlx4_dev *dev);
void mlx4_sense_init(struct mlx4_dev *dev);
int mlx4_check_port_params(struct mlx4_dev *dev,
enum mlx4_port_type *port_type);
int mlx4_change_port_types(struct mlx4_dev *dev,
@ -1238,6 +1230,10 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
int block_mcast_loopback, enum mlx4_protocol prot,
enum mlx4_steer_type steer);
int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp,
u8 gid[16], u8 port,
int block_mcast_loopback,
enum mlx4_protocol prot, u64 *reg_id);
int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@ -1250,8 +1246,6 @@ int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_info *cmd);
int mlx4_common_set_vlan_fltr(struct mlx4_dev *dev, int function,
int port, void *buf);
int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, u32 in_mod,
struct mlx4_cmd_mailbox *outbox);
int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@ -1277,6 +1271,11 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd);
int mlx4_MOD_STAT_CFG_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd);
int mlx4_get_mgm_entry_size(struct mlx4_dev *dev);
int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);
@ -1315,5 +1314,6 @@ void mlx4_init_quotas(struct mlx4_dev *dev);
int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave);
int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave);
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work);
#endif /* MLX4_H */

File diff suppressed because it is too large

View File

@ -0,0 +1,184 @@
/*
* Copyright (c) 2014 Mellanox Technologies Ltd. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _MLX4_STATS_
#define _MLX4_STATS_
#ifdef MLX4_EN_PERF_STAT
#define NUM_PERF_STATS NUM_PERF_COUNTERS
#else
#define NUM_PERF_STATS 0
#endif
#define NUM_PRIORITIES 9
#define NUM_PRIORITY_STATS 2
struct mlx4_en_pkt_stats {
unsigned long rx_packets;
unsigned long rx_bytes;
unsigned long rx_multicast_packets;
unsigned long rx_broadcast_packets;
unsigned long rx_errors;
unsigned long rx_dropped;
unsigned long rx_length_errors;
unsigned long rx_over_errors;
unsigned long rx_crc_errors;
unsigned long rx_jabbers;
unsigned long rx_in_range_length_error;
unsigned long rx_out_range_length_error;
unsigned long rx_lt_64_bytes_packets;
unsigned long rx_127_bytes_packets;
unsigned long rx_255_bytes_packets;
unsigned long rx_511_bytes_packets;
unsigned long rx_1023_bytes_packets;
unsigned long rx_1518_bytes_packets;
unsigned long rx_1522_bytes_packets;
unsigned long rx_1548_bytes_packets;
unsigned long rx_gt_1548_bytes_packets;
unsigned long tx_packets;
unsigned long tx_bytes;
unsigned long tx_multicast_packets;
unsigned long tx_broadcast_packets;
unsigned long tx_errors;
unsigned long tx_dropped;
unsigned long tx_lt_64_bytes_packets;
unsigned long tx_127_bytes_packets;
unsigned long tx_255_bytes_packets;
unsigned long tx_511_bytes_packets;
unsigned long tx_1023_bytes_packets;
unsigned long tx_1518_bytes_packets;
unsigned long tx_1522_bytes_packets;
unsigned long tx_1548_bytes_packets;
unsigned long tx_gt_1548_bytes_packets;
unsigned long rx_prio[NUM_PRIORITIES][NUM_PRIORITY_STATS];
unsigned long tx_prio[NUM_PRIORITIES][NUM_PRIORITY_STATS];
#define NUM_PKT_STATS 72
};
struct mlx4_en_vf_stats {
unsigned long rx_packets;
unsigned long rx_bytes;
unsigned long rx_multicast_packets;
unsigned long rx_broadcast_packets;
unsigned long rx_errors;
unsigned long rx_dropped;
unsigned long tx_packets;
unsigned long tx_bytes;
unsigned long tx_multicast_packets;
unsigned long tx_broadcast_packets;
unsigned long tx_errors;
#define NUM_VF_STATS 11
};
struct mlx4_en_vport_stats {
unsigned long rx_unicast_packets;
unsigned long rx_unicast_bytes;
unsigned long rx_multicast_packets;
unsigned long rx_multicast_bytes;
unsigned long rx_broadcast_packets;
unsigned long rx_broadcast_bytes;
unsigned long rx_dropped;
unsigned long rx_errors;
unsigned long tx_unicast_packets;
unsigned long tx_unicast_bytes;
unsigned long tx_multicast_packets;
unsigned long tx_multicast_bytes;
unsigned long tx_broadcast_packets;
unsigned long tx_broadcast_bytes;
unsigned long tx_errors;
#define NUM_VPORT_STATS 15
};
struct mlx4_en_port_stats {
unsigned long tso_packets;
unsigned long queue_stopped;
unsigned long wake_queue;
unsigned long tx_timeout;
unsigned long rx_alloc_failed;
unsigned long rx_chksum_good;
unsigned long rx_chksum_none;
unsigned long tx_chksum_offload;
#define NUM_PORT_STATS 8
};
struct mlx4_en_perf_stats {
u32 tx_poll;
u64 tx_pktsz_avg;
u32 inflight_avg;
u16 tx_coal_avg;
u16 rx_coal_avg;
u32 napi_quota;
#define NUM_PERF_COUNTERS 6
};
struct mlx4_en_flow_stats {
u64 rx_pause;
u64 rx_pause_duration;
u64 rx_pause_transition;
u64 tx_pause;
u64 tx_pause_duration;
u64 tx_pause_transition;
};
#define MLX4_NUM_PRIORITIES 8
#define NUM_FLOW_PRIORITY_STATS 6
#define NUM_FLOW_STATS (NUM_FLOW_PRIORITY_STATS*MLX4_NUM_PRIORITIES)
struct mlx4_en_stat_out_flow_control_mbox {
/* Total number of PAUSE frames received from the far-end port */
__be64 rx_pause;
/* Total number of microseconds that the far-end port requested to pause
* transmission of packets
*/
__be64 rx_pause_duration;
/* Number of receiver transitions from XOFF state to XON state */
__be64 rx_pause_transition;
/* Total number of PAUSE frames sent to the far-end port */
__be64 tx_pause;
/* Total time in microseconds that transmission of packets has been
* paused
*/
__be64 tx_pause_duration;
/* Number of transmitter transitions from XOFF state to XON state */
__be64 tx_pause_transition;
/* Reserved */
__be64 reserved[2];
};
int mlx4_get_vport_ethtool_stats(struct mlx4_dev *dev, int port,
struct mlx4_en_vport_stats *vport_stats,
int reset);
#define NUM_ALL_STATS (NUM_PKT_STATS + NUM_FLOW_STATS + NUM_VPORT_STATS + \
NUM_VF_STATS + NUM_PORT_STATS + NUM_PERF_STATS)
#endif

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@ -32,30 +32,20 @@
* SOFTWARE.
*/
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/mlx4/cmd.h>
#include <linux/math64.h>
#include "mlx4.h"
#include "icm.h"
#define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28)
#define MLX4_MPT_FLAG_FREE (0x3UL << 28)
#define MLX4_MPT_FLAG_MIO (1 << 17)
#define MLX4_MPT_FLAG_BIND_ENABLE (1 << 15)
#define MLX4_MPT_FLAG_PHYSICAL (1 << 9)
#define MLX4_MPT_FLAG_REGION (1 << 8)
#define MLX4_MPT_PD_FLAG_FAST_REG (1 << 27)
#define MLX4_MPT_PD_FLAG_RAE (1 << 28)
#define MLX4_MPT_PD_FLAG_EN_INV (3 << 24)
#define MLX4_MPT_STATUS_SW 0xF0
#define MLX4_MPT_STATUS_HW 0x00
static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
{
int o;
@ -129,9 +119,8 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
for (i = 0; i <= buddy->max_order; ++i) {
s = BITS_TO_LONGS(1 << (buddy->max_order - i));
buddy->bits[i] = kcalloc(s, sizeof (long), GFP_KERNEL | __GFP_NOWARN);
if (!buddy->bits[i]) {
goto err_out_free;
}
if (!buddy->bits[i])
goto err_out_free;
}
set_bit(0, buddy->bits[buddy->max_order]);
@ -141,8 +130,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
err_out_free:
for (i = 0; i <= buddy->max_order; ++i)
if ( buddy->bits[i] )
kfree(buddy->bits[i]);
kfree(buddy->bits[i]);
err_out:
kfree(buddy->bits);
@ -156,7 +144,7 @@ static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
int i;
for (i = 0; i <= buddy->max_order; ++i)
kfree(buddy->bits[i]);
kfree(buddy->bits[i]);
kfree(buddy->bits);
kfree(buddy->num_free);
@ -315,7 +303,7 @@ static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
mr->size = size;
mr->pd = pd;
mr->access = access;
mr->enabled = MLX4_MR_DISABLED;
mr->enabled = MLX4_MPT_DISABLED;
mr->key = hw_index_to_key(mridx);
return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
@ -329,14 +317,14 @@ static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
int __mlx4_mr_reserve(struct mlx4_dev *dev)
int __mlx4_mpt_reserve(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
}
static int mlx4_mr_reserve(struct mlx4_dev *dev)
static int mlx4_mpt_reserve(struct mlx4_dev *dev)
{
u64 out_param;
@ -347,17 +335,17 @@ static int mlx4_mr_reserve(struct mlx4_dev *dev)
return -1;
return get_param_l(&out_param);
}
return __mlx4_mr_reserve(dev);
return __mlx4_mpt_reserve(dev);
}
void __mlx4_mr_release(struct mlx4_dev *dev, u32 index)
void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
struct mlx4_priv *priv = mlx4_priv(dev);
mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index, MLX4_NO_RR);
}
static void mlx4_mr_release(struct mlx4_dev *dev, u32 index)
static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
u64 in_param = 0;
@ -370,17 +358,17 @@ static void mlx4_mr_release(struct mlx4_dev *dev, u32 index)
index);
return;
}
__mlx4_mr_release(dev, index);
__mlx4_mpt_release(dev, index);
}
int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
{
struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
return mlx4_table_get(dev, &mr_table->dmpt_table, index);
}
static int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
{
u64 param = 0;
@ -391,17 +379,17 @@ static int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED);
}
return __mlx4_mr_alloc_icm(dev, index);
return __mlx4_mpt_alloc_icm(dev, index);
}
void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
mlx4_table_put(dev, &mr_table->dmpt_table, index);
}
static void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
u64 in_param = 0;
@ -414,7 +402,7 @@ static void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
index);
return;
}
return __mlx4_mr_free_icm(dev, index);
return __mlx4_mpt_free_icm(dev, index);
}
int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
@ -423,41 +411,52 @@ int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
u32 index;
int err;
index = mlx4_mr_reserve(dev);
index = mlx4_mpt_reserve(dev);
if (index == -1)
return -ENOMEM;
err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
access, npages, page_shift, mr);
if (err)
mlx4_mr_release(dev, index);
mlx4_mpt_release(dev, index);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc);
static void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
int err;
if (mr->enabled == MLX4_MR_EN_HW) {
if (mr->enabled == MLX4_MPT_EN_HW) {
err = mlx4_HW2SW_MPT(dev, NULL,
key_to_hw_index(mr->key) &
(dev->caps.num_mpts - 1));
if (err)
mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err);
if (err) {
mlx4_warn(dev, "HW2SW_MPT failed (%d).", err);
mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n");
return err;
}
mr->enabled = MLX4_MR_EN_SW;
mr->enabled = MLX4_MPT_EN_SW;
}
mlx4_mtt_cleanup(dev, &mr->mtt);
return 0;
}
void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
mlx4_mr_free_reserved(dev, mr);
int ret;
ret = mlx4_mr_free_reserved(dev, mr);
if (ret)
return ret;
if (mr->enabled)
mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
mlx4_mr_release(dev, key_to_hw_index(mr->key));
mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
mlx4_mpt_release(dev, key_to_hw_index(mr->key));
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_free);
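mlx4_mr_free now returns an error instead of void: HW2SW_MPT is refused while memory windows remain bound to the region, so callers must check the result. A minimal sketch (dev and mr come from the caller's context):

	int ret = mlx4_mr_free(dev, &mr);
	if (ret)
		return ret;	/* the MR most likely still has MWs bound to it */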
@ -467,7 +466,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
struct mlx4_mpt_entry *mpt_entry;
int err;
err = mlx4_mr_alloc_icm(dev, key_to_hw_index(mr->key));
err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key));
if (err)
return err;
@ -514,7 +513,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
goto err_cmd;
}
mr->enabled = MLX4_MR_EN_HW;
mr->enabled = MLX4_MPT_EN_HW;
mlx4_free_cmd_mailbox(dev, mailbox);
@ -524,7 +523,7 @@ err_cmd:
mlx4_free_cmd_mailbox(dev, mailbox);
err_table:
mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_enable);
@ -651,6 +650,95 @@ int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
}
EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);
int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type,
struct mlx4_mw *mw)
{
u32 index;
index = mlx4_mpt_reserve(dev);
if (index == -1)
return -ENOMEM;
mw->key = hw_index_to_key(index);
mw->pd = pd;
mw->type = type;
mw->enabled = MLX4_MPT_DISABLED;
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mw_alloc);
int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_mpt_entry *mpt_entry;
int err;
err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key));
if (err)
return err;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
err = PTR_ERR(mailbox);
goto err_table;
}
mpt_entry = mailbox->buf;
memset(mpt_entry, 0, sizeof(*mpt_entry));
/* Note that the MLX4_MPT_FLAG_REGION bit in mpt_entry->flags is turned
* off, thus creating a memory window and not a memory region.
*/
mpt_entry->key = cpu_to_be32(key_to_hw_index(mw->key));
mpt_entry->pd_flags = cpu_to_be32(mw->pd);
if (mw->type == MLX4_MW_TYPE_2) {
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
mpt_entry->qpn = cpu_to_be32(MLX4_MPT_QP_FLAG_BOUND_QP);
mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_EN_INV);
}
err = mlx4_SW2HW_MPT(dev, mailbox,
key_to_hw_index(mw->key) &
(dev->caps.num_mpts - 1));
if (err) {
mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
goto err_cmd;
}
mw->enabled = MLX4_MPT_EN_HW;
mlx4_free_cmd_mailbox(dev, mailbox);
return 0;
err_cmd:
mlx4_free_cmd_mailbox(dev, mailbox);
err_table:
mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
return err;
}
EXPORT_SYMBOL_GPL(mlx4_mw_enable);
void mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw)
{
int err;
if (mw->enabled == MLX4_MPT_EN_HW) {
err = mlx4_HW2SW_MPT(dev, NULL,
key_to_hw_index(mw->key) &
(dev->caps.num_mpts - 1));
if (err)
mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err);
mw->enabled = MLX4_MPT_EN_SW;
}
if (mw->enabled)
mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
mlx4_mpt_release(dev, key_to_hw_index(mw->key));
}
EXPORT_SYMBOL_GPL(mlx4_mw_free);
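Together, mlx4_mw_alloc, mlx4_mw_enable and mlx4_mw_free form the whole memory-window lifecycle. A minimal caller sketch, assuming a valid dev and a PD number pdn from the surrounding driver context:

	struct mlx4_mw mw;
	int err;

	err = mlx4_mw_alloc(dev, pdn, MLX4_MW_TYPE_2, &mw);	/* reserve an MPT, derive the key */
	if (err)
		return err;

	err = mlx4_mw_enable(dev, &mw);		/* SW2HW_MPT: hand the MPT to hardware */
	if (err) {
		mlx4_mw_free(dev, &mw);		/* also releases the reserved MPT */
		return err;
	}

	/* ... bind the window to an MR via MLX4_WQE_BIND_* work requests ... */

	mlx4_mw_free(dev, &mw);			/* HW2SW_MPT, free ICM, release the MPT */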
int mlx4_init_mr_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@ -671,8 +759,8 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
return err;
err = mlx4_buddy_init(&mr_table->mtt_buddy,
ilog2((u32)dev->caps.num_mtts /
(1 << log_mtts_per_seg)));
ilog2(div_u64(dev->caps.num_mtts,
(1 << log_mtts_per_seg))));
if (err)
goto err_buddy;
@ -791,7 +879,7 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int err = -ENOMEM;
int err = -ENOMEM, ret;
if (max_maps > dev->caps.max_fmr_maps)
return -EINVAL;
@ -825,7 +913,9 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
return 0;
err_free:
mlx4_mr_free(dev, &fmr->mr);
ret = mlx4_mr_free(dev, &fmr->mr);
if (ret)
mlx4_err(dev, "Error deregistering MR. The system may have become unstable.");
return err;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
@ -851,40 +941,48 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
u32 *lkey, u32 *rkey)
{
struct mlx4_cmd_mailbox *mailbox;
int err;
u32 key;
if (!fmr->maps)
return;
key = key_to_hw_index(fmr->mr.key) & (dev->caps.num_mpts - 1);
*(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW;
/* Make sure MPT status is visible before changing MPT fields */
wmb();
fmr->mr.key = hw_index_to_key(key);
fmr->mpt->key = cpu_to_be32(key);
fmr->mpt->lkey = cpu_to_be32(key);
fmr->mpt->length = 0;
fmr->mpt->start = 0;
/* Make sure MPT data is visible before changing MPT status */
wmb();
*(u8 *)fmr->mpt = MLX4_MPT_STATUS_HW;
/* Make sure MPT status is visible */
wmb();
fmr->maps = 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
err = PTR_ERR(mailbox);
mlx4_warn(dev, "mlx4_alloc_cmd_mailbox failed (%d)\n", err);
return;
}
err = mlx4_HW2SW_MPT(dev, NULL,
key_to_hw_index(fmr->mr.key) &
(dev->caps.num_mpts - 1));
mlx4_free_cmd_mailbox(dev, mailbox);
if (err) {
mlx4_warn(dev, "mlx4_HW2SW_MPT failed (%d)\n", err);
return;
}
fmr->mr.enabled = MLX4_MR_EN_SW;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
int ret;
if (fmr->maps)
return -EBUSY;
mlx4_mr_free(dev, &fmr->mr);
fmr->mr.enabled = MLX4_MR_DISABLED;
ret = mlx4_mr_free(dev, &fmr->mr);
if (ret)
return ret;
fmr->mr.enabled = MLX4_MPT_DISABLED;
return 0;
}

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
* Copyright (c) 2005, 2014 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -32,6 +32,7 @@
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/io-mapping.h>
#include <asm/page.h>
@ -57,7 +58,7 @@ EXPORT_SYMBOL_GPL(mlx4_pd_alloc);
void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn)
{
mlx4_bitmap_free(&mlx4_priv(dev)->pd_bitmap, pdn);
mlx4_bitmap_free(&mlx4_priv(dev)->pd_bitmap, pdn, MLX4_USE_RR);
}
EXPORT_SYMBOL_GPL(mlx4_pd_free);
@ -94,7 +95,7 @@ EXPORT_SYMBOL_GPL(mlx4_xrcd_alloc);
void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
{
mlx4_bitmap_free(&mlx4_priv(dev)->xrcd_bitmap, xrcdn);
mlx4_bitmap_free(&mlx4_priv(dev)->xrcd_bitmap, xrcdn, MLX4_USE_RR);
}
void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
@ -162,7 +163,7 @@ EXPORT_SYMBOL_GPL(mlx4_uar_alloc);
void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar)
{
mlx4_bitmap_free(&mlx4_priv(dev)->uar_table.bitmap, uar->index);
mlx4_bitmap_free(&mlx4_priv(dev)->uar_table.bitmap, uar->index, MLX4_USE_RR);
}
EXPORT_SYMBOL_GPL(mlx4_uar_free);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
* Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -32,10 +32,14 @@
#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/mlx4/cmd.h>
#include <linux/moduleparam.h>
#include "mlx4.h"
#include "mlx4_stats.h"
int mlx4_set_4k_mtu = -1;
module_param_named(set_4k_mtu, mlx4_set_4k_mtu, int, 0444);
@ -48,12 +52,6 @@ MODULE_PARM_DESC(set_4k_mtu,
#define MLX4_VLAN_VALID (1u << 31)
#define MLX4_VLAN_MASK 0xfff
#define MLX4_STATS_TRAFFIC_COUNTERS_MASK 0xfULL
#define MLX4_STATS_TRAFFIC_DROPS_MASK 0xc0ULL
#define MLX4_STATS_ERROR_COUNTERS_MASK 0x1ffc30ULL
#define MLX4_STATS_PORT_COUNTERS_MASK 0x1fe00000ULL
#define MLX4_STATS_IF_RX_ERRORS_COUNTERS_MASK 0x8010ULL
void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
{
int i;
@ -85,7 +83,7 @@ static int validate_index(struct mlx4_dev *dev,
{
int err = 0;
if (index < 0 || index >= table->max || !table->entries[index]) {
if (index < 0 || index >= table->max || !table->refs[index]) {
mlx4_warn(dev, "No valid Mac entry for the given index\n");
err = -EINVAL;
}
@ -140,14 +138,15 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
mutex_lock(&table->mutex);
for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
if (free < 0 && !table->entries[i]) {
if (free < 0 && !table->refs[i]) {
free = i;
continue;
}
if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
if ((mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) &&
table->refs[i]) {
/* MAC already registered; must not have duplicates */
err = i;
err = i;
++table->refs[i];
goto out;
}
@ -184,13 +183,24 @@ EXPORT_SYMBOL_GPL(__mlx4_register_mac);
int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
u64 out_param = 0;
int err;
int err = -EINVAL;
if (mlx4_is_mfunc(dev)) {
err = mlx4_cmd_imm(dev, mac, &out_param,
((u32) port) << 8 | (u32) RES_MAC,
RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
err = mlx4_cmd_imm(dev, mac, &out_param,
((u32) port) << 8 | (u32) RES_MAC,
RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
if (err && err == -EINVAL && mlx4_is_slave(dev)) {
/* retry using old REG_MAC format */
set_param_l(&out_param, port);
err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (!err)
dev->flags |= MLX4_FLAG_OLD_REG_MAC;
}
if (err)
return err;
@ -245,10 +255,18 @@ void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
u64 out_param = 0;
if (mlx4_is_mfunc(dev)) {
(void) mlx4_cmd_imm(dev, mac, &out_param,
((u32) port) << 8 | (u32) RES_MAC,
RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
(void) mlx4_cmd_imm(dev, mac, &out_param,
((u32) port) << 8 | (u32) RES_MAC,
RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
} else {
/* use old unregister mac format */
set_param_l(&out_param, port);
(void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
return;
}
__mlx4_unregister_mac(dev, port, mac);
@ -535,17 +553,21 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
__be32 new_cap_mask;
port = in_mod & 0xff;
in_modifier = in_mod >> 8;
in_modifier = (in_mod >> 8) & 0xff;
is_eth = op_mod;
port_info = &priv->port[port];
if (op_mod > 1)
return -EINVAL;
/* Slaves cannot perform SET_PORT operations except changing MTU */
if (is_eth) {
if (slave != dev->caps.function &&
in_modifier != MLX4_SET_PORT_GENERAL &&
in_modifier != MLX4_SET_PORT_GID_TABLE) {
mlx4_warn(dev, "denying SET_PORT for slave:%d\n",
slave);
mlx4_warn(dev, "denying SET_PORT for slave:%d,"
"port %d, config_select 0x%x\n",
slave, port, in_modifier);
return -EINVAL;
}
switch (in_modifier) {
@ -570,7 +592,8 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
/* MTU is configured as the max MTU among all
 * the functions on the port. */
mtu = be16_to_cpu(gen_context->mtu);
mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port]);
mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] +
ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
prev_mtu = slave_st->mtu[port];
slave_st->mtu[port] = mtu;
if (mtu > master->max_mtu[port])
@ -650,7 +673,7 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
break;
}
return mlx4_cmd(dev, inbox->dma, in_mod, op_mod,
return mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_NATIVE);
}
@ -727,19 +750,10 @@ enum {
MLX4_CHANGE_PORT_MTU_CAP = 22,
};
#define CX3_PPF_DEV_ID 0x1003
static int vl_cap_start(struct mlx4_dev *dev)
{
/* for non CX3 devices, start with 4 VLs to avoid errors in syslog */
if (dev->pdev->device != CX3_PPF_DEV_ID)
return 4;
return 8;
}
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz)
{
struct mlx4_cmd_mailbox *mailbox;
int err, vl_cap, pkey_tbl_flag = 0;
int err = -EINVAL, vl_cap, pkey_tbl_flag = 0;
u32 in_mod;
if (dev->caps.port_type[port] == MLX4_PORT_TYPE_NONE)
@ -765,7 +779,8 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz)
}
/* IB VL CAP enum isn't used by the firmware, just numerical values */
for (vl_cap = vl_cap_start(dev); vl_cap >= 1; vl_cap >>= 1) {
for (vl_cap = dev->caps.vl_cap[port];
vl_cap >= 1; vl_cap >>= 1) {
((__be32 *) mailbox->buf)[0] = cpu_to_be32(
(1 << MLX4_CHANGE_PORT_MTU_CAP) |
(1 << MLX4_CHANGE_PORT_VL_CAP) |
@ -822,10 +837,9 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
u32 in_mod;
u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
MCAST_DIRECT : MCAST_DEFAULT;
/*
if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
return 0;
*/
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
@ -834,10 +848,7 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
memset(context, 0, sizeof *context);
context->base_qpn = cpu_to_be32(base_qpn);
/*
* This assignment breaks vlan support - I don't know why. Probably an A0 issue - Shahar Klein
* context->n_mac = dev->caps.log_num_macs;
*/
context->n_mac = dev->caps.log_num_macs;
context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
base_qpn);
context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
@ -960,40 +971,44 @@ int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
return err;
}
int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave,
u32 in_mod, struct mlx4_cmd_mailbox *outbox)
{
return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0,
MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_NATIVE);
}
int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
if (slave != dev->caps.function)
return 0;
return mlx4_common_dump_eth_stats(dev, slave,
vhcr->in_modifier, outbox);
return 0;
}
void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap)
void mlx4_set_stats_bitmap(struct mlx4_dev *dev, unsigned long *stats_bitmap)
{
if (!mlx4_is_mfunc(dev)) {
*stats_bitmap = 0;
return;
int last_i = 0;
bitmap_zero(stats_bitmap, NUM_ALL_STATS);
if (mlx4_is_slave(dev)) {
last_i = dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN ?
NUM_PKT_STATS + NUM_FLOW_STATS : NUM_PKT_STATS;
} else {
bitmap_set(stats_bitmap, last_i, NUM_PKT_STATS);
last_i = NUM_PKT_STATS;
if (dev->caps.flags2 &
MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) {
bitmap_set(stats_bitmap, last_i, NUM_FLOW_STATS);
last_i += NUM_FLOW_STATS;
}
}
*stats_bitmap = (MLX4_STATS_TRAFFIC_COUNTERS_MASK |
MLX4_STATS_TRAFFIC_DROPS_MASK |
MLX4_STATS_PORT_COUNTERS_MASK |
MLX4_STATS_IF_RX_ERRORS_COUNTERS_MASK);
if (mlx4_is_slave(dev))
bitmap_set(stats_bitmap, last_i, NUM_VF_STATS);
last_i += NUM_VF_STATS;
if (mlx4_is_master(dev))
*stats_bitmap |= MLX4_STATS_ERROR_COUNTERS_MASK;
bitmap_set(stats_bitmap, last_i, NUM_VPORT_STATS);
last_i += NUM_VPORT_STATS;
bitmap_set(stats_bitmap, last_i, NUM_PORT_STATS);
}
EXPORT_SYMBOL(mlx4_set_stats_bitmap);
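The rewritten helper gives each stat category a contiguous index range and only sets the ranges that apply (a slave gets just the VF region; PF-only regions stay clear on slaves). For a PF with flow counters supported, the regions land at:

	bit 0        72          120       131          146       154
	| PKT (72) | FLOW (48) | VF (11) | VPORT (15) | PORT (8) |

When MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN is absent, the FLOW region collapses and the later categories shift down by NUM_FLOW_STATS.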

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
* Copyright (c) 2005, 2014 Mellanox Technologies. All rights reserved.
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@ -76,7 +76,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
u64 size;
u64 start;
int type;
u32 num;
u64 num;
int log_num;
};
@ -112,7 +112,8 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
profile[MLX4_RES_DMPT].num = request->num_mpt;
profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS;
profile[MLX4_RES_MTT].num = request->num_mtt * (1 << log_mtts_per_seg);
profile[MLX4_RES_MTT].num = ((u64)request->num_mtt_segs) *
(1 << log_mtts_per_seg);
profile[MLX4_RES_MCG].num = request->num_mcg;
for (i = 0; i < MLX4_RES_NUM; ++i) {

View File

@ -1,7 +1,7 @@
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
* Copyright (c) 2004 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@ -33,6 +33,10 @@
* SOFTWARE.
*/
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
@ -210,13 +214,18 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
EXPORT_SYMBOL_GPL(mlx4_qp_modify);
int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
int *base, u8 bf_qp)
int *base, u8 flags)
{
int bf_qp = !!(flags & (u8) MLX4_RESERVE_BF_QP);
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
/* Only IPoIB uses a large cnt. In this case, just allocate
* as usual, ignoring bf skipping, since IPoIB does not run over RoCE
*/
if (cnt > MLX4_MAX_BF_QP_RANGE && bf_qp)
return -ENOMEM;
bf_qp = 0;
*base = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align,
bf_qp ? MLX4_BF_QP_SKIP_MASK : 0);
@ -227,14 +236,14 @@ int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
}
int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
int *base, u8 bf_qp)
int *base, u8 flags)
{
u64 in_param = 0;
u64 out_param;
int err;
if (mlx4_is_mfunc(dev)) {
set_param_l(&in_param, (((!!bf_qp) << 31) | (u32)cnt));
set_param_l(&in_param, (((u32) flags) << 24) | (u32) cnt);
set_param_h(&in_param, align);
err = mlx4_cmd_imm(dev, in_param, &out_param,
RES_QP, RES_OP_RESERVE,
@ -246,7 +255,7 @@ int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
*base = get_param_l(&out_param);
return 0;
}
return __mlx4_qp_reserve_range(dev, cnt, align, base, bf_qp);
return __mlx4_qp_reserve_range(dev, cnt, align, base, flags);
}
EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);
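Callers now pass a small flags byte rather than a boolean. A hedged sketch of reserving a single blueflame-eligible QP (dev comes from the caller; blueflame QPs must keep bits 6 and 7 of the qp number clear, which the allocator enforces via MLX4_BF_QP_SKIP_MASK):

	int base_qpn, err;

	err = mlx4_qp_reserve_range(dev, 1, 1, &base_qpn, MLX4_RESERVE_BF_QP);
	if (err)
		return err;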
@ -257,7 +266,7 @@ void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
if (mlx4_is_qp_reserved(dev, (u32) base_qpn))
return;
mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, MLX4_USE_RR);
}
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -77,7 +77,7 @@ int mlx4_reset(struct mlx4_dev *dev)
goto out;
}
pcie_cap = pci_find_capability(dev->pdev, PCI_CAP_ID_EXP);
pcie_cap = pci_pcie_cap(dev->pdev);
for (i = 0; i < 64; ++i) {
if (i == 22 || i == 23)
@ -119,8 +119,8 @@ int mlx4_reset(struct mlx4_dev *dev)
writel(MLX4_RESET_VALUE, reset + MLX4_RESET_OFFSET);
iounmap(reset);
/* Docs say to wait one second before accessing device */
msleep(2000);
/* wait half a second before accessing device */
msleep(500);
end = jiffies + MLX4_RESET_TIMEOUT_JIFFIES;
do {
@ -138,11 +138,10 @@ int mlx4_reset(struct mlx4_dev *dev)
goto out;
}
/* Now restore the PCI headers */
if (pcie_cap) {
devctl = hca_header[(pcie_cap + PCI_EXP_DEVCTL) / 4];
if (pci_write_config_word(dev->pdev, pcie_cap + PCI_EXP_DEVCTL,
if (pcie_capability_write_word(dev->pdev, PCI_EXP_DEVCTL,
devctl)) {
err = -ENODEV;
mlx4_err(dev, "Couldn't restore HCA PCI Express "
@ -150,7 +149,7 @@ int mlx4_reset(struct mlx4_dev *dev)
goto out;
}
linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4];
if (pci_write_config_word(dev->pdev, pcie_cap + PCI_EXP_LNKCTL,
if (pcie_capability_write_word(dev->pdev, PCI_EXP_LNKCTL,
linkctl)) {
err = -ENODEV;
mlx4_err(dev, "Couldn't restore HCA PCI Express "

File diff suppressed because it is too large

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
* Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -53,7 +53,7 @@ int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
}
if (out_param > 2) {
mlx4_err(dev, "Sense returned illegal value: 0x%llx\n", (long long)out_param);
mlx4_err(dev, "Sense returned illegal value: 0x%llx\n", (unsigned long long)out_param);
return -EINVAL;
}
@ -108,9 +108,8 @@ static void mlx4_sense_port(struct work_struct *work)
sense_again:
mutex_unlock(&priv->port_mutex);
if (sense->resched)
queue_delayed_work(sense->sense_wq , &sense->sense_poll,
round_jiffies(MLX4_SENSE_RANGE));
queue_delayed_work(mlx4_wq , &sense->sense_poll,
round_jiffies_relative(MLX4_SENSE_RANGE));
}
void mlx4_start_sense(struct mlx4_dev *dev)
@ -121,40 +120,24 @@ void mlx4_start_sense(struct mlx4_dev *dev)
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP))
return;
sense->resched = 1;
queue_delayed_work(sense->sense_wq , &sense->sense_poll,
round_jiffies(MLX4_SENSE_RANGE));
queue_delayed_work(mlx4_wq , &sense->sense_poll,
round_jiffies_relative(MLX4_SENSE_RANGE));
}
void mlx4_stop_sense(struct mlx4_dev *dev)
{
mlx4_priv(dev)->sense.resched = 0;
cancel_delayed_work_sync(&mlx4_priv(dev)->sense.sense_poll);
}
void mlx4_sense_cleanup(struct mlx4_dev *dev)
{
mlx4_stop_sense(dev);
cancel_delayed_work(&mlx4_priv(dev)->sense.sense_poll);
destroy_workqueue(mlx4_priv(dev)->sense.sense_wq);
}
int mlx4_sense_init(struct mlx4_dev *dev)
void mlx4_sense_init(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_sense *sense = &priv->sense;
int port;
sense->dev = dev;
sense->sense_wq = create_singlethread_workqueue("mlx4_sense");
if (!sense->sense_wq)
return -ENOMEM;
for (port = 1; port <= dev->caps.num_ports; port++)
sense->do_sense_port[port] = 1;
INIT_DEFERRABLE_WORK(&sense->sense_poll, mlx4_sense_port);
return 0;
}

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -32,6 +32,8 @@
*/
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/srq.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include "mlx4.h"
@ -113,7 +115,7 @@ err_put:
mlx4_table_put(dev, &srq_table->table, *srqn);
err_out:
mlx4_bitmap_free(&srq_table->bitmap, *srqn);
mlx4_bitmap_free(&srq_table->bitmap, *srqn, MLX4_NO_RR);
return err;
}
@ -141,7 +143,7 @@ void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
mlx4_table_put(dev, &srq_table->cmpt_table, srqn);
mlx4_table_put(dev, &srq_table->table, srqn);
mlx4_bitmap_free(&srq_table->bitmap, srqn);
mlx4_bitmap_free(&srq_table->bitmap, srqn, MLX4_NO_RR);
}
static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
@ -295,3 +297,18 @@ void mlx4_cleanup_srq_table(struct mlx4_dev *dev)
return;
mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap);
}
struct mlx4_srq *mlx4_srq_lookup(struct mlx4_dev *dev, u32 srqn)
{
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
struct mlx4_srq *srq;
unsigned long flags;
spin_lock_irqsave(&srq_table->lock, flags);
srq = radix_tree_lookup(&srq_table->tree,
srqn & (dev->caps.num_srqs - 1));
spin_unlock_irqrestore(&srq_table->lock, flags);
return srq;
}
EXPORT_SYMBOL_GPL(mlx4_srq_lookup);
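A typical consumer resolves an SRQ number found in a completion back to the driver object, e.g. (srqn is assumed to come from a parsed CQE):

	struct mlx4_srq *srq;

	srq = mlx4_srq_lookup(dev, srqn);
	if (srq == NULL)
		return -EINVAL;	/* SRQ was destroyed in the meantime */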

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2010 Mellanox Technologies. All rights reserved.
* Copyright (c) 2010, 2014 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -39,8 +39,6 @@
#if defined(CONFIG_X86) && defined(CONFIG_APM_MODULE)
/* Each CPU is put into a group. In most cases, the group number is
* equal to the CPU number of one of the CPUs in the group. The
* exception is group NR_CPUS which is the default group. This is

View File

@ -0,0 +1,189 @@
/* $OpenBSD: if_trunk.c,v 1.30 2007/01/31 06:20:19 reyk Exp $ */
/*
* Copyright (c) 2005, 2006 Reyk Floeter <reyk@openbsd.org>
* Copyright (c) 2007 Andrew Thompson <thompsa@FreeBSD.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_inet.h"
#include "opt_inet6.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/module.h>
#include <sys/priv.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/hash.h>
#include <sys/lock.h>
#include <sys/rmlock.h>
#include <sys/taskqueue.h>
#include <sys/eventhandler.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_clone.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/bpf.h>
#if defined(INET) || defined(INET6)
#include <netinet/in.h>
#endif
#ifdef INET
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#endif
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/in6_var.h>
#include <netinet6/in6_ifattach.h>
#endif
#include <net/if_vlan_var.h>
#include "utils.h"
/* XXX this code should be factored out */
/* XXX copied from if_lagg.c */
static const void *
mlx4_en_gethdr(struct mbuf *m, u_int off, u_int len, void *buf)
{
if (m->m_pkthdr.len < (off + len)) {
return (NULL);
} else if (m->m_len < (off + len)) {
m_copydata(m, off, len, buf);
return (buf);
}
return (mtod(m, char *) + off);
}
uint32_t
mlx4_en_hashmbuf(uint32_t flags, struct mbuf *m, uint32_t key)
{
uint16_t etype;
uint32_t p = key;
int off;
struct ether_header *eh;
const struct ether_vlan_header *vlan;
#ifdef INET
const struct ip *ip;
const uint32_t *ports;
int iphlen;
#endif
#ifdef INET6
const struct ip6_hdr *ip6;
uint32_t flow;
#endif
union {
#ifdef INET
struct ip ip;
#endif
#ifdef INET6
struct ip6_hdr ip6;
#endif
struct ether_vlan_header vlan;
uint32_t port;
} buf;
off = sizeof(*eh);
if (m->m_len < off)
goto out;
eh = mtod(m, struct ether_header *);
etype = ntohs(eh->ether_type);
if (flags & MLX4_F_HASHL2) {
p = hash32_buf(&eh->ether_shost, ETHER_ADDR_LEN, p);
p = hash32_buf(&eh->ether_dhost, ETHER_ADDR_LEN, p);
}
/* Special handling for encapsulating VLAN frames */
if ((m->m_flags & M_VLANTAG) && (flags & MLX4_F_HASHL2)) {
p = hash32_buf(&m->m_pkthdr.ether_vtag,
sizeof(m->m_pkthdr.ether_vtag), p);
} else if (etype == ETHERTYPE_VLAN) {
vlan = mlx4_en_gethdr(m, off, sizeof(*vlan), &buf);
if (vlan == NULL)
goto out;
if (flags & MLX4_F_HASHL2)
p = hash32_buf(&vlan->evl_tag, sizeof(vlan->evl_tag), p);
etype = ntohs(vlan->evl_proto);
off += sizeof(*vlan) - sizeof(*eh);
}
switch (etype) {
#ifdef INET
case ETHERTYPE_IP:
ip = mlx4_en_gethdr(m, off, sizeof(*ip), &buf);
if (ip == NULL)
goto out;
if (flags & MLX4_F_HASHL3) {
p = hash32_buf(&ip->ip_src, sizeof(struct in_addr), p);
p = hash32_buf(&ip->ip_dst, sizeof(struct in_addr), p);
}
if (!(flags & MLX4_F_HASHL4))
break;
switch (ip->ip_p) {
case IPPROTO_TCP:
case IPPROTO_UDP:
case IPPROTO_SCTP:
iphlen = ip->ip_hl << 2;
if (iphlen < sizeof(*ip))
break;
off += iphlen;
ports = mlx4_en_gethdr(m, off, sizeof(*ports), &buf);
if (ports == NULL)
break;
p = hash32_buf(ports, sizeof(*ports), p);
break;
}
break;
#endif
#ifdef INET6
case ETHERTYPE_IPV6:
if (!(flags & MLX4_F_HASHL3))
break;
ip6 = mlx4_en_gethdr(m, off, sizeof(*ip6), &buf);
if (ip6 == NULL)
goto out;
p = hash32_buf(&ip6->ip6_src, sizeof(struct in6_addr), p);
p = hash32_buf(&ip6->ip6_dst, sizeof(struct in6_addr), p);
flow = ip6->ip6_flow & IPV6_FLOWLABEL_MASK;
p = hash32_buf(&flow, sizeof(flow), p); /* IPv6 flow label */
break;
#endif
}
out:
return (p);
}
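A plausible consumer of this helper, mirroring how if_lagg spreads traffic across ports: hash the headers once, then reduce onto the TX ring count. The priv fields and the per-device key below are illustrative, not part of this file:

static uint32_t
mlx4_en_select_ring(struct mlx4_en_priv *priv, struct mbuf *m)
{
	/* hash L2+L3+L4 headers with a per-device key, then pick a ring */
	uint32_t h = mlx4_en_hashmbuf(MLX4_F_HASHMASK, m, priv->hash_key);

	return (h % priv->tx_ring_num);
}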

View File

@ -0,0 +1,44 @@
/*
* Copyright (c) 2014 Mellanox Technologies Ltd. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _MLX4_UTILS_H_
#define _MLX4_UTILS_H_
/* Lagg flags */
#define MLX4_F_HASHL2 0x00000001 /* hash layer 2 */
#define MLX4_F_HASHL3 0x00000002 /* hash layer 3 */
#define MLX4_F_HASHL4 0x00000004 /* hash layer 4 */
#define MLX4_F_HASHMASK 0x00000007
uint32_t mlx4_en_hashmbuf(uint32_t flags, struct mbuf *m, uint32_t key);
#endif /* _MLX4_UTILS_H_ */

View File

@ -34,6 +34,7 @@
#define MLX4_CMD_H
#include <linux/dma-mapping.h>
#include <linux/types.h>
enum {
/* initialization and general commands */
@ -111,6 +112,7 @@ enum {
MLX4_CMD_INIT2INIT_QP = 0x2d,
MLX4_CMD_SUSPEND_QP = 0x32,
MLX4_CMD_UNSUSPEND_QP = 0x33,
MLX4_CMD_UPDATE_QP = 0x61,
/* special QP and management commands */
MLX4_CMD_CONF_SPECIAL_QP = 0x23,
MLX4_CMD_MAD_IFC = 0x24,
@ -152,10 +154,6 @@ enum {
MLX4_CMD_QUERY_IF_STAT = 0X54,
MLX4_CMD_SET_IF_STAT = 0X55,
/* set port opcode modifiers */
MLX4_SET_PORT_PRIO2TC = 0x8,
MLX4_SET_PORT_SCHEDULER = 0x9,
/* register/delete flow steering network rules */
MLX4_QP_FLOW_STEERING_ATTACH = 0x65,
MLX4_QP_FLOW_STEERING_DETACH = 0x66,
@ -175,12 +173,14 @@ enum {
enum {
/* set port opcode modifiers */
MLX4_SET_PORT_GENERAL = 0x0,
MLX4_SET_PORT_RQP_CALC = 0x1,
MLX4_SET_PORT_MAC_TABLE = 0x2,
MLX4_SET_PORT_VLAN_TABLE = 0x3,
MLX4_SET_PORT_PRIO_MAP = 0x4,
MLX4_SET_PORT_GID_TABLE = 0x5,
MLX4_SET_PORT_GENERAL = 0x0,
MLX4_SET_PORT_RQP_CALC = 0x1,
MLX4_SET_PORT_MAC_TABLE = 0x2,
MLX4_SET_PORT_VLAN_TABLE = 0x3,
MLX4_SET_PORT_PRIO_MAP = 0x4,
MLX4_SET_PORT_GID_TABLE = 0x5,
MLX4_SET_PORT_PRIO2TC = 0x8,
MLX4_SET_PORT_SCHEDULER = 0x9
};
enum {
@ -237,7 +237,21 @@ u32 mlx4_comm_get_version(void);
int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u8 *mac);
int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos);
int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting);
int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state);
int mlx4_get_vf_link_state(struct mlx4_dev *dev, int port, int vf);
/*
* mlx4_get_slave_default_vlan -
 * return true if VST (default vlan);
 * if VST, fills vlan & qos (if not NULL)
*/
bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave, u16 *vlan, u8 *qos);
enum {
IFLA_VF_LINK_STATE_AUTO, /* link state of the uplink */
IFLA_VF_LINK_STATE_ENABLE, /* link always up */
IFLA_VF_LINK_STATE_DISABLE, /* link always down */
__IFLA_VF_LINK_STATE_MAX,
};
#define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8)

View File

@ -42,17 +42,31 @@ struct mlx4_cqe {
__be32 vlan_my_qpn;
__be32 immed_rss_invalid;
__be32 g_mlpath_rqpn;
__be16 sl_vid;
__be16 rlid;
__be16 status;
u8 ipv6_ext_mask;
u8 badfcs_enc;
union {
struct {
union {
struct {
__be16 sl_vid;
__be16 rlid;
};
__be32 timestamp_16_47;
};
__be16 status;
u8 ipv6_ext_mask;
u8 badfcs_enc;
};
struct {
__be16 reserved1;
u8 smac[6];
};
};
__be32 byte_cnt;
__be16 wqe_index;
__be16 checksum;
u8 reserved[3];
u8 reserved2[1];
__be16 timestamp_0_15;
u8 owner_sr_opcode;
};
} __packed;
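The union above splits a 48-bit completion timestamp across two fields. A sketch of reassembly, with the bit ranges inferred from the field names:

static inline u64 mlx4_cqe_read_timestamp(const struct mlx4_cqe *cqe)
{
	/* timestamp_16_47 holds bits 16..47, timestamp_0_15 holds bits 0..15 */
	return ((u64)be32_to_cpu(cqe->timestamp_16_47) << 16) |
	       be16_to_cpu(cqe->timestamp_0_15);
}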
struct mlx4_err_cqe {
__be32 my_qpn;
@ -83,6 +97,7 @@ struct mlx4_ts_cqe {
enum {
MLX4_CQE_VLAN_PRESENT_MASK = 1 << 29,
MLX4_CQE_QPN_MASK = 0xffffff,
MLX4_CQE_VID_MASK = 0xfff,
};
enum {

View File

@ -36,11 +36,12 @@
#include <linux/pci.h>
#include <linux/completion.h>
#include <linux/radix-tree.h>
//#include <linux/cpu_rmap.h> /* XXX SK Probably not needed in freeBSD XXX */
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>
#include <linux/clocksource.h> /* XXX SK ported to freeBSD */
#include <linux/clocksource.h>
#define MAX_MSIX_P_PORT 17
#define MAX_MSIX 64
@ -61,9 +62,7 @@
#define MLX4_RATELIMIT_1G_UNITS 4 /* 1 Gbps */
#define MLX4_RATELIMIT_DEFAULT 0x00ff
#define MLX4_LEAST_ATTACHED_VECTOR 0xffffffff
#define CORE_CLOCK_MASK 0xffffffffffffULL
enum {
MLX4_FLAG_MSI_X = 1 << 0,
@ -71,6 +70,8 @@ enum {
MLX4_FLAG_MASTER = 1 << 2,
MLX4_FLAG_SLAVE = 1 << 3,
MLX4_FLAG_SRIOV = 1 << 4,
MLX4_FLAG_DEV_NUM_STR = 1 << 5,
MLX4_FLAG_OLD_REG_MAC = 1 << 6,
};
enum {
@ -91,7 +92,8 @@ enum {
#define MLX4_RESERVED_QKEY_MASK (0xFFFF0000)
enum {
MLX4_BOARD_ID_LEN = 64
MLX4_BOARD_ID_LEN = 64,
MLX4_VSD_LEN = 208
};
enum {
@ -159,13 +161,13 @@ enum {
MLX4_DEV_CAP_FLAG_UDP_RSS = 1LL << 40,
MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41,
MLX4_DEV_CAP_FLAG_VEP_MC_STEER = 1LL << 42,
MLX4_DEV_CAP_FLAG_CROSS_CHANNEL = 1LL << 44,
MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48,
MLX4_DEV_CAP_FLAG_COUNTERS_EXT = 1LL << 49,
MLX4_DEV_CAP_FLAG_SET_PORT_ETH_SCHED = 1LL << 53,
MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55,
MLX4_DEV_CAP_FLAG_FAST_DROP = 1LL << 57,
MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV = 1LL << 59,
MLX4_DEV_CAP_FLAG_ESWITCH_SUPPORT = 1LL << 60,
MLX4_DEV_CAP_FLAG_64B_EQE = 1LL << 61,
MLX4_DEV_CAP_FLAG_64B_CQE = 1LL << 62
};
@ -174,7 +176,34 @@ enum {
MLX4_DEV_CAP_FLAG2_RSS = 1LL << 0,
MLX4_DEV_CAP_FLAG2_RSS_TOP = 1LL << 1,
MLX4_DEV_CAP_FLAG2_RSS_XOR = 1LL << 2,
MLX4_DEV_CAP_FLAG2_FS_EN = 1LL << 3
MLX4_DEV_CAP_FLAG2_FS_EN = 1LL << 3,
MLX4_DEV_CAP_FLAG2_FSM = 1LL << 4,
MLX4_DEV_CAP_FLAG2_VLAN_CONTROL = 1LL << 5,
MLX4_DEV_CAP_FLAG2_UPDATE_QP = 1LL << 6,
MLX4_DEV_CAP_FLAG2_LB_SRC_CHK = 1LL << 7,
MLX4_DEV_CAP_FLAG2_DMFS_IPOIB = 1LL << 8,
MLX4_DEV_CAP_FLAG2_ETS_CFG = 1LL << 9,
MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP = 1LL << 10,
MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN = 1LL << 11,
MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT = 1LL << 12,
MLX4_DEV_CAP_FLAG2_TS = 1LL << 13,
MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW = 1LL << 14
};
/* bit enums for an 8-bit flags field indicating special use
* QPs which require special handling in qp_reserve_range.
* Currently, this only includes QPs used by the ETH interface,
* where we expect to use blueflame. These QPs must not have
* bits 6 and 7 set in their qp number.
*
* This enum may use only bits 0..7.
*/
enum {
MLX4_RESERVE_BF_QP = 1 << 7,
};
enum {
MLX4_DEV_CAP_CQ_FLAG_IO = 1 << 0
};
enum {
@ -190,22 +219,11 @@ enum {
MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0
};
/* bit enums for an 8-bit flags field indicating special use
* QPs which require special handling in qp_reserve_range.
* Currently, this only includes QPs used by the ETH interface,
* where we expect to use blueflame. These QPs must not have
* bits 6 and 7 set in their qp number.
*
* This enum may use only bits 0..7.
*/
enum {
MLX4_RESERVE_BF_QP = 1 << 7,
};
#define MLX4_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90)
enum {
MLX4_BMME_FLAG_WIN_TYPE_2B = 1 << 1,
MLX4_BMME_FLAG_LOCAL_INV = 1 << 6,
MLX4_BMME_FLAG_REMOTE_INV = 1 << 7,
MLX4_BMME_FLAG_TYPE_2_WIN = 1 << 9,
@ -238,6 +256,7 @@ enum mlx4_event {
MLX4_EVENT_TYPE_FATAL_WARNING = 0x1b,
MLX4_EVENT_TYPE_FLR_EVENT = 0x1c,
MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT = 0x1d,
MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT = 0x3e,
MLX4_EVENT_TYPE_NONE = 0xff,
};
@ -246,6 +265,11 @@ enum {
MLX4_PORT_CHANGE_SUBTYPE_ACTIVE = 4
};
enum {
MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE = 1,
MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE = 2,
};
enum {
MLX4_FATAL_WARNING_SUBTYPE_WARMING = 0,
};
@ -274,7 +298,8 @@ enum {
MLX4_PERM_LOCAL_WRITE = 1 << 11,
MLX4_PERM_REMOTE_READ = 1 << 12,
MLX4_PERM_REMOTE_WRITE = 1 << 13,
MLX4_PERM_ATOMIC = 1 << 14
MLX4_PERM_ATOMIC = 1 << 14,
MLX4_PERM_BIND_MW = 1 << 15,
};
enum {
@ -335,7 +360,8 @@ enum mlx4_port_type {
MLX4_PORT_TYPE_NONE = 0,
MLX4_PORT_TYPE_IB = 1,
MLX4_PORT_TYPE_ETH = 2,
MLX4_PORT_TYPE_AUTO = 3
MLX4_PORT_TYPE_AUTO = 3,
MLX4_PORT_TYPE_NA = 4
};
enum mlx4_special_vlan_idx {
@ -435,7 +461,7 @@ struct mlx4_caps {
int comp_pool;
int num_mpts;
int max_fmr_maps;
int num_mtts;
u64 num_mtts;
int fmr_reserved_mtts;
int reserved_mtts;
int reserved_mrws;
@ -476,6 +502,7 @@ struct mlx4_caps {
u8 port_ib_mtu[MLX4_MAX_PORTS + 1];
u16 sqp_demux;
u32 sync_qp;
u32 cq_flags;
u32 eqe_size;
u32 cqe_size;
u8 eqe_factor;
@ -485,6 +512,7 @@ struct mlx4_caps {
u16 hca_core_clock;
u32 max_basic_counters;
u32 max_extended_counters;
u8 def_counter_index[MLX4_MAX_PORTS + 1];
};
struct mlx4_buf_list {
@ -548,6 +576,18 @@ struct mlx4_mr {
int enabled;
};
enum mlx4_mw_type {
MLX4_MW_TYPE_1 = 1,
MLX4_MW_TYPE_2 = 2,
};
struct mlx4_mw {
u32 key;
u32 pd;
enum mlx4_mw_type type;
int enabled;
};
struct mlx4_fmr {
struct mlx4_mr mr;
struct mlx4_mpt_entry *mpt;
@ -641,7 +681,8 @@ struct mlx4_eth_av {
u8 hop_limit;
__be32 sl_tclass_flowlabel;
u8 dgid[16];
u32 reserved4[2];
u8 s_mac[6];
u8 reserved4[2];
__be16 vlan;
u8 mac[6];
};
@ -731,6 +772,8 @@ struct mlx4_dev {
struct radix_tree_root qp_table_tree;
u8 rev_id;
char board_id[MLX4_BOARD_ID_LEN];
u16 vsd_vendor_id;
char vsd[MLX4_VSD_LEN];
int num_vfs;
int numa_node;
int oper_log_mgm_entry_size;
@ -738,6 +781,12 @@ struct mlx4_dev {
u64 regid_allmulti_array[MLX4_MAX_PORTS + 1];
};
struct mlx4_clock_params {
u64 offset;
u8 bar;
u8 size;
};
struct mlx4_eqe {
u8 reserved1;
u8 type;
@ -807,6 +856,11 @@ struct mlx4_eqe {
} __packed tbl_change_info;
} params;
} __packed port_mgmt_change;
struct {
u8 reserved[3];
u8 port;
u32 reserved1[5];
} __packed bad_cable;
} event;
u8 slave_id;
u8 reserved3[2];
@ -842,6 +896,8 @@ struct mlx4_init_port_param {
#define MLX4_INVALID_SLAVE_ID 0xFF
#define MLX4_SINK_COUNTER_INDEX 0xff
void handle_port_mgmt_change_event(struct work_struct *work);
static inline int mlx4_master_func_num(struct mlx4_dev *dev)
@ -915,8 +971,12 @@ u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt);
int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
int npages, int page_shift, struct mlx4_mr *mr);
void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr);
int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr);
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr);
int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type,
struct mlx4_mw *mw);
void mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw);
int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw);
int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
int start_index, int npages, u64 *page_list);
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
@ -936,7 +996,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
int *base, u8 bf_qp);
int *base, u8 flags);
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp);
@ -990,12 +1050,14 @@ static inline int map_hw_to_sw_id(u16 header_id)
}
return -EINVAL;
}
enum mlx4_net_trans_promisc_mode {
MLX4_FS_REGULAR = 0,
MLX4_FS_ALL_DEFAULT = 1,
MLX4_FS_MC_DEFAULT = 3,
MLX4_FS_UC_SNIFFER = 4,
MLX4_FS_MC_SNIFFER = 5,
MLX4_FS_REGULAR = 1,
MLX4_FS_ALL_DEFAULT,
MLX4_FS_MC_DEFAULT,
MLX4_FS_UC_SNIFFER,
MLX4_FS_MC_SNIFFER,
MLX4_FS_MODE_NUM, /* should be last */
};
struct mlx4_spec_eth {
@ -1024,7 +1086,7 @@ struct mlx4_spec_ipv4 {
};
struct mlx4_spec_ib {
__be32 r_u_qpn;
__be32 l3_qpn;
__be32 qpn_msk;
u8 dst_gid[16];
u8 dst_gid_msk[16];
@ -1057,6 +1119,87 @@ struct mlx4_net_trans_rule {
u32 qpn;
};
struct mlx4_net_trans_rule_hw_ctrl {
__be16 prio;
u8 type;
u8 flags;
u8 rsvd1;
u8 funcid;
u8 vep;
u8 port;
__be32 qpn;
__be32 rsvd2;
};
struct mlx4_net_trans_rule_hw_ib {
u8 size;
u8 rsvd1;
__be16 id;
u32 rsvd2;
__be32 l3_qpn;
__be32 qpn_mask;
u8 dst_gid[16];
u8 dst_gid_msk[16];
} __packed;
struct mlx4_net_trans_rule_hw_eth {
u8 size;
u8 rsvd;
__be16 id;
u8 rsvd1[6];
u8 dst_mac[6];
u16 rsvd2;
u8 dst_mac_msk[6];
u16 rsvd3;
u8 src_mac[6];
u16 rsvd4;
u8 src_mac_msk[6];
u8 rsvd5;
u8 ether_type_enable;
__be16 ether_type;
__be16 vlan_tag_msk;
__be16 vlan_tag;
} __packed;
struct mlx4_net_trans_rule_hw_tcp_udp {
u8 size;
u8 rsvd;
__be16 id;
__be16 rsvd1[3];
__be16 dst_port;
__be16 rsvd2;
__be16 dst_port_msk;
__be16 rsvd3;
__be16 src_port;
__be16 rsvd4;
__be16 src_port_msk;
} __packed;
struct mlx4_net_trans_rule_hw_ipv4 {
u8 size;
u8 rsvd;
__be16 id;
__be32 rsvd1;
__be32 dst_ip;
__be32 dst_ip_msk;
__be32 src_ip;
__be32 src_ip_msk;
} __packed;
struct _rule_hw {
union {
struct {
u8 size;
u8 rsvd;
__be16 id;
};
struct mlx4_net_trans_rule_hw_eth eth;
struct mlx4_net_trans_rule_hw_ib ib;
struct mlx4_net_trans_rule_hw_ipv4 ipv4;
struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp;
};
};
int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn,
enum mlx4_net_trans_promisc_mode mode);
int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
@ -1071,7 +1214,7 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port);
int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap);
void mlx4_set_stats_bitmap(struct mlx4_dev *dev, unsigned long *stats_bitmap);
int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
@ -1097,18 +1240,23 @@ int mlx4_query_diag_counters(struct mlx4_dev *mlx4_dev, int array_length,
u32 counter_out[]);
int mlx4_test_interrupts(struct mlx4_dev *dev);
int mlx4_assign_eq(struct mlx4_dev *dev, char *name, int *vector);
int mlx4_assign_eq(struct mlx4_dev *dev, char* name, int * vector);
void mlx4_release_eq(struct mlx4_dev *dev, int vec);
int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
void mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
int mlx4_counter_alloc(struct mlx4_dev *dev, u8 port, u32 *idx);
void mlx4_counter_free(struct mlx4_dev *dev, u8 port, u32 idx);
int mlx4_flow_attach(struct mlx4_dev *dev,
struct mlx4_net_trans_rule *rule, u64 *reg_id);
int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id);
int map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
enum mlx4_net_trans_promisc_mode flow_type);
int map_sw_to_hw_steering_id(struct mlx4_dev *dev,
enum mlx4_net_trans_rule_id id);
int hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id);
void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port,
int i, int val);
@ -1118,7 +1266,7 @@ int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey);
int mlx4_is_slave_active(struct mlx4_dev *dev, int slave);
int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port);
int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port);
int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr);
int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr, u16 lid, u8 sl);
int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port, u8 port_subtype_change);
enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave, u8 port);
int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave, u8 port, int event, enum slave_port_gen_event *gen_event);
@ -1130,6 +1278,8 @@ int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id, u
int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn, u32 max_range_qpn);
cycle_t mlx4_read_clock(struct mlx4_dev *dev);
int mlx4_read_clock(struct mlx4_dev *dev);
int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
struct mlx4_clock_params *params);
#endif /* MLX4_DEVICE_H */

View File

@ -51,32 +51,72 @@ enum mlx4_dev_event {
MLX4_DEV_EVENT_SLAVE_SHUTDOWN,
};
enum mlx4_query_reply {
MLX4_QUERY_NOT_MINE = -1,
MLX4_QUERY_MINE_NOPORT = 0
};
enum mlx4_mcast_prot {
MLX4_MCAST_PROT_IB = 0,
MLX4_MCAST_PROT_EN = 1,
};
struct mlx4_interface {
void * (*add) (struct mlx4_dev *dev);
void (*remove)(struct mlx4_dev *dev, void *context);
void (*event) (struct mlx4_dev *dev, void *context,
enum mlx4_dev_event event, unsigned long param);
void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port);
enum mlx4_query_reply (*query) (void *context, void *);
struct list_head list;
enum mlx4_protocol protocol;
};
enum {
MLX4_MAX_DEVICES = 32,
MLX4_DEVS_TBL_SIZE = MLX4_MAX_DEVICES + 1,
MLX4_DBDF2VAL_STR_SIZE = 512,
MLX4_STR_NAME_SIZE = 64,
MLX4_MAX_BDF_VALS = 2,
MLX4_ENDOF_TBL = -1LL
};
struct mlx4_dbdf2val {
u64 dbdf;
int val[MLX4_MAX_BDF_VALS];
};
struct mlx4_range {
int min;
int max;
};
/*
* mlx4_dbdf2val_lst struct holds all the data needed to convert
* dbdf-to-value-list string into dbdf-to-value table.
* dbdf-to-value-list string is a comma-separated list of dbdf-to-value strings.
* the format of dbdf-to-value string is: "[mmmm:]bb:dd.f-v1[;v2]"
* mmmm - Domain number (optional)
* bb - Bus number
* dd - device number
* f - Function number
* v1 - First value related to the domain-bus-device-function.
* v2 - Second value related to the domain-bus-device-function (optional).
* bb, dd - Two hexadecimal digits without preceding 0x.
* mmmm - Four hexadecimal digits without preceding 0x.
* f - One hexadecimal digit without preceding 0x.
* v1,v2 - Number with normal convention (e.g 100, 0xd3).
* dbdf-to-value-list string format:
* "[mmmm:]bb:dd.f-v1[;v2],[mmmm:]bb:dd.f-v1[;v2],..."
*
*/
struct mlx4_dbdf2val_lst {
char name[MLX4_STR_NAME_SIZE]; /* String name */
char str[MLX4_DBDF2VAL_STR_SIZE]; /* dbdf2val list str */
struct mlx4_dbdf2val tbl[MLX4_DEVS_TBL_SIZE];/* dbdf to value table */
int num_vals; /* # of vals per dbdf */
int def_val[MLX4_MAX_BDF_VALS]; /* Default values */
struct mlx4_range range; /* Valid values range */
};
int mlx4_fill_dbdf2val_tbl(struct mlx4_dbdf2val_lst *dbdf2val_lst);
int mlx4_get_val(struct mlx4_dbdf2val *tbl, struct pci_dev *pdev, int idx,
int *val);
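To make the format comment above concrete: a hypothetical consumer could declare a two-value list and resolve it per PCI device like this (every name and number below is illustrative):

	static struct mlx4_dbdf2val_lst my_lst = {
		.name	  = "my_param",
		.str	  = "0000:04:00.0-1;2,0000:05:00.0-3;0",
		.num_vals = 2,
		.def_val  = { 1, 0 },
		.range	  = { .min = 0, .max = 7 },
	};

	int val, err;

	err = mlx4_fill_dbdf2val_tbl(&my_lst);
	if (!err)
		err = mlx4_get_val(my_lst.tbl, pdev, 0, &val);	/* first value for pdev's dbdf */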
int mlx4_register_interface(struct mlx4_interface *intf);
void mlx4_unregister_interface(struct mlx4_interface *intf);
void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port);
void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto,
int port);
#ifndef ETH_ALEN
#define ETH_ALEN 6

View File

@ -104,9 +104,9 @@ enum {
MLX4_QP_BIT_RWE = 1 << 14,
MLX4_QP_BIT_RAE = 1 << 13,
MLX4_QP_BIT_RIC = 1 << 4,
MLX4_QP_BIT_COLL_SYNC_RQ = 1 << 2,
MLX4_QP_BIT_COLL_SYNC_SQ = 1 << 1,
MLX4_QP_BIT_COLL_MASTER = 1 << 0
MLX4_QP_BIT_COLL_SYNC_RQ = 1 << 2,
MLX4_QP_BIT_COLL_SYNC_SQ = 1 << 1,
MLX4_QP_BIT_COLL_MASTER = 1 << 0
};
enum {
@ -138,7 +138,7 @@ struct mlx4_rss_context {
struct mlx4_qp_path {
u8 fl;
u8 reserved1[1];
u8 vlan_control;
u8 disable_pkey_check;
u8 pkey_index;
u8 counter_index;
@ -153,11 +153,35 @@ struct mlx4_qp_path {
u8 sched_queue;
u8 vlan_index;
u8 feup;
u8 reserved3;
u8 fvl_rx;
u8 reserved4[2];
u8 dmac[6];
};
enum { /* fl */
MLX4_FL_CV = 1 << 6,
MLX4_FL_ETH_HIDE_CQE_VLAN = 1 << 2,
MLX4_FL_ETH_SRC_CHECK_MC_LB = 1 << 1,
MLX4_FL_ETH_SRC_CHECK_UC_LB = 1 << 0,
};
enum { /* vlan_control */
MLX4_VLAN_CTRL_ETH_SRC_CHECK_IF_COUNTER = 1 << 7,
MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED = 1 << 6,
MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED = 1 << 2,
MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED = 1 << 1, /* 802.1p priority tag */
MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED = 1 << 0
};
enum { /* feup */
MLX4_FEUP_FORCE_ETH_UP = 1 << 6, /* force Eth UP */
MLX4_FSM_FORCE_ETH_SRC_MAC = 1 << 5, /* force Source MAC */
MLX4_FVL_FORCE_ETH_VLAN = 1 << 3 /* force Eth vlan */
};
enum { /* fvl_rx */
MLX4_FVL_RX_FORCE_ETH_VLAN = 1 << 0 /* enforce Eth rx vlan */
};
struct mlx4_qp_context {
__be32 flags;
__be32 pd;
@ -197,6 +221,45 @@ struct mlx4_qp_context {
u32 reserved5[10];
};
struct mlx4_update_qp_context {
__be64 qp_mask;
__be64 primary_addr_path_mask;
__be64 secondary_addr_path_mask;
u64 reserved1;
struct mlx4_qp_context qp_context;
u64 reserved2[58];
};
enum {
MLX4_UPD_QP_MASK_PM_STATE = 32,
MLX4_UPD_QP_MASK_VSD = 33,
};
enum {
MLX4_UPD_QP_PATH_MASK_PKEY_INDEX = 0 + 32,
MLX4_UPD_QP_PATH_MASK_FSM = 1 + 32,
MLX4_UPD_QP_PATH_MASK_MAC_INDEX = 2 + 32,
MLX4_UPD_QP_PATH_MASK_FVL = 3 + 32,
MLX4_UPD_QP_PATH_MASK_CV = 4 + 32,
MLX4_UPD_QP_PATH_MASK_VLAN_INDEX = 5 + 32,
MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN = 6 + 32,
MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED = 7 + 32,
MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P = 8 + 32,
MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED = 9 + 32,
MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED = 10 + 32,
MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P = 11 + 32,
MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED = 12 + 32,
MLX4_UPD_QP_PATH_MASK_FEUP = 13 + 32,
MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE = 14 + 32,
MLX4_UPD_QP_PATH_MASK_IF_COUNTER_INDEX = 15 + 32,
MLX4_UPD_QP_PATH_MASK_FVL_RX = 16 + 32,
};
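The '+ 32' offsets place these bits in the upper dword of the 64-bit masks in mlx4_update_qp_context. A hedged sketch of requesting a forced source MAC through UPDATE_QP (mailbox preparation and command submission omitted):

	struct mlx4_update_qp_context *ctx = mailbox->buf;
	u64 path_mask = 0;

	path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_FSM;		/* force source MAC */
	path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
	ctx->primary_addr_path_mask = cpu_to_be64(path_mask);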
enum { /* param3 */
MLX4_STRIP_VLAN = 1 << 30
};
/* Which firmware version adds support for NEC (NoErrorCompletion) bit */
#define MLX4_FW_VER_WQE_CTRL_NEC mlx4_fw_ver(2, 2, 232)
@ -277,6 +340,11 @@ struct mlx4_wqe_lso_seg {
__be32 header[0];
};
enum mlx4_wqe_bind_seg_flags2 {
MLX4_WQE_BIND_TYPE_2 = (1<<31),
MLX4_WQE_BIND_ZERO_BASED = (1<<30),
};
struct mlx4_wqe_bind_seg {
__be32 flags1;
__be32 flags2;
@ -289,9 +357,9 @@ struct mlx4_wqe_bind_seg {
enum {
MLX4_WQE_FMR_PERM_LOCAL_READ = 1 << 27,
MLX4_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28,
MLX4_WQE_FMR_PERM_REMOTE_READ = 1 << 29,
MLX4_WQE_FMR_PERM_REMOTE_WRITE = 1 << 30,
MLX4_WQE_FMR_PERM_ATOMIC = 1 << 31
MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ = 1 << 29,
MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE = 1 << 30,
MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC = 1 << 31
};
struct mlx4_wqe_fmr_seg {
@ -316,12 +384,10 @@ struct mlx4_wqe_fmr_ext_seg {
};
struct mlx4_wqe_local_inval_seg {
__be32 flags;
u32 reserved1;
u64 reserved1;
__be32 mem_key;
u32 reserved2[2];
__be32 guest_id;
__be64 pa;
u32 reserved2;
u64 reserved3[2];
};
struct mlx4_wqe_raddr_seg {

View File

@ -39,4 +39,6 @@ struct mlx4_wqe_srq_next_seg {
u32 reserved2[3];
};
struct mlx4_srq *mlx4_srq_lookup(struct mlx4_dev *dev, u32 srqn);
#endif /* MLX4_SRQ_H */