From 8cc487045ef31c2c4dc709d08b47e720549cf24c Mon Sep 17 00:00:00 2001
From: Hans Petter Selasky <hselasky@FreeBSD.org>
Date: Mon, 13 Nov 2017 10:49:18 +0000
Subject: [PATCH] Update mlx4ib(4) to Linux 4.9.

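Bring the mlx4 InfiniBand driver in sync with the upstream Linux 4.9
sources.  The experimental verbs extensions (mlx4_exp.h, mlx4_ib_exp.c)
and the private user.h ABI header are removed, memory registration is
switched from the fast_reg_mr/fast_reg_page_list verbs to the
ib_alloc_mr()/ib_map_mr_sg() interface, the MAD and CM paths are
updated to the const-qualified 4.9 prototypes and idr_alloc_cyclic(),
and the driver gains the reworked per-port GID table, counters and
diagnostic counters, RoCE v2 GSI flags and alias GUID retry scheduling
used by the new code.
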
Sponsored by:	Mellanox Technologies
---
 sys/conf/files                            |    2 -
 sys/dev/mlx4/mlx4_ib/mlx4_exp.h           |   46 -
 sys/dev/mlx4/mlx4_ib/mlx4_ib.h            |  279 +-
 sys/dev/mlx4/mlx4_ib/mlx4_ib_ah.c         |   55 +-
 sys/dev/mlx4/mlx4_ib/mlx4_ib_alias_GUID.c |  481 +++-
 sys/dev/mlx4/mlx4_ib/mlx4_ib_cm.c         |   42 +-
 sys/dev/mlx4/mlx4_ib/mlx4_ib_cq.c         |  261 +-
 sys/dev/mlx4/mlx4_ib/mlx4_ib_exp.c        |  116 -
 sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c        |  672 +++--
 sys/dev/mlx4/mlx4_ib/mlx4_ib_main.c       | 2980 ++++++++++++---------
 sys/dev/mlx4/mlx4_ib/mlx4_ib_mcg.c        |   49 +-
 sys/dev/mlx4/mlx4_ib/mlx4_ib_mr.c         |  802 ++----
 sys/dev/mlx4/mlx4_ib/mlx4_ib_qp.c         | 1877 ++++++-------
 sys/dev/mlx4/mlx4_ib/mlx4_ib_srq.c        |   32 +-
 sys/dev/mlx4/mlx4_ib/mlx4_ib_sysfs.c      |  182 +-
 sys/dev/mlx4/mlx4_ib/user.h               |  107 -
 sys/modules/mlx4ib/Makefile               |    2 +-
 17 files changed, 3997 insertions(+), 3988 deletions(-)
 delete mode 100644 sys/dev/mlx4/mlx4_ib/mlx4_exp.h
 delete mode 100644 sys/dev/mlx4/mlx4_ib/mlx4_ib_exp.c
 delete mode 100644 sys/dev/mlx4/mlx4_ib/user.h
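
Reviewer note (below the cut line, not applied by git am): with the
fast_reg_mr/fast_reg_page_list verbs removed, in-kernel consumers go
through the core ib_alloc_mr()/ib_map_mr_sg() wrappers that end up in
mlx4_ib_alloc_mr()/mlx4_ib_map_mr_sg() below.  A minimal sketch, with a
hypothetical example_reg() helper that is not part of this patch:

#include <rdma/ib_verbs.h>

/*
 * Hypothetical consumer-side helper: allocate a registration MR and map
 * a scatterlist with the Linux 4.9 interface this update adopts.
 */
static int example_reg(struct ib_pd *pd, struct scatterlist *sg, int nents)
{
	struct ib_mr *mr;
	int n;

	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	n = ib_map_mr_sg(mr, sg, nents, NULL, PAGE_SIZE);
	if (n != nents) {
		ib_dereg_mr(mr);
		return n < 0 ? n : -EINVAL;
	}

	/* The MR is then posted via an IB_WR_REG_MR work request. */
	return 0;
}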

diff --git a/sys/conf/files b/sys/conf/files
index 1415e6106467..09dee9979315 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -4550,8 +4550,6 @@ dev/mlx4/mlx4_ib/mlx4_ib_mad.c			optional mlx4ib pci ofed \
 	compile-with "${OFED_C}"
 dev/mlx4/mlx4_ib/mlx4_ib_main.c			optional mlx4ib pci ofed \
 	compile-with "${OFED_C}"
-dev/mlx4/mlx4_ib/mlx4_ib_exp.c			optional mlx4ib pci ofed \
-	compile-with "${OFED_C}"
 dev/mlx4/mlx4_ib/mlx4_ib_mr.c			optional mlx4ib pci ofed \
 	compile-with "${OFED_C}"
 dev/mlx4/mlx4_ib/mlx4_ib_qp.c			optional mlx4ib pci ofed \
diff --git a/sys/dev/mlx4/mlx4_ib/mlx4_exp.h b/sys/dev/mlx4/mlx4_ib/mlx4_exp.h
deleted file mode 100644
index 58675a4add73..000000000000
--- a/sys/dev/mlx4/mlx4_ib/mlx4_exp.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 Cisco Systems.  All rights reserved.
- * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef MLX4_EXP_H
-#define MLX4_EXP_H
-
-#include <rdma/ib_verbs_exp.h>
-#include "mlx4_ib.h"
-
-struct ib_qp *mlx4_ib_exp_create_qp(struct ib_pd *pd,
-				    struct ib_exp_qp_init_attr *init_attr,
-				    struct ib_udata *udata);
-int mlx4_ib_exp_query_device(struct ib_device *ibdev,
-				    struct ib_exp_device_attr *props);
-
-#endif /* MLX4_EXP_H */
diff --git a/sys/dev/mlx4/mlx4_ib/mlx4_ib.h b/sys/dev/mlx4/mlx4_ib/mlx4_ib.h
index e395d6d5f499..87cc448a9046 100644
--- a/sys/dev/mlx4/mlx4_ib/mlx4_ib.h
+++ b/sys/dev/mlx4/mlx4_ib/mlx4_ib.h
@@ -41,6 +41,8 @@
 #include <linux/rbtree.h>
 #include <linux/notifier.h>
 
+#include <asm/atomic64.h>
+
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_umem.h>
 #include <rdma/ib_mad.h>
@@ -59,9 +61,6 @@
 #define mlx4_ib_warn(ibdev, format, arg...) \
 	dev_warn((ibdev)->dma_device, MLX4_IB_DRV_NAME ": " format, ## arg)
 
-#define mlx4_ib_info(ibdev, format, arg...) \
-	dev_info((ibdev)->dma_device, MLX4_IB_DRV_NAME ": " format, ## arg)
-
 enum {
 	MLX4_IB_SQ_MIN_WQE_SHIFT = 6,
 	MLX4_IB_MAX_HEADROOM	 = 2048
@@ -75,17 +74,25 @@ extern int mlx4_ib_sm_guid_assign;
 extern struct proc_dir_entry *mlx4_mrs_dir_entry;
 
 #define MLX4_IB_UC_STEER_QPN_ALIGN 1
-#define MLX4_IB_UC_MAX_NUM_QPS     (256 * 1024)
+#define MLX4_IB_UC_MAX_NUM_QPS     256
 
+enum hw_bar_type {
+	HW_BAR_BF,
+	HW_BAR_DB,
+	HW_BAR_CLOCK,
+	HW_BAR_COUNT
+};
 
-#define MLX4_IB_MMAP_CMD_MASK 0xFF
-#define MLX4_IB_MMAP_CMD_BITS 8
+struct mlx4_ib_vma_private_data {
+	struct vm_area_struct *vma;
+};
 
 struct mlx4_ib_ucontext {
 	struct ib_ucontext	ibucontext;
 	struct mlx4_uar		uar;
 	struct list_head	db_page_list;
 	struct mutex		db_page_mutex;
+	struct mlx4_ib_vma_private_data hw_bar_info[HW_BAR_COUNT];
 };
 
 struct mlx4_ib_pd {
@@ -111,11 +118,6 @@ struct mlx4_ib_cq_resize {
 	int			cqe;
 };
 
-struct mlx4_shared_mr_info {
-	int mr_id;
-	struct ib_umem	       *umem;
-};
-
 struct mlx4_ib_cq {
 	struct ib_cq		ibcq;
 	struct mlx4_cq		mcq;
@@ -127,15 +129,22 @@ struct mlx4_ib_cq {
 	struct ib_umem	       *umem;
 	struct ib_umem	       *resize_umem;
 	int			create_flags;
+	/* List of qps that it serves.*/
+	struct list_head		send_qp_list;
+	struct list_head		recv_qp_list;
 };
 
+#define MLX4_MR_PAGES_ALIGN 0x40
+
 struct mlx4_ib_mr {
 	struct ib_mr		ibmr;
+	__be64			*pages;
+	dma_addr_t		page_map;
+	u32			npages;
+	u32			max_pages;
 	struct mlx4_mr		mmr;
 	struct ib_umem	       *umem;
-	struct mlx4_shared_mr_info	*smr_info;
-	atomic_t      invalidated;
-	struct completion invalidation_comp;
+	size_t			page_map_size;
 };
 
 struct mlx4_ib_mw {
@@ -143,21 +152,22 @@ struct mlx4_ib_mw {
 	struct mlx4_mw		mmw;
 };
 
-struct mlx4_ib_fast_reg_page_list {
-	struct ib_fast_reg_page_list	ibfrpl;
-	__be64			       *mapped_page_list;
-	dma_addr_t			map;
-};
-
 struct mlx4_ib_fmr {
 	struct ib_fmr           ibfmr;
 	struct mlx4_fmr         mfmr;
 };
 
+#define MAX_REGS_PER_FLOW 2
+
+struct mlx4_flow_reg_id {
+	u64 id;
+	u64 mirror;
+};
+
 struct mlx4_ib_flow {
 	struct ib_flow ibflow;
 	/* translating DMFS verbs sniffer rule to FW API requires two reg IDs */
-	u64 reg_id[2];
+	struct mlx4_flow_reg_id reg_id[MAX_REGS_PER_FLOW];
 };
 
 struct mlx4_ib_wq {
@@ -172,13 +182,18 @@ struct mlx4_ib_wq {
 	unsigned		tail;
 };
 
+enum {
+	MLX4_IB_QP_CREATE_ROCE_V2_GSI = IB_QP_CREATE_RESERVED_START
+};
+
 enum mlx4_ib_qp_flags {
 	MLX4_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
 	MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
-	MLX4_IB_QP_CAP_CROSS_CHANNEL = IB_QP_CREATE_CROSS_CHANNEL,
-	MLX4_IB_QP_CAP_MANAGED_SEND = IB_QP_CREATE_MANAGED_SEND,
-	MLX4_IB_QP_CAP_MANAGED_RECV = IB_QP_CREATE_MANAGED_RECV,
 	MLX4_IB_QP_NETIF = IB_QP_CREATE_NETIF_QP,
+	MLX4_IB_QP_CREATE_USE_GFP_NOIO = IB_QP_CREATE_USE_GFP_NOIO,
+
+	/* Mellanox specific flags start from IB_QP_CREATE_RESERVED_START */
+	MLX4_IB_ROCE_V2_GSI_QP = MLX4_IB_QP_CREATE_ROCE_V2_GSI,
 	MLX4_IB_SRIOV_TUNNEL_QP = 1 << 30,
 	MLX4_IB_SRIOV_SQP = 1 << 31,
 };
@@ -190,13 +205,6 @@ struct mlx4_ib_gid_entry {
 	u8			port;
 };
 
-enum mlx4_ib_mmap_cmd {
-	MLX4_IB_MMAP_UAR_PAGE		= 0,
-	MLX4_IB_MMAP_BLUE_FLAME_PAGE	= 1,
-	MLX4_IB_MMAP_GET_CONTIGUOUS_PAGES	= 2,
-	MLX4_IB_MMAP_GET_HW_CLOCK	= 3,
-};
-
 enum mlx4_ib_qp_type {
 	/*
 	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
@@ -287,17 +295,6 @@ struct mlx4_roce_smac_vlan_info {
 	int update_vid;
 };
 
-struct mlx4_ib_qpg_data {
-	unsigned long *tss_bitmap;
-	unsigned long *rss_bitmap;
-	struct mlx4_ib_qp *qpg_parent;
-	int tss_qpn_base;
-	int rss_qpn_base;
-	u32 tss_child_count;
-	u32 rss_child_count;
-	u32 qpg_tss_mask_sz;
-};
-
 struct mlx4_ib_qp {
 	struct ib_qp		ibqp;
 	struct mlx4_qp		mqp;
@@ -327,22 +324,16 @@ struct mlx4_ib_qp {
 	u8			sq_no_prefetch;
 	u8			state;
 	int			mlx_type;
-	enum ib_qpg_type	qpg_type;
-	struct mlx4_ib_qpg_data *qpg_data;
 	struct list_head	gid_list;
 	struct list_head	steering_rules;
 	struct mlx4_ib_buf	*sqp_proxy_rcv;
 	struct mlx4_roce_smac_vlan_info pri;
 	struct mlx4_roce_smac_vlan_info alt;
-	struct list_head	rules_list;
 	u64			reg_id;
-	int                     max_inline_data;
-	struct mlx4_bf          bf;
-
-	/*
-	 * Experimental data
-	 */
-	int			max_inlr_data;
+	struct list_head	qps_list;
+	struct list_head	cq_recv_list;
+	struct list_head	cq_send_list;
+	struct counter_index	*counter_index;
 };
 
 struct mlx4_ib_srq {
@@ -376,23 +367,12 @@ struct mlx4_ib_ah {
 #define MLX4_NOT_SET_GUID		(0x00LL)
 #define MLX4_GUID_FOR_DELETE_VAL	(~(0x00LL))
 
-/****************************************/
-/* ioctl codes */
-/****************************************/
-#define MLX4_IOC_MAGIC 'm'
-#define MLX4_IOCHWCLOCKOFFSET _IOR(MLX4_IOC_MAGIC, 1, int)
-
 enum mlx4_guid_alias_rec_status {
 	MLX4_GUID_INFO_STATUS_IDLE,
 	MLX4_GUID_INFO_STATUS_SET,
-	MLX4_GUID_INFO_STATUS_PENDING,
 };
 
-enum mlx4_guid_alias_rec_ownership {
-	MLX4_GUID_DRIVER_ASSIGN,
-	MLX4_GUID_SYSADMIN_ASSIGN,
-	MLX4_GUID_NONE_ASSIGN, /*init state of each record*/
-};
+#define GUID_STATE_NEED_PORT_INIT 0x01
 
 enum mlx4_guid_alias_rec_method {
 	MLX4_GUID_INFO_RECORD_SET	= IB_MGMT_METHOD_SET,
@@ -403,8 +383,8 @@ struct mlx4_sriov_alias_guid_info_rec_det {
 	u8 all_recs[GUID_REC_SIZE * NUM_ALIAS_GUID_IN_REC];
 	ib_sa_comp_mask guid_indexes; /*indicates what from the 8 records are valid*/
 	enum mlx4_guid_alias_rec_status status; /*indicates the administraively status of the record.*/
-	u8 method; /*set or delete*/
-	enum mlx4_guid_alias_rec_ownership ownership; /*indicates who assign that alias_guid record*/
+	unsigned int guids_retry_schedule[NUM_ALIAS_GUID_IN_REC];
+	u64 time_to_run;
 };
 
 struct mlx4_sriov_alias_guid_port_rec_det {
@@ -412,6 +392,7 @@ struct mlx4_sriov_alias_guid_port_rec_det {
 	struct workqueue_struct *wq;
 	struct delayed_work alias_guid_work;
 	u8 port;
+	u32 state_flags;
 	struct mlx4_sriov_alias_guid *parent;
 	struct list_head cb_list;
 };
@@ -461,7 +442,6 @@ struct mlx4_ib_demux_pv_ctx {
 	struct ib_device *ib_dev;
 	struct ib_cq *cq;
 	struct ib_pd *pd;
-	struct ib_mr *mr;
 	struct work_struct work;
 	struct workqueue_struct *wq;
 	struct mlx4_ib_demux_pv_qp qp[2];
@@ -473,7 +453,7 @@ struct mlx4_ib_demux_ctx {
 	struct workqueue_struct *wq;
 	struct workqueue_struct *ud_wq;
 	spinlock_t ud_lock;
-	__be64 subnet_prefix;
+	atomic64_t subnet_prefix;
 	__be64 guid_cache[128];
 	struct mlx4_ib_dev *dev;
 	/* the following lock protects both mcg_table and mcg_mgid0_list */
@@ -503,13 +483,27 @@ struct mlx4_ib_sriov {
 	struct idr pv_id_table;
 };
 
+struct gid_cache_context {
+	int real_index;
+	int refcount;
+};
+
+struct gid_entry {
+	union ib_gid	gid;
+	enum ib_gid_type gid_type;
+	struct gid_cache_context *ctx;
+};
+
+struct mlx4_port_gid_table {
+	struct gid_entry gids[MLX4_MAX_PORT_GIDS];
+};
+
 struct mlx4_ib_iboe {
 	spinlock_t		lock;
 	struct net_device      *netdevs[MLX4_MAX_PORTS];
-	struct net_device      *masters[MLX4_MAX_PORTS];
-	struct notifier_block	nb;
-	struct notifier_block	nb_inet;
-	union ib_gid		gid_table[MLX4_MAX_PORTS][128];
+	atomic64_t		mac[MLX4_MAX_PORTS];
+	struct notifier_block 	nb;
+	struct mlx4_port_gid_table gids[MLX4_MAX_PORTS];
 };
 
 struct pkey_mgt {
@@ -548,15 +542,32 @@ struct mlx4_ib_iov_port {
 	struct mlx4_ib_iov_sysfs_attr mcg_dentry;
 };
 
-struct mlx4_ib_counter {
-	int counter_index;
-	int status;
+struct counter_index {
+	struct  list_head       list;
+	u32		index;
+	u8		allocated;
+};
+
+struct mlx4_ib_counters {
+	struct list_head        counters_list;
+	struct mutex            mutex; /* mutex for accessing counters list */
+	u32			default_counter;
+};
+
+#define MLX4_DIAG_COUNTERS_TYPES 2
+
+struct mlx4_ib_diag_counters {
+	const char **name;
+	u32 *offset;
+	u32 num_counters;
 };
 
 struct mlx4_ib_dev {
 	struct ib_device	ib_dev;
 	struct mlx4_dev	       *dev;
 	int			num_ports;
+	void __iomem	       *uar_map;
+
 	struct mlx4_uar		priv_uar;
 	u32			priv_pdn;
 	MLX4_DECLARE_DOORBELL_LOCK(uar_lock);
@@ -564,14 +575,14 @@ struct mlx4_ib_dev {
 	struct ib_mad_agent    *send_agent[MLX4_MAX_PORTS][2];
 	struct ib_ah	       *sm_ah[MLX4_MAX_PORTS];
 	spinlock_t		sm_lock;
+	atomic64_t		sl2vl[MLX4_MAX_PORTS];
 	struct mlx4_ib_sriov	sriov;
 
 	struct mutex		cap_mask_mutex;
 	bool			ib_active;
 	struct mlx4_ib_iboe	iboe;
-	struct mlx4_ib_counter	counters[MLX4_MAX_PORTS];
+	struct mlx4_ib_counters counters_table[MLX4_MAX_PORTS];
 	int		       *eq_table;
-	int			eq_added;
 	struct kobject	       *iov_parent;
 	struct kobject	       *ports_parent;
 	struct kobject	       *dev_ports_parent[MLX4_MFUNC_MAX];
@@ -580,12 +591,22 @@ struct mlx4_ib_dev {
 	unsigned long *ib_uc_qpns_bitmap;
 	int steer_qpn_count;
 	int steer_qpn_base;
+	int steering_support;
+	struct mlx4_ib_qp      *qp1_proxy[MLX4_MAX_PORTS];
+	/* lock when destroying qp1_proxy and getting netdev events */
+	struct mutex		qp1_proxy_lock[MLX4_MAX_PORTS];
+	u8			bond_next_port;
+	/* protect resources needed as part of reset flow */
+	spinlock_t		reset_flow_resource_lock;
+	struct list_head		qp_list;
+	struct mlx4_ib_diag_counters diag_counters[MLX4_DIAG_COUNTERS_TYPES];
 };
 
 struct ib_event_work {
 	struct work_struct	work;
 	struct mlx4_ib_dev	*ib_dev;
 	struct mlx4_eqe		ib_eqe;
+	int			port;
 };
 
 struct mlx4_ib_qp_tunnel_init_attr {
@@ -595,6 +616,21 @@ struct mlx4_ib_qp_tunnel_init_attr {
 	u8 port;
 };
 
+struct mlx4_uverbs_ex_query_device {
+	__u32 comp_mask;
+	__u32 reserved;
+};
+
+enum query_device_resp_mask {
+	QUERY_DEVICE_RESP_MASK_TIMESTAMP = 1UL << 0,
+};
+
+struct mlx4_uverbs_ex_query_device_resp {
+	__u32 comp_mask;
+	__u32 response_length;
+	__u64 hca_core_clock_offset;
+};
+
 static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
 {
 	return container_of(ibdev, struct mlx4_ib_dev, ib_dev);
@@ -635,11 +671,6 @@ static inline struct mlx4_ib_mw *to_mmw(struct ib_mw *ibmw)
 	return container_of(ibmw, struct mlx4_ib_mw, ibmw);
 }
 
-static inline struct mlx4_ib_fast_reg_page_list *to_mfrpl(struct ib_fast_reg_page_list *ibfrpl)
-{
-	return container_of(ibfrpl, struct mlx4_ib_fast_reg_page_list, ibfrpl);
-}
-
 static inline struct mlx4_ib_fmr *to_mfmr(struct ib_fmr *ibfmr)
 {
 	return container_of(ibfmr, struct mlx4_ib_fmr, ibfmr);
@@ -675,6 +706,13 @@ static inline struct mlx4_ib_ah *to_mah(struct ib_ah *ibah)
 	return container_of(ibah, struct mlx4_ib_ah, ibah);
 }
 
+static inline u8 mlx4_ib_bond_next_port(struct mlx4_ib_dev *dev)
+{
+	dev->bond_next_port = (dev->bond_next_port + 1) % dev->num_ports;
+
+	return dev->bond_next_port + 1;
+}
+
 int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev);
 void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev);
 
@@ -685,30 +723,22 @@ void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db)
 struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc);
 int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
 			   struct ib_umem *umem);
-int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem,
-						u64 start_va,
-						int *num_of_mtts);
 struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 				  u64 virt_addr, int access_flags,
-				  struct ib_udata *udata, int mr_id);
+				  struct ib_udata *udata);
 int mlx4_ib_dereg_mr(struct ib_mr *mr);
-struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
-int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
-		    struct ib_mw_bind *mw_bind);
+struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
+			       struct ib_udata *udata);
 int mlx4_ib_dealloc_mw(struct ib_mw *mw);
-struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
-					int max_page_list_len);
-struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
-							       int page_list_len);
-void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
-
-int mlx4_ib_modify_cq(struct ib_cq *cq,
-		      struct ib_cq_attr *cq_attr,
-		      int cq_attr_mask);
+struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
+			       enum ib_mr_type mr_type,
+			       u32 max_num_sg);
+int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+		      unsigned int *sg_offset);
+int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
 int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
-int mlx4_ib_ignore_overrun_cq(struct ib_cq *ibcq);
 struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
-				struct ib_cq_init_attr *attr,
+				const struct ib_cq_init_attr *attr,
 				struct ib_ucontext *context,
 				struct ib_udata *udata);
 int mlx4_ib_destroy_cq(struct ib_cq *cq);
@@ -746,11 +776,13 @@ int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		      struct ib_recv_wr **bad_wr);
 
 int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
-		 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
-		 void *in_mad, void *response_mad);
+		 int port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+		 const void *in_mad, void *response_mad);
 int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags,	u8 port_num,
-			struct ib_wc *in_wc, struct ib_grh *in_grh,
-			struct ib_mad *in_mad, struct ib_mad *out_mad);
+			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+			const struct ib_mad_hdr *in, size_t in_mad_size,
+			struct ib_mad_hdr *out, size_t *out_mad_size,
+			u16 *out_mad_pkey_index);
 int mlx4_ib_mad_init(struct mlx4_ib_dev *dev);
 void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev);
 
@@ -768,28 +800,15 @@ int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
 int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
 			union ib_gid *gid, int netw_view);
 
-int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr,
-			u8 *mac, int *is_mcast, u8 port);
-
-int mlx4_ib_query_if_stat(struct mlx4_ib_dev *dev, u32 counter_index,
-		       union mlx4_counter *counter, u8 clear);
-
-static inline int mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
+static inline bool mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
 {
 	u8 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3;
 
 	if (rdma_port_get_link_layer(ah->ibah.device, port) == IB_LINK_LAYER_ETHERNET)
-		return 1;
+		return true;
 
 	return !!(ah->av.ib.g_slid & 0x80);
 }
-static inline int mlx4_ib_qp_has_rq(struct ib_qp_init_attr *attr)
-{
-	if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT)
-		return 0;
-
-	return !attr->srq;
-}
 
 int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx);
 void mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq);
@@ -815,13 +834,16 @@ void mlx4_ib_tunnels_update_work(struct work_struct *work);
 int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
 			  enum ib_qp_type qpt, struct ib_wc *wc,
 			  struct ib_grh *grh, struct ib_mad *mad);
+
 int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
 			 enum ib_qp_type dest_qpt, u16 pkey_index, u32 remote_qpn,
-			 u32 qkey, struct ib_ah_attr *attr, u8 *s_mac, struct ib_mad *mad);
+			 u32 qkey, struct ib_ah_attr *attr, u8 *s_mac,
+			 u16 vlan_id, struct ib_mad *mad);
+
 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx);
 
 int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
-		struct ib_mad *mad, int is_eth);
+		struct ib_mad *mad);
 
 int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
 		struct ib_mad *mad);
@@ -848,6 +870,8 @@ int add_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
 void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
 			     struct attribute *attr);
 ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index);
+void mlx4_ib_slave_alias_guid_event(struct mlx4_ib_dev *dev, int slave,
+				    int port, int slave_init);
 
 int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *device) ;
 
@@ -859,7 +883,16 @@ int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn);
 void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count);
 int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
 			 int is_attach);
-int mlx4_ib_query_device(struct ib_device *ibdev,
-			 struct ib_device_attr *props);
+int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
+			  u64 start, u64 length, u64 virt_addr,
+			  int mr_access_flags, struct ib_pd *pd,
+			  struct ib_udata *udata);
+int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
+				    u8 port_num, int index);
+
+void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
+				     int port);
+
+void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port);
 
 #endif /* MLX4_IB_H */
diff --git a/sys/dev/mlx4/mlx4_ib/mlx4_ib_ah.c b/sys/dev/mlx4/mlx4_ib/mlx4_ib_ah.c
index 1c30fa996796..d9916212d40f 100644
--- a/sys/dev/mlx4/mlx4_ib/mlx4_ib_ah.c
+++ b/sys/dev/mlx4/mlx4_ib/mlx4_ib_ah.c
@@ -38,28 +38,10 @@
 
 #include <linux/slab.h>
 #include <linux/string.h>
+#include <linux/etherdevice.h>
 
 #include "mlx4_ib.h"
 
-int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr,
-			u8 *mac, int *is_mcast, u8 port)
-{
-	struct in6_addr in6;
-
-	*is_mcast = 0;
-
-	memcpy(&in6, ah_attr->grh.dgid.raw, sizeof in6);
-	if (rdma_link_local_addr(&in6))
-		rdma_get_ll_mac(&in6, mac);
-	else if (rdma_is_multicast_addr(&in6)) {
-		rdma_get_mcast_mac(&in6, mac);
-		*is_mcast = 1;
-	} else
-		return -EINVAL;
-
-	return 0;
-}
-
 static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
 				  struct mlx4_ib_ah *ah)
 {
@@ -67,6 +49,7 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
 
 	ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
 	ah->av.ib.g_slid  = ah_attr->src_path_bits;
+	ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
 	if (ah_attr->ah_flags & IB_AH_GRH) {
 		ah->av.ib.g_slid   |= 0x80;
 		ah->av.ib.gid_index = ah_attr->grh.sgid_index;
@@ -84,7 +67,6 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
 		       !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support))
 			--ah->av.ib.stat_rate;
 	}
-	ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
 
 	return &ah->ibah;
 }
@@ -96,21 +78,38 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
 	struct mlx4_dev *dev = ibdev->dev;
 	int is_mcast = 0;
 	struct in6_addr in6;
-	u16 vlan_tag;
+	u16 vlan_tag = 0xffff;
+	union ib_gid sgid;
+	struct ib_gid_attr gid_attr;
+	int ret;
 
 	memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
 	if (rdma_is_multicast_addr(&in6)) {
 		is_mcast = 1;
-		resolve_mcast_mac(&in6, ah->av.eth.mac);
+		rdma_get_mcast_mac(&in6, ah->av.eth.mac);
 	} else {
-		memcpy(ah->av.eth.mac, ah_attr->dmac, 6);
+		memcpy(ah->av.eth.mac, ah_attr->dmac, ETH_ALEN);
+	}
+	ret = ib_get_cached_gid(pd->device, ah_attr->port_num,
+				ah_attr->grh.sgid_index, &sgid, &gid_attr);
+	if (ret)
+		return ERR_PTR(ret);
+	eth_zero_addr(ah->av.eth.s_mac);
+	if (gid_attr.ndev) {
+		if (is_vlan_dev(gid_attr.ndev))
+			vlan_tag = vlan_dev_vlan_id(gid_attr.ndev);
+		memcpy(ah->av.eth.s_mac, IF_LLADDR(gid_attr.ndev), ETH_ALEN);
+		dev_put(gid_attr.ndev);
 	}
-	vlan_tag = ah_attr->vlan_id;
 	if (vlan_tag < 0x1000)
 		vlan_tag |= (ah_attr->sl & 7) << 13;
 	ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
-	ah->av.eth.gid_index = ah_attr->grh.sgid_index;
+	ret = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
+	if (ret < 0)
+		return ERR_PTR(ret);
+	ah->av.eth.gid_index = ret;
 	ah->av.eth.vlan = cpu_to_be16(vlan_tag);
+	ah->av.eth.hop_limit = ah_attr->grh.hop_limit;
 	if (ah_attr->static_rate) {
 		ah->av.eth.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET;
 		while (ah->av.eth.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
@@ -168,9 +167,13 @@ int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
 	enum rdma_link_layer ll;
 
 	memset(ah_attr, 0, sizeof *ah_attr);
-	ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
 	ah_attr->port_num = be32_to_cpu(ah->av.ib.port_pd) >> 24;
 	ll = rdma_port_get_link_layer(ibah->device, ah_attr->port_num);
+	if (ll == IB_LINK_LAYER_ETHERNET)
+		ah_attr->sl = be32_to_cpu(ah->av.eth.sl_tclass_flowlabel) >> 29;
+	else
+		ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
+
 	ah_attr->dlid = ll == IB_LINK_LAYER_INFINIBAND ? be16_to_cpu(ah->av.ib.dlid) : 0;
 	if (ah->av.ib.stat_rate)
 		ah_attr->static_rate = ah->av.ib.stat_rate - MLX4_STAT_RATE_OFFSET;
diff --git a/sys/dev/mlx4/mlx4_ib/mlx4_ib_alias_GUID.c b/sys/dev/mlx4/mlx4_ib/mlx4_ib_alias_GUID.c
index 352219acbf67..91d240b45583 100644
--- a/sys/dev/mlx4/mlx4_ib/mlx4_ib_alias_GUID.c
+++ b/sys/dev/mlx4/mlx4_ib/mlx4_ib_alias_GUID.c
@@ -42,6 +42,8 @@
 #include <linux/errno.h>
 #include <rdma/ib_user_verbs.h>
 #include <linux/delay.h>
+#include <linux/math64.h>
+#include <linux/ktime.h>
 #include "mlx4_ib.h"
 
 /*
@@ -57,15 +59,19 @@ struct mlx4_alias_guid_work_context {
 	int			query_id;
 	struct list_head	list;
 	int			block_num;
+	ib_sa_comp_mask		guid_indexes;
 	u8			method;
 };
 
 struct mlx4_next_alias_guid_work {
 	u8 port;
 	u8 block_num;
+	u8 method;
 	struct mlx4_sriov_alias_guid_info_rec_det rec_det;
 };
 
+static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
+				     int *resched_delay_sec);
 
 void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num,
 					 u8 port_num, u8 *p_data)
@@ -119,6 +125,57 @@ ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index)
 	return IB_SA_COMP_MASK(4 + index);
 }
 
+void mlx4_ib_slave_alias_guid_event(struct mlx4_ib_dev *dev, int slave,
+				    int port,  int slave_init)
+{
+	__be64 curr_guid, required_guid;
+	int record_num = slave / 8;
+	int index = slave % 8;
+	int port_index = port - 1;
+	unsigned long flags;
+	int do_work = 0;
+
+	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
+	if (dev->sriov.alias_guid.ports_guid[port_index].state_flags &
+	    GUID_STATE_NEED_PORT_INIT)
+		goto unlock;
+	if (!slave_init) {
+		curr_guid = *(__be64 *)&dev->sriov.
+			alias_guid.ports_guid[port_index].
+			all_rec_per_port[record_num].
+			all_recs[GUID_REC_SIZE * index];
+		if (curr_guid == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL) ||
+		    !curr_guid)
+			goto unlock;
+		required_guid = cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL);
+	} else {
+		required_guid = mlx4_get_admin_guid(dev->dev, slave, port);
+		if (required_guid == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
+			goto unlock;
+	}
+	*(__be64 *)&dev->sriov.alias_guid.ports_guid[port_index].
+		all_rec_per_port[record_num].
+		all_recs[GUID_REC_SIZE * index] = required_guid;
+	dev->sriov.alias_guid.ports_guid[port_index].
+		all_rec_per_port[record_num].guid_indexes
+		|= mlx4_ib_get_aguid_comp_mask_from_ix(index);
+	dev->sriov.alias_guid.ports_guid[port_index].
+		all_rec_per_port[record_num].status
+		= MLX4_GUID_INFO_STATUS_IDLE;
+	/* set to run immediately */
+	dev->sriov.alias_guid.ports_guid[port_index].
+		all_rec_per_port[record_num].time_to_run = 0;
+	dev->sriov.alias_guid.ports_guid[port_index].
+		all_rec_per_port[record_num].
+		guids_retry_schedule[index] = 0;
+	do_work = 1;
+unlock:
+	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
+
+	if (do_work)
+		mlx4_ib_init_alias_guid_work(dev, port_index);
+}
+
 /*
  * Whenever new GUID is set/unset (guid table change) create event and
  * notify the relevant slave (master also should be notified).
@@ -134,15 +191,20 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
 {
 	int i;
 	u64 guid_indexes;
-	int slave_id;
+	int slave_id, slave_port;
 	enum slave_port_state new_state;
 	enum slave_port_state prev_state;
 	__be64 tmp_cur_ag, form_cache_ag;
 	enum slave_port_gen_event gen_event;
+	struct mlx4_sriov_alias_guid_info_rec_det *rec;
+	unsigned long flags;
+	__be64 required_value;
 
 	if (!mlx4_is_master(dev->dev))
 		return;
 
+	rec = &dev->sriov.alias_guid.ports_guid[port_num - 1].
+			all_rec_per_port[block_num];
 	guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
 				   ports_guid[port_num - 1].
 				   all_rec_per_port[block_num].guid_indexes);
@@ -156,8 +218,13 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
 			continue;
 
 		slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i ;
-		if (slave_id >= dev->dev->num_slaves)
+		if (slave_id >= dev->dev->persist->num_vfs + 1)
 			return;
+
+		slave_port = mlx4_phys_to_slave_port(dev->dev, slave_id, port_num);
+		if (slave_port < 0) /* this port isn't available for the VF */
+			continue;
+
 		tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE];
 		form_cache_ag = get_cached_alias_guid(dev, port_num,
 					(NUM_ALIAS_GUID_IN_REC * block_num) + i);
@@ -168,8 +235,27 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
 		 */
 		if (tmp_cur_ag != form_cache_ag)
 			continue;
-		mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num);
 
+		spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
+		required_value = *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE];
+
+		if (required_value == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
+			required_value = 0;
+
+		if (tmp_cur_ag == required_value) {
+			rec->guid_indexes = rec->guid_indexes &
+			       ~mlx4_ib_get_aguid_comp_mask_from_ix(i);
+		} else {
+			/* may notify port down if value is 0 */
+			if (tmp_cur_ag != MLX4_NOT_SET_GUID) {
+				spin_unlock_irqrestore(&dev->sriov.
+					alias_guid.ag_work_lock, flags);
+				continue;
+			}
+		}
+		spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock,
+				       flags);
+		mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num);
 		/*2 cases: Valid GUID, and Invalid Guid*/
 
 		if (tmp_cur_ag != MLX4_NOT_SET_GUID) { /*valid GUID*/
@@ -190,10 +276,14 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
 			set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
 						      MLX4_PORT_STATE_IB_EVENT_GID_INVALID,
 						      &gen_event);
-			pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
-				 slave_id, port_num);
-			mlx4_gen_port_state_change_eqe(dev->dev, slave_id, port_num,
-						       MLX4_PORT_CHANGE_SUBTYPE_DOWN);
+			if (gen_event == SLAVE_PORT_GEN_EVENT_DOWN) {
+				pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
+					 slave_id, port_num);
+				mlx4_gen_port_state_change_eqe(dev->dev,
+							       slave_id,
+							       port_num,
+							       MLX4_PORT_CHANGE_SUBTYPE_DOWN);
+			}
 		}
 	}
 }
@@ -208,6 +298,9 @@ static void aliasguid_query_handler(int status,
 	int i;
 	struct mlx4_sriov_alias_guid_info_rec_det *rec;
 	unsigned long flags, flags1;
+	ib_sa_comp_mask declined_guid_indexes = 0;
+	ib_sa_comp_mask applied_guid_indexes = 0;
+	unsigned int resched_delay_sec = 0;
 
 	if (!context)
 		return;
@@ -218,9 +311,9 @@ static void aliasguid_query_handler(int status,
 		all_rec_per_port[cb_ctx->block_num];
 
 	if (status) {
-		rec->status = MLX4_GUID_INFO_STATUS_IDLE;
 		pr_debug("(port: %d) failed: status = %d\n",
 			 cb_ctx->port, status);
+		rec->time_to_run = ktime_get_ns() + 1 * NSEC_PER_SEC;
 		goto out;
 	}
 
@@ -237,69 +330,101 @@ static void aliasguid_query_handler(int status,
 	rec = &dev->sriov.alias_guid.ports_guid[port_index].
 		all_rec_per_port[guid_rec->block_num];
 
-	rec->status = MLX4_GUID_INFO_STATUS_SET;
-	rec->method = MLX4_GUID_INFO_RECORD_SET;
-
+	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
 	for (i = 0 ; i < NUM_ALIAS_GUID_IN_REC; i++) {
-		__be64 tmp_cur_ag;
-		tmp_cur_ag = *(__be64 *)&guid_rec->guid_info_list[i * GUID_REC_SIZE];
-		if ((cb_ctx->method == MLX4_GUID_INFO_RECORD_DELETE)
-		    && (MLX4_NOT_SET_GUID == tmp_cur_ag)) {
-			pr_debug("%s:Record num %d in block_num:%d "
-				"was deleted by SM,ownership by %d "
-				"(0 = driver, 1=sysAdmin, 2=None)\n",
-				__func__, i, guid_rec->block_num,
-				rec->ownership);
-			rec->guid_indexes = rec->guid_indexes &
-				~mlx4_ib_get_aguid_comp_mask_from_ix(i);
+		__be64 sm_response, required_val;
+
+		if (!(cb_ctx->guid_indexes &
+			mlx4_ib_get_aguid_comp_mask_from_ix(i)))
 			continue;
+		sm_response = *(__be64 *)&guid_rec->guid_info_list
+				[i * GUID_REC_SIZE];
+		required_val = *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE];
+		if (cb_ctx->method == MLX4_GUID_INFO_RECORD_DELETE) {
+			if (required_val ==
+			    cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
+				goto next_entry;
+
+			/* A new value was set till we got the response */
+			pr_debug("need to set new value %llx, record num %d, block_num:%d\n",
+				 (long long)be64_to_cpu(required_val),
+				 i, guid_rec->block_num);
+			goto entry_declined;
 		}
 
 		/* check if the SM didn't assign one of the records.
-		 * if it didn't, if it was not sysadmin request:
-		 * ask the SM to give a new GUID, (instead of the driver request).
+		 * if it didn't, ask the SM for it again on the next retry.
 		 */
-		if (tmp_cur_ag == MLX4_NOT_SET_GUID) {
-			mlx4_ib_warn(&dev->ib_dev, "%s:Record num %d in "
-				     "block_num: %d was declined by SM, "
-				     "ownership by %d (0 = driver, 1=sysAdmin,"
-				     " 2=None)\n", __func__, i,
-				     guid_rec->block_num, rec->ownership);
-			if (rec->ownership == MLX4_GUID_DRIVER_ASSIGN) {
-				/* if it is driver assign, asks for new GUID from SM*/
-				*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] =
-					MLX4_NOT_SET_GUID;
-
-				/* Mark the record as not assigned, and let it
-				 * be sent again in the next work sched.*/
-				rec->status = MLX4_GUID_INFO_STATUS_IDLE;
-				rec->guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
-			}
+		if (sm_response == MLX4_NOT_SET_GUID) {
+			if (rec->guids_retry_schedule[i] == 0)
+				mlx4_ib_warn(&dev->ib_dev,
+					     "%s:Record num %d in  block_num: %d was declined by SM\n",
+					     __func__, i,
+					     guid_rec->block_num);
+			goto entry_declined;
 		} else {
 		       /* properly assigned record. */
 		       /* We save the GUID we just got from the SM in the
 			* admin_guid in order to be persistent, and in the
 			* request from the sm the process will ask for the same GUID */
-			if (rec->ownership == MLX4_GUID_SYSADMIN_ASSIGN &&
-			    tmp_cur_ag != *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE]) {
-				/* the sysadmin assignment failed.*/
-				mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set"
-					     " admin guid after SysAdmin "
-					     "configuration. "
-					     "Record num %d in block_num:%d "
-					     "was declined by SM, "
-					     "new val(0x%llx) was kept\n",
-					      __func__, i,
-					     guid_rec->block_num,
-					     (long long)be64_to_cpu(*(__be64 *) &
-							 rec->all_recs[i * GUID_REC_SIZE]));
+			if (required_val &&
+			    sm_response != required_val) {
+				/* Warn only on first retry */
+				if (rec->guids_retry_schedule[i] == 0)
+					mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set"
+						     " admin guid after SysAdmin "
+						     "configuration. "
+						     "Record num %d in block_num:%d "
+						     "was declined by SM, "
+						     "new val(0x%llx) was kept, SM returned (0x%llx)\n",
+						      __func__, i,
+						     guid_rec->block_num,
+						     (long long)be64_to_cpu(required_val),
+						     (long long)be64_to_cpu(sm_response));
+				goto entry_declined;
 			} else {
-				memcpy(&rec->all_recs[i * GUID_REC_SIZE],
-				       &guid_rec->guid_info_list[i * GUID_REC_SIZE],
-				       GUID_REC_SIZE);
+				*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] =
+					sm_response;
+				if (required_val == 0)
+					mlx4_set_admin_guid(dev->dev,
+							    sm_response,
+							    (guid_rec->block_num
+							    * NUM_ALIAS_GUID_IN_REC) + i,
+							    cb_ctx->port);
+				goto next_entry;
 			}
 		}
+entry_declined:
+		declined_guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
+		rec->guids_retry_schedule[i] =
+			(rec->guids_retry_schedule[i] == 0) ?  1 :
+			min((unsigned int)60,
+			    rec->guids_retry_schedule[i] * 2);
+		/* using the minimum value among all entries in that record */
+		resched_delay_sec = (resched_delay_sec == 0) ?
+				rec->guids_retry_schedule[i] :
+				min(resched_delay_sec,
+				    rec->guids_retry_schedule[i]);
+		continue;
+
+next_entry:
+		rec->guids_retry_schedule[i] = 0;
 	}
+
+	applied_guid_indexes =  cb_ctx->guid_indexes & ~declined_guid_indexes;
+	if (declined_guid_indexes ||
+	    rec->guid_indexes & ~(applied_guid_indexes)) {
+		pr_debug("record=%d wasn't fully set, guid_indexes=0x%llx applied_indexes=0x%llx, declined_indexes=0x%llx\n",
+			 guid_rec->block_num,
+			 (long long)be64_to_cpu((__force __be64)rec->guid_indexes),
+			 (long long)be64_to_cpu((__force __be64)applied_guid_indexes),
+			 (long long)be64_to_cpu((__force __be64)declined_guid_indexes));
+		rec->time_to_run = ktime_get_ns() +
+			resched_delay_sec * NSEC_PER_SEC;
+	} else {
+		rec->status = MLX4_GUID_INFO_STATUS_SET;
+	}
+	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
 	/*
 	The func is call here to close the cases when the
 	sm doesn't send smp, so in the sa response the driver
@@ -311,10 +436,13 @@ static void aliasguid_query_handler(int status,
 out:
 	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
 	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
-	if (!dev->sriov.is_going_down)
+	if (!dev->sriov.is_going_down) {
+		get_low_record_time_index(dev, port_index, &resched_delay_sec);
 		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq,
 				   &dev->sriov.alias_guid.ports_guid[port_index].
-				   alias_guid_work, 0);
+				   alias_guid_work,
+				   msecs_to_jiffies(resched_delay_sec * 1000));
+	}
 	if (cb_ctx->sa_query) {
 		list_del(&cb_ctx->list);
 		kfree(cb_ctx);
@@ -331,9 +459,7 @@ static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index)
 	ib_sa_comp_mask comp_mask = 0;
 
 	dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].status
-		= MLX4_GUID_INFO_STATUS_IDLE;
-	dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].method
-		= MLX4_GUID_INFO_RECORD_SET;
+		= MLX4_GUID_INFO_STATUS_SET;
 
 	/* calculate the comp_mask for that record.*/
 	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
@@ -347,19 +473,21 @@ static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index)
 		need to assign GUIDs, then don't put it up for assignment.
 		*/
 		if (MLX4_GUID_FOR_DELETE_VAL == cur_admin_val ||
-		    (!index && !i) ||
-		    MLX4_GUID_NONE_ASSIGN == dev->sriov.alias_guid.
-		    ports_guid[port - 1].all_rec_per_port[index].ownership)
+		    (!index && !i))
 			continue;
 		comp_mask |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
 	}
 	dev->sriov.alias_guid.ports_guid[port - 1].
-		all_rec_per_port[index].guid_indexes = comp_mask;
+		all_rec_per_port[index].guid_indexes |= comp_mask;
+	if (dev->sriov.alias_guid.ports_guid[port - 1].
+	    all_rec_per_port[index].guid_indexes)
+		dev->sriov.alias_guid.ports_guid[port - 1].
+		all_rec_per_port[index].status = MLX4_GUID_INFO_STATUS_IDLE;
+
 }
 
 static int set_guid_rec(struct ib_device *ibdev,
-			u8 port, int index,
-			struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
+			struct mlx4_next_alias_guid_work *rec)
 {
 	int err;
 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
@@ -368,6 +496,9 @@ static int set_guid_rec(struct ib_device *ibdev,
 	struct ib_port_attr attr;
 	struct mlx4_alias_guid_work_context *callback_context;
 	unsigned long resched_delay, flags, flags1;
+	u8 port = rec->port + 1;
+	int index = rec->block_num;
+	struct mlx4_sriov_alias_guid_info_rec_det *rec_det = &rec->rec_det;
 	struct list_head *head =
 		&dev->sriov.alias_guid.ports_guid[port - 1].cb_list;
 
@@ -394,7 +525,9 @@ static int set_guid_rec(struct ib_device *ibdev,
 	callback_context->port = port;
 	callback_context->dev = dev;
 	callback_context->block_num = index;
-	callback_context->method = rec_det->method;
+	callback_context->guid_indexes = rec_det->guid_indexes;
+	callback_context->method = rec->method;
+
 	memset(&guid_info_rec, 0, sizeof (struct ib_sa_guidinfo_rec));
 
 	guid_info_rec.lid = cpu_to_be16(attr.lid);
@@ -413,7 +546,7 @@ static int set_guid_rec(struct ib_device *ibdev,
 	callback_context->query_id =
 		ib_sa_guid_info_rec_query(dev->sriov.alias_guid.sa_client,
 					  ibdev, port, &guid_info_rec,
-					  comp_mask, rec_det->method, 1000,
+					  comp_mask, rec->method, 1000,
 					  GFP_KERNEL, aliasguid_query_handler,
 					  callback_context,
 					  &callback_context->sa_query);
@@ -448,6 +581,30 @@ out:
 	return err;
 }
 
+static void mlx4_ib_guid_port_init(struct mlx4_ib_dev *dev, int port)
+{
+	int j, k, entry;
+	__be64 guid;
+
+	/*Check if the SM doesn't need to assign the GUIDs*/
+	for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
+		for (k = 0; k < NUM_ALIAS_GUID_IN_REC; k++) {
+			entry = j * NUM_ALIAS_GUID_IN_REC + k;
+			/* no request for the 0 entry (hw guid) */
+			if (!entry || entry > dev->dev->persist->num_vfs ||
+			    !mlx4_is_slave_active(dev->dev, entry))
+				continue;
+			guid = mlx4_get_admin_guid(dev->dev, entry, port);
+			*(__be64 *)&dev->sriov.alias_guid.ports_guid[port - 1].
+				all_rec_per_port[j].all_recs
+				[GUID_REC_SIZE * k] = guid;
+			pr_debug("guid was set, entry=%d, val=0x%llx, port=%d\n",
+				 entry,
+				 (long long)be64_to_cpu(guid),
+				 port);
+		}
+	}
+}
 void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
 {
 	int i;
@@ -457,6 +614,13 @@ void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
 
 	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
 	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
+
+	if (dev->sriov.alias_guid.ports_guid[port - 1].state_flags &
+		GUID_STATE_NEED_PORT_INIT) {
+		mlx4_ib_guid_port_init(dev, port);
+		dev->sriov.alias_guid.ports_guid[port - 1].state_flags &=
+			(~GUID_STATE_NEED_PORT_INIT);
+	}
 	for (i = 0; i < NUM_ALIAS_GUID_REC_IN_PORT; i++)
 		invalidate_guid_record(dev, port, i);
 
@@ -476,60 +640,107 @@ void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
 	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
 }
 
+static void set_required_record(struct mlx4_ib_dev *dev, u8 port,
+				struct mlx4_next_alias_guid_work *next_rec,
+				int record_index)
+{
+	int i;
+	int lowset_time_entry = -1;
+	int lowest_time = 0;
+	ib_sa_comp_mask delete_guid_indexes = 0;
+	ib_sa_comp_mask set_guid_indexes = 0;
+	struct mlx4_sriov_alias_guid_info_rec_det *rec =
+			&dev->sriov.alias_guid.ports_guid[port].
+			all_rec_per_port[record_index];
+
+	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
+		if (!(rec->guid_indexes &
+			mlx4_ib_get_aguid_comp_mask_from_ix(i)))
+			continue;
+
+		if (*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] ==
+				cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
+			delete_guid_indexes |=
+				mlx4_ib_get_aguid_comp_mask_from_ix(i);
+		else
+			set_guid_indexes |=
+				mlx4_ib_get_aguid_comp_mask_from_ix(i);
+
+		if (lowset_time_entry == -1 || rec->guids_retry_schedule[i] <=
+			lowest_time) {
+			lowset_time_entry = i;
+			lowest_time = rec->guids_retry_schedule[i];
+		}
+	}
+
+	memcpy(&next_rec->rec_det, rec, sizeof(*rec));
+	next_rec->port = port;
+	next_rec->block_num = record_index;
+
+	if (*(__be64 *)&rec->all_recs[lowset_time_entry * GUID_REC_SIZE] ==
+				cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL)) {
+		next_rec->rec_det.guid_indexes = delete_guid_indexes;
+		next_rec->method = MLX4_GUID_INFO_RECORD_DELETE;
+	} else {
+		next_rec->rec_det.guid_indexes = set_guid_indexes;
+		next_rec->method = MLX4_GUID_INFO_RECORD_SET;
+	}
+}
+
+/* return index of record that should be updated based on lowest
+ * rescheduled time
+ */
+static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
+				     int *resched_delay_sec)
+{
+	int record_index = -1;
+	u64 low_record_time = 0;
+	struct mlx4_sriov_alias_guid_info_rec_det rec;
+	int j;
+
+	for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
+		rec = dev->sriov.alias_guid.ports_guid[port].
+			all_rec_per_port[j];
+		if (rec.status == MLX4_GUID_INFO_STATUS_IDLE &&
+		    rec.guid_indexes) {
+			if (record_index == -1 ||
+			    rec.time_to_run < low_record_time) {
+				record_index = j;
+				low_record_time = rec.time_to_run;
+			}
+		}
+	}
+	if (resched_delay_sec) {
+		u64 curr_time = ktime_get_ns();
+
+		*resched_delay_sec = (low_record_time < curr_time) ? 0 :
+			div_u64((low_record_time - curr_time), NSEC_PER_SEC);
+	}
+
+	return record_index;
+}
+
 /* The function returns the next record that was
  * not configured (or failed to be configured) */
 static int get_next_record_to_update(struct mlx4_ib_dev *dev, u8 port,
 				     struct mlx4_next_alias_guid_work *rec)
 {
-	int j;
 	unsigned long flags;
+	int record_index;
+	int ret = 0;
 
-	for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
-		spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
-		if (dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status ==
-		    MLX4_GUID_INFO_STATUS_IDLE) {
-			memcpy(&rec->rec_det,
-			       &dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j],
-			       sizeof (struct mlx4_sriov_alias_guid_info_rec_det));
-			rec->port = port;
-			rec->block_num = j;
-			dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status =
-				MLX4_GUID_INFO_STATUS_PENDING;
-			spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
-			return 0;
-		}
-		spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
+	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
+	record_index = get_low_record_time_index(dev, port, NULL);
+
+	if (record_index < 0) {
+		ret = -ENOENT;
+		goto out;
 	}
-	return -ENOENT;
-}
 
-static void set_administratively_guid_record(struct mlx4_ib_dev *dev, int port,
-					     int rec_index,
-					     struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
-{
-	dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].guid_indexes =
-		rec_det->guid_indexes;
-	memcpy(dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].all_recs,
-	       rec_det->all_recs, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
-	dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].status =
-		rec_det->status;
-}
-
-static void set_all_slaves_guids(struct mlx4_ib_dev *dev, int port)
-{
-	int j;
-	struct mlx4_sriov_alias_guid_info_rec_det rec_det ;
-
-	for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT ; j++) {
-		memset(rec_det.all_recs, 0, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
-		rec_det.guid_indexes = (!j ? 0 : IB_SA_GUIDINFO_REC_GID0) |
-			IB_SA_GUIDINFO_REC_GID1 | IB_SA_GUIDINFO_REC_GID2 |
-			IB_SA_GUIDINFO_REC_GID3 | IB_SA_GUIDINFO_REC_GID4 |
-			IB_SA_GUIDINFO_REC_GID5 | IB_SA_GUIDINFO_REC_GID6 |
-			IB_SA_GUIDINFO_REC_GID7;
-		rec_det.status = MLX4_GUID_INFO_STATUS_IDLE;
-		set_administratively_guid_record(dev, port, j, &rec_det);
-	}
+	set_required_record(dev, port, rec, record_index);
+out:
+	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
+	return ret;
 }
 
 static void alias_guid_work(struct work_struct *work)
@@ -559,9 +770,7 @@ static void alias_guid_work(struct work_struct *work)
 		goto out;
 	}
 
-	set_guid_rec(&dev->ib_dev, rec->port + 1, rec->block_num,
-		     &rec->rec_det);
-
+	set_guid_rec(&dev->ib_dev, rec);
 out:
 	kfree(rec);
 }
@@ -576,6 +785,12 @@ void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port)
 	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
 	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
 	if (!dev->sriov.is_going_down) {
+		/* If a work item is already pending, cancel it and re-queue;
+		 * otherwise the new request will not run until the previous
+		 * one finishes, as the same work struct is used.
+		 */
+		cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[port].
+				    alias_guid_work);
 		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq,
 			   &dev->sriov.alias_guid.ports_guid[port].alias_guid_work, 0);
 	}
@@ -623,7 +838,7 @@ int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
 {
 	char alias_wq_name[15];
 	int ret = 0;
-	int i, j, k;
+	int i, j;
 	union ib_gid gid;
 
 	if (!mlx4_is_master(dev->dev))
@@ -647,37 +862,29 @@ int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
 	for (i = 0 ; i < dev->num_ports; i++) {
 		memset(&dev->sriov.alias_guid.ports_guid[i], 0,
 		       sizeof (struct mlx4_sriov_alias_guid_port_rec_det));
-		/*Check if the SM doesn't need to assign the GUIDs*/
+		dev->sriov.alias_guid.ports_guid[i].state_flags |=
+				GUID_STATE_NEED_PORT_INIT;
 		for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
-			if (mlx4_ib_sm_guid_assign) {
-				dev->sriov.alias_guid.ports_guid[i].
-					all_rec_per_port[j].
-					ownership = MLX4_GUID_DRIVER_ASSIGN;
-				continue;
-			}
-			dev->sriov.alias_guid.ports_guid[i].all_rec_per_port[j].
-					ownership = MLX4_GUID_NONE_ASSIGN;
-			/*mark each val as it was deleted,
-			  till the sysAdmin will give it valid val*/
-			for (k = 0; k < NUM_ALIAS_GUID_IN_REC; k++) {
-				*(__be64 *)&dev->sriov.alias_guid.ports_guid[i].
-					all_rec_per_port[j].all_recs[GUID_REC_SIZE * k] =
-						cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL);
-			}
+			/* mark each val as it was deleted */
+			memset(dev->sriov.alias_guid.ports_guid[i].
+				all_rec_per_port[j].all_recs, 0xFF,
+				sizeof(dev->sriov.alias_guid.ports_guid[i].
+				all_rec_per_port[j].all_recs));
 		}
 		INIT_LIST_HEAD(&dev->sriov.alias_guid.ports_guid[i].cb_list);
 		/*prepare the records, set them to be allocated by sm*/
+		if (mlx4_ib_sm_guid_assign)
+			for (j = 1; j < NUM_ALIAS_GUID_PER_PORT; j++)
+				mlx4_set_admin_guid(dev->dev, 0, j, i + 1);
 		for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT; j++)
 			invalidate_guid_record(dev, i + 1, j);
 
 		dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid;
 		dev->sriov.alias_guid.ports_guid[i].port  = i;
-		if (mlx4_ib_sm_guid_assign)
-			set_all_slaves_guids(dev, i);
 
 		snprintf(alias_wq_name, sizeof alias_wq_name, "alias_guid%d", i);
 		dev->sriov.alias_guid.ports_guid[i].wq =
-			create_singlethread_workqueue(alias_wq_name);
+			alloc_ordered_workqueue(alias_wq_name, WQ_MEM_RECLAIM);
 		if (!dev->sriov.alias_guid.ports_guid[i].wq) {
 			ret = -ENOMEM;
 			goto err_thread;
diff --git a/sys/dev/mlx4/mlx4_ib/mlx4_ib_cm.c b/sys/dev/mlx4/mlx4_ib/mlx4_ib_cm.c
index 4a89c545c594..4097369af293 100644
--- a/sys/dev/mlx4/mlx4_ib/mlx4_ib_cm.c
+++ b/sys/dev/mlx4/mlx4_ib/mlx4_ib_cm.c
@@ -242,8 +242,7 @@ static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
 static struct id_map_entry *
 id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
 {
-	int ret, id;
-	static int next_id;
+	int ret;
 	struct id_map_entry *ent;
 	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
 
@@ -259,25 +258,22 @@ id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
 	ent->dev = to_mdev(ibdev);
 	INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);
 
-	do {
-		spin_lock(&to_mdev(ibdev)->sriov.id_map_lock);
-		ret = idr_get_new_above(&sriov->pv_id_table, ent,
-					next_id, &id);
-		if (!ret) {
-			next_id = ((unsigned) id + 1) & MAX_IDR_MASK;
-			ent->pv_cm_id = (u32)id;
-			sl_id_map_add(ibdev, ent);
-		}
+	idr_preload(GFP_KERNEL);
+	spin_lock(&to_mdev(ibdev)->sriov.id_map_lock);
 
-		spin_unlock(&sriov->id_map_lock);
-	} while (ret == -EAGAIN && idr_pre_get(&sriov->pv_id_table, GFP_KERNEL));
-	/*the function idr_get_new_above can return -ENOSPC, so don't insert in that case.*/
-	if (!ret) {
-		spin_lock(&sriov->id_map_lock);
+	ret = idr_alloc_cyclic(&sriov->pv_id_table, ent, 0, 0, GFP_NOWAIT);
+	if (ret >= 0) {
+		ent->pv_cm_id = (u32)ret;
+		sl_id_map_add(ibdev, ent);
 		list_add_tail(&ent->list, &sriov->cm_list);
-		spin_unlock(&sriov->id_map_lock);
-		return ent;
 	}
+
+	spin_unlock(&sriov->id_map_lock);
+	idr_preload_end();
+
+	if (ret >= 0)
+		return ent;
+
 	/*error flow*/
 	kfree(ent);
 	mlx4_ib_warn(ibdev, "No more space in the idr (err:0x%x)\n", ret);
@@ -327,8 +323,7 @@ int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id
 
 	if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
 			mad->mad_hdr.attr_id == CM_REP_ATTR_ID ||
-			mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID ||
-			mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
+			mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
 		sl_cm_id = get_local_comm_id(mad);
 		id = id_map_alloc(ibdev, slave_id, sl_cm_id);
 		if (IS_ERR(id)) {
@@ -361,7 +356,7 @@ int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id
 }
 
 int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
-			     struct ib_mad *mad, int is_eth)
+			     struct ib_mad *mad)
 {
 	u32 pv_cm_id;
 	struct id_map_entry *id;
@@ -370,7 +365,7 @@ int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
 	    mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
 		union ib_gid gid;
 
-		if (is_eth)
+		if (!slave)
 			return 0;
 
 		gid = gid_from_req_msg(ibdev, mad);
@@ -391,7 +386,7 @@ int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
 		return -ENOENT;
 	}
 
-	if (!is_eth)
+	if (slave)
 		*slave = id->slave_id;
 	set_remote_comm_id(mad, id->sl_cm_id);
 
@@ -411,7 +406,6 @@ void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev)
 	INIT_LIST_HEAD(&dev->sriov.cm_list);
 	dev->sriov.sl_id_map = RB_ROOT;
 	idr_init(&dev->sriov.pv_id_table);
-	idr_pre_get(&dev->sriov.pv_id_table, GFP_KERNEL);
 }
 
 /* slave = -1 ==> all slaves */
diff --git a/sys/dev/mlx4/mlx4_ib/mlx4_ib_cq.c b/sys/dev/mlx4/mlx4_ib/mlx4_ib_cq.c
index cec37169f3d5..6ebca11b0451 100644
--- a/sys/dev/mlx4/mlx4_ib/mlx4_ib_cq.c
+++ b/sys/dev/mlx4/mlx4_ib/mlx4_ib_cq.c
@@ -34,14 +34,11 @@
 #include <dev/mlx4/cq.h>
 #include <dev/mlx4/qp.h>
 #include <dev/mlx4/srq.h>
+#include <dev/mlx4/driver.h>
 #include <linux/slab.h>
 
 #include "mlx4_ib.h"
-#include "user.h"
-
-/* Which firmware version adds support for Resize CQ */
-#define MLX4_FW_VER_RESIZE_CQ  mlx4_fw_ver(2, 5, 0)
-#define MLX4_FW_VER_IGNORE_OVERRUN_CQ mlx4_fw_ver(2, 7, 8200)
+#include <rdma/mlx4-abi.h>
 
 static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
 {
@@ -93,33 +90,12 @@ static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
 	return get_sw_cqe(cq, cq->mcq.cons_index);
 }
 
-int mlx4_ib_modify_cq(struct ib_cq *cq,
-		      struct ib_cq_attr *cq_attr,
-		      int cq_attr_mask)
+int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
 {
-	int err = 0;
 	struct mlx4_ib_cq *mcq = to_mcq(cq);
 	struct mlx4_ib_dev *dev = to_mdev(cq->device);
 
-	if (cq_attr_mask & IB_CQ_CAP_FLAGS) {
-		if (cq_attr->cq_cap_flags & IB_CQ_TIMESTAMP)
-			return -ENOTSUPP;
-
-		if (cq_attr->cq_cap_flags & IB_CQ_IGNORE_OVERRUN) {
-			if (dev->dev->caps.cq_flags & MLX4_DEV_CAP_CQ_FLAG_IO)
-				err = mlx4_cq_ignore_overrun(dev->dev, &mcq->mcq);
-			else
-				err = -ENOSYS;
-		}
-	}
-
-	if (!err)
-		if (cq_attr_mask & IB_CQ_MODERATION)
-			err = mlx4_cq_modify(dev->dev, &mcq->mcq,
-					cq_attr->moderation.cq_count,
-					cq_attr->moderation.cq_period);
-
-	return err;
+	return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
 }
 
 static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
@@ -127,7 +103,7 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
 	int err;
 
 	err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
-			     PAGE_SIZE * 2, &buf->buf);
+			     PAGE_SIZE * 2, &buf->buf, GFP_KERNEL);
 
 	if (err)
 		goto out;
@@ -138,7 +114,7 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
 	if (err)
 		goto err_buf;
 
-	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
+	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf, GFP_KERNEL);
 	if (err)
 		goto err_mtt;
 
@@ -165,18 +141,14 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *cont
 {
 	int err;
 	int cqe_size = dev->dev->caps.cqe_size;
-	int shift;
-	int n;
 
 	*umem = ib_umem_get(context, buf_addr, cqe * cqe_size,
 			    IB_ACCESS_LOCAL_WRITE, 1);
 	if (IS_ERR(*umem))
 		return PTR_ERR(*umem);
 
-	n = ib_umem_page_count(*umem);
-	shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n);
-	err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);
-
+	err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
+			    ilog2((*umem)->page_size), &buf->mtt);
 	if (err)
 		goto err_buf;
 
@@ -195,20 +167,18 @@ err_buf:
 	return err;
 }
 
-/* we don't support system timestamping */
-#define CQ_CREATE_FLAGS_SUPPORTED IB_CQ_TIMESTAMP
-
+#define CQ_CREATE_FLAGS_SUPPORTED IB_CQ_FLAGS_TIMESTAMP_COMPLETION
 struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
-				struct ib_cq_init_attr *attr,
+				const struct ib_cq_init_attr *attr,
 				struct ib_ucontext *context,
 				struct ib_udata *udata)
 {
+	int entries = attr->cqe;
+	int vector = attr->comp_vector;
 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
 	struct mlx4_ib_cq *cq;
 	struct mlx4_uar *uar;
 	int err;
-	int entries = attr->cqe;
-	int vector = attr->comp_vector;
 
 	if (entries < 1 || entries > dev->dev->caps.max_cqes)
 		return ERR_PTR(-EINVAL);
@@ -216,7 +186,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
 	if (attr->flags & ~CQ_CREATE_FLAGS_SUPPORTED)
 		return ERR_PTR(-EINVAL);
 
-	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+	cq = kmalloc(sizeof *cq, GFP_KERNEL);
 	if (!cq)
 		return ERR_PTR(-ENOMEM);
 
@@ -227,6 +197,8 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
 	cq->resize_buf = NULL;
 	cq->resize_umem = NULL;
 	cq->create_flags = attr->flags;
+	INIT_LIST_HEAD(&cq->send_qp_list);
+	INIT_LIST_HEAD(&cq->recv_qp_list);
 
 	if (context) {
 		struct mlx4_ib_create_cq ucmd;
@@ -248,7 +220,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
 
 		uar = &to_mucontext(context)->uar;
 	} else {
-		err = mlx4_db_alloc(dev->dev, &cq->db, 1);
+		err = mlx4_db_alloc(dev->dev, &cq->db, 1, GFP_KERNEL);
 		if (err)
 			goto err_cq;
 
@@ -269,21 +241,24 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
 
 	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
 			    cq->db.dma, &cq->mcq, vector, 0,
-			    !!(cq->create_flags & IB_CQ_TIMESTAMP));
+			    !!(cq->create_flags & IB_CQ_FLAGS_TIMESTAMP_COMPLETION));
 	if (err)
 		goto err_dbmap;
 
-	cq->mcq.comp  = mlx4_ib_cq_comp;
+	cq->mcq.comp = mlx4_ib_cq_comp;
 	cq->mcq.event = mlx4_ib_cq_event;
 
 	if (context)
 		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
 			err = -EFAULT;
-			goto err_dbmap;
+			goto err_cq_free;
 		}
 
 	return &cq->ibcq;
 
+err_cq_free:
+	mlx4_cq_free(dev->dev, &cq->mcq);
+
 err_dbmap:
 	if (context)
 		mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);
@@ -314,7 +289,7 @@ static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
 	if (cq->resize_buf)
 		return -EBUSY;
 
-	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
+	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_KERNEL);
 	if (!cq->resize_buf)
 		return -ENOMEM;
 
@@ -342,7 +317,7 @@ static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq
 	if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
 		return -EFAULT;
 
-	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
+	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_KERNEL);
 	if (!cq->resize_buf)
 		return -ENOMEM;
 
@@ -370,17 +345,15 @@ static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
 	return i - cq->mcq.cons_index;
 }
 
-static int mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
+static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
 {
 	struct mlx4_cqe *cqe, *new_cqe;
 	int i;
 	int cqe_size = cq->buf.entry_size;
 	int cqe_inc = cqe_size == 64 ? 1 : 0;
-	struct mlx4_cqe *start_cqe;
 
 	i = cq->mcq.cons_index;
 	cqe = get_cqe(cq, i & cq->ibcq.cqe);
-	start_cqe = cqe;
 	cqe += cqe_inc;
 
 	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
@@ -392,15 +365,9 @@ static int mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
 		new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
 			(((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
 		cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
-		if (cqe == start_cqe) {
-			pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n", cq->mcq.cqn);
-			return -ENOMEM;
-		}
 		cqe += cqe_inc;
-
 	}
 	++cq->mcq.cons_index;
-	return 0;
 }
 
 int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
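The simplified resize-copy loop above relies on the usual mlx4 CQ convention: the ownership bit expected in a CQE flips on every pass over the (power-of-two) ring, which is what the `(i + 1) & (cq->resize_buf->cqe + 1)` expression computes. A minimal standalone sketch of that index-to-owner-bit mapping (not part of the patch, using a made-up ring size and a stand-in for MLX4_CQE_OWNER_MASK):

#include <stdio.h>
#include <stdint.h>

#define CQE_OWNER_MASK 0x80	/* stand-in for MLX4_CQE_OWNER_MASK */

/* Owner bit expected at consumer index 'ci' for a ring whose mask is
 * 'cqe' (ring size is cqe + 1, a power of two). The bit toggles each
 * time the index wraps around the ring. */
static uint8_t owner_bit(uint32_t ci, uint32_t cqe)
{
	return (ci & (cqe + 1)) ? CQE_OWNER_MASK : 0;
}

int main(void)
{
	uint32_t cqe = 7;	/* hypothetical 8-entry ring */
	uint32_t ci;

	for (ci = 0; ci < 24; ci++)
		printf("ci=%2u slot=%u owner=%s\n", ci, ci & cqe,
		       owner_bit(ci, cqe) ? "HW" : "SW");
	return 0;
}
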
@@ -411,9 +378,6 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 	int outst_cqe;
 	int err;
 
-	if (dev->dev->caps.fw_ver < MLX4_FW_VER_RESIZE_CQ)
-		return -ENOSYS;
-
 	mutex_lock(&cq->resize_mutex);
 	if (entries < 1 || entries > dev->dev->caps.max_cqes) {
 		err = -EINVAL;
@@ -439,7 +403,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 		/* Can't be smaller than the number of outstanding CQEs */
 		outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
 		if (entries < outst_cqe + 1) {
-			err = 0;
+			err = -EINVAL;
 			goto out;
 		}
 
@@ -470,7 +434,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 
 		spin_lock_irq(&cq->lock);
 		if (cq->resize_buf) {
-			err = mlx4_ib_cq_resize_copy_cqes(cq);
+			mlx4_ib_cq_resize_copy_cqes(cq);
 			tmp_buf = cq->buf;
 			tmp_cqe = cq->ibcq.cqe;
 			cq->buf      = cq->resize_buf->buf;
@@ -507,17 +471,6 @@ out:
 	return err;
 }
 
-int mlx4_ib_ignore_overrun_cq(struct ib_cq *ibcq)
-{
-	struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
-	struct mlx4_ib_cq *cq = to_mcq(ibcq);
-
-	if (dev->dev->caps.fw_ver < MLX4_FW_VER_IGNORE_OVERRUN_CQ)
-		return -ENOSYS;
-
-	return mlx4_cq_ignore_overrun(dev->dev, &cq->mcq);
-}
-
 int mlx4_ib_destroy_cq(struct ib_cq *cq)
 {
 	struct mlx4_ib_dev *dev = to_mdev(cq->device);
@@ -624,8 +577,8 @@ static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
 		checksum == cpu_to_be16(0xffff);
 }
 
-static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
-			   unsigned tail, struct mlx4_cqe *cqe, int is_eth)
+static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
+			    unsigned tail, struct mlx4_cqe *cqe, int is_eth)
 {
 	struct mlx4_ib_proxy_sqp_hdr *hdr;
 
@@ -643,12 +596,60 @@ static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct
 		wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
 		memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
 		memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
+		wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
 	} else {
 		wc->slid        = be16_to_cpu(hdr->tun.slid_mac_47_32);
 		wc->sl          = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
 	}
+}
 
-	return 0;
+static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries,
+			       struct ib_wc *wc, int *npolled, int is_send)
+{
+	struct mlx4_ib_wq *wq;
+	unsigned cur;
+	int i;
+
+	wq = is_send ? &qp->sq : &qp->rq;
+	cur = wq->head - wq->tail;
+
+	if (cur == 0)
+		return;
+
+	for (i = 0;  i < cur && *npolled < num_entries; i++) {
+		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+		wc->status = IB_WC_WR_FLUSH_ERR;
+		wc->vendor_err = MLX4_CQE_SYNDROME_WR_FLUSH_ERR;
+		wq->tail++;
+		(*npolled)++;
+		wc->qp = &qp->ibqp;
+		wc++;
+	}
+}
+
+static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
+				 struct ib_wc *wc, int *npolled)
+{
+	struct mlx4_ib_qp *qp;
+
+	*npolled = 0;
+	/* Find uncompleted WQEs belonging to that cq and return
+	 * simulated FLUSH_ERR completions
+	 */
+	list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) {
+		mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 1);
+		if (*npolled >= num_entries)
+			goto out;
+	}
+
+	list_for_each_entry(qp, &cq->recv_qp_list, cq_recv_list) {
+		mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 0);
+		if (*npolled >= num_entries)
+			goto out;
+	}
+
+out:
+	return;
 }
 
 static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
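The two helpers added above walk each QP's send and receive rings and, once the device has hit an internal error (checked later in mlx4_ib_poll_cq), report every posted-but-uncompleted WR back to the caller as a software-generated IB_WC_WR_FLUSH_ERR completion. A standalone sketch of the same head/tail walk (not part of the patch; hypothetical types stand in for struct mlx4_ib_wq and struct ib_wc):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-ins for struct mlx4_ib_wq and struct ib_wc. */
struct wq {
	uint64_t wrid[8];	/* wqe_cnt must be a power of two */
	unsigned wqe_cnt;
	unsigned head;		/* incremented on post */
	unsigned tail;		/* incremented on completion */
};

struct wc {
	uint64_t wr_id;
	int flushed;
};

/* Drain all outstanding WRs as simulated flush-error completions,
 * bounded by the space the caller provided. */
static int sw_flush(struct wq *wq, struct wc *wc, int num_entries)
{
	unsigned cur = wq->head - wq->tail;	/* outstanding WRs */
	int n = 0;

	while (cur-- && n < num_entries) {
		wc[n].wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		wc[n].flushed = 1;
		wq->tail++;
		n++;
	}
	return n;
}

int main(void)
{
	struct wq wq = { .wqe_cnt = 8, .head = 5, .tail = 2,
			 .wrid = { 10, 11, 12, 13, 14, 15, 16, 17 } };
	struct wc wc[4];
	int i, n = sw_flush(&wq, wc, 4);

	for (i = 0; i < n; i++)
		printf("flushed wr_id=%llu\n", (unsigned long long)wc[i].wr_id);
	return 0;
}
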
@@ -662,11 +663,10 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 	struct mlx4_srq *msrq = NULL;
 	int is_send;
 	int is_error;
+	int is_eth;
 	u32 g_mlpath_rqpn;
 	u16 wqe_ctr;
 	unsigned tail = 0;
-	int timestamp_en = !!(cq->create_flags & IB_CQ_TIMESTAMP);
-
 
 repoll:
 	cqe = next_cqe_sw(cq);
@@ -688,12 +688,6 @@ repoll:
 	is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
 		MLX4_CQE_OPCODE_ERROR;
 
-	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
-		     is_send)) {
-		pr_warn("Completion for NOP opcode detected!\n");
-		return -EINVAL;
-	}
-
 	/* Resize CQ in progress */
 	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
 		if (cq->resize_buf) {
@@ -719,12 +713,6 @@ repoll:
 		 */
 		mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
 				       be32_to_cpu(cqe->vlan_my_qpn));
-		if (unlikely(!mqp)) {
-			pr_warn("CQ %06x with entry for unknown QPN %06x\n",
-			       cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
-			return -EINVAL;
-		}
-
 		*cur_qp = to_mibqp(mqp);
 	}
 
@@ -737,11 +725,6 @@ repoll:
 		/* SRQ is also in the radix tree */
 		msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
 				       srq_num);
-		if (unlikely(!msrq)) {
-			pr_warn("CQ %06x with entry for unknown SRQN %06x\n",
-				cq->mcq.cqn, srq_num);
-			return -EINVAL;
-		}
 	}
 
 	if (is_send) {
@@ -781,7 +764,6 @@ repoll:
 		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
 		case MLX4_OPCODE_RDMA_WRITE_IMM:
 			wc->wc_flags |= IB_WC_WITH_IMM;
-			/* fall through */
 		case MLX4_OPCODE_RDMA_WRITE:
 			wc->opcode    = IB_WC_RDMA_WRITE;
 			break;
@@ -811,14 +793,11 @@ repoll:
 			wc->opcode    = IB_WC_MASKED_FETCH_ADD;
 			wc->byte_len  = 8;
 			break;
-		case MLX4_OPCODE_BIND_MW:
-			wc->opcode    = IB_WC_BIND_MW;
-			break;
 		case MLX4_OPCODE_LSO:
 			wc->opcode    = IB_WC_LSO;
 			break;
 		case MLX4_OPCODE_FMR:
-			wc->opcode    = IB_WC_FAST_REG_MR;
+			wc->opcode    = IB_WC_REG_MR;
 			break;
 		case MLX4_OPCODE_LOCAL_INVAL:
 			wc->opcode    = IB_WC_LOCAL_INV;
@@ -849,35 +828,20 @@ repoll:
 			break;
 		}
 
+		is_eth = (rdma_port_get_link_layer(wc->qp->device,
+						  (*cur_qp)->port) ==
+			  IB_LINK_LAYER_ETHERNET);
 		if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
 			if ((*cur_qp)->mlx4_ib_qp_type &
 			    (MLX4_IB_QPT_PROXY_SMI_OWNER |
-			     MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
-				return use_tunnel_data
-					(*cur_qp, cq, wc, tail, cqe,
-					 rdma_port_get_link_layer
-						(wc->qp->device,
-						 (*cur_qp)->port) ==
-						IB_LINK_LAYER_ETHERNET);
+			     MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
+				use_tunnel_data(*cur_qp, cq, wc, tail, cqe,
+						is_eth);
+				return 0;
+			}
 		}
 
-		if (timestamp_en) {
-			/* currently, only CQ_CREATE_WITH_TIMESTAMPING_RAW is
-			 * supported. CQ_CREATE_WITH_TIMESTAMPING_SYS isn't
-			 * supported */
-			if (cq->create_flags & IB_CQ_TIMESTAMP_TO_SYS_TIME) {
-				wc->ts.timestamp = 0;
-			} else {
-				wc->ts.timestamp =
-					((u64)(be32_to_cpu(cqe->timestamp_16_47)
-					       + !cqe->timestamp_0_15) << 16)
-					| be16_to_cpu(cqe->timestamp_0_15);
-				wc->wc_flags |= IB_WC_WITH_TIMESTAMP;
-			}
-		} else {
-			wc->wc_flags |= IB_WC_WITH_SLID;
-			wc->slid	   = be16_to_cpu(cqe->rlid);
-		}
+		wc->slid	   = be16_to_cpu(cqe->rlid);
 		g_mlpath_rqpn	   = be32_to_cpu(cqe->g_mlpath_rqpn);
 		wc->src_qp	   = g_mlpath_rqpn & 0xffffff;
 		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
@@ -885,27 +849,21 @@ repoll:
 		wc->pkey_index     = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
 		wc->wc_flags	  |= mlx4_ib_ipoib_csum_ok(cqe->status,
 					cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
-		if (!timestamp_en) {
-			if (rdma_port_get_link_layer(wc->qp->device,
-						     (*cur_qp)->port) ==
-						      IB_LINK_LAYER_ETHERNET)
-				wc->sl  = be16_to_cpu(cqe->sl_vid) >> 13;
-			else
-				wc->sl  = be16_to_cpu(cqe->sl_vid) >> 12;
-			wc->wc_flags	  |= IB_WC_WITH_SL;
-		}
-		if ((be32_to_cpu(cqe->vlan_my_qpn) &
-		    MLX4_CQE_VLAN_PRESENT_MASK) && !timestamp_en) {
-			wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
-				MLX4_CQE_VID_MASK;
-			wc->wc_flags	  |= IB_WC_WITH_VLAN;
+		if (is_eth) {
+			wc->sl  = be16_to_cpu(cqe->sl_vid) >> 13;
+			if (be32_to_cpu(cqe->vlan_my_qpn) &
+					MLX4_CQE_CVLAN_PRESENT_MASK) {
+				wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
+					MLX4_CQE_VID_MASK;
+			} else {
+				wc->vlan_id = 0xffff;
+			}
+			memcpy(wc->smac, cqe->smac, ETH_ALEN);
+			wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
 		} else {
+			wc->sl  = be16_to_cpu(cqe->sl_vid) >> 12;
 			wc->vlan_id = 0xffff;
 		}
-		if (!timestamp_en) {
-			memcpy(wc->smac, cqe->smac, 6);
-			wc->wc_flags |= IB_WC_WITH_SMAC;
-		}
 	}
 
 	return 0;
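In the reworked receive path above, the 16-bit sl_vid word of the CQE is decoded differently per link layer: on Ethernet the top three bits carry the priority (used as the SL) and the low twelve bits the VLAN ID, while on InfiniBand the top four bits carry the SL. A small standalone decoder (not part of the patch; 0x0fff is assumed for MLX4_CQE_VID_MASK):

#include <stdio.h>
#include <stdint.h>

#define VID_MASK 0x0fff		/* assumed value of MLX4_CQE_VID_MASK */

static void decode_sl_vid(uint16_t sl_vid, int is_eth, int vlan_present)
{
	if (is_eth) {
		unsigned sl  = sl_vid >> 13;			/* 3-bit PCP */
		unsigned vid = vlan_present ? (sl_vid & VID_MASK) : 0xffff;

		printf("eth: sl=%u vlan_id=0x%04x\n", sl, vid);
	} else {
		printf("ib:  sl=%u\n", sl_vid >> 12);		/* 4-bit SL */
	}
}

int main(void)
{
	decode_sl_vid(0xa064, 1, 1);	/* eth: sl=5, vlan_id=0x064 */
	decode_sl_vid(0xa064, 0, 0);	/* ib:  sl=10 */
	return 0;
}
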
@@ -917,24 +875,25 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 	struct mlx4_ib_qp *cur_qp = NULL;
 	unsigned long flags;
 	int npolled;
-	int err = 0;
+	struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);
 
 	spin_lock_irqsave(&cq->lock, flags);
+	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+		mlx4_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
+		goto out;
+	}
 
 	for (npolled = 0; npolled < num_entries; ++npolled) {
-		err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
-		if (err)
+		if (mlx4_ib_poll_one(cq, &cur_qp, wc + npolled))
 			break;
 	}
 
 	mlx4_cq_set_ci(&cq->mcq);
 
+out:
 	spin_unlock_irqrestore(&cq->lock, flags);
 
-	if (err == 0 || err == -EAGAIN)
-		return npolled;
-	else
-		return err;
+	return npolled;
 }
 
 int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
@@ -942,7 +901,7 @@ int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 	mlx4_cq_arm(&to_mcq(ibcq)->mcq,
 		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
 		    MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT,
-		    to_mdev(ibcq->device)->priv_uar.map,
+		    to_mdev(ibcq->device)->uar_map,
 		    MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock));
 
 	return 0;
diff --git a/sys/dev/mlx4/mlx4_ib/mlx4_ib_exp.c b/sys/dev/mlx4/mlx4_ib/mlx4_ib_exp.c
deleted file mode 100644
index 8a706e377adb..000000000000
--- a/sys/dev/mlx4/mlx4_ib/mlx4_ib_exp.c
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
- * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "mlx4_ib.h"
-#include "mlx4_exp.h"
-#include <dev/mlx4/qp.h>
-
-int mlx4_ib_exp_query_device(struct ib_device *ibdev,
-			     struct ib_exp_device_attr *props)
-{
-	struct ib_device_attr *base = &props->base;
-	struct mlx4_ib_dev *dev = to_mdev(ibdev);
-	int ret = mlx4_ib_query_device(ibdev, &props->base);
-
-	props->exp_comp_mask = IB_EXP_DEVICE_ATTR_INLINE_RECV_SZ;
-	props->inline_recv_sz = dev->dev->caps.max_rq_sg * sizeof(struct mlx4_wqe_data_seg);
-	props->device_cap_flags2 = 0;
-
-	/* move RSS device cap from device_cap to device_cap_flags2 */
-	if (base->device_cap_flags & IB_DEVICE_QPG) {
-		props->device_cap_flags2 |= IB_EXP_DEVICE_QPG;
-		if (base->device_cap_flags & IB_DEVICE_UD_RSS)
-			props->device_cap_flags2 |= IB_EXP_DEVICE_UD_RSS;
-	}
-	base->device_cap_flags &= ~(IB_DEVICE_QPG |
-				    IB_DEVICE_UD_RSS |
-				    IB_DEVICE_UD_TSS);
-
-	if (base->max_rss_tbl_sz > 0) {
-		props->max_rss_tbl_sz = base->max_rss_tbl_sz;
-		props->exp_comp_mask |= IB_EXP_DEVICE_ATTR_RSS_TBL_SZ;
-	} else {
-		props->max_rss_tbl_sz = 0;
-		props->exp_comp_mask &= ~IB_EXP_DEVICE_ATTR_RSS_TBL_SZ;
-	}
-
-	if (props->device_cap_flags2)
-		props->exp_comp_mask |= IB_EXP_DEVICE_ATTR_CAP_FLAGS2;
-
-	return ret;
-}
-
-/*
- * Experimental functions
- */
-struct ib_qp *mlx4_ib_exp_create_qp(struct ib_pd *pd,
-				    struct ib_exp_qp_init_attr *init_attr,
-				    struct ib_udata *udata)
-{
-	int rwqe_size;
-	struct ib_qp *qp;
-	struct mlx4_ib_qp *mqp;
-	int use_inlr;
-	struct mlx4_ib_dev *dev;
-
-	if (init_attr->max_inl_recv && !udata)
-		return ERR_PTR(-EINVAL);
-
-	use_inlr = mlx4_ib_qp_has_rq((struct ib_qp_init_attr *)init_attr) &&
-		   init_attr->max_inl_recv && pd;
-	if (use_inlr) {
-		rwqe_size = roundup_pow_of_two(max(1U, init_attr->cap.max_recv_sge)) *
-					       sizeof(struct mlx4_wqe_data_seg);
-		if (rwqe_size < init_attr->max_inl_recv) {
-			dev = to_mdev(pd->device);
-			init_attr->max_inl_recv = min(init_attr->max_inl_recv,
-						      (u32)(dev->dev->caps.max_rq_sg *
-						      sizeof(struct mlx4_wqe_data_seg)));
-			init_attr->cap.max_recv_sge = roundup_pow_of_two(init_attr->max_inl_recv) /
-						      sizeof(struct mlx4_wqe_data_seg);
-		}
-	} else {
-		init_attr->max_inl_recv = 0;
-	}
-	qp = mlx4_ib_create_qp(pd, (struct ib_qp_init_attr *)init_attr, udata);
-	if (IS_ERR(qp))
-		return qp;
-
-	if (use_inlr) {
-		mqp = to_mqp(qp);
-		mqp->max_inlr_data = 1 << mqp->rq.wqe_shift;
-		init_attr->max_inl_recv = mqp->max_inlr_data;
-	}
-
-	return qp;
-}
diff --git a/sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c b/sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c
index e86764b68f40..183ccecd2171 100644
--- a/sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c
+++ b/sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c
@@ -37,6 +37,7 @@
 
 #include <linux/random.h>
 #include <dev/mlx4/cmd.h>
+#include <dev/mlx4/driver.h>
 #include <linux/gfp.h>
 #include <rdma/ib_pma.h>
 
@@ -103,8 +104,9 @@ __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
 }
 
 int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
-		 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
-		 void *in_mad, void *response_mad)
+		 int port, const struct ib_wc *in_wc,
+		 const struct ib_grh *in_grh,
+		 const void *in_mad, void *response_mad)
 {
 	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
 	void *inbox;
@@ -212,7 +214,7 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
  * Snoop SM MADs for port info, GUID info, and  P_Key table sets, so we can
  * synthesize LID change, Client-Rereg, GID change, and P_Key change events.
  */
-static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
+static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad *mad,
 		      u16 prev_lid)
 {
 	struct ib_port_info *pinfo;
@@ -228,6 +230,8 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
 	    mad->mad_hdr.method == IB_MGMT_METHOD_SET)
 		switch (mad->mad_hdr.attr_id) {
 		case IB_SMP_ATTR_PORT_INFO:
+			if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
+				return;
 			pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;
 			lid = be16_to_cpu(pinfo->lid);
 
@@ -243,6 +247,8 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
 			break;
 
 		case IB_SMP_ATTR_PKEY_TABLE:
+			if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
+				return;
 			if (!mlx4_is_mfunc(dev->dev)) {
 				mlx4_ib_dispatch_event(dev, port_num,
 						       IB_EVENT_PKEY_CHANGE);
@@ -279,6 +285,8 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
 			break;
 
 		case IB_SMP_ATTR_GUID_INFO:
+			if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
+				return;
 			/* paravirtualized master's guid is guid 0 -- does not change */
 			if (!mlx4_is_master(dev->dev))
 				mlx4_ib_dispatch_event(dev, port_num,
@@ -294,6 +302,25 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
 			}
 			break;
 
+		case IB_SMP_ATTR_SL_TO_VL_TABLE:
+			/* cache sl to vl mapping changes for use in
+			 * filling QP1 LRH VL field when sending packets
+			 */
+			if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV &&
+			    dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT)
+				return;
+			if (!mlx4_is_slave(dev->dev)) {
+				union sl2vl_tbl_to_u64 sl2vl64;
+				int jj;
+
+				for (jj = 0; jj < 8; jj++) {
+					sl2vl64.sl8[jj] = ((struct ib_smp *)mad)->data[jj];
+					pr_debug("sl2vl[%d] = %02x\n", jj, sl2vl64.sl8[jj]);
+				}
+				atomic64_set(&dev->sl2vl[port_num - 1], sl2vl64.sl64);
+			}
+			break;
+
 		default:
 			break;
 		}
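The new IB_SMP_ATTR_SL_TO_VL_TABLE case caches the eight per-SL VL values as a single 64-bit word (via the driver's sl2vl_tbl_to_u64 union) so the QP1 send path can fetch the whole table with one atomic64 load. A standalone sketch of the same pack/unpack idea (not part of the patch; a plain union stands in for the driver's type):

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the driver's union sl2vl_tbl_to_u64. */
union sl2vl {
	uint8_t  sl8[8];	/* VL value for SL 0..7 */
	uint64_t sl64;		/* whole table as one word */
};

int main(void)
{
	union sl2vl tbl = { .sl8 = { 0, 0, 1, 1, 2, 2, 3, 3 } };
	uint64_t cached = tbl.sl64;	/* what atomic64_set() would store */
	union sl2vl rd = { .sl64 = cached };
	int sl;

	for (sl = 0; sl < 8; sl++)
		printf("sl %d -> vl %u\n", sl, rd.sl8[sl]);
	return 0;
}
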
@@ -343,12 +370,13 @@ static void node_desc_override(struct ib_device *dev,
 	    mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
 	    mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
 		spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);
-		memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
+		memcpy(((struct ib_smp *) mad)->data, dev->node_desc,
+		       IB_DEVICE_NODE_DESC_MAX);
 		spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);
 	}
 }
 
-static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *mad)
+static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, const struct ib_mad *mad)
 {
 	int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
 	struct ib_mad_send_buf *send_buf;
@@ -358,7 +386,8 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma
 
 	if (agent) {
 		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
-					      IB_MGMT_MAD_DATA, GFP_ATOMIC);
+					      IB_MGMT_MAD_DATA, GFP_ATOMIC,
+					      IB_MGMT_BASE_VERSION);
 		if (IS_ERR(send_buf))
 			return;
 		/*
@@ -455,7 +484,8 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
 			  struct ib_grh *grh, struct ib_mad *mad)
 {
 	struct ib_sge list;
-	struct ib_send_wr wr, *bad_wr;
+	struct ib_ud_wr wr;
+	struct ib_send_wr *bad_wr;
 	struct mlx4_ib_demux_pv_ctx *tun_ctx;
 	struct mlx4_ib_demux_pv_qp *tun_qp;
 	struct mlx4_rcv_tunnel_mad *tun_mad;
@@ -478,10 +508,6 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
 	if (!tun_ctx || tun_ctx->state != DEMUX_PV_STATE_ACTIVE)
 		return -EAGAIN;
 
-	/* QP0 forwarding only for Dom0 */
-	if (!dest_qpt && (mlx4_master_func_num(dev->dev) != slave))
-		return -EINVAL;
-
 	if (!dest_qpt)
 		tun_qp = &tun_ctx->qp[0];
 	else
@@ -527,7 +553,7 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
 		tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
 	spin_unlock(&tun_qp->tx_lock);
 	if (ret)
-		goto out;
+		goto end;
 
 	tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
 	if (tun_qp->tx_ring[tun_tx_ix].ah)
@@ -551,14 +577,20 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
 	if (is_eth) {
 		u16 vlan = 0;
 		if (mlx4_get_slave_default_vlan(dev->dev, port, slave, &vlan,
-		    NULL)) {
-			if (vlan != wc->vlan_id)
-				/* VST and default vlan is not the packet vlan drop the
-				 * packet*/
+						NULL)) {
+			/* VST mode */
+			if (vlan != wc->vlan_id) {
+				/* Packet vlan is not the VST-assigned vlan.
+				 * Drop the packet.
+				 */
+				ret = -EPERM;
 				goto out;
-			 else
-				/* VST , remove hide the vlan from the VF */
-				vlan = 0;
+			} else {
+				/* Remove the vlan tag before forwarding
+				 * the packet to the VF.
+				 */
+				vlan = 0xffff;
+			}
 		} else {
 			vlan = wc->vlan_id;
 		}
@@ -578,23 +610,29 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
 
 	list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map;
 	list.length = sizeof (struct mlx4_rcv_tunnel_mad);
-	list.lkey = tun_ctx->mr->lkey;
+	list.lkey = tun_ctx->pd->local_dma_lkey;
 
-	wr.wr.ud.ah = ah;
-	wr.wr.ud.port_num = port;
-	wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
-	wr.wr.ud.remote_qpn = dqpn;
-	wr.next = NULL;
-	wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt);
-	wr.sg_list = &list;
-	wr.num_sge = 1;
-	wr.opcode = IB_WR_SEND;
-	wr.send_flags = IB_SEND_SIGNALED;
+	wr.ah = ah;
+	wr.port_num = port;
+	wr.remote_qkey = IB_QP_SET_QKEY;
+	wr.remote_qpn = dqpn;
+	wr.wr.next = NULL;
+	wr.wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt);
+	wr.wr.sg_list = &list;
+	wr.wr.num_sge = 1;
+	wr.wr.opcode = IB_WR_SEND;
+	wr.wr.send_flags = IB_SEND_SIGNALED;
 
-	ret = ib_post_send(src_qp, &wr, &bad_wr);
-out:
-	if (ret)
-		ib_destroy_ah(ah);
+	ret = ib_post_send(src_qp, &wr.wr, &bad_wr);
+	if (!ret)
+		return 0;
+ out:
+	spin_lock(&tun_qp->tx_lock);
+	tun_qp->tx_ix_tail++;
+	spin_unlock(&tun_qp->tx_lock);
+	tun_qp->tx_ring[tun_tx_ix].ah = NULL;
+end:
+	ib_destroy_ah(ah);
 	return ret;
 }
 
@@ -603,8 +641,8 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
 			struct ib_mad *mad)
 {
 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
-	int err;
-	int slave;
+	int err, other_port;
+	int slave = -1;
 	u8 *slave_id;
 	int is_eth = 0;
 
@@ -622,7 +660,17 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
 			mlx4_ib_warn(ibdev, "RoCE mgmt class is not CM\n");
 			return -EINVAL;
 		}
-		if (mlx4_get_slave_from_roce_gid(dev->dev, port, grh->dgid.raw, &slave)) {
+		err = mlx4_get_slave_from_roce_gid(dev->dev, port, grh->dgid.raw, &slave);
+		if (err && mlx4_is_mf_bonded(dev->dev)) {
+			other_port = (port == 1) ? 2 : 1;
+			err = mlx4_get_slave_from_roce_gid(dev->dev, other_port, grh->dgid.raw, &slave);
+			if (!err) {
+				port = other_port;
+				pr_debug("resolved slave %d from gid %pI6 wire port %d other %d\n",
+					 slave, grh->dgid.raw, port, other_port);
+			}
+		}
+		if (err) {
 			mlx4_ib_warn(ibdev, "failed matching grh\n");
 			return -ENOENT;
 		}
@@ -632,7 +680,7 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
 			return -ENOENT;
 		}
 
-		if (mlx4_ib_demux_cm_handler(ibdev, port, &slave, mad, is_eth))
+		if (mlx4_ib_demux_cm_handler(ibdev, port, NULL, mad))
 			return 0;
 
 		err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
@@ -663,13 +711,28 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
 	}
 	/* Class-specific handling */
 	switch (mad->mad_hdr.mgmt_class) {
+	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
+	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
+		/* 255 indicates the dom0 */
+		if (slave != 255 && slave != mlx4_master_func_num(dev->dev)) {
+			if (!mlx4_vf_smi_enabled(dev->dev, slave, port))
+				return -EPERM;
+			/* for a VF. drop unsolicited MADs */
+			if (!(mad->mad_hdr.method & IB_MGMT_METHOD_RESP)) {
+				mlx4_ib_warn(ibdev, "demux QP0. rejecting unsolicited mad for slave %d class 0x%x, method 0x%x\n",
+					     slave, mad->mad_hdr.mgmt_class,
+					     mad->mad_hdr.method);
+				return -EINVAL;
+			}
+		}
+		break;
 	case IB_MGMT_CLASS_SUBN_ADM:
 		if (mlx4_ib_demux_sa_handler(ibdev, port, slave,
 					     (struct ib_sa_mad *) mad))
 			return 0;
 		break;
 	case IB_MGMT_CLASS_CM:
-		if (mlx4_ib_demux_cm_handler(ibdev, port, &slave, mad, is_eth))
+		if (mlx4_ib_demux_cm_handler(ibdev, port, &slave, mad))
 			return 0;
 		break;
 	case IB_MGMT_CLASS_DEVICE_MGMT:
@@ -699,8 +762,8 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
 }
 
 static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
-			struct ib_wc *in_wc, struct ib_grh *in_grh,
-			struct ib_mad *in_mad, struct ib_mad *out_mad)
+			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+			const struct ib_mad *in_mad, struct ib_mad *out_mad)
 {
 	u16 slid, prev_lid = 0;
 	int err;
@@ -770,8 +833,7 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 		return IB_MAD_RESULT_FAILURE;
 
 	if (!out_mad->mad_hdr.status) {
-		if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV))
-			smp_snoop(ibdev, port_num, in_mad, prev_lid);
+		smp_snoop(ibdev, port_num, in_mad, prev_lid);
 		/* slaves get node desc from FW */
 		if (!mlx4_is_slave(to_mdev(ibdev)->dev))
 			node_desc_override(ibdev, out_mad);
@@ -788,230 +850,91 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
 }
 
-static void edit_counter_ext(struct mlx4_if_stat_extended *cnt, void *counters,
-			     __be16 attr_id)
+static void edit_counter(struct mlx4_counter *cnt, void *counters,
+			 __be16 attr_id)
 {
 	switch (attr_id) {
 	case IB_PMA_PORT_COUNTERS:
 	{
 		struct ib_pma_portcounters *pma_cnt =
-				(struct ib_pma_portcounters *)counters;
-		pma_cnt->port_xmit_data =
-			cpu_to_be32((be64_to_cpu(cnt->counters[0].
-						 IfTxUnicastOctets) +
-				     be64_to_cpu(cnt->counters[0].
-						 IfTxMulticastOctets) +
-				     be64_to_cpu(cnt->counters[0].
-						 IfTxBroadcastOctets) +
-				     be64_to_cpu(cnt->counters[0].
-						 IfTxDroppedOctets)) >> 2);
-		pma_cnt->port_rcv_data  =
-			cpu_to_be32((be64_to_cpu(cnt->counters[0].
-						 IfRxUnicastOctets) +
-				     be64_to_cpu(cnt->counters[0].
-						 IfRxMulticastOctets) +
-				     be64_to_cpu(cnt->counters[0].
-						 IfRxBroadcastOctets) +
-				     be64_to_cpu(cnt->counters[0].
-						 IfRxNoBufferOctets) +
-				     be64_to_cpu(cnt->counters[0].
-						 IfRxErrorOctets)) >> 2);
-		pma_cnt->port_xmit_packets =
-			cpu_to_be32(be64_to_cpu(cnt->counters[0].
-						IfTxUnicastFrames) +
-				    be64_to_cpu(cnt->counters[0].
-						IfTxMulticastFrames) +
-				    be64_to_cpu(cnt->counters[0].
-						IfTxBroadcastFrames) +
-				    be64_to_cpu(cnt->counters[0].
-						IfTxDroppedFrames));
-		pma_cnt->port_rcv_packets  =
-			cpu_to_be32(be64_to_cpu(cnt->counters[0].
-						IfRxUnicastFrames) +
-				    be64_to_cpu(cnt->counters[0].
-						IfRxMulticastFrames) +
-				    be64_to_cpu(cnt->counters[0].
-						IfRxBroadcastFrames) +
-				    be64_to_cpu(cnt->counters[0].
-						IfRxNoBufferFrames) +
-				    be64_to_cpu(cnt->counters[0].
-						IfRxErrorFrames));
-		pma_cnt->port_rcv_errors = cpu_to_be32(be64_to_cpu(cnt->
-						       counters[0].
-						       IfRxErrorFrames));
-		break;
-	}
+			(struct ib_pma_portcounters *)counters;
 
-	case IB_PMA_PORT_COUNTERS_EXT:
-	{
-		struct ib_pma_portcounters_ext *pma_cnt_ext =
-				(struct ib_pma_portcounters_ext *)counters;
-
-		pma_cnt_ext->port_xmit_data =
-			cpu_to_be64((be64_to_cpu(cnt->counters[0].
-						 IfTxUnicastOctets) +
-				     be64_to_cpu(cnt->counters[0].
-						 IfTxMulticastOctets) +
-				     be64_to_cpu(cnt->counters[0].
-						 IfTxBroadcastOctets) +
-				     be64_to_cpu(cnt->counters[0].
-						 IfTxDroppedOctets)) >> 2);
-		pma_cnt_ext->port_rcv_data  =
-			cpu_to_be64((be64_to_cpu(cnt->counters[0].
-						 IfRxUnicastOctets) +
-				     be64_to_cpu(cnt->counters[0].
-						 IfRxMulticastOctets) +
-				     be64_to_cpu(cnt->counters[0].
-						 IfRxBroadcastOctets) +
-				     be64_to_cpu(cnt->counters[0].
-						 IfRxNoBufferOctets) +
-				     be64_to_cpu(cnt->counters[0].
-						 IfRxErrorOctets)) >> 2);
-		pma_cnt_ext->port_xmit_packets =
-			cpu_to_be64(be64_to_cpu(cnt->counters[0].
-						IfTxUnicastFrames) +
-				    be64_to_cpu(cnt->counters[0].
-						IfTxMulticastFrames) +
-				    be64_to_cpu(cnt->counters[0].
-						IfTxBroadcastFrames) +
-				    be64_to_cpu(cnt->counters[0].
-						IfTxDroppedFrames));
-		pma_cnt_ext->port_rcv_packets  =
-			cpu_to_be64(be64_to_cpu(cnt->counters[0].
-						IfRxUnicastFrames) +
-				    be64_to_cpu(cnt->counters[0].
-						IfRxMulticastFrames) +
-				    be64_to_cpu(cnt->counters[0].
-						IfRxBroadcastFrames) +
-				    be64_to_cpu(cnt->counters[0].
-						IfRxNoBufferFrames) +
-				    be64_to_cpu(cnt->counters[0].
-						IfRxErrorFrames));
-		pma_cnt_ext->port_unicast_xmit_packets = cnt->counters[0].
-						IfTxUnicastFrames;
-		pma_cnt_ext->port_unicast_rcv_packets = cnt->counters[0].
-						IfRxUnicastFrames;
-		pma_cnt_ext->port_multicast_xmit_packets =
-			cpu_to_be64(be64_to_cpu(cnt->counters[0].
-						IfTxMulticastFrames) +
-				    be64_to_cpu(cnt->counters[0].
-						IfTxBroadcastFrames));
-		pma_cnt_ext->port_multicast_rcv_packets =
-			cpu_to_be64(be64_to_cpu(cnt->counters[0].
-						IfTxMulticastFrames) +
-				    be64_to_cpu(cnt->counters[0].
-						IfTxBroadcastFrames));
-
-		break;
-	}
-
-	default:
-		pr_warn("Unsupported attr_id 0x%x\n", attr_id);
-		break;
-	}
-
-}
-
-static void edit_counter(struct mlx4_if_stat_basic *cnt, void *counters,
-			 __be16	attr_id)
-{
-	switch (attr_id) {
-	case IB_PMA_PORT_COUNTERS:
-	{
-		struct ib_pma_portcounters *pma_cnt =
-				(struct ib_pma_portcounters *) counters;
-		pma_cnt->port_xmit_data =
-			cpu_to_be32(be64_to_cpu(
-				    cnt->counters[0].IfTxOctets) >> 2);
-		pma_cnt->port_rcv_data  =
-			cpu_to_be32(be64_to_cpu(
-				    cnt->counters[0].IfRxOctets) >> 2);
-		pma_cnt->port_xmit_packets =
-			cpu_to_be32(be64_to_cpu(cnt->counters[0].IfTxFrames));
-		pma_cnt->port_rcv_packets  =
-			cpu_to_be32(be64_to_cpu(cnt->counters[0].IfRxFrames));
+		ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
+				     (be64_to_cpu(cnt->tx_bytes) >> 2));
+		ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
+				     (be64_to_cpu(cnt->rx_bytes) >> 2));
+		ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
+				     be64_to_cpu(cnt->tx_frames));
+		ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
+				     be64_to_cpu(cnt->rx_frames));
 		break;
 	}
 	case IB_PMA_PORT_COUNTERS_EXT:
 	{
 		struct ib_pma_portcounters_ext *pma_cnt_ext =
-				(struct ib_pma_portcounters_ext *) counters;
+			(struct ib_pma_portcounters_ext *)counters;
 
 		pma_cnt_ext->port_xmit_data =
-			cpu_to_be64((be64_to_cpu(cnt->counters[0].
-						 IfTxOctets) >> 2));
-		pma_cnt_ext->port_rcv_data  =
-			cpu_to_be64((be64_to_cpu(cnt->counters[0].
-						 IfRxOctets) >> 2));
-		pma_cnt_ext->port_xmit_packets = cnt->counters[0].IfTxFrames;
-		pma_cnt_ext->port_rcv_packets  = cnt->counters[0].IfRxFrames;
+			cpu_to_be64(be64_to_cpu(cnt->tx_bytes) >> 2);
+		pma_cnt_ext->port_rcv_data =
+			cpu_to_be64(be64_to_cpu(cnt->rx_bytes) >> 2);
+		pma_cnt_ext->port_xmit_packets = cnt->tx_frames;
+		pma_cnt_ext->port_rcv_packets = cnt->rx_frames;
 		break;
 	}
 	default:
-		pr_warn("Unsupported attr_id 0x%x\n", attr_id);
 		break;
 	}
 }
 
-int mlx4_ib_query_if_stat(struct mlx4_ib_dev *dev, u32 counter_index,
-		       union mlx4_counter *counter, u8 clear)
+static int iboe_process_mad_port_info(void *out_mad)
 {
-	struct mlx4_cmd_mailbox *mailbox;
-	int err;
-	u32 inmod = counter_index | ((clear & 1) << 31);
+	struct ib_class_port_info cpi = {};
 
-	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
-	if (IS_ERR(mailbox))
-		return IB_MAD_RESULT_FAILURE;
-
-	err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
-			   MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
-			   MLX4_CMD_NATIVE);
-	if (!err)
-		memcpy(counter, mailbox->buf, MLX4_IF_STAT_SZ(1));
-
-	mlx4_free_cmd_mailbox(dev->dev, mailbox);
-
-	return err;
+	cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
+	memcpy(out_mad, &cpi, sizeof(cpi));
+	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
 }
 
 static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
-			struct ib_wc *in_wc, struct ib_grh *in_grh,
-			struct ib_mad *in_mad, struct ib_mad *out_mad)
+			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+			const struct ib_mad *in_mad, struct ib_mad *out_mad)
 {
+	struct mlx4_counter counter_stats;
 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
-	int err;
-	u32 counter_index = dev->counters[port_num - 1].counter_index & 0xffff;
-	u8 mode;
-	char				counter_buf[MLX4_IF_STAT_SZ(1)] __aligned(8);
-	union  mlx4_counter		*counter = (union mlx4_counter *)
-						   counter_buf;
+	struct counter_index *tmp_counter;
+	int err = IB_MAD_RESULT_FAILURE, stats_avail = 0;
 
 	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
 		return -EINVAL;
 
-	/* in case of default counter IB shares the counter with ETH */
-	/* the state could be -EEXIST or -ENOSPC */
-	if (dev->counters[port_num - 1].status) {
-		memset(out_mad->data, 0, sizeof out_mad->data);
-		err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
-	} else {
-		if (mlx4_ib_query_if_stat(dev, counter_index, counter, 0))
-			return IB_MAD_RESULT_FAILURE;
+	if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)
+		return iboe_process_mad_port_info((void *)(out_mad->data + 40));
 
-		memset(out_mad->data, 0, sizeof(out_mad->data));
-		mode = counter->control.cnt_mode & 0xFF;
-		err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
-		switch (mode & 0xf) {
+	memset(&counter_stats, 0, sizeof(counter_stats));
+	mutex_lock(&dev->counters_table[port_num - 1].mutex);
+	list_for_each_entry(tmp_counter,
+			    &dev->counters_table[port_num - 1].counters_list,
+			    list) {
+		err = mlx4_get_counter_stats(dev->dev,
+					     tmp_counter->index,
+					     &counter_stats, 0);
+		if (err) {
+			err = IB_MAD_RESULT_FAILURE;
+			stats_avail = 0;
+			break;
+		}
+		stats_avail = 1;
+	}
+	mutex_unlock(&dev->counters_table[port_num - 1].mutex);
+	if (stats_avail) {
+		memset(out_mad->data, 0, sizeof out_mad->data);
+		switch (counter_stats.counter_mode & 0xf) {
 		case 0:
-			edit_counter((void *)counter,
+			edit_counter(&counter_stats,
 				     (void *)(out_mad->data + 40),
 				     in_mad->mad_hdr.attr_id);
-			break;
-		case 1:
-			edit_counter_ext((void *)counter,
-					 (void *)(out_mad->data + 40),
-					 in_mad->mad_hdr.attr_id);
+			err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
 			break;
 		default:
 			err = IB_MAD_RESULT_FAILURE;
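The rewritten edit_counter() maps the HCA's 64-bit byte and frame counters onto the PMA attributes: PortCounters carries 32-bit fields expressed in units of four octets (hence the >> 2), while PortCountersExt keeps full 64-bit values. A standalone sketch of that conversion (not part of the patch; it assumes ASSIGN_32BIT_COUNTER saturates at the 32-bit maximum and it ignores the be64_to_cpu/cpu_to_be32 byte-order conversions done by the real code):

#include <stdio.h>
#include <stdint.h>

/* Assumed behaviour of ASSIGN_32BIT_COUNTER: saturate rather than wrap. */
static uint32_t to_32bit(uint64_t v)
{
	return v > UINT32_MAX ? UINT32_MAX : (uint32_t)v;
}

int main(void)
{
	uint64_t tx_bytes = 0x2400000000ULL;	/* hypothetical byte count */

	/* PortCounters: 32-bit, in dwords (4-octet units). */
	uint32_t pma_xmit_data = to_32bit(tx_bytes >> 2);

	/* PortCountersExt: 64-bit, also in dwords. */
	uint64_t pma_xmit_data_ext = tx_bytes >> 2;

	printf("32-bit port_xmit_data     = 0x%08x%s\n", pma_xmit_data,
	       pma_xmit_data == UINT32_MAX ? " (saturated)" : "");
	printf("64-bit port_xmit_data ext = 0x%llx\n",
	       (unsigned long long)pma_xmit_data_ext);
	return 0;
}
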
@@ -1022,19 +945,41 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 }
 
 int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
-			struct ib_wc *in_wc, struct ib_grh *in_grh,
-			struct ib_mad *in_mad, struct ib_mad *out_mad)
+			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+			const struct ib_mad_hdr *in, size_t in_mad_size,
+			struct ib_mad_hdr *out, size_t *out_mad_size,
+			u16 *out_mad_pkey_index)
 {
-	switch (rdma_port_get_link_layer(ibdev, port_num)) {
-	case IB_LINK_LAYER_INFINIBAND:
+	struct mlx4_ib_dev *dev = to_mdev(ibdev);
+	const struct ib_mad *in_mad = (const struct ib_mad *)in;
+	struct ib_mad *out_mad = (struct ib_mad *)out;
+	enum rdma_link_layer link = rdma_port_get_link_layer(ibdev, port_num);
+
+	if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+			 *out_mad_size != sizeof(*out_mad)))
+		return IB_MAD_RESULT_FAILURE;
+
+	/* iboe_process_mad() which uses the HCA flow-counters to implement IB PMA
+	 * queries, should be called only by VFs and for that specific purpose
+	 */
+	if (link == IB_LINK_LAYER_INFINIBAND) {
+		if (mlx4_is_slave(dev->dev) &&
+		    (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
+		     (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS ||
+		      in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT ||
+		      in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)))
+			return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
+						in_grh, in_mad, out_mad);
+
 		return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
 				      in_grh, in_mad, out_mad);
-	case IB_LINK_LAYER_ETHERNET:
-		return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
-					  in_grh, in_mad, out_mad);
-	default:
-		return -EINVAL;
 	}
+
+	if (link == IB_LINK_LAYER_ETHERNET)
+		return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
+					in_grh, in_mad, out_mad);
+
+	return -EINVAL;
 }
 
 static void send_handler(struct ib_mad_agent *agent,
@@ -1059,7 +1004,7 @@ int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
 				agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
 							      q ? IB_QPT_GSI : IB_QPT_SMI,
 							      NULL, 0, send_handler,
-							      NULL, NULL);
+							      NULL, NULL, 0);
 				if (IS_ERR(agent)) {
 					ret = PTR_ERR(agent);
 					goto err;
@@ -1106,7 +1051,7 @@ static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num)
 
 	if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
 		mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
-					    MLX4_EQ_PORT_INFO_LID_CHANGE_MASK, 0, 0);
+					    MLX4_EQ_PORT_INFO_LID_CHANGE_MASK);
 }
 
 static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
@@ -1118,9 +1063,26 @@ static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
 		if (!dev->sriov.is_going_down) {
 			mlx4_ib_mcg_port_cleanup(&dev->sriov.demux[port_num - 1], 0);
 			mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
-						    MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK, 0, 0);
+						    MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK);
 		}
 	}
+
+	/* Update the sl to vl table from inside client rereg
+	 * only if in secure-host mode (snooping is not possible)
+	 * and the sl-to-vl change event is not generated by FW.
+	 */
+	if (!mlx4_is_slave(dev->dev) &&
+	    dev->dev->flags & MLX4_FLAG_SECURE_HOST &&
+	    !(dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT)) {
+		if (mlx4_is_master(dev->dev))
+			/* already in work queue from mlx4_ib_event queueing
+			 * mlx4_handle_port_mgmt_change_event, which calls
+			 * this procedure. Therefore, call sl2vl_update directly.
+			 */
+			mlx4_ib_sl2vl_update(dev, port_num);
+		else
+			mlx4_sched_ib_sl2vl_update_work(dev, port_num);
+	}
 	mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_CLIENT_REREGISTER);
 }
 
@@ -1204,11 +1166,6 @@ void handle_port_mgmt_change_event(struct work_struct *work)
 			u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid);
 			u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf;
 			update_sm_ah(dev, port, lid, sl);
-			mlx4_ib_dispatch_event(dev, port, IB_EVENT_SM_CHANGE);
-			if (mlx4_is_master(dev->dev))
-				mlx4_gen_slaves_port_mgt_ev(dev->dev, port,
-							    changed_attr & MSTR_SM_CHANGE_MASK,
-							    lid, sl);
 		}
 
 		/* Check if it is a lid change event */
@@ -1217,11 +1174,32 @@ void handle_port_mgmt_change_event(struct work_struct *work)
 
 		/* Generate GUID changed event */
 		if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) {
+			if (mlx4_is_master(dev->dev)) {
+				union ib_gid gid;
+				int err = 0;
+
+				if (!eqe->event.port_mgmt_change.params.port_info.gid_prefix)
+					err = __mlx4_ib_query_gid(&dev->ib_dev, port, 0, &gid, 1);
+				else
+					gid.global.subnet_prefix =
+						eqe->event.port_mgmt_change.params.port_info.gid_prefix;
+				if (err) {
+					pr_warn("Could not change QP1 subnet prefix for port %d: query_gid error (%d)\n",
+						port, err);
+				} else {
+					pr_debug("Changing QP1 subnet prefix for port %d. old=0x%llx. new=0x%llx\n",
+						 port,
+						 (long long)atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix),
+						 (long long)be64_to_cpu(gid.global.subnet_prefix));
+					atomic64_set(&dev->sriov.demux[port - 1].subnet_prefix,
+						     be64_to_cpu(gid.global.subnet_prefix));
+				}
+			}
 			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
 			/*if master, notify all slaves*/
 			if (mlx4_is_master(dev->dev))
 				mlx4_gen_slaves_port_mgt_ev(dev->dev, port,
-							    MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK, 0, 0);
+							    MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK);
 		}
 
 		if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK)
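When the GID prefix changes, the master now stores the new subnet prefix into an atomic64 in the per-port demux context (in CPU byte order, after be64_to_cpu), so readers in the MAD/CM paths can pick it up without taking a lock. A standalone sketch of that publish/read pattern using C11 atomics (not part of the patch; demux_ctx is a hypothetical stand-in):

#include <stdio.h>
#include <stdint.h>
#include <stdatomic.h>

/* Hypothetical stand-in for the per-port demux context. */
struct demux_ctx {
	_Atomic uint64_t subnet_prefix;	/* CPU byte order */
};

static void update_prefix(struct demux_ctx *ctx, uint64_t new_prefix)
{
	/* Writers publish the whole 64-bit value in one shot ... */
	atomic_store_explicit(&ctx->subnet_prefix, new_prefix,
			      memory_order_relaxed);
}

static uint64_t read_prefix(struct demux_ctx *ctx)
{
	/* ... so readers never observe a torn value. */
	return atomic_load_explicit(&ctx->subnet_prefix,
				    memory_order_relaxed);
}

int main(void)
{
	struct demux_ctx ctx;

	atomic_init(&ctx.subnet_prefix, 0);
	update_prefix(&ctx, 0xfe80000000000000ULL);	/* default IB prefix */
	printf("subnet prefix = 0x%llx\n",
	       (unsigned long long)read_prefix(&ctx));
	return 0;
}
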
@@ -1244,6 +1222,23 @@ void handle_port_mgmt_change_event(struct work_struct *work)
 			handle_slaves_guid_change(dev, port, tbl_block, change_bitmap);
 		}
 		break;
+
+	case MLX4_DEV_PMC_SUBTYPE_SL_TO_VL_MAP:
+		/* cache sl to vl mapping changes for use in
+		 * filling QP1 LRH VL field when sending packets
+		 */
+		if (!mlx4_is_slave(dev->dev)) {
+			union sl2vl_tbl_to_u64 sl2vl64;
+			int jj;
+
+			for (jj = 0; jj < 8; jj++) {
+				sl2vl64.sl8[jj] =
+					eqe->event.port_mgmt_change.params.sl2vl_tbl_change_info.sl2vl_table[jj];
+				pr_debug("sl2vl[%d] = %02x\n", jj, sl2vl64.sl8[jj]);
+			}
+			atomic64_set(&dev->sl2vl[port - 1], sl2vl64.sl64);
+		}
+		break;
 	default:
 		pr_warn("Unsupported subtype 0x%x for "
 			"Port Management Change event\n", eqe->subtype);
@@ -1288,7 +1283,7 @@ static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
 
 	sg_list.addr = tun_qp->ring[index].map;
 	sg_list.length = size;
-	sg_list.lkey = ctx->mr->lkey;
+	sg_list.lkey = ctx->pd->local_dma_lkey;
 
 	recv_wr.next = NULL;
 	recv_wr.sg_list = &sg_list;
@@ -1327,10 +1322,11 @@ static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
 int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
 			 enum ib_qp_type dest_qpt, u16 pkey_index,
 			 u32 remote_qpn, u32 qkey, struct ib_ah_attr *attr,
-			 u8 *s_mac, struct ib_mad *mad)
+			 u8 *s_mac, u16 vlan_id, struct ib_mad *mad)
 {
 	struct ib_sge list;
-	struct ib_send_wr wr, *bad_wr;
+	struct ib_ud_wr wr;
+	struct ib_send_wr *bad_wr;
 	struct mlx4_ib_demux_pv_ctx *sqp_ctx;
 	struct mlx4_ib_demux_pv_qp *sqp;
 	struct mlx4_mad_snd_buf *sqp_mad;
@@ -1349,10 +1345,6 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
 	if (!sqp_ctx || sqp_ctx->state != DEMUX_PV_STATE_ACTIVE)
 		return -EAGAIN;
 
-	/* QP0 forwarding only for Dom0 */
-	if (dest_qpt == IB_QPT_SMI && (mlx4_master_func_num(dev->dev) != slave))
-		return -EINVAL;
-
 	if (dest_qpt == IB_QPT_SMI) {
 		src_qpnum = 0;
 		sqp = &sqp_ctx->qp[0];
@@ -1403,58 +1395,53 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
 
 	list.addr = sqp->tx_ring[wire_tx_ix].buf.map;
 	list.length = sizeof (struct mlx4_mad_snd_buf);
-	list.lkey = sqp_ctx->mr->lkey;
+	list.lkey = sqp_ctx->pd->local_dma_lkey;
 
-	wr.wr.ud.ah = ah;
-	wr.wr.ud.port_num = port;
-	wr.wr.ud.pkey_index = wire_pkey_ix;
-	wr.wr.ud.remote_qkey = qkey;
-	wr.wr.ud.remote_qpn = remote_qpn;
-	wr.next = NULL;
-	wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum);
-	wr.sg_list = &list;
-	wr.num_sge = 1;
-	wr.opcode = IB_WR_SEND;
-	wr.send_flags = IB_SEND_SIGNALED;
+	wr.ah = ah;
+	wr.port_num = port;
+	wr.pkey_index = wire_pkey_ix;
+	wr.remote_qkey = qkey;
+	wr.remote_qpn = remote_qpn;
+	wr.wr.next = NULL;
+	wr.wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum);
+	wr.wr.sg_list = &list;
+	wr.wr.num_sge = 1;
+	wr.wr.opcode = IB_WR_SEND;
+	wr.wr.send_flags = IB_SEND_SIGNALED;
 	if (s_mac)
 		memcpy(to_mah(ah)->av.eth.s_mac, s_mac, 6);
+	if (vlan_id < 0x1000)
+		vlan_id |= (attr->sl & 7) << 13;
+	to_mah(ah)->av.eth.vlan = cpu_to_be16(vlan_id);
 
 
-	ret = ib_post_send(send_qp, &wr, &bad_wr);
+	ret = ib_post_send(send_qp, &wr.wr, &bad_wr);
+	if (!ret)
+		return 0;
+
+	spin_lock(&sqp->tx_lock);
+	sqp->tx_ix_tail++;
+	spin_unlock(&sqp->tx_lock);
+	sqp->tx_ring[wire_tx_ix].ah = NULL;
 out:
-	if (ret)
-		ib_destroy_ah(ah);
+	ib_destroy_ah(ah);
 	return ret;
 }
 
 static int get_slave_base_gid_ix(struct mlx4_ib_dev *dev, int slave, int port)
 {
-	int gids;
-	int vfs;
-
 	if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
 		return slave;
-
-	gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
-	vfs = dev->dev->num_vfs;
-
-	if (slave == 0)
-		return 0;
-	if (slave <= gids % vfs)
-		return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave - 1);
-
-	return MLX4_ROCE_PF_GIDS + (gids % vfs) + ((gids / vfs) * (slave - 1));
+	return mlx4_get_base_gid_ix(dev->dev, slave, port);
 }
 
-static int get_real_sgid_index(struct mlx4_ib_dev *dev, int slave, int port,
-			       struct ib_ah_attr *ah_attr)
+static void fill_in_real_sgid_index(struct mlx4_ib_dev *dev, int slave, int port,
+				    struct ib_ah_attr *ah_attr)
 {
-	if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND) {
+	if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
 		ah_attr->grh.sgid_index = slave;
-		return 0;
-	}
-	ah_attr->grh.sgid_index += get_slave_base_gid_ix(dev, slave, port);
-	return 0;
+	else
+		ah_attr->grh.sgid_index += get_slave_base_gid_ix(dev, slave, port);
 }
 
 static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc)
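mlx4_ib_send_to_wire() now builds an ib_ud_wr, which embeds the generic ib_send_wr as its first member, and encodes the caller's VLAN ID and SL into the AV's 16-bit VLAN word (SL in bits 15:13, VID in bits 11:0; values of 0x1000 and above, such as the driver's 0xffff sentinel, are passed through to mean "no VLAN"). A standalone sketch of both the embedded-struct layout and the tag composition (not part of the patch; the structs are simplified stand-ins):

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins for struct ib_send_wr / struct ib_ud_wr. */
struct send_wr {
	uint64_t wr_id;
	int opcode;
};

struct ud_wr {
	struct send_wr wr;	/* generic part embedded first */
	uint32_t remote_qpn;
	uint32_t remote_qkey;
	uint8_t  port_num;
};

/* Compose the 16-bit VLAN word: SL in bits 15:13, VID in bits 11:0. */
static uint16_t make_vlan_word(uint16_t vlan_id, uint8_t sl)
{
	if (vlan_id < 0x1000)
		vlan_id |= (uint16_t)(sl & 7) << 13;
	return vlan_id;
}

int main(void)
{
	struct ud_wr wr = {
		.wr = { .wr_id = 1, .opcode = 0 },
		.remote_qpn = 0x123456,
		.remote_qkey = 0x80010000,
		.port_num = 1,
	};

	/* Posting passes &wr.wr, i.e. the embedded generic header. */
	printf("post wr_id=%llu qpn=0x%x vlan=0x%04x\n",
	       (unsigned long long)wr.wr.wr_id, wr.remote_qpn,
	       make_vlan_word(0x064, 5));
	return 0;
}
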
@@ -1467,6 +1454,8 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
 	struct ib_ah_attr ah_attr;
 	u8 *slave_id;
 	int slave;
+	int port;
+	u16 vlan_id;
 
 	/* Get slave that sent this packet */
 	if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn ||
@@ -1482,11 +1471,6 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
 			     "belongs to another slave\n", wc->src_qp);
 		return;
 	}
-	if (slave != mlx4_master_func_num(dev->dev) && !(wc->src_qp & 0x2)) {
-		mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: "
-			     "non-master trying to send QP0 packets\n", wc->src_qp);
-		return;
-	}
 
 	/* Map transaction ID */
 	ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map,
@@ -1514,6 +1498,12 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
 
 	/* Class-specific handling */
 	switch (tunnel->mad.mad_hdr.mgmt_class) {
+	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
+	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
+		if (slave != mlx4_master_func_num(dev->dev) &&
+		    !mlx4_vf_smi_enabled(dev->dev, slave, ctx->port))
+			return;
+		break;
 	case IB_MGMT_CLASS_SUBN_ADM:
 		if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave,
 			      (struct ib_sa_mad *) &tunnel->mad))
@@ -1542,15 +1532,22 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
 	 * standard address handle by decoding the tunnelled mlx4_ah fields */
 	memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av));
 	ah.ibah.device = ctx->ib_dev;
+
+	port = be32_to_cpu(ah.av.ib.port_pd) >> 24;
+	port = mlx4_slave_convert_port(dev->dev, slave, port);
+	if (port < 0)
+		return;
+	ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff));
+
 	mlx4_ib_query_ah(&ah.ibah, &ah_attr);
 	if (ah_attr.ah_flags & IB_AH_GRH)
-		if (get_real_sgid_index(dev, slave, ctx->port, &ah_attr))
-			return;
+		fill_in_real_sgid_index(dev, slave, ctx->port, &ah_attr);
+
 	memcpy(ah_attr.dmac, tunnel->hdr.mac, 6);
-	ah_attr.vlan_id = tunnel->hdr.vlan;
+	vlan_id = be16_to_cpu(tunnel->hdr.vlan);
 	/* if the slave has a default vlan, use it */
 	mlx4_get_slave_default_vlan(dev->dev, ctx->port, slave,
-				    &ah_attr.vlan_id, &ah_attr.sl);
+				    &vlan_id, &ah_attr.sl);
 
 	mlx4_ib_send_to_wire(dev, slave, ctx->port,
 			     is_proxy_qp0(dev, wc->src_qp, slave) ?
@@ -1558,7 +1555,7 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
 			     be16_to_cpu(tunnel->hdr.pkey_index),
 			     be32_to_cpu(tunnel->hdr.remote_qpn),
 			     be32_to_cpu(tunnel->hdr.qkey),
-			     &ah_attr, wc->smac, &tunnel->mad);
+			     &ah_attr, wc->smac, vlan_id, &tunnel->mad);
 }
 
 static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
@@ -1603,9 +1600,7 @@ static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
 							tun_qp->ring[i].addr,
 							rx_buf_size,
 							DMA_FROM_DEVICE);
-		if (unlikely(ib_dma_mapping_error(ctx->ib_dev,
-						  tun_qp->ring[i].map))) {
-			mlx4_ib_warn(ctx->ib_dev, "ib_dma_map_single failed\n");
+		if (ib_dma_mapping_error(ctx->ib_dev, tun_qp->ring[i].map)) {
 			kfree(tun_qp->ring[i].addr);
 			goto err;
 		}
@@ -1621,9 +1616,8 @@ static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
 					  tun_qp->tx_ring[i].buf.addr,
 					  tx_buf_size,
 					  DMA_TO_DEVICE);
-		if (unlikely(ib_dma_mapping_error(ctx->ib_dev,
-						  tun_qp->tx_ring[i].buf.map))) {
-			mlx4_ib_warn(ctx->ib_dev, "ib_dma_map_single failed\n");
+		if (ib_dma_mapping_error(ctx->ib_dev,
+					 tun_qp->tx_ring[i].buf.map)) {
 			kfree(tun_qp->tx_ring[i].buf.addr);
 			goto tx_err;
 		}
@@ -1783,7 +1777,8 @@ static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
 	qp_init_attr.init_attr.cap.max_recv_sge = 1;
 	if (create_tun) {
 		qp_init_attr.init_attr.qp_type = IB_QPT_UD;
-		qp_init_attr.init_attr.create_flags = (enum ib_qp_create_flags)MLX4_IB_SRIOV_TUNNEL_QP;
+		qp_init_attr.init_attr.create_flags =
+		    (enum ib_qp_create_flags)MLX4_IB_SRIOV_TUNNEL_QP;
 		qp_init_attr.port = ctx->port;
 		qp_init_attr.slave = ctx->slave;
 		qp_init_attr.proxy_qp_type = qp_type;
@@ -1791,7 +1786,8 @@ static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
 			   IB_QP_QKEY | IB_QP_PORT;
 	} else {
 		qp_init_attr.init_attr.qp_type = qp_type;
-		qp_init_attr.init_attr.create_flags = (enum ib_qp_create_flags)MLX4_IB_SRIOV_SQP;
+		qp_init_attr.init_attr.create_flags =
+		    (enum ib_qp_create_flags)MLX4_IB_SRIOV_SQP;
 		qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY;
 	}
 	qp_init_attr.init_attr.port_num = ctx->port;
@@ -1811,7 +1807,8 @@ static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
 	ret = 0;
 	if (create_tun)
 		ret = find_slave_port_pkey_ix(to_mdev(ctx->ib_dev), ctx->slave,
-					      ctx->port, 0xFFFF, &attr.pkey_index);
+					      ctx->port, IB_DEFAULT_PKEY_FULL,
+					      &attr.pkey_index);
 	if (ret || !create_tun)
 		attr.pkey_index =
 			to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
@@ -1948,14 +1945,15 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
 			       int create_tun, struct mlx4_ib_demux_pv_ctx *ctx)
 {
 	int ret, cq_size;
+	struct ib_cq_init_attr cq_attr = {};
 
 	if (ctx->state != DEMUX_PV_STATE_DOWN)
 		return -EEXIST;
 
 	ctx->state = DEMUX_PV_STATE_STARTING;
-	/* have QP0 only on port owner, and only if link layer is IB */
-	if (ctx->slave == mlx4_master_func_num(to_mdev(ctx->ib_dev)->dev) &&
-	    rdma_port_get_link_layer(ibdev, ctx->port) == IB_LINK_LAYER_INFINIBAND)
+	/* have QP0 only if link layer is IB */
+	if (rdma_port_get_link_layer(ibdev, ctx->port) ==
+	    IB_LINK_LAYER_INFINIBAND)
 		ctx->has_smi = 1;
 
 	if (ctx->has_smi) {
@@ -1976,34 +1974,28 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
 	if (ctx->has_smi)
 		cq_size *= 2;
 
+	cq_attr.cqe = cq_size;
 	ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler,
-			       NULL, ctx, cq_size, 0);
+			       NULL, ctx, &cq_attr);
 	if (IS_ERR(ctx->cq)) {
 		ret = PTR_ERR(ctx->cq);
 		pr_err("Couldn't create tunnel CQ (%d)\n", ret);
 		goto err_buf;
 	}
 
-	ctx->pd = ib_alloc_pd(ctx->ib_dev);
+	ctx->pd = ib_alloc_pd(ctx->ib_dev, 0);
 	if (IS_ERR(ctx->pd)) {
 		ret = PTR_ERR(ctx->pd);
 		pr_err("Couldn't create tunnel PD (%d)\n", ret);
 		goto err_cq;
 	}
 
-	ctx->mr = ib_get_dma_mr(ctx->pd, IB_ACCESS_LOCAL_WRITE);
-	if (IS_ERR(ctx->mr)) {
-		ret = PTR_ERR(ctx->mr);
-		pr_err("Couldn't get tunnel DMA MR (%d)\n", ret);
-		goto err_pd;
-	}
-
 	if (ctx->has_smi) {
 		ret = create_pv_sqp(ctx, IB_QPT_SMI, create_tun);
 		if (ret) {
 			pr_err("Couldn't create %s QP0 (%d)\n",
 			       create_tun ? "tunnel for" : "",  ret);
-			goto err_mr;
+			goto err_pd;
 		}
 	}
 
@@ -2040,10 +2032,6 @@ err_qp0:
 		ib_destroy_qp(ctx->qp[0].qp);
 	ctx->qp[0].qp = NULL;
 
-err_mr:
-	ib_dereg_mr(ctx->mr);
-	ctx->mr = NULL;
-
 err_pd:
 	ib_dealloc_pd(ctx->pd);
 	ctx->pd = NULL;
@@ -2080,8 +2068,6 @@ static void destroy_pv_resources(struct mlx4_ib_dev *dev, int slave, int port,
 		ib_destroy_qp(ctx->qp[1].qp);
 		ctx->qp[1].qp = NULL;
 		mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, 1);
-		ib_dereg_mr(ctx->mr);
-		ctx->mr = NULL;
 		ib_dealloc_pd(ctx->pd);
 		ctx->pd = NULL;
 		ib_destroy_cq(ctx->cq);
@@ -2146,7 +2132,16 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
 	ctx->port = port;
 	ctx->ib_dev = &dev->ib_dev;
 
-	for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
+	for (i = 0;
+	     i < min(dev->dev->caps.sqp_demux,
+	     (u16)(dev->dev->persist->num_vfs + 1));
+	     i++) {
+		struct mlx4_active_ports actv_ports =
+			mlx4_get_active_ports(dev->dev, i);
+
+		if (!test_bit(port - 1, actv_ports.ports))
+			continue;
+
 		ret = alloc_pv_object(dev, i, port, &ctx->tun[i]);
 		if (ret) {
 			ret = -ENOMEM;
@@ -2161,7 +2156,7 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
 	}
 
 	snprintf(name, sizeof name, "mlx4_ibt%d", port);
-	ctx->wq = create_singlethread_workqueue(name);
+	ctx->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
 	if (!ctx->wq) {
 		pr_err("Failed to create tunnelling WQ for port %d\n", port);
 		ret = -ENOMEM;
@@ -2169,7 +2164,7 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
 	}
 
 	snprintf(name, sizeof name, "mlx4_ibud%d", port);
-	ctx->ud_wq = create_singlethread_workqueue(name);
+	ctx->ud_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
 	if (!ctx->ud_wq) {
 		pr_err("Failed to create up/down WQ for port %d\n", port);
 		ret = -ENOMEM;
@@ -2205,8 +2200,6 @@ static void mlx4_ib_free_sqp_ctx(struct mlx4_ib_demux_pv_ctx *sqp_ctx)
 		ib_destroy_qp(sqp_ctx->qp[1].qp);
 		sqp_ctx->qp[1].qp = NULL;
 		mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_GSI, 0);
-		ib_dereg_mr(sqp_ctx->mr);
-		sqp_ctx->mr = NULL;
 		ib_dealloc_pd(sqp_ctx->pd);
 		sqp_ctx->pd = NULL;
 		ib_destroy_cq(sqp_ctx->cq);
@@ -2295,22 +2288,25 @@ int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
 		if (err)
 			goto demux_err;
 		dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
+		atomic64_set(&dev->sriov.demux[i].subnet_prefix,
+			     be64_to_cpu(gid.global.subnet_prefix));
 		err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
 				      &dev->sriov.sqps[i]);
 		if (err)
 			goto demux_err;
 		err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1);
 		if (err)
-			goto demux_err;
+			goto free_pv;
 	}
 	mlx4_ib_master_tunnels(dev, 1);
 	return 0;
 
+free_pv:
+	free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
 demux_err:
-	while (i > 0) {
+	while (--i >= 0) {
 		free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
 		mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
-		--i;
 	}
 	mlx4_ib_device_unregister_sysfs(dev);
 
diff --git a/sys/dev/mlx4/mlx4_ib/mlx4_ib_main.c b/sys/dev/mlx4/mlx4_ib/mlx4_ib_main.c
index 298e7e698937..3d04980a46b3 100644
--- a/sys/dev/mlx4/mlx4_ib/mlx4_ib_main.c
+++ b/sys/dev/mlx4/mlx4_ib/mlx4_ib_main.c
@@ -36,97 +36,54 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/errno.h>
+#include <linux/etherdevice.h>
 #include <linux/netdevice.h>
 #include <linux/inetdevice.h>
 #include <linux/if_vlan.h>
 #include <linux/fs.h>
+#include <linux/rcupdate.h>
+#include <linux/notifier.h>
+#include <linux/delay.h>
+
 #include <net/ipv6.h>
 
 #include <rdma/ib_smi.h>
 #include <rdma/ib_user_verbs.h>
-#include <rdma/ib_user_verbs_exp.h>
 #include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
 
 #include <dev/mlx4/driver.h>
 #include <dev/mlx4/cmd.h>
+#include <dev/mlx4/qp.h>
 #include <linux/sched.h>
 #include <linux/page.h>
 #include <linux/printk.h>
 #include "mlx4_ib.h"
-#include "mlx4_exp.h"
-#include "user.h"
+#include <rdma/mlx4-abi.h>
 #include "wc.h"
 
 #define DRV_NAME	MLX4_IB_DRV_NAME
-#define DRV_VERSION	"1.0"
+#define DRV_VERSION	"3.4.1-BETA"
+#define DRV_RELDATE	"October 2017"
 
-#define MLX4_IB_DRIVER_PROC_DIR_NAME "driver/mlx4_ib"
-#define MLX4_IB_MRS_PROC_DIR_NAME "mrs"
 #define MLX4_IB_FLOW_MAX_PRIO 0xFFF
 #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
+#define MLX4_IB_CARD_REV_A0   0xA0
 
 MODULE_AUTHOR("Roland Dreier");
 MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
 MODULE_LICENSE("Dual BSD/GPL");
-#ifdef __linux__
-MODULE_VERSION(DRV_VERSION);
-#endif
-
-int mlx4_ib_sm_guid_assign = 1;
 
+int mlx4_ib_sm_guid_assign = 0;
 module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
-MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 1)");
-
-enum {
-	MAX_NUM_STR_BITMAP = 1 << 15,
-	DEFAULT_TBL_VAL = -1
-};
-
-static struct mlx4_dbdf2val_lst dev_assign_str = {
-	.name		= "dev_assign_str param",
-	.num_vals	= 1,
-	.def_val	= {DEFAULT_TBL_VAL},
-	.range		= {0, MAX_NUM_STR_BITMAP - 1}
-};
-module_param_string(dev_assign_str, dev_assign_str.str,
-		    sizeof(dev_assign_str.str), 0444);
-MODULE_PARM_DESC(dev_assign_str,
-		 "Map device function numbers to IB device numbers (e.g. '0000:04:00.0-0,002b:1c:0b.a-1,...').\n"
-		 "\t\tHexadecimal digits for the device function (e.g. 002b:1c:0b.a) and decimal for IB device numbers (e.g. 1).\n"
-		 "\t\tMax supported devices - 32");
-
-
-static unsigned long *dev_num_str_bitmap;
-static spinlock_t dev_num_str_lock;
+MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");
 
 static const char mlx4_ib_version[] =
 	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
-	DRV_VERSION "\n";
-
-struct update_gid_work {
-	struct work_struct	work;
-	union ib_gid		gids[128];
-	struct mlx4_ib_dev     *dev;
-	int			port;
-};
-
-struct dev_rec {
-	int	bus;
-	int	dev;
-	int	func;
-	int	nr;
-};
-
-static int dr_active;
+	DRV_VERSION " (" DRV_RELDATE ")\n";
 
 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
 
-static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev, struct net_device*,
-				 unsigned long);
-
-static u8 mlx4_ib_get_dev_port(struct net_device *dev,
-                                        struct mlx4_ib_dev *ibdev);
-
 static struct workqueue_struct *wq;
 
 static void init_query_mad(struct ib_smp *mad)
@@ -137,12 +94,11 @@ static void init_query_mad(struct ib_smp *mad)
 	mad->method	   = IB_MGMT_METHOD_GET;
 }
 
-static union ib_gid zgid;
-
 static int check_flow_steering_support(struct mlx4_dev *dev)
 {
 	int eth_num_ports = 0;
 	int ib_num_ports = 0;
+
 	int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;
 
 	if (dmfs) {
@@ -156,20 +112,353 @@ static int check_flow_steering_support(struct mlx4_dev *dev)
 			(!eth_num_ports ||
 			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
 		if (ib_num_ports && mlx4_is_mfunc(dev)) {
+			pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
 			dmfs = 0;
 		}
 	}
 	return dmfs;
 }
 
-int mlx4_ib_query_device(struct ib_device *ibdev,
-				struct ib_device_attr *props)
+static int num_ib_ports(struct mlx4_dev *dev)
+{
+	int ib_ports = 0;
+	int i;
+
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+		ib_ports++;
+
+	return ib_ports;
+}
+
+static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_num)
+{
+	struct mlx4_ib_dev *ibdev = to_mdev(device);
+	struct net_device *dev;
+
+	rcu_read_lock();
+	dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num);
+
+#if 0
+	if (dev) {
+		if (mlx4_is_bonded(ibdev->dev)) {
+			struct net_device *upper = NULL;
+
+			upper = netdev_master_upper_dev_get_rcu(dev);
+			if (upper) {
+				struct net_device *active;
+
+				active = bond_option_active_slave_get_rcu(netdev_priv(upper));
+				if (active)
+					dev = active;
+			}
+		}
+	}
+#endif
+	if (dev)
+		dev_hold(dev);
+
+	rcu_read_unlock();
+	return dev;
+}
+
+static int mlx4_ib_update_gids_v1(struct gid_entry *gids,
+				  struct mlx4_ib_dev *ibdev,
+				  u8 port_num)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	int err;
+	struct mlx4_dev *dev = ibdev->dev;
+	int i;
+	union ib_gid *gid_tbl;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return -ENOMEM;
+
+	gid_tbl = mailbox->buf;
+
+	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
+		memcpy(&gid_tbl[i], &gids[i].gid, sizeof(union ib_gid));
+
+	err = mlx4_cmd(dev, mailbox->dma,
+		       MLX4_SET_PORT_GID_TABLE << 8 | port_num,
+		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+		       MLX4_CMD_WRAPPED);
+	if (mlx4_is_bonded(dev))
+		err += mlx4_cmd(dev, mailbox->dma,
+				MLX4_SET_PORT_GID_TABLE << 8 | 2,
+				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+				MLX4_CMD_WRAPPED);
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+
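+/* RoCE v1/v2 capable HCAs take an extended GID table entry carrying a type
+ * and version next to the GID: v2 entries get version 2, and v4-mapped
+ * addresses keep only the IPv4 part (first 12 gid bytes cleared).
+ */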
+static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
+				     struct mlx4_ib_dev *ibdev,
+				     u8 port_num)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	int err;
+	struct mlx4_dev *dev = ibdev->dev;
+	int i;
+	struct {
+		union ib_gid	gid;
+		__be32		rsrvd1[2];
+		__be16		rsrvd2;
+		u8		type;
+		u8		version;
+		__be32		rsrvd3;
+	} *gid_tbl;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return -ENOMEM;
+
+	gid_tbl = mailbox->buf;
+	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
+		memcpy(&gid_tbl[i].gid, &gids[i].gid, sizeof(union ib_gid));
+		if (gids[i].gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
+			gid_tbl[i].version = 2;
+			if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
+				gid_tbl[i].type = 1;
+			else
+				memset(&gid_tbl[i].gid, 0, 12);
+		}
+	}
+
+	err = mlx4_cmd(dev, mailbox->dma,
+		       MLX4_SET_PORT_ROCE_ADDR << 8 | port_num,
+		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+		       MLX4_CMD_WRAPPED);
+	if (mlx4_is_bonded(dev))
+		err += mlx4_cmd(dev, mailbox->dma,
+				MLX4_SET_PORT_ROCE_ADDR << 8 | 2,
+				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+				MLX4_CMD_WRAPPED);
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+
+static int mlx4_ib_update_gids(struct gid_entry *gids,
+			       struct mlx4_ib_dev *ibdev,
+			       u8 port_num)
+{
+	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
+		return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num);
+
+	return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
+}
+
+static int mlx4_ib_add_gid(struct ib_device *device,
+			   u8 port_num,
+			   unsigned int index,
+			   const union ib_gid *gid,
+			   const struct ib_gid_attr *attr,
+			   void **context)
+{
+	struct mlx4_ib_dev *ibdev = to_mdev(device);
+	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
+	struct mlx4_port_gid_table   *port_gid_table;
+	int free = -1, found = -1;
+	int ret = 0;
+	int hw_update = 0;
+	int i;
+	struct gid_entry *gids = NULL;
+
+	if (!rdma_cap_roce_gid_table(device, port_num))
+		return -EINVAL;
+
+	if (port_num > MLX4_MAX_PORTS)
+		return -EINVAL;
+
+	if (!context)
+		return -EINVAL;
+
+	port_gid_table = &iboe->gids[port_num - 1];
+	spin_lock_bh(&iboe->lock);
+	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
+		if (!memcmp(&port_gid_table->gids[i].gid, gid, sizeof(*gid)) &&
+		    (port_gid_table->gids[i].gid_type == attr->gid_type))  {
+			found = i;
+			break;
+		}
+		if (free < 0 && !memcmp(&port_gid_table->gids[i].gid, &zgid, sizeof(*gid)))
+			free = i; /* HW has space */
+	}
+
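+	/* Either take another reference on a matching entry, or claim a free
+	 * slot and push the updated table to hardware.
+	 */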
+	if (found < 0) {
+		if (free < 0) {
+			ret = -ENOSPC;
+		} else {
+			port_gid_table->gids[free].ctx = kmalloc(sizeof(*port_gid_table->gids[free].ctx), GFP_ATOMIC);
+			if (!port_gid_table->gids[free].ctx) {
+				ret = -ENOMEM;
+			} else {
+				*context = port_gid_table->gids[free].ctx;
+				memcpy(&port_gid_table->gids[free].gid, gid, sizeof(*gid));
+				port_gid_table->gids[free].gid_type = attr->gid_type;
+				port_gid_table->gids[free].ctx->real_index = free;
+				port_gid_table->gids[free].ctx->refcount = 1;
+				hw_update = 1;
+			}
+		}
+	} else {
+		struct gid_cache_context *ctx = port_gid_table->gids[found].ctx;
+		*context = ctx;
+		ctx->refcount++;
+	}
+	if (!ret && hw_update) {
+		gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
+		if (!gids) {
+			ret = -ENOMEM;
+		} else {
+			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
+				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
+				gids[i].gid_type = port_gid_table->gids[i].gid_type;
+			}
+		}
+	}
+	spin_unlock_bh(&iboe->lock);
+
+	if (!ret && hw_update) {
+		ret = mlx4_ib_update_gids(gids, ibdev, port_num);
+		kfree(gids);
+	}
+
+	return ret;
+}
+
+static int mlx4_ib_del_gid(struct ib_device *device,
+			   u8 port_num,
+			   unsigned int index,
+			   void **context)
+{
+	struct gid_cache_context *ctx = *context;
+	struct mlx4_ib_dev *ibdev = to_mdev(device);
+	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
+	struct mlx4_port_gid_table   *port_gid_table;
+	int ret = 0;
+	int hw_update = 0;
+	struct gid_entry *gids = NULL;
+
+	if (!rdma_cap_roce_gid_table(device, port_num))
+		return -EINVAL;
+
+	if (port_num > MLX4_MAX_PORTS)
+		return -EINVAL;
+
+	port_gid_table = &iboe->gids[port_num - 1];
+	spin_lock_bh(&iboe->lock);
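+	/* Drop one reference; only the last release clears the entry and
+	 * triggers a hardware table update.
+	 */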
+	if (ctx) {
+		ctx->refcount--;
+		if (!ctx->refcount) {
+			unsigned int real_index = ctx->real_index;
+
+			memcpy(&port_gid_table->gids[real_index].gid, &zgid, sizeof(zgid));
+			kfree(port_gid_table->gids[real_index].ctx);
+			port_gid_table->gids[real_index].ctx = NULL;
+			hw_update = 1;
+		}
+	}
+	if (!ret && hw_update) {
+		int i;
+
+		gids = kmalloc(sizeof(*gids) * MLX4_MAX_PORT_GIDS, GFP_ATOMIC);
+		if (!gids) {
+			ret = -ENOMEM;
+		} else {
+			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++)
+				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
+		}
+	}
+	spin_unlock_bh(&iboe->lock);
+
+	if (!ret && hw_update) {
+		ret = mlx4_ib_update_gids(gids, ibdev, port_num);
+		kfree(gids);
+	}
+	return ret;
+}
+
+int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
+				    u8 port_num, int index)
+{
+	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
+	struct gid_cache_context *ctx = NULL;
+	union ib_gid gid;
+	struct mlx4_port_gid_table   *port_gid_table;
+	int real_index = -EINVAL;
+	int i;
+	int ret;
+	unsigned long flags;
+	struct ib_gid_attr attr;
+
+	if (port_num > MLX4_MAX_PORTS)
+		return -EINVAL;
+
+	if (mlx4_is_bonded(ibdev->dev))
+		port_num = 1;
+
+	if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
+		return index;
+
+	ret = ib_get_cached_gid(&ibdev->ib_dev, port_num, index, &gid, &attr);
+	if (ret)
+		return ret;
+
+	if (attr.ndev)
+		dev_put(attr.ndev);
+
+	if (!memcmp(&gid, &zgid, sizeof(gid)))
+		return -EINVAL;
+
+	spin_lock_irqsave(&iboe->lock, flags);
+	port_gid_table = &iboe->gids[port_num - 1];
+
+	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
+		if (!memcmp(&port_gid_table->gids[i].gid, &gid, sizeof(gid)) &&
+		    attr.gid_type == port_gid_table->gids[i].gid_type) {
+			ctx = port_gid_table->gids[i].ctx;
+			break;
+		}
+	if (ctx)
+		real_index = ctx->real_index;
+	spin_unlock_irqrestore(&iboe->lock, flags);
+	return real_index;
+}
+
+static int mlx4_ib_query_device(struct ib_device *ibdev,
+				struct ib_device_attr *props,
+				struct ib_udata *uhw)
 {
 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
 	struct ib_smp *in_mad  = NULL;
 	struct ib_smp *out_mad = NULL;
 	int err = -ENOMEM;
+	int have_ib_ports;
+	struct mlx4_uverbs_ex_query_device cmd;
+	struct mlx4_uverbs_ex_query_device_resp resp = {.comp_mask = 0};
+	struct mlx4_clock_params clock_params;
 
+	if (uhw->inlen) {
+		if (uhw->inlen < sizeof(cmd))
+			return -EINVAL;
+
+		err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd));
+		if (err)
+			return err;
+
+		if (cmd.comp_mask)
+			return -EINVAL;
+
+		if (cmd.reserved)
+			return -EINVAL;
+	}
+
+	resp.response_length = offsetof(typeof(resp), response_length) +
+		sizeof(resp.response_length);
 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
 	if (!in_mad || !out_mad)
@@ -185,25 +474,27 @@ int mlx4_ib_query_device(struct ib_device *ibdev,
 
 	memset(props, 0, sizeof *props);
 
+	have_ib_ports = num_ib_ports(dev->dev);
+
 	props->fw_ver = dev->dev->caps.fw_ver;
 	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
 		IB_DEVICE_PORT_ACTIVE_EVENT		|
 		IB_DEVICE_SYS_IMAGE_GUID		|
 		IB_DEVICE_RC_RNR_NAK_GEN		|
-		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK	|
-		IB_DEVICE_SHARED_MR;
-
+		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
 		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
 		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
-	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
+	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
 		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
 		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
 		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
-	if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
+	if (dev->dev->caps.max_gso_sz &&
+	    (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
+	    (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
 		props->device_cap_flags |= IB_DEVICE_UD_TSO;
 	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
 		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
@@ -213,18 +504,6 @@ int mlx4_ib_query_device(struct ib_device *ibdev,
 		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
 		props->device_cap_flags |= IB_DEVICE_XRC;
-	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_CROSS_CHANNEL)
-		props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;
-
-	if (check_flow_steering_support(dev->dev))
-		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
-
-
-	props->device_cap_flags |= IB_DEVICE_QPG;
-	if (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) {
-		props->device_cap_flags |= IB_DEVICE_UD_RSS;
-		props->max_rss_tbl_sz = dev->dev->caps.max_rss_tbl_sz;
-	}
 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
 		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
 	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
@@ -233,9 +512,14 @@ int mlx4_ib_query_device(struct ib_device *ibdev,
 		else
 			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
 	}
+	if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
+		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
+
+	props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
+
 	props->vendor_id	   = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
 		0xffffff;
-	props->vendor_part_id	   = dev->dev->pdev->device;
+	props->vendor_part_id	   = dev->dev->persist->pdev->device;
 	props->hw_ver		   = be32_to_cpup((__be32 *) (out_mad->data + 32));
 	memcpy(&props->sys_image_guid, out_mad->data +	4, 8);
 
@@ -245,6 +529,7 @@ int mlx4_ib_query_device(struct ib_device *ibdev,
 	props->max_qp_wr	   = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
 	props->max_sge		   = min(dev->dev->caps.max_sq_sg,
 					 dev->dev->caps.max_rq_sg);
+	props->max_sge_rd	   = MLX4_MAX_SGE_RD;
 	props->max_cq		   = dev->dev->quotas.cq;
 	props->max_cqe		   = dev->dev->caps.max_cqes;
 	props->max_mr		   = dev->dev->quotas.mpt;
@@ -266,14 +551,25 @@ int mlx4_ib_query_device(struct ib_device *ibdev,
 	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
 					   props->max_mcast_grp;
 	props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
-	props->hca_core_clock = dev->dev->caps.hca_core_clock;
-	if (dev->dev->caps.hca_core_clock > 0)
-		props->comp_mask |= IB_DEVICE_ATTR_WITH_HCA_CORE_CLOCK;
-	if (dev->dev->caps.cq_timestamp) {
-		props->timestamp_mask = 0xFFFFFFFFFFFF;
-		props->comp_mask |= IB_DEVICE_ATTR_WITH_TIMESTAMP_MASK;
+	props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
+	props->timestamp_mask = 0xFFFFFFFFFFFFULL;
+
+	if (!mlx4_is_slave(dev->dev))
+		err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
+
+	if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
+		resp.response_length += sizeof(resp.hca_core_clock_offset);
+		if (!err && !mlx4_is_slave(dev->dev)) {
+			resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP;
+			resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
+		}
 	}
 
+	if (uhw->outlen) {
+		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
+		if (err)
+			goto out;
+	}
 out:
 	kfree(in_mad);
 	kfree(out_mad);
@@ -394,8 +690,8 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 	struct net_device *ndev;
 	enum ib_mtu tmp;
 	struct mlx4_cmd_mailbox *mailbox;
-	unsigned long flags;
 	int err = 0;
+	int is_bonded = mlx4_is_bonded(mdev->dev);
 
 	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
 	if (IS_ERR(mailbox))
@@ -410,12 +706,8 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 	props->active_width	=  (((u8 *)mailbox->buf)[5] == 0x40) ?
 						IB_WIDTH_4X : IB_WIDTH_1X;
 	props->active_speed	= IB_SPEED_QDR;
-	props->port_cap_flags	= IB_PORT_CM_SUP;
-	if (netw_view)
-		props->gid_tbl_len = MLX4_ROCE_MAX_GIDS;
-	else
-		props->gid_tbl_len   = mdev->dev->caps.gid_table_len[port];
-
+	props->port_cap_flags	= IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
+	props->gid_tbl_len	= mdev->dev->caps.gid_table_len[port];
 	props->max_msg_sz	= mdev->dev->caps.max_msg_sz;
 	props->pkey_tbl_len	= 1;
 	props->max_mtu		= IB_MTU_4096;
@@ -423,8 +715,15 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 	props->state		= IB_PORT_DOWN;
 	props->phys_state	= state_to_phys_state(props->state);
 	props->active_mtu	= IB_MTU_256;
-	spin_lock_irqsave(&iboe->lock, flags);
+	spin_lock_bh(&iboe->lock);
 	ndev = iboe->netdevs[port - 1];
+	if (ndev && is_bonded) {
+#if 0
+		rcu_read_lock(); /* required to get upper dev */
+		ndev = netdev_master_upper_dev_get_rcu(ndev);
+		rcu_read_unlock();
+#endif
+	}
 	if (!ndev)
 		goto out_unlock;
 
@@ -435,7 +734,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 					IB_PORT_ACTIVE : IB_PORT_DOWN;
 	props->phys_state	= state_to_phys_state(props->state);
 out_unlock:
-	spin_unlock_irqrestore(&iboe->lock, flags);
+	spin_unlock_bh(&iboe->lock);
 out:
 	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
 	return err;
@@ -518,23 +817,87 @@ out:
 	return err;
 }
 
-static int iboe_query_gid(struct ib_device *ibdev, u8 port, int index,
-			  union ib_gid *gid)
-{
-	struct mlx4_ib_dev *dev = to_mdev(ibdev);
-
-	*gid = dev->iboe.gid_table[port - 1][index];
-
-	return 0;
-}
-
 static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
 			     union ib_gid *gid)
 {
-	if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
+	int ret;
+
+	if (rdma_protocol_ib(ibdev, port))
 		return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
-	else
-		return iboe_query_gid(ibdev, port, index, gid);
+
+	if (!rdma_protocol_roce(ibdev, port))
+		return -ENODEV;
+
+	if (!rdma_cap_roce_gid_table(ibdev, port))
+		return -ENODEV;
+
+	ret = ib_get_cached_gid(ibdev, port, index, gid, NULL);
+	if (ret == -EAGAIN) {
+		memcpy(gid, &zgid, sizeof(*gid));
+		return 0;
+	}
+
+	return ret;
+}
+
+static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u8 port, u64 *sl2vl_tbl)
+{
+	union sl2vl_tbl_to_u64 sl2vl64;
+	struct ib_smp *in_mad  = NULL;
+	struct ib_smp *out_mad = NULL;
+	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
+	int err = -ENOMEM;
+	int jj;
+
+	if (mlx4_is_slave(to_mdev(ibdev)->dev)) {
+		*sl2vl_tbl = 0;
+		return 0;
+	}
+
+	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+	if (!in_mad || !out_mad)
+		goto out;
+
+	init_query_mad(in_mad);
+	in_mad->attr_id  = IB_SMP_ATTR_SL_TO_VL_TABLE;
+	in_mad->attr_mod = 0;
+
+	if (mlx4_is_mfunc(to_mdev(ibdev)->dev))
+		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
+
+	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
+			   in_mad, out_mad);
+	if (err)
+		goto out;
+
+	for (jj = 0; jj < 8; jj++)
+		sl2vl64.sl8[jj] = ((struct ib_smp *)out_mad)->data[jj];
+	*sl2vl_tbl = sl2vl64.sl64;
+
+out:
+	kfree(in_mad);
+	kfree(out_mad);
+	return err;
+}
+
+static void mlx4_init_sl2vl_tbl(struct mlx4_ib_dev *mdev)
+{
+	u64 sl2vl;
+	int i;
+	int err;
+
+	for (i = 1; i <= mdev->dev->caps.num_ports; i++) {
+		if (mdev->dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
+			continue;
+		err = mlx4_ib_query_sl2vl(&mdev->ib_dev, i, &sl2vl);
+		if (err) {
+			pr_err("Unable to get default sl to vl mapping for port %d.  Using all zeroes (%d)\n",
+			       i, err);
+			sl2vl = 0;
+		}
+		atomic64_set(&mdev->sl2vl[i - 1], sl2vl);
+	}
 }
 
 int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
@@ -591,7 +954,7 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
 		return -EOPNOTSUPP;
 
 	spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
-	memcpy(ibdev->node_desc, props->node_desc, 64);
+	memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
 	spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);
 
 	/*
@@ -602,8 +965,7 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
 	if (IS_ERR(mailbox))
 		return 0;
 
-	memset(mailbox->buf, 0, 256);
-	memcpy(mailbox->buf, props->node_desc, 64);
+	memcpy(mailbox->buf, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
 	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
 		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 
@@ -612,19 +974,16 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
 	return 0;
 }
 
-static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
-			 u32 cap_mask)
+static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
+			    u32 cap_mask)
 {
 	struct mlx4_cmd_mailbox *mailbox;
 	int err;
-	u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
 
-	memset(mailbox->buf, 0, 256);
-
 	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
 		*(u8 *) mailbox->buf	     = !!reset_qkey_viols << 6;
 		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
@@ -633,8 +992,9 @@ static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
 		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
 	}
 
-	err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
-		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+	err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE,
+		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+		       MLX4_CMD_WRAPPED);
 
 	mlx4_free_cmd_mailbox(dev->dev, mailbox);
 	return err;
@@ -643,11 +1003,20 @@ static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
 static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
 			       struct ib_port_modify *props)
 {
+	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
+	u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
 	struct ib_port_attr attr;
 	u32 cap_mask;
 	int err;
 
-	mutex_lock(&to_mdev(ibdev)->cap_mask_mutex);
+	/* return OK if this is RoCE. CM calls ib_modify_port() regardless
+	 * of whether port link layer is ETH or IB. For ETH ports, qkey
+	 * violations and port capabilities are not meaningful.
+	 */
+	if (is_eth)
+		return 0;
+
+	mutex_lock(&mdev->cap_mask_mutex);
 
 	err = mlx4_ib_query_port(ibdev, port, &attr);
 	if (err)
@@ -656,9 +1025,9 @@ static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
 	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
 		~props->clr_port_cap_mask;
 
-	err = mlx4_SET_PORT(to_mdev(ibdev), port,
-			    !!(mask & IB_PORT_RESET_QKEY_CNTR),
-			    cap_mask);
+	err = mlx4_ib_SET_PORT(mdev, port,
+			       !!(mask & IB_PORT_RESET_QKEY_CNTR),
+			       cap_mask);
 
 out:
 	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
@@ -679,27 +1048,17 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
 
 	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
 		resp_v3.qp_tab_size      = dev->dev->caps.num_qps;
-		if (mlx4_wc_enabled()) {
-			resp_v3.bf_reg_size      = dev->dev->caps.bf_reg_size;
-			resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
-		} else {
-			resp_v3.bf_reg_size      = 0;
-			resp_v3.bf_regs_per_page = 0;
-		}
+		resp_v3.bf_reg_size      = dev->dev->caps.bf_reg_size;
+		resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
 	} else {
 		resp.dev_caps	      = dev->dev->caps.userspace_caps;
 		resp.qp_tab_size      = dev->dev->caps.num_qps;
-		if (mlx4_wc_enabled()) {
-			resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
-			resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
-		} else {
-			resp.bf_reg_size      = 0;
-			resp.bf_regs_per_page = 0;
-		}
+		resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
+		resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
 		resp.cqe_size	      = dev->dev->caps.cqe_size;
 	}
 
-	context = kmalloc(sizeof *context, GFP_KERNEL);
+	context = kzalloc(sizeof(*context), GFP_KERNEL);
 	if (!context)
 		return ERR_PTR(-ENOMEM);
 
@@ -736,161 +1095,180 @@ static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
 	return 0;
 }
 
-/* XXX FBSD has no support for get_unmapped_area function */
-#if 0
-static unsigned long mlx4_ib_get_unmapped_area(struct file *file,
-			unsigned long addr,
-			unsigned long len, unsigned long pgoff,
-			unsigned long flags)
+static void  mlx4_ib_vma_open(struct vm_area_struct *area)
 {
-	struct mm_struct *mm;
+	/* vma_open is called when a new VMA is created on top of our VMA.
+	 * This is done through either mremap flow or split_vma (usually due
+	 * to mlock, madvise, munmap, etc.). We do not support a clone of the
+	 * vma, as this VMA is strongly hardware related. Therefore we set the
+	 * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
+	 * calling us again and trying to do incorrect actions. We assume that
+	 * the original vma size is exactly a single page on which no
+	 * "splitting" operations will be performed.
+	 */
+	area->vm_ops = NULL;
+}
+
+static void  mlx4_ib_vma_close(struct vm_area_struct *area)
+{
+	struct mlx4_ib_vma_private_data *mlx4_ib_vma_priv_data;
+
+	/* It's guaranteed that all VMAs opened on a FD are closed before the
+	 * file itself is closed, therefore no sync is needed with the regular
+	 * closing flow (e.g. mlx4_ib_dealloc_ucontext). However, a sync is
+	 * needed with accesses to the vma from mlx4_ib_disassociate_ucontext.
+	 * The close operation is usually called under mm->mmap_sem except when
+	 * the process is exiting.  The exiting case is handled explicitly as part
+	 * of mlx4_ib_disassociate_ucontext.
+	 */
+	mlx4_ib_vma_priv_data = (struct mlx4_ib_vma_private_data *)
+				area->vm_private_data;
+
+	/* set the vma context pointer to null in the mlx4_ib driver's private
+	 * data to protect against a race condition in mlx4_ib_disassociate_ucontext().
+	 */
+	mlx4_ib_vma_priv_data->vma = NULL;
+}
+
+static const struct vm_operations_struct mlx4_ib_vm_ops = {
+	.open = mlx4_ib_vma_open,
+	.close = mlx4_ib_vma_close
+};
+
+static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
+{
+	int i;
+	int ret = 0;
 	struct vm_area_struct *vma;
-	unsigned long start_addr;
-	unsigned long page_size_order;
-	unsigned long  command;
+	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
+	struct task_struct *owning_process  = NULL;
+	struct mm_struct   *owning_mm       = NULL;
 
-	mm = current->mm;
-	if (addr)
-		return current->mm->get_unmapped_area(file, addr, len,
-						pgoff, flags);
+	owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
+	if (!owning_process)
+		return;
 
-	/* Last 8 bits hold the  command others are data per that command */
-	command = pgoff & MLX4_IB_MMAP_CMD_MASK;
-	if (command != MLX4_IB_MMAP_GET_CONTIGUOUS_PAGES)
-		return current->mm->get_unmapped_area(file, addr, len,
-						pgoff, flags);
-
-	page_size_order = pgoff >> MLX4_IB_MMAP_CMD_BITS;
-	/* code is based on the huge-pages get_unmapped_area code */
-	start_addr = mm->free_area_cache;
-
-	if (len <= mm->cached_hole_size)
-		start_addr = TASK_UNMAPPED_BASE;
-
-
-full_search:
-	addr = ALIGN(start_addr, 1 << page_size_order);
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (TASK_SIZE - len < addr) {
-			/*
-			 * Start a new search - just in case we missed
-			 * some holes.
+	owning_mm = get_task_mm(owning_process);
+	if (!owning_mm) {
+		pr_info("no mm, disassociate ucontext is pending task termination\n");
+		while (1) {
+			/* make sure that task is dead before returning, it may
+			 * prevent a rare case of module down in parallel to a
+			 * call to mlx4_ib_vma_close.
 			 */
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = TASK_UNMAPPED_BASE;
-				goto full_search;
+			put_task_struct(owning_process);
+			msleep(1);
+			owning_process = get_pid_task(ibcontext->tgid,
+						      PIDTYPE_PID);
+			if (!owning_process /* ||
+			    owning_process->state == TASK_DEAD */) {
+				pr_info("disassociate ucontext done, task was terminated\n");
+				/* in case task was dead need to release the task struct */
+				if (owning_process)
+					put_task_struct(owning_process);
+				return;
 			}
-			return -ENOMEM;
+		}
+	}
+
+	/* need to protect from a race on closing the vma as part of
+	 * mlx4_ib_vma_close().
+	 */
+	down_read(&owning_mm->mmap_sem);
+	for (i = 0; i < HW_BAR_COUNT; i++) {
+		vma = context->hw_bar_info[i].vma;
+		if (!vma)
+			continue;
+
+		ret = zap_vma_ptes(context->hw_bar_info[i].vma,
+				   context->hw_bar_info[i].vma->vm_start,
+				   PAGE_SIZE);
+		if (ret) {
+			pr_err("Error: zap_vma_ptes failed for index=%d, ret=%d\n", i, ret);
+			BUG_ON(1);
 		}
 
-		if (!vma || addr + len <= vma->vm_start)
-			return addr;
-		addr = ALIGN(vma->vm_end, 1 << page_size_order);
+		/* context going to be destroyed, should not access ops any more */
+		context->hw_bar_info[i].vma->vm_ops = NULL;
 	}
+
+	up_read(&owning_mm->mmap_sem);
+	mmput(owning_mm);
+	put_task_struct(owning_process);
+}
+
+static void mlx4_ib_set_vma_data(struct vm_area_struct *vma,
+				 struct mlx4_ib_vma_private_data *vma_private_data)
+{
+	vma_private_data->vma = vma;
+	vma->vm_private_data = vma_private_data;
+	vma->vm_ops =  &mlx4_ib_vm_ops;
 }
-#endif
 
 static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 {
 	struct mlx4_ib_dev *dev = to_mdev(context->device);
+	struct mlx4_ib_ucontext *mucontext = to_mucontext(context);
 
-	/* Last 8 bits hold the  command others are data per that command */
-	unsigned long  command = vma->vm_pgoff & MLX4_IB_MMAP_CMD_MASK;
+	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+		return -EINVAL;
 
-	if (command < MLX4_IB_MMAP_GET_CONTIGUOUS_PAGES) {
-		/* compatibility handling for commands 0 & 1*/
-		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
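+	/* The page offset selects what gets mapped: 0 is the UAR doorbell
+	 * page, 1 the blue flame register page and 3 the internal HW clock.
+	 */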
+	if (vma->vm_pgoff == 0) {
+		/* We prevent double mmaping on same context */
+		if (mucontext->hw_bar_info[HW_BAR_DB].vma)
 			return -EINVAL;
-	}
-	if (command == MLX4_IB_MMAP_UAR_PAGE) {
+
 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
 		if (io_remap_pfn_range(vma, vma->vm_start,
 				       to_mucontext(context)->uar.pfn,
 				       PAGE_SIZE, vma->vm_page_prot))
 			return -EAGAIN;
-	} else if (command == MLX4_IB_MMAP_BLUE_FLAME_PAGE &&
-			dev->dev->caps.bf_reg_size != 0) {
-		vma->vm_page_prot = pgprot_wc(vma->vm_page_prot);
+
+		mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_DB]);
+
+	} else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
+		/* We prevent double mmaping on same context */
+		if (mucontext->hw_bar_info[HW_BAR_BF].vma)
+			return -EINVAL;
+
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 
 		if (io_remap_pfn_range(vma, vma->vm_start,
 				       to_mucontext(context)->uar.pfn +
 				       dev->dev->caps.num_uars,
 				       PAGE_SIZE, vma->vm_page_prot))
 			return -EAGAIN;
-	} else if (command == MLX4_IB_MMAP_GET_HW_CLOCK) {
+
+		mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_BF]);
+
+	} else if (vma->vm_pgoff == 3) {
 		struct mlx4_clock_params params;
 		int ret;
 
+		/* We prevent double mmaping on same context */
+		if (mucontext->hw_bar_info[HW_BAR_CLOCK].vma)
+			return -EINVAL;
+
 		ret = mlx4_get_internal_clock_params(dev->dev, &params);
+
 		if (ret)
 			return ret;
 
 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
 		if (io_remap_pfn_range(vma, vma->vm_start,
-				       (pci_resource_start(dev->dev->pdev,
-				       params.bar) + params.offset)
+				       (pci_resource_start(dev->dev->persist->pdev,
+							   params.bar) +
+					params.offset)
 				       >> PAGE_SHIFT,
 				       PAGE_SIZE, vma->vm_page_prot))
 			return -EAGAIN;
-	} else
+
+		mlx4_ib_set_vma_data(vma,
+				     &mucontext->hw_bar_info[HW_BAR_CLOCK]);
+	} else {
 		return -EINVAL;
-
-	return 0;
-}
-
-static int mlx4_ib_ioctl(struct ib_ucontext *context, unsigned int cmd,
-			 unsigned long arg)
-{
-	struct mlx4_ib_dev *dev = to_mdev(context->device);
-	int ret;
-        int offset;
-
-	switch (cmd) {
-	case MLX4_IOCHWCLOCKOFFSET: {
-		struct mlx4_clock_params params;
-		int ret;
-		ret = mlx4_get_internal_clock_params(dev->dev, &params);
-		if (!ret) {
-                        offset = params.offset % PAGE_SIZE;
-			ret = put_user(offset,
-					 (int *)arg);
-			return sizeof(int);
-		} else {
-			return ret;
-		}
 	}
-	default: {
-		pr_err("mlx4_ib: invalid ioctl %u command with arg %lX\n",
-		       cmd, arg);
-		return -ENOTTY;
-	}
-	}
-
-	return ret;
-}
-
-static int mlx4_ib_query_values(struct ib_device *device, int q_values,
-				struct ib_device_values *values)
-{
-	struct mlx4_ib_dev *dev = to_mdev(device);
-	s64 cycles;
-
-	values->values_mask = 0;
-	if (q_values & IBV_VALUES_HW_CLOCK) {
-		cycles = mlx4_read_clock(dev->dev);
-		if (cycles >= 0) {
-			values->hwclock = cycles & CORE_CLOCK_MASK;
-			values->values_mask |= IBV_VALUES_HW_CLOCK;
-		}
-		q_values &= ~IBV_VALUES_HW_CLOCK;
-	}
-
-	if (q_values)
-		return -ENOTTY;
 
 	return 0;
 }
@@ -935,6 +1313,7 @@ static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
 					  struct ib_udata *udata)
 {
 	struct mlx4_ib_xrcd *xrcd;
+	struct ib_cq_init_attr cq_attr = {};
 	int err;
 
 	if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
@@ -948,13 +1327,14 @@ static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
 	if (err)
 		goto err1;
 
-	xrcd->pd = ib_alloc_pd(ibdev);
+	xrcd->pd = ib_alloc_pd(ibdev, 0);
 	if (IS_ERR(xrcd->pd)) {
 		err = PTR_ERR(xrcd->pd);
 		goto err2;
 	}
 
-	xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, 1, 0);
+	cq_attr.cqe = 1;
+	xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr);
 	if (IS_ERR(xrcd->cq)) {
 		err = PTR_ERR(xrcd->cq);
 		goto err3;
@@ -1004,28 +1384,39 @@ static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
 	return 0;
 }
 
+static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev,
+					  struct mlx4_ib_counters *ctr_table)
+{
+	struct counter_index *counter, *tmp_count;
+
+	mutex_lock(&ctr_table->mutex);
+	list_for_each_entry_safe(counter, tmp_count, &ctr_table->counters_list,
+				 list) {
+		if (counter->allocated)
+			mlx4_counter_free(ibdev->dev, counter->index);
+		list_del(&counter->list);
+		kfree(counter);
+	}
+	mutex_unlock(&ctr_table->mutex);
+}
+
 int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
 		   union ib_gid *gid)
 {
-	u8 mac[6];
 	struct net_device *ndev;
 	int ret = 0;
 
 	if (!mqp->port)
 		return 0;
 
-	spin_lock(&mdev->iboe.lock);
+	spin_lock_bh(&mdev->iboe.lock);
 	ndev = mdev->iboe.netdevs[mqp->port - 1];
 	if (ndev)
 		dev_hold(ndev);
-	spin_unlock(&mdev->iboe.lock);
+	spin_unlock_bh(&mdev->iboe.lock);
 
 	if (ndev) {
-		rdma_get_mcast_mac((struct in6_addr *)gid, mac);
-		rtnl_lock();
-		dev_mc_add(mdev->iboe.netdevs[mqp->port - 1], mac, 6, 0);
 		ret = 1;
-		rtnl_unlock();
 		dev_put(ndev);
 	}
 
@@ -1034,11 +1425,25 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
 
 struct mlx4_ib_steering {
 	struct list_head list;
-	u64 reg_id;
+	struct mlx4_flow_reg_id reg_id;
 	union ib_gid gid;
 };
 
+#define LAST_ETH_FIELD vlan_tag
+#define LAST_IB_FIELD sl
+#define LAST_IPV4_FIELD dst_ip
+#define LAST_TCP_UDP_FIELD src_port
+
+/* "field" names the last filter member the driver supports; the macro is
+ * non-zero when the mask sets any bytes beyond it (unsupported fields).
+ */
+#define FIELDS_NOT_SUPPORTED(filter, field)\
+	memchr_inv((void *)&filter.field  +\
+		   sizeof(filter.field), 0,\
+		   sizeof(filter) -\
+		   offsetof(typeof(filter), field) -\
+		   sizeof(filter.field))
+
 static int parse_flow_attr(struct mlx4_dev *dev,
+			   u32 qp_num,
 			   union ib_flow_spec *ib_spec,
 			   struct _rule_hw *mlx4_spec)
 {
@@ -1046,6 +1451,9 @@ static int parse_flow_attr(struct mlx4_dev *dev,
 
 	switch (ib_spec->type) {
 	case IB_FLOW_SPEC_ETH:
+		if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
+			return -ENOTSUPP;
+
 		type = MLX4_NET_TRANS_RULE_ID_ETH;
 		memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
 		       ETH_ALEN);
@@ -1054,17 +1462,22 @@ static int parse_flow_attr(struct mlx4_dev *dev,
 		mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
 		mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
 		break;
-
 	case IB_FLOW_SPEC_IB:
+		if (FIELDS_NOT_SUPPORTED(ib_spec->ib.mask, LAST_IB_FIELD))
+			return -ENOTSUPP;
+
 		type = MLX4_NET_TRANS_RULE_ID_IB;
-		mlx4_spec->ib.l3_qpn = ib_spec->ib.val.l3_type_qpn;
-		mlx4_spec->ib.qpn_mask = ib_spec->ib.mask.l3_type_qpn;
-		memcpy(&mlx4_spec->ib.dst_gid, ib_spec->ib.val.dst_gid, 16);
-		memcpy(&mlx4_spec->ib.dst_gid_msk,
-		       ib_spec->ib.mask.dst_gid, 16);
+		mlx4_spec->ib.l3_qpn =
+			cpu_to_be32(qp_num);
+		mlx4_spec->ib.qpn_mask =
+			cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
 		break;
 
+
 	case IB_FLOW_SPEC_IPV4:
+		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
+			return -ENOTSUPP;
+
 		type = MLX4_NET_TRANS_RULE_ID_IPV4;
 		mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
 		mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
@@ -1074,26 +1487,134 @@ static int parse_flow_attr(struct mlx4_dev *dev,
 
 	case IB_FLOW_SPEC_TCP:
 	case IB_FLOW_SPEC_UDP:
+		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, LAST_TCP_UDP_FIELD))
+			return -ENOTSUPP;
+
 		type = ib_spec->type == IB_FLOW_SPEC_TCP ?
 					MLX4_NET_TRANS_RULE_ID_TCP :
 					MLX4_NET_TRANS_RULE_ID_UDP;
 		mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
-		mlx4_spec->tcp_udp.dst_port_msk =
-			ib_spec->tcp_udp.mask.dst_port;
+		mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
 		mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
-		mlx4_spec->tcp_udp.src_port_msk =
-			ib_spec->tcp_udp.mask.src_port;
+		mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
 		break;
 
 	default:
 		return -EINVAL;
 	}
-	if (map_sw_to_hw_steering_id(dev, type) < 0 ||
-	    hw_rule_sz(dev, type) < 0)
+	if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
+	    mlx4_hw_rule_sz(dev, type) < 0)
 		return -EINVAL;
-	mlx4_spec->id = cpu_to_be16(map_sw_to_hw_steering_id(dev, type));
-	mlx4_spec->size = hw_rule_sz(dev, type) >> 2;
-	return hw_rule_sz(dev, type);
+	mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
+	mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
+	return mlx4_hw_rule_sz(dev, type);
+}
+
+struct default_rules {
+	__u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
+	__u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
+	__u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
+	__u8  link_layer;
+};
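+/* On IB link-layer ports, a flow that matches on IPV4 without an ETH spec
+ * gets an implicit IB spec prepended (see rules_create_list).
+ */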
+static const struct default_rules default_table[] = {
+	{
+		.mandatory_fields = {IB_FLOW_SPEC_IPV4},
+		.mandatory_not_fields = {IB_FLOW_SPEC_ETH},
+		.rules_create_list = {IB_FLOW_SPEC_IB},
+		.link_layer = IB_LINK_LAYER_INFINIBAND
+	}
+};
+
+static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
+					 struct ib_flow_attr *flow_attr)
+{
+	int i, j, k;
+	void *ib_flow;
+	const struct default_rules *pdefault_rules = default_table;
+	u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);
+
+	for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
+		__u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
+		memset(&field_types, 0, sizeof(field_types));
+
+		if (link_layer != pdefault_rules->link_layer)
+			continue;
+
+		ib_flow = flow_attr + 1;
+		/* we assume the specs are sorted */
+		for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
+		     j < flow_attr->num_of_specs; k++) {
+			union ib_flow_spec *current_flow =
+				(union ib_flow_spec *)ib_flow;
+
+			/* same layer but different type */
+			if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
+			     (pdefault_rules->mandatory_fields[k] &
+			      IB_FLOW_SPEC_LAYER_MASK)) &&
+			    (current_flow->type !=
+			     pdefault_rules->mandatory_fields[k]))
+				goto out;
+
+			/* same layer, try match next one */
+			if (current_flow->type ==
+			    pdefault_rules->mandatory_fields[k]) {
+				j++;
+				ib_flow +=
+					((union ib_flow_spec *)ib_flow)->size;
+			}
+		}
+
+		ib_flow = flow_attr + 1;
+		for (j = 0; j < flow_attr->num_of_specs;
+		     j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
+			for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
+				/* same layer and same type */
+				if (((union ib_flow_spec *)ib_flow)->type ==
+				    pdefault_rules->mandatory_not_fields[k])
+					goto out;
+
+		return i;
+	}
+out:
+	return -1;
+}
+
+static int __mlx4_ib_create_default_rules(
+		struct mlx4_ib_dev *mdev,
+		struct ib_qp *qp,
+		const struct default_rules *pdefault_rules,
+		struct _rule_hw *mlx4_spec) {
+	int size = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
+		int ret;
+		union ib_flow_spec ib_spec;
+		switch (pdefault_rules->rules_create_list[i]) {
+		case 0:
+			/* no rule */
+			continue;
+		case IB_FLOW_SPEC_IB:
+			ib_spec.type = IB_FLOW_SPEC_IB;
+			ib_spec.size = sizeof(struct ib_flow_spec_ib);
+
+			break;
+		default:
+			/* invalid rule */
+			return -EINVAL;
+		}
+		/* We must put an empty rule here; the qpn argument is ignored */
+		ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
+				      mlx4_spec);
+		if (ret < 0) {
+			pr_info("invalid parsing\n");
+			return -EINVAL;
+		}
+
+		mlx4_spec = (void *)mlx4_spec + ret;
+		size += ret;
+	}
+	return size;
 }
 
 static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
@@ -1107,8 +1628,7 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
 	struct mlx4_ib_dev *mdev = to_mdev(qp->device);
 	struct mlx4_cmd_mailbox *mailbox;
 	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
-	size_t rule_size = sizeof(struct mlx4_net_trans_rule_hw_ctrl) +
-			   (sizeof(struct _rule_hw) * flow_attr->num_of_specs);
+	int default_flow;
 
 	static const u16 __mlx4_domain[] = {
 		[IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
@@ -1118,52 +1638,64 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
 	};
 
 	if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
-		pr_err("Invalid priority value.\n");
+		pr_err("Invalid priority value %d\n", flow_attr->priority);
 		return -EINVAL;
 	}
+
 	if (domain >= IB_FLOW_DOMAIN_NUM) {
-		pr_err("Invalid domain value.\n");
+		pr_err("Invalid domain value %d\n", domain);
 		return -EINVAL;
 	}
-	if (map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
+
+	if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
 		return -EINVAL;
 
 	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
-	memset(mailbox->buf, 0, rule_size);
 	ctrl = mailbox->buf;
 
 	ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
 				 flow_attr->priority);
-	ctrl->type = map_sw_to_hw_steering_mode(mdev->dev, flow_type);
+	ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
 	ctrl->port = flow_attr->port;
 	ctrl->qpn = cpu_to_be32(qp->qp_num);
 
-	if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_ALLOW_LOOP_BACK)
-		ctrl->flags = (1 << 3);
-
 	ib_flow = flow_attr + 1;
 	size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
-	for (i = 0; i < flow_attr->num_of_specs; i++) {
-		ret = parse_flow_attr(mdev->dev, ib_flow, mailbox->buf + size);
+	/* Add default flows */
+	default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
+	if (default_flow >= 0) {
+		ret = __mlx4_ib_create_default_rules(
+				mdev, qp, default_table + default_flow,
+				mailbox->buf + size);
 		if (ret < 0) {
 			mlx4_free_cmd_mailbox(mdev->dev, mailbox);
 			return -EINVAL;
 		}
-		ib_flow += ((union ib_flow_spec *)ib_flow)->size;
+		size += ret;
+	}
+	for (i = 0; i < flow_attr->num_of_specs; i++) {
+		ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
+				      mailbox->buf + size);
+		if (ret < 0) {
+			mlx4_free_cmd_mailbox(mdev->dev, mailbox);
+			return -EINVAL;
+		}
+		ib_flow += ((union ib_flow_spec *) ib_flow)->size;
 		size += ret;
 	}
 
 	ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
 			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
-			   MLX4_CMD_NATIVE);
+			   MLX4_CMD_WRAPPED);
 	if (ret == -ENOMEM)
 		pr_err("mcg table is full. Fail to register network rule.\n");
 	else if (ret == -ENXIO)
 		pr_err("Device managed flow steering is disabled. Fail to register network rule.\n");
 	else if (ret)
-		pr_err("Invalid argumant. Fail to register network rule.\n");
+		pr_err("Invalid argument. Fail to register network rule.\n");
+
 	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
 	return ret;
 }
@@ -1173,10 +1705,85 @@ static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
 	int err;
 	err = mlx4_cmd(dev, reg_id, 0, 0,
 		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
-		       MLX4_CMD_NATIVE);
+		       MLX4_CMD_WRAPPED);
 	if (err)
 		pr_err("Fail to detach network rule. registration id = 0x%llx\n",
-		       (unsigned long long)reg_id);
+		       (long long)reg_id);
+	return err;
+}
+
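+/* Add a tunnel steering entry for the flow's destination MAC when VXLAN
+ * offload mode is enabled and the flow consists of a single ETH spec.
+ */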
+static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
+				    u64 *reg_id)
+{
+	void *ib_flow;
+	union ib_flow_spec *ib_spec;
+	struct mlx4_dev	*dev = to_mdev(qp->device)->dev;
+	int err = 0;
+
+	if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
+	    dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
+		return 0; /* do nothing */
+
+	ib_flow = flow_attr + 1;
+	ib_spec = (union ib_flow_spec *)ib_flow;
+
+	if (ib_spec->type !=  IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
+		return 0; /* do nothing */
+
+	err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
+				    flow_attr->port, qp->qp_num,
+				    MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
+				    reg_id);
+	return err;
+}
+
+static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev,
+				      struct ib_flow_attr *flow_attr,
+				      enum mlx4_net_trans_promisc_mode *type)
+{
+	int err = 0;
+
+	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER) ||
+	    (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) ||
+	    (flow_attr->num_of_specs > 1) || (flow_attr->priority != 0)) {
+		return -EOPNOTSUPP;
+	}
+
+	if (flow_attr->num_of_specs == 0) {
+		type[0] = MLX4_FS_MC_SNIFFER;
+		type[1] = MLX4_FS_UC_SNIFFER;
+	} else {
+		union ib_flow_spec *ib_spec;
+
+		ib_spec = (union ib_flow_spec *)(flow_attr + 1);
+		if (ib_spec->type !=  IB_FLOW_SPEC_ETH)
+			return -EINVAL;
+
+		/* if the mask is all zeros, match both MC and UC */
+		if (is_zero_ether_addr(ib_spec->eth.mask.dst_mac)) {
+			type[0] = MLX4_FS_MC_SNIFFER;
+			type[1] = MLX4_FS_UC_SNIFFER;
+		} else {
+			u8 mac[ETH_ALEN] = {ib_spec->eth.mask.dst_mac[0] ^ 0x01,
+					    ib_spec->eth.mask.dst_mac[1],
+					    ib_spec->eth.mask.dst_mac[2],
+					    ib_spec->eth.mask.dst_mac[3],
+					    ib_spec->eth.mask.dst_mac[4],
+					    ib_spec->eth.mask.dst_mac[5]};
+
+			/* The xor above only flipped the MC bit; a non-empty
+			 * mask is valid only if that bit is set and the rest
+			 * are zero.
+			 */
+			if (!is_zero_ether_addr(&mac[0]))
+				return -EINVAL;
+
+			if (is_multicast_ether_addr(ib_spec->eth.val.dst_mac))
+				type[0] = MLX4_FS_MC_SNIFFER;
+			else
+				type[0] = MLX4_FS_UC_SNIFFER;
+		}
+	}
+
 	return err;
 }
 
@@ -1184,13 +1791,22 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 				    struct ib_flow_attr *flow_attr,
 				    int domain)
 {
-	int err = 0, i = 0;
+	int err = 0, i = 0, j = 0;
 	struct mlx4_ib_flow *mflow;
 	enum mlx4_net_trans_promisc_mode type[2];
+	struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
+	int is_bonded = mlx4_is_bonded(dev);
+
+	if (flow_attr->port < 1 || flow_attr->port > qp->device->phys_port_cnt)
+		return ERR_PTR(-EINVAL);
+
+	if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
+	    (flow_attr->type != IB_FLOW_ATTR_NORMAL))
+		return ERR_PTR(-EOPNOTSUPP);
 
 	memset(type, 0, sizeof(type));
 
-	mflow = kzalloc(sizeof(struct mlx4_ib_flow), GFP_KERNEL);
+	mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
 	if (!mflow) {
 		err = -ENOMEM;
 		goto err_free;
@@ -1198,7 +1814,19 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 
 	switch (flow_attr->type) {
 	case IB_FLOW_ATTR_NORMAL:
-		type[0] = MLX4_FS_REGULAR;
+		/* If the don't-trap flag (continue match) is set, traffic is
+		 * replicated to the given qp under specific conditions,
+		 * without being stolen from its normal destination.
+		 */
+		if (unlikely(flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)) {
+			err = mlx4_ib_add_dont_trap_rule(dev,
+							 flow_attr,
+							 type);
+			if (err)
+				goto err_free;
+		} else {
+			type[0] = MLX4_FS_REGULAR;
+		}
 		break;
 
 	case IB_FLOW_ATTR_ALL_DEFAULT:
@@ -1210,8 +1838,8 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 		break;
 
 	case IB_FLOW_ATTR_SNIFFER:
-		type[0] = MLX4_FS_UC_SNIFFER;
-		type[1] = MLX4_FS_MC_SNIFFER;
+		type[0] = MLX4_FS_MIRROR_RX_PORT;
+		type[1] = MLX4_FS_MIRROR_SX_PORT;
 		break;
 
 	default:
@@ -1221,14 +1849,59 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 
 	while (i < ARRAY_SIZE(type) && type[i]) {
 		err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
-					    &mflow->reg_id[i]);
+					    &mflow->reg_id[i].id);
 		if (err)
-			goto err_free;
+			goto err_create_flow;
+		if (is_bonded) {
+			/* Application always sees one port so the mirror rule
+			 * must be on port #2
+			 */
+			flow_attr->port = 2;
+			err = __mlx4_ib_create_flow(qp, flow_attr,
+						    domain, type[j],
+						    &mflow->reg_id[j].mirror);
+			flow_attr->port = 1;
+			if (err)
+				goto err_create_flow;
+			j++;
+		}
+
+		i++;
+	}
+
+	if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
+		err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
+					       &mflow->reg_id[i].id);
+		if (err)
+			goto err_create_flow;
+
+		if (is_bonded) {
+			flow_attr->port = 2;
+			err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
+						       &mflow->reg_id[j].mirror);
+			flow_attr->port = 1;
+			if (err)
+				goto err_create_flow;
+			j++;
+		}
+		/* function to create mirror rule */
 		i++;
 	}
 
 	return &mflow->ibflow;
 
+err_create_flow:
+	while (i) {
+		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
+					     mflow->reg_id[i].id);
+		i--;
+	}
+
+	while (j) {
+		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
+					     mflow->reg_id[j].mirror);
+		j--;
+	}
 err_free:
 	kfree(mflow);
 	return ERR_PTR(err);
@@ -1241,10 +1914,16 @@ static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
 	struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
 	struct mlx4_ib_flow *mflow = to_mflow(flow_id);
 
-	while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i]) {
-		err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i]);
+	while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
+		err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
 		if (err)
 			ret = err;
+		if (mflow->reg_id[i].mirror) {
+			err = __mlx4_ib_destroy_flow(mdev->dev,
+						     mflow->reg_id[i].mirror);
+			if (err)
+				ret = err;
+		}
 		i++;
 	}
 
@@ -1252,6 +1931,68 @@ static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
 	return ret;
 }
 
+static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+	int err;
+	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+	struct mlx4_dev	*dev = mdev->dev;
+	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
+	struct mlx4_ib_steering *ib_steering = NULL;
+	enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
+	struct mlx4_flow_reg_id	reg_id;
+
+	if (mdev->dev->caps.steering_mode ==
+	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
+		ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
+		if (!ib_steering)
+			return -ENOMEM;
+	}
+
+	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
+				    !!(mqp->flags &
+				       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
+				    prot, &reg_id.id);
+	if (err) {
+		pr_err("multicast attach op failed, err %d\n", err);
+		goto err_malloc;
+	}
+
+	reg_id.mirror = 0;
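+	/* With a bonded device, attach the group on the other physical port
+	 * as well and keep its registration id in reg_id.mirror.
+	 */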
+	if (mlx4_is_bonded(dev)) {
+		err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
+					    (mqp->port == 1) ? 2 : 1,
+					    !!(mqp->flags &
+					    MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
+					    prot, &reg_id.mirror);
+		if (err)
+			goto err_add;
+	}
+
+	err = add_gid_entry(ibqp, gid);
+	if (err)
+		goto err_add;
+
+	if (ib_steering) {
+		memcpy(ib_steering->gid.raw, gid->raw, 16);
+		ib_steering->reg_id = reg_id;
+		mutex_lock(&mqp->mutex);
+		list_add(&ib_steering->list, &mqp->steering_rules);
+		mutex_unlock(&mqp->mutex);
+	}
+	return 0;
+
+err_add:
+	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
+			      prot, reg_id.id);
+	if (reg_id.mirror)
+		mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
+				      prot, reg_id.mirror);
+err_malloc:
+	kfree(ib_steering);
+
+	return err;
+}
+
 static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
 {
 	struct mlx4_ib_gid_entry *ge;
@@ -1268,204 +2009,67 @@ static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
 	return ret;
 }
 
-
-static int del_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
+static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 {
+	int err;
 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+	struct mlx4_dev *dev = mdev->dev;
 	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
-	struct mlx4_ib_gid_entry *ge;
 	struct net_device *ndev;
-	u8 mac[6];
+	struct mlx4_ib_gid_entry *ge;
+	struct mlx4_flow_reg_id reg_id = {0, 0};
+	enum mlx4_protocol prot =  MLX4_PROT_IB_IPV6;
+
+	if (mdev->dev->caps.steering_mode ==
+	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
+		struct mlx4_ib_steering *ib_steering;
+
+		mutex_lock(&mqp->mutex);
+		list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
+			if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
+				list_del(&ib_steering->list);
+				break;
+			}
+		}
+		mutex_unlock(&mqp->mutex);
+		if (&ib_steering->list == &mqp->steering_rules) {
+			pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
+			return -EINVAL;
+		}
+		reg_id = ib_steering->reg_id;
+		kfree(ib_steering);
+	}
+
+	err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
+				    prot, reg_id.id);
+	if (err)
+		return err;
+
+	if (mlx4_is_bonded(dev)) {
+		err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
+					    prot, reg_id.mirror);
+		if (err)
+			return err;
+	}
 
 	mutex_lock(&mqp->mutex);
 	ge = find_gid_entry(mqp, gid->raw);
 	if (ge) {
-		spin_lock(&mdev->iboe.lock);
+		spin_lock_bh(&mdev->iboe.lock);
 		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
 		if (ndev)
 			dev_hold(ndev);
-		spin_unlock(&mdev->iboe.lock);
-		rdma_get_mcast_mac((struct in6_addr *)gid, mac);
-		if (ndev) {
-			rtnl_lock();
-			dev_mc_delete(mdev->iboe.netdevs[ge->port - 1], mac, 6, 0);
-			rtnl_unlock();
+		spin_unlock_bh(&mdev->iboe.lock);
+		if (ndev)
 			dev_put(ndev);
-		}
 		list_del(&ge->list);
 		kfree(ge);
 	} else
 		pr_warn("could not find mgid entry\n");
 
 	mutex_unlock(&mqp->mutex);
-	return ge != NULL ? 0 : -EINVAL;
-}
-
-static int _mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid,
-			       int count)
-{
-	int err;
-	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
-	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
-	u64 reg_id = 0;
-	int record_err = 0;
-
-	if (mdev->dev->caps.steering_mode ==
-	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
-		struct mlx4_ib_steering *ib_steering;
-		struct mlx4_ib_steering *tmp;
-		LIST_HEAD(temp);
-
-		mutex_lock(&mqp->mutex);
-		list_for_each_entry_safe(ib_steering, tmp, &mqp->steering_rules,
-					 list) {
-			if (memcmp(ib_steering->gid.raw, gid->raw, 16))
-				continue;
-
-			if (--count < 0)
-				break;
-
-			list_del(&ib_steering->list);
-			list_add(&ib_steering->list, &temp);
-		}
-		mutex_unlock(&mqp->mutex);
-		list_for_each_entry_safe(ib_steering, tmp, &temp,
-					 list) {
-			reg_id = ib_steering->reg_id;
-
-			err = mlx4_multicast_detach(mdev->dev, &mqp->mqp,
-					gid->raw,
-					(ibqp->qp_type == IB_QPT_RAW_PACKET) ?
-					MLX4_PROT_ETH : MLX4_PROT_IB_IPV6,
-					reg_id);
-			if (err) {
-				record_err = record_err ?: err;
-				continue;
-			}
-
-			err = del_gid_entry(ibqp, gid);
-			if (err) {
-				record_err = record_err ?: err;
-				continue;
-			}
-
-			list_del(&ib_steering->list);
-			kfree(ib_steering);
-		}
-		mutex_lock(&mqp->mutex);
-		list_for_each_entry(ib_steering, &temp, list) {
-			list_add(&ib_steering->list, &mqp->steering_rules);
-		}
-		mutex_unlock(&mqp->mutex);
-		if (count) {
-			pr_warn("Couldn't release all reg_ids for mgid. Steering rule is left attached\n");
-			return -EINVAL;
-		}
-
-	} else {
-		if (mdev->dev->caps.steering_mode == MLX4_STEERING_MODE_B0 &&
-		    ibqp->qp_type == IB_QPT_RAW_PACKET)
-			gid->raw[5] = mqp->port;
-
-		err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
-				(ibqp->qp_type == IB_QPT_RAW_PACKET) ?
-				MLX4_PROT_ETH : MLX4_PROT_IB_IPV6,
-				reg_id);
-		if (err)
-			return err;
-
-		err = del_gid_entry(ibqp, gid);
-
-		if (err)
-			return err;
-	}
-
-	return record_err;
-}
-
-static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
-	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
-	int count = (mdev->dev->caps.steering_mode ==
-		     MLX4_STEERING_MODE_DEVICE_MANAGED) ?
-		    mdev->dev->caps.num_ports : 1;
-
-	return _mlx4_ib_mcg_detach(ibqp, gid, lid, count);
-}
-
-static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
-	int err = -ENODEV;
-	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
-	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
-	DECLARE_BITMAP(ports, MLX4_MAX_PORTS);
-	int i = 0;
-
-	if (mdev->dev->caps.steering_mode == MLX4_STEERING_MODE_B0 &&
-	    ibqp->qp_type == IB_QPT_RAW_PACKET)
-		gid->raw[5] = mqp->port;
-
-	if (mdev->dev->caps.steering_mode ==
-	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
-		bitmap_fill(ports, mdev->dev->caps.num_ports);
-	} else {
-		if (mqp->port <= mdev->dev->caps.num_ports) {
-			bitmap_zero(ports, mdev->dev->caps.num_ports);
-			set_bit(0, ports);
-		} else {
-			return -EINVAL;
-		}
-	}
-
-	for (; i < mdev->dev->caps.num_ports; i++) {
-		u64 reg_id;
-		struct mlx4_ib_steering *ib_steering = NULL;
-		if (!test_bit(i, ports))
-			continue;
-		if (mdev->dev->caps.steering_mode ==
-		    MLX4_STEERING_MODE_DEVICE_MANAGED) {
-			ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
-			if (!ib_steering)
-				goto err_add;
-		}
-
-		err = mlx4_multicast_attach(mdev->dev, &mqp->mqp,
-			gid->raw, i + 1,
-			!!(mqp->flags &
-				MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
-			(ibqp->qp_type == IB_QPT_RAW_PACKET) ?
-			MLX4_PROT_ETH : MLX4_PROT_IB_IPV6,
-			&reg_id);
-		if (err) {
-			kfree(ib_steering);
-			goto err_add;
-		}
-
-		err = add_gid_entry(ibqp, gid);
-		if (err) {
-			mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
-					      MLX4_PROT_IB_IPV6, reg_id);
-			kfree(ib_steering);
-			goto err_add;
-		}
-
-		if (ib_steering) {
-			memcpy(ib_steering->gid.raw, gid->raw, 16);
-			mutex_lock(&mqp->mutex);
-			list_add(&ib_steering->list, &mqp->steering_rules);
-			mutex_unlock(&mqp->mutex);
-			ib_steering->reg_id = reg_id;
-		}
-	}
-
 
 	return 0;
-
-err_add:
-	if (i > 0)
-		_mlx4_ib_mcg_detach(ibqp, gid, lid, i);
-
-	return err;
 }
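
The attach/detach pair above tracks each device-managed steering registration in a per-QP list node holding the 16-byte multicast GID and its reg_id pair, so detach can recover the firmware handle by GID. Below is a rough userspace analogue of that lookup-by-GID bookkeeping; it is only an illustration (hypothetical names, a plain singly linked list in place of the kernel's list_head, no locking).

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct steering_node {
	uint8_t gid[16];		/* multicast GID the rule was attached for */
	uint64_t reg_id;		/* handle returned by the attach call */
	struct steering_node *next;
};

/* Remember a registration so a later detach can find its handle again. */
static int remember(struct steering_node **head, const uint8_t gid[16],
		    uint64_t reg_id)
{
	struct steering_node *n = malloc(sizeof(*n));

	if (!n)
		return -1;
	memcpy(n->gid, gid, 16);
	n->reg_id = reg_id;
	n->next = *head;
	*head = n;
	return 0;
}

/* Find and unlink the node for @gid; returns 1 and the handle on success,
 * 0 when no registration is known (the kernel code warns and bails out). */
static int forget(struct steering_node **head, const uint8_t gid[16],
		  uint64_t *reg_id)
{
	struct steering_node **pp = head, *n;

	for (n = *head; n; pp = &n->next, n = n->next) {
		if (!memcmp(n->gid, gid, 16)) {
			*reg_id = n->reg_id;
			*pp = n->next;
			free(n);
			return 1;
		}
	}
	return 0;
}

int main(void)
{
	struct steering_node *rules = NULL;
	uint8_t gid[16] = { 0xff, 0x12 };	/* toy multicast GID */
	uint64_t handle;

	remember(&rules, gid, 42);
	if (!forget(&rules, gid, &handle))
		return 1;
	printf("detaching handle %llu\n", (unsigned long long)handle);
	return 0;
}
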
 
 static int init_node_data(struct mlx4_ib_dev *dev)
@@ -1489,7 +2093,7 @@ static int init_node_data(struct mlx4_ib_dev *dev)
 	if (err)
 		goto out;
 
-	memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
+	memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
 
 	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
 
@@ -1511,17 +2115,7 @@ static ssize_t show_hca(struct device *device, struct device_attribute *attr,
 {
 	struct mlx4_ib_dev *dev =
 		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
-	return sprintf(buf, "MT%d\n", dev->dev->pdev->device);
-}
-
-static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
-			   char *buf)
-{
-	struct mlx4_ib_dev *dev =
-		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
-	return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
-		       (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
-		       (int) dev->dev->caps.fw_ver & 0xffff);
+	return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
 }
 
 static ssize_t show_rev(struct device *device, struct device_attribute *attr,
@@ -1541,409 +2135,308 @@ static ssize_t show_board(struct device *device, struct device_attribute *attr,
 		       dev->dev->board_id);
 }
 
-static ssize_t show_vsd(struct device *device, struct device_attribute *attr,
-			  char *buf)
-{
-	struct mlx4_ib_dev *dev =
-		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
-	ssize_t len = MLX4_VSD_LEN;
-
-	if (dev->dev->vsd_vendor_id == PCI_VENDOR_ID_MELLANOX)
-		len = sprintf(buf, "%.*s\n", MLX4_VSD_LEN, dev->dev->vsd);
-	else
-		memcpy(buf, dev->dev->vsd, MLX4_VSD_LEN);
-
-	return len;
-}
-
 static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
-static DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
 static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
 static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);
-static DEVICE_ATTR(vsd,      S_IRUGO, show_vsd,    NULL);
 
 static struct device_attribute *mlx4_class_attributes[] = {
 	&dev_attr_hw_rev,
-	&dev_attr_fw_ver,
 	&dev_attr_hca_type,
-	&dev_attr_board_id,
-	&dev_attr_vsd
+	&dev_attr_board_id
 };
 
-static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, struct net_device *dev, u8 port)
+struct diag_counter {
+	const char *name;
+	u32 offset;
+};
+
+#define DIAG_COUNTER(_name, _offset)			\
+	{ .name = #_name, .offset = _offset }
+
+static const struct diag_counter diag_basic[] = {
+	DIAG_COUNTER(rq_num_lle, 0x00),
+	DIAG_COUNTER(sq_num_lle, 0x04),
+	DIAG_COUNTER(rq_num_lqpoe, 0x08),
+	DIAG_COUNTER(sq_num_lqpoe, 0x0C),
+	DIAG_COUNTER(rq_num_lpe, 0x18),
+	DIAG_COUNTER(sq_num_lpe, 0x1C),
+	DIAG_COUNTER(rq_num_wrfe, 0x20),
+	DIAG_COUNTER(sq_num_wrfe, 0x24),
+	DIAG_COUNTER(sq_num_mwbe, 0x2C),
+	DIAG_COUNTER(sq_num_bre, 0x34),
+	DIAG_COUNTER(sq_num_rire, 0x44),
+	DIAG_COUNTER(rq_num_rire, 0x48),
+	DIAG_COUNTER(sq_num_rae, 0x4C),
+	DIAG_COUNTER(rq_num_rae, 0x50),
+	DIAG_COUNTER(sq_num_roe, 0x54),
+	DIAG_COUNTER(sq_num_tree, 0x5C),
+	DIAG_COUNTER(sq_num_rree, 0x64),
+	DIAG_COUNTER(rq_num_rnr, 0x68),
+	DIAG_COUNTER(sq_num_rnr, 0x6C),
+	DIAG_COUNTER(rq_num_oos, 0x100),
+	DIAG_COUNTER(sq_num_oos, 0x104),
+};
+
+static const struct diag_counter diag_ext[] = {
+	DIAG_COUNTER(rq_num_dup, 0x130),
+	DIAG_COUNTER(sq_num_to, 0x134),
+};
+
+static const struct diag_counter diag_device_only[] = {
+	DIAG_COUNTER(num_cqovf, 0x1A0),
+	DIAG_COUNTER(rq_num_udsdprd, 0x118),
+};
+
+static struct rdma_hw_stats *mlx4_ib_alloc_hw_stats(struct ib_device *ibdev,
+						    u8 port_num)
 {
-        memcpy(eui, IF_LLADDR(dev), 3);
-        memcpy(eui + 5, IF_LLADDR(dev) + 3, 3);
-	if (vlan_id < 0x1000) {
-		eui[3] = vlan_id >> 8;
-		eui[4] = vlan_id & 0xff;
-	} else {
-		eui[3] = 0xff;
-		eui[4] = 0xfe;
-	}
-	eui[0] ^= 2;
+	struct mlx4_ib_dev *dev = to_mdev(ibdev);
+	struct mlx4_ib_diag_counters *diag = dev->diag_counters;
+
+	if (!diag[!!port_num].name)
+		return NULL;
+
+	return rdma_alloc_hw_stats_struct(diag[!!port_num].name,
+					  diag[!!port_num].num_counters,
+					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
 }
 
-static void update_gids_task(struct work_struct *work)
+static int mlx4_ib_get_hw_stats(struct ib_device *ibdev,
+				struct rdma_hw_stats *stats,
+				u8 port, int index)
 {
-	struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
-	struct mlx4_cmd_mailbox *mailbox;
-	union ib_gid *gids;
-	int err;
-	struct mlx4_dev	*dev = gw->dev->dev;
-
-
-	mailbox = mlx4_alloc_cmd_mailbox(dev);
-	if (IS_ERR(mailbox)) {
-		pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox));
-		goto free;
-	}
-
-	gids = mailbox->buf;
-	memcpy(gids, gw->gids, sizeof gw->gids);
-
-	if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, gw->port) ==
-					IB_LINK_LAYER_ETHERNET) {
-		err = mlx4_cmd(dev, mailbox->dma,
-			       MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
-			       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
-			       MLX4_CMD_WRAPPED);
-
-		if (err)
-			pr_warn("set port command failed\n");
-		else
-			mlx4_ib_dispatch_event(gw->dev, gw->port,
-					       IB_EVENT_GID_CHANGE);
-	}
-
-	mlx4_free_cmd_mailbox(dev, mailbox);
-free:
-	kfree(gw);
-}
-
-static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_num)
-{
-	struct mlx4_ib_dev *ibdev = to_mdev(device);
-	return mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num);
-}
-
-static void reset_gids_task(struct work_struct *work)
-{
-	struct update_gid_work *gw =
-			container_of(work, struct update_gid_work, work);
-	struct mlx4_cmd_mailbox *mailbox;
-	union ib_gid *gids;
-	int err;
-	struct mlx4_dev	*dev = gw->dev->dev;
-
-	mailbox = mlx4_alloc_cmd_mailbox(dev);
-	if (IS_ERR(mailbox)) {
-		pr_warn("reset gid table failed\n");
-		goto free;
-	}
-
-	gids = mailbox->buf;
-	memcpy(gids, gw->gids, sizeof(gw->gids));
-
-	if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, 1) ==
-					IB_LINK_LAYER_ETHERNET &&
-					dev->caps.num_ports > 0) {
-		err = mlx4_cmd(dev, mailbox->dma,
-			       MLX4_SET_PORT_GID_TABLE << 8 | 1,
-			       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
-			       MLX4_CMD_WRAPPED);
-		if (err)
-			pr_warn("set port 1 command failed\n");
-	}
-
-	if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, 2) ==
-					IB_LINK_LAYER_ETHERNET &&
-					dev->caps.num_ports > 1) {
-		err = mlx4_cmd(dev, mailbox->dma,
-			       MLX4_SET_PORT_GID_TABLE << 8 | 2,
-			       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
-			       MLX4_CMD_WRAPPED);
-		if (err)
-			pr_warn("set port 2 command failed\n");
-	}
-
-	mlx4_free_cmd_mailbox(dev, mailbox);
-free:
-	kfree(gw);
-}
-
-static int update_gid_table(struct mlx4_ib_dev *dev, int port,
-		union ib_gid *gid, int clear, int default_gid)
-{
-	struct update_gid_work *work;
+	struct mlx4_ib_dev *dev = to_mdev(ibdev);
+	struct mlx4_ib_diag_counters *diag = dev->diag_counters;
+	u32 hw_value[ARRAY_SIZE(diag_device_only) +
+		ARRAY_SIZE(diag_ext) + ARRAY_SIZE(diag_basic)] = {};
+	int ret;
 	int i;
-	int need_update = 0;
-	int free = -1;
-	int found = -1;
-	int max_gids;
-	int start_index = !default_gid;
 
-	max_gids = dev->dev->caps.gid_table_len[port];
-	for (i = start_index; i < max_gids; ++i) {
-		if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid,
-		    sizeof(*gid)))
-			found = i;
+	ret = mlx4_query_diag_counters(dev->dev,
+				       MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS,
+				       diag[!!port].offset, hw_value,
+				       diag[!!port].num_counters, port);
 
-		if (clear) {
-			if (found >= 0) {
-				need_update = 1;
-				dev->iboe.gid_table[port - 1][found] = zgid;
-				break;
-			}
-		} else {
-			if (found >= 0)
-				break;
+	if (ret)
+		return ret;
 
-			if (free < 0 &&
-			    !memcmp(&dev->iboe.gid_table[port - 1][i],
-				    &zgid, sizeof(*gid)))
-				free = i;
+	for (i = 0; i < diag[!!port].num_counters; i++)
+		stats->value[i] = hw_value[i];
+
+	return diag[!!port].num_counters;
+}
+
+static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev,
+					 const char ***name,
+					 u32 **offset,
+					 u32 *num,
+					 bool port)
+{
+	u32 num_counters;
+
+	num_counters = ARRAY_SIZE(diag_basic);
+
+	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT)
+		num_counters += ARRAY_SIZE(diag_ext);
+
+	if (!port)
+		num_counters += ARRAY_SIZE(diag_device_only);
+
+	*name = kcalloc(num_counters, sizeof(**name), GFP_KERNEL);
+	if (!*name)
+		return -ENOMEM;
+
+	*offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL);
+	if (!*offset)
+		goto err_name;
+
+	*num = num_counters;
+
+	return 0;
+
+err_name:
+	kfree(*name);
+	return -ENOMEM;
+}
+
+static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev,
+				       const char **name,
+				       u32 *offset,
+				       bool port)
+{
+	int i;
+	int j;
+
+	for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) {
+		name[i] = diag_basic[i].name;
+		offset[i] = diag_basic[i].offset;
+	}
+
+	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) {
+		for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) {
+			name[j] = diag_ext[i].name;
+			offset[j] = diag_ext[i].offset;
 		}
 	}
 
-	if (found == -1 && !clear && free < 0) {
-		pr_err("GID table of port %d is full. Can't add "GID_PRINT_FMT"\n",
-		       port, GID_PRINT_ARGS(gid));
-		return -ENOMEM;
-	}
-	if (found == -1 && clear) {
-		pr_err(GID_PRINT_FMT" is not in GID table of port %d\n", GID_PRINT_ARGS(gid), port);
-		return -EINVAL;
-	}
-	if (found == -1 && !clear && free >= 0) {
-		dev->iboe.gid_table[port - 1][free] = *gid;
-		need_update = 1;
+	if (!port) {
+		for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) {
+			name[j] = diag_device_only[i].name;
+			offset[j] = diag_device_only[i].offset;
+		}
 	}
+}
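
DIAG_COUNTER() simply pairs a printable counter name with its byte offset in the firmware's diagnostics dump, and the alloc/fill helpers above concatenate the basic, extended and device-only tables into flat name[]/offset[] arrays that the hw_stats callbacks index. A small standalone sketch of that concatenation follows, using two toy tables and no firmware access; it is illustrative only.

#include <stdint.h>
#include <stdio.h>

struct diag_counter {
	const char *name;
	uint32_t offset;
};

#define DIAG_COUNTER(_name, _offset) { .name = #_name, .offset = _offset }

static const struct diag_counter basic[] = {
	DIAG_COUNTER(rq_num_lle, 0x00),
	DIAG_COUNTER(sq_num_lle, 0x04),
};

static const struct diag_counter ext[] = {
	DIAG_COUNTER(rq_num_dup, 0x130),
};

/* Build flat name/offset arrays: basic counters always, extended ones
 * only when the device advertises per-port diagnostics. */
static uint32_t fill(const char **names, uint32_t *offsets, int have_ext)
{
	uint32_t n = 0;
	size_t i;

	for (i = 0; i < sizeof(basic) / sizeof(basic[0]); i++, n++) {
		names[n] = basic[i].name;
		offsets[n] = basic[i].offset;
	}
	if (have_ext)
		for (i = 0; i < sizeof(ext) / sizeof(ext[0]); i++, n++) {
			names[n] = ext[i].name;
			offsets[n] = ext[i].offset;
		}
	return n;
}

int main(void)
{
	const char *names[8];
	uint32_t offsets[8];
	uint32_t i, n = fill(names, offsets, 1);

	for (i = 0; i < n; i++)
		printf("%-12s at 0x%03x\n", names[i], (unsigned)offsets[i]);
	return 0;
}
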
 
-	if (!need_update)
+static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
+{
+	struct mlx4_ib_diag_counters *diag = ibdev->diag_counters;
+	int i;
+	int ret;
+	bool per_port = !!(ibdev->dev->caps.flags2 &
+		MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT);
+
+	if (mlx4_is_slave(ibdev->dev))
 		return 0;
 
-	work = kzalloc(sizeof *work, GFP_ATOMIC);
-	if (!work)
-		return -ENOMEM;
+	for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
+		/* i == 1 means we are building port counters */
+		if (i && !per_port)
+			continue;
 
-	memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof(work->gids));
-	INIT_WORK(&work->work, update_gids_task);
-	work->port = port;
-	work->dev = dev;
-	queue_work(wq, &work->work);
+		ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name,
+						    &diag[i].offset,
+						    &diag[i].num_counters, i);
+		if (ret)
+			goto err_alloc;
 
-	return 0;
-}
-
-static int reset_gid_table(struct mlx4_ib_dev *dev)
-{
-	struct update_gid_work *work;
-
-
-	work = kzalloc(sizeof(*work), GFP_ATOMIC);
-	if (!work)
-		return -ENOMEM;
-
-	memset(dev->iboe.gid_table, 0, sizeof(dev->iboe.gid_table));
-	memset(work->gids, 0, sizeof(work->gids));
-	INIT_WORK(&work->work, reset_gids_task);
-	work->dev = dev;
-	queue_work(wq, &work->work);
-	return 0;
-}
-
-/* XXX BOND Related - stub (no support for these flags in FBSD)*/
-static inline int netif_is_bond_master(struct net_device *dev)
-{
-#if 0
-	return (dev->flags & IFF_MASTER) && (dev->priv_flags & IFF_BONDING);
-#endif
-        return 0;
-}
-
-static void mlx4_make_default_gid(struct  net_device *dev, union ib_gid *gid, u8 port)
-{
-	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
-	mlx4_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev, port);
-}
-
-static u8 mlx4_ib_get_dev_port(struct net_device *dev, struct mlx4_ib_dev *ibdev)
-{
-	u8 port = 0;
-	struct mlx4_ib_iboe *iboe;
-	struct net_device *real_dev = rdma_vlan_dev_real_dev(dev) ?
-				rdma_vlan_dev_real_dev(dev) : dev;
-
-	iboe = &ibdev->iboe;
-
-	for (port = 1; port <= MLX4_MAX_PORTS; ++port)
-		if ((netif_is_bond_master(real_dev) && (real_dev == iboe->masters[port - 1])) ||
-		    (!netif_is_bond_master(real_dev) && (real_dev == iboe->netdevs[port - 1])))
-			break;
-
-	return port > MLX4_MAX_PORTS ? 0 : port;
-}
-
-static void mlx4_ib_get_dev_addr(struct net_device *dev, struct mlx4_ib_dev *ibdev, u8 port)
-{
-        struct ifaddr *ifa;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-	struct inet6_dev *in6_dev;
-	union ib_gid  *pgid;
-	struct inet6_ifaddr *ifp;
-#endif
-	union ib_gid gid;
-
-
-	if ((port == 0) || (port > MLX4_MAX_PORTS))
-		return;
-
-	/* IPv4 gids */
-        TAILQ_FOREACH(ifa, &dev->if_addrhead, ifa_link) {
-                if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET){
-                        ipv6_addr_set_v4mapped(
-				((struct sockaddr_in *) ifa->ifa_addr)->sin_addr.s_addr,
-				(struct in6_addr *)&gid);
-                        update_gid_table(ibdev, port, &gid, 0, 0);
-                }
-
-        }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-	/* IPv6 gids */
-	in6_dev = in6_dev_get(dev);
-	if (in6_dev) {
-		read_lock_bh(&in6_dev->lock);
-		list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
-			pgid = (union ib_gid *)&ifp->addr;
-				update_gid_table(ibdev, port, pgid, 0, 0);
-			}
-		read_unlock_bh(&in6_dev->lock);
-		in6_dev_put(in6_dev);
-		}
-#endif
-}
-
-static void mlx4_set_default_gid(struct mlx4_ib_dev *ibdev,
-				 struct  net_device *dev, u8 port)
-{
-	union ib_gid gid;
-	mlx4_make_default_gid(dev, &gid, port);
-	update_gid_table(ibdev, port, &gid, 0, 1);
-}
-
-static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
-{
-	struct	net_device *dev;
-
-	if (reset_gid_table(ibdev))
-		return -1;
-
-        IFNET_RLOCK_NOSLEEP();
-        TAILQ_FOREACH(dev, &V_ifnet, if_link) {
-		u8 port = mlx4_ib_get_dev_port(dev, ibdev);
-		if (port) {
-			if (!rdma_vlan_dev_real_dev(dev) &&
-			    !netif_is_bond_master(dev))
-				mlx4_set_default_gid(ibdev, dev, port);
-			mlx4_ib_get_dev_addr(dev, ibdev, port);
-		}
+		mlx4_ib_fill_diag_counters(ibdev, diag[i].name,
+					   diag[i].offset, i);
 	}
 
-        IFNET_RUNLOCK_NOSLEEP();
+	ibdev->ib_dev.get_hw_stats	= mlx4_ib_get_hw_stats;
+	ibdev->ib_dev.alloc_hw_stats	= mlx4_ib_alloc_hw_stats;
 
 	return 0;
+
+err_alloc:
+	if (i) {
+		kfree(diag[i - 1].name);
+		kfree(diag[i - 1].offset);
+	}
+
+	return ret;
+}
+
+static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev)
+{
+	int i;
+
+	for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
+		kfree(ibdev->diag_counters[i].offset);
+		kfree(ibdev->diag_counters[i].name);
+	}
+}
+
+#define MLX4_IB_INVALID_MAC	((u64)-1)
+static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
+			       struct net_device *dev,
+			       int port)
+{
+	u64 new_smac = 0;
+	u64 release_mac = MLX4_IB_INVALID_MAC;
+	struct mlx4_ib_qp *qp;
+
+	new_smac = mlx4_mac_to_u64(IF_LLADDR(dev));
+
+	atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
+
+	/* no need to update QP1 or register a MAC in non-SRIOV mode */
+	if (!mlx4_is_mfunc(ibdev->dev))
+		return;
+
+	mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
+	qp = ibdev->qp1_proxy[port - 1];
+	if (qp) {
+		int new_smac_index;
+		u64 old_smac;
+		struct mlx4_update_qp_params update_params;
+
+		mutex_lock(&qp->mutex);
+		old_smac = qp->pri.smac;
+		if (new_smac == old_smac)
+			goto unlock;
+
+		new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
+
+		if (new_smac_index < 0)
+			goto unlock;
+
+		update_params.smac_index = new_smac_index;
+		if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
+				   &update_params)) {
+			release_mac = new_smac;
+			goto unlock;
+		}
+		/* if old port was zero, no mac was yet registered for this QP */
+		if (qp->pri.smac_port)
+			release_mac = old_smac;
+		qp->pri.smac = new_smac;
+		qp->pri.smac_port = port;
+		qp->pri.smac_index = new_smac_index;
+	}
+
+unlock:
+	if (release_mac != MLX4_IB_INVALID_MAC)
+		mlx4_unregister_mac(ibdev->dev, port, release_mac);
+	if (qp)
+		mutex_unlock(&qp->mutex);
+	mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
 }
 
 static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
-				 struct net_device *dev, unsigned long event)
+				 struct net_device *dev,
+				 unsigned long event)
+
 {
 	struct mlx4_ib_iboe *iboe;
+	int update_qps_port = -1;
 	int port;
-	int init = 0;
-	unsigned long flags;
 
 	iboe = &ibdev->iboe;
 
-	spin_lock_irqsave(&iboe->lock, flags);
+	spin_lock_bh(&iboe->lock);
 	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
-		struct net_device *old_netdev = iboe->netdevs[port - 1];
-/* XXX BOND related */
-#if 0
-		struct net_device *old_master = iboe->masters[port - 1];
-#endif
-		iboe->masters[port - 1] = NULL;
+
 		iboe->netdevs[port - 1] =
 			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
 
-
-		if (old_netdev != iboe->netdevs[port - 1])
-			init = 1;
 		if (dev == iboe->netdevs[port - 1] &&
-		    event == NETDEV_CHANGEADDR)
-			init = 1;
-/* XXX BOND related */
-#if 0
-                if (iboe->netdevs[port - 1] && netif_is_bond_slave(iboe->netdevs[port - 1]))
-			iboe->masters[port - 1] = iboe->netdevs[port - 1]->master;
+		    (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
+		     event == NETDEV_UP || event == NETDEV_CHANGE))
+			update_qps_port = port;
 
-		/* if bonding is used it is possible that we add it to masters only after
-		   IP address is assigned to the net bonding interface */
-		if (old_master != iboe->masters[port - 1])
-			init = 1;
-#endif
 	}
+	spin_unlock_bh(&iboe->lock);
 
-	spin_unlock_irqrestore(&iboe->lock, flags);
-
-	if (init)
-		if (mlx4_ib_init_gid_table(ibdev))
-			pr_warn("Fail to reset gid table\n");
+	if (update_qps_port > 0)
+		mlx4_ib_update_qps(ibdev, dev, update_qps_port);
 }
 
-static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event,
-				void *ptr)
+static int mlx4_ib_netdev_event(struct notifier_block *this,
+				unsigned long event, void *ptr)
 {
-	struct net_device *dev = ptr;
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 	struct mlx4_ib_dev *ibdev;
 
-	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
+	if (!net_eq(dev_net(dev), &init_net))
+		return NOTIFY_DONE;
 
+	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
 	mlx4_ib_scan_netdevs(ibdev, dev, event);
 
 	return NOTIFY_DONE;
 }
 
-/* This function initializes the gid table only if the event_netdev real device is an iboe
- * device, will be invoked by the inet/inet6 events */
-static int mlx4_ib_inet_event(struct notifier_block *this, unsigned long event,
-                                void *ptr)
-{
-        struct net_device *event_netdev = ptr;
-        struct mlx4_ib_dev *ibdev;
-        struct mlx4_ib_iboe *ibdev_iboe;
-        int port = 0;
-
-        ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet);
-
-        struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ?
-                        rdma_vlan_dev_real_dev(event_netdev) :
-                        event_netdev;
-
-        ibdev_iboe = &ibdev->iboe;
-
-        port = mlx4_ib_get_dev_port(real_dev, ibdev);
-
-        /* Perform init_gid_table if the event real_dev is the net_device which represents this port,
-         * otherwise this event is not related and would be ignored.*/
-        if(port && (real_dev == ibdev_iboe->netdevs[port - 1]))
-                if (mlx4_ib_init_gid_table(ibdev))
-                        pr_warn("Fail to reset gid table\n");
-
-        return NOTIFY_DONE;
-}
-
-
 static void init_pkeys(struct mlx4_ib_dev *ibdev)
 {
 	int port;
@@ -1951,7 +2444,8 @@ static void init_pkeys(struct mlx4_ib_dev *ibdev)
 	int i;
 
 	if (mlx4_is_master(ibdev->dev)) {
-		for (slave = 0; slave <= ibdev->dev->num_vfs; ++slave) {
+		for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
+		     ++slave) {
 			for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
 				for (i = 0;
 				     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
@@ -1978,255 +2472,52 @@ static void init_pkeys(struct mlx4_ib_dev *ibdev)
 
 static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
 {
-	char name[32];
-	int eq_per_port = 0;
-	int added_eqs = 0;
-	int total_eqs = 0;
-	int i, j, eq;
+	int i, j, eq = 0, total_eqs = 0;
 
-	/* Legacy mode or comp_pool is not large enough */
-	if (dev->caps.comp_pool == 0 ||
-	    dev->caps.num_ports > dev->caps.comp_pool)
-		return;
-
-	eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/
-					dev->caps.num_ports);
-
-	/* Init eq table */
-	added_eqs = 0;
-	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
-		added_eqs += eq_per_port;
-
-	total_eqs = dev->caps.num_comp_vectors + added_eqs;
-
-	ibdev->eq_table = kzalloc(total_eqs * sizeof(int), GFP_KERNEL);
+	ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
+				  sizeof(ibdev->eq_table[0]), GFP_KERNEL);
 	if (!ibdev->eq_table)
 		return;
 
-	ibdev->eq_added = added_eqs;
-
-	eq = 0;
-	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
-		for (j = 0; j < eq_per_port; j++) {
-			sprintf(name, "mlx4-ib-%d-%d@%d:%d:%d:%d", i, j,
-						pci_get_domain(dev->pdev->dev.bsddev),
-						pci_get_bus(dev->pdev->dev.bsddev),
-						PCI_SLOT(dev->pdev->devfn),
-						PCI_FUNC(dev->pdev->devfn));
-
-			/* Set IRQ for specific name (per ring) */
-			if (mlx4_assign_eq(dev, name,
-					   &ibdev->eq_table[eq])) {
-				/* Use legacy (same as mlx4_en driver) */
-				pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq);
-				ibdev->eq_table[eq] =
-					(eq % dev->caps.num_comp_vectors);
-			}
-			eq++;
+	for (i = 1; i <= dev->caps.num_ports; i++) {
+		for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
+		     j++, total_eqs++) {
+			if (i > 1 &&  mlx4_is_eq_shared(dev, total_eqs))
+				continue;
+			ibdev->eq_table[eq] = total_eqs;
+			if (!mlx4_assign_eq(dev, i,
+					    &ibdev->eq_table[eq]))
+				eq++;
+			else
+				ibdev->eq_table[eq] = -1;
 		}
 	}
 
-	/* Fill the reset of the vector with legacy EQ */
-	for (i = 0, eq = added_eqs; i < dev->caps.num_comp_vectors; i++)
-		ibdev->eq_table[eq++] = i;
+	for (i = eq; i < dev->caps.num_comp_vectors;
+	     ibdev->eq_table[i++] = -1)
+		;
 
 	/* Advertise the new number of EQs to clients */
-	ibdev->ib_dev.num_comp_vectors = total_eqs;
+	ibdev->ib_dev.num_comp_vectors = eq;
 }
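
The rewritten mlx4_ib_alloc_eqs() asks the core driver for an interrupt vector per port/EQ pair, keeps only the vectors it actually obtained, pads the rest of the table with -1 and advertises the obtained count as num_comp_vectors. A tiny sketch of that fill-then-pad pattern, with a hypothetical fake_assign_eq() that fails for odd requests:

#include <stdio.h>

/* Hypothetical stand-in for mlx4_assign_eq(); pretend odd requests fail. */
static int fake_assign_eq(int want, int *vector)
{
	if (want & 1)
		return -1;
	*vector = want;
	return 0;
}

/* Record the vectors we managed to reserve, pad the rest with -1 and
 * return how many are usable (what the driver advertises to clients). */
static int alloc_eq_table(int *eq_table, int max_vectors)
{
	int want, got = 0;

	for (want = 0; want < max_vectors; want++)
		if (!fake_assign_eq(want, &eq_table[got]))
			got++;

	for (int i = got; i < max_vectors; i++)
		eq_table[i] = -1;

	return got;
}

int main(void)
{
	int table[8];
	int usable = alloc_eq_table(table, 8);

	printf("advertising %d completion vectors\n", usable);
	return 0;
}
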
 
 static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
 {
 	int i;
+	int total_eqs = ibdev->ib_dev.num_comp_vectors;
 
-	/* no additional eqs were added */
+	/* no eqs were allocated */
 	if (!ibdev->eq_table)
 		return;
 
 	/* Reset the advertised EQ number */
-	ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
+	ibdev->ib_dev.num_comp_vectors = 0;
 
-	/* Free only the added eqs */
-	for (i = 0; i < ibdev->eq_added; i++) {
-		/* Don't free legacy eqs if used */
-		if (ibdev->eq_table[i] <= dev->caps.num_comp_vectors)
-			continue;
+	for (i = 0; i < total_eqs; i++)
 		mlx4_release_eq(dev, ibdev->eq_table[i]);
-	}
 
 	kfree(ibdev->eq_table);
-}
-
-/*
- * create show function and a device_attribute struct pointing to
- * the function for _name
- */
-#define DEVICE_DIAG_RPRT_ATTR(_name, _offset, _op_mod)		\
-static ssize_t show_rprt_##_name(struct device *dev,		\
-				 struct device_attribute *attr,	\
-				 char *buf){			\
-	return show_diag_rprt(dev, buf, _offset, _op_mod);	\
-}								\
-static DEVICE_ATTR(_name, S_IRUGO, show_rprt_##_name, NULL);
-
-#define MLX4_DIAG_RPRT_CLEAR_DIAGS 3
-
-static size_t show_diag_rprt(struct device *device, char *buf,
-			     u32 offset, u8 op_modifier)
-{
-	size_t ret;
-	u32 counter_offset = offset;
-	u32 diag_counter = 0;
-	struct mlx4_ib_dev *dev = container_of(device, struct mlx4_ib_dev,
-					       ib_dev.dev);
-
-	ret = mlx4_query_diag_counters(dev->dev, 1, op_modifier,
-				       &counter_offset, &diag_counter);
-	if (ret)
-		return ret;
-
-	return sprintf(buf, "%d\n", diag_counter);
-}
-
-static ssize_t clear_diag_counters(struct device *device,
-				   struct device_attribute *attr,
-				   const char *buf, size_t length)
-{
-	size_t ret;
-	struct mlx4_ib_dev *dev = container_of(device, struct mlx4_ib_dev,
-					       ib_dev.dev);
-
-	ret = mlx4_query_diag_counters(dev->dev, 0, MLX4_DIAG_RPRT_CLEAR_DIAGS,
-				       NULL, NULL);
-	if (ret)
-		return ret;
-
-	return length;
-}
-
-DEVICE_DIAG_RPRT_ATTR(rq_num_lle	, 0x00, 2);
-DEVICE_DIAG_RPRT_ATTR(sq_num_lle	, 0x04, 2);
-DEVICE_DIAG_RPRT_ATTR(rq_num_lqpoe	, 0x08, 2);
-DEVICE_DIAG_RPRT_ATTR(sq_num_lqpoe 	, 0x0C, 2);
-DEVICE_DIAG_RPRT_ATTR(rq_num_lpe	, 0x18, 2);
-DEVICE_DIAG_RPRT_ATTR(sq_num_lpe	, 0x1C, 2);
-DEVICE_DIAG_RPRT_ATTR(rq_num_wrfe	, 0x20, 2);
-DEVICE_DIAG_RPRT_ATTR(sq_num_wrfe	, 0x24, 2);
-DEVICE_DIAG_RPRT_ATTR(sq_num_mwbe	, 0x2C, 2);
-DEVICE_DIAG_RPRT_ATTR(sq_num_bre	, 0x34, 2);
-DEVICE_DIAG_RPRT_ATTR(rq_num_lae	, 0x38, 2);
-DEVICE_DIAG_RPRT_ATTR(sq_num_rire	, 0x44, 2);
-DEVICE_DIAG_RPRT_ATTR(rq_num_rire	, 0x48, 2);
-DEVICE_DIAG_RPRT_ATTR(sq_num_rae	, 0x4C, 2);
-DEVICE_DIAG_RPRT_ATTR(rq_num_rae	, 0x50, 2);
-DEVICE_DIAG_RPRT_ATTR(sq_num_roe	, 0x54, 2);
-DEVICE_DIAG_RPRT_ATTR(sq_num_tree	, 0x5C, 2);
-DEVICE_DIAG_RPRT_ATTR(sq_num_rree	, 0x64, 2);
-DEVICE_DIAG_RPRT_ATTR(rq_num_rnr	, 0x68, 2);
-DEVICE_DIAG_RPRT_ATTR(sq_num_rnr	, 0x6C, 2);
-DEVICE_DIAG_RPRT_ATTR(rq_num_oos	, 0x100, 2);
-DEVICE_DIAG_RPRT_ATTR(sq_num_oos	, 0x104, 2);
-DEVICE_DIAG_RPRT_ATTR(rq_num_mce	, 0x108, 2);
-DEVICE_DIAG_RPRT_ATTR(rq_num_udsdprd	, 0x118, 2);
-DEVICE_DIAG_RPRT_ATTR(rq_num_ucsdprd	, 0x120, 2);
-DEVICE_DIAG_RPRT_ATTR(num_cqovf		, 0x1A0, 2);
-DEVICE_DIAG_RPRT_ATTR(num_eqovf		, 0x1A4, 2);
-DEVICE_DIAG_RPRT_ATTR(num_baddb		, 0x1A8, 2);
-
-static DEVICE_ATTR(clear_diag, S_IWUSR, NULL, clear_diag_counters);
-
-static struct attribute *diag_rprt_attrs[] = {
-	&dev_attr_rq_num_lle.attr,
-	&dev_attr_sq_num_lle.attr,
-	&dev_attr_rq_num_lqpoe.attr,
-	&dev_attr_sq_num_lqpoe.attr,
-	&dev_attr_rq_num_lpe.attr,
-	&dev_attr_sq_num_lpe.attr,
-	&dev_attr_rq_num_wrfe.attr,
-	&dev_attr_sq_num_wrfe.attr,
-	&dev_attr_sq_num_mwbe.attr,
-	&dev_attr_sq_num_bre.attr,
-	&dev_attr_rq_num_lae.attr,
-	&dev_attr_sq_num_rire.attr,
-	&dev_attr_rq_num_rire.attr,
-	&dev_attr_sq_num_rae.attr,
-	&dev_attr_rq_num_rae.attr,
-	&dev_attr_sq_num_roe.attr,
-	&dev_attr_sq_num_tree.attr,
-	&dev_attr_sq_num_rree.attr,
-	&dev_attr_rq_num_rnr.attr,
-	&dev_attr_sq_num_rnr.attr,
-	&dev_attr_rq_num_oos.attr,
-	&dev_attr_sq_num_oos.attr,
-	&dev_attr_rq_num_mce.attr,
-	&dev_attr_rq_num_udsdprd.attr,
-	&dev_attr_rq_num_ucsdprd.attr,
-	&dev_attr_num_cqovf.attr,
-	&dev_attr_num_eqovf.attr,
-	&dev_attr_num_baddb.attr,
-	&dev_attr_clear_diag.attr,
-	NULL
-};
-
-static struct attribute_group diag_counters_group = {
-	.name  = "diag_counters",
-	.attrs  = diag_rprt_attrs
-};
-
-static void init_dev_assign(void)
-{
-	int i = 1;
-
-	spin_lock_init(&dev_num_str_lock);
-	if (mlx4_fill_dbdf2val_tbl(&dev_assign_str))
-		return;
-	dev_num_str_bitmap =
-		kmalloc(BITS_TO_LONGS(MAX_NUM_STR_BITMAP) * sizeof(long),
-			GFP_KERNEL);
-	if (!dev_num_str_bitmap) {
-		pr_warn("bitmap alloc failed -- cannot apply dev_assign_str parameter\n");
-		return;
-	}
-	bitmap_zero(dev_num_str_bitmap, MAX_NUM_STR_BITMAP);
-	while ((i < MLX4_DEVS_TBL_SIZE) && (dev_assign_str.tbl[i].dbdf !=
-	       MLX4_ENDOF_TBL)) {
-		if (bitmap_allocate_region(dev_num_str_bitmap,
-					   dev_assign_str.tbl[i].val[0], 0))
-			goto err;
-		i++;
-	}
-	dr_active = 1;
-	return;
-
-err:
-	kfree(dev_num_str_bitmap);
-	dev_num_str_bitmap = NULL;
-	pr_warn("mlx4_ib: The value of 'dev_assign_str' parameter "
-			    "is incorrect. The parameter value is discarded!");
-}
-
-static int mlx4_ib_dev_idx(struct mlx4_dev *dev)
-{
-	int i, val;
-
-	if (!dr_active)
-		return -1;
-	if (!dev)
-		return -1;
-	if (mlx4_get_val(dev_assign_str.tbl, dev->pdev, 0, &val))
-		return -1;
-
-	if (val != DEFAULT_TBL_VAL) {
-		dev->flags |= MLX4_FLAG_DEV_NUM_STR;
-		return val;
-	}
-
-	spin_lock(&dev_num_str_lock);
-	i = bitmap_find_free_region(dev_num_str_bitmap, MAX_NUM_STR_BITMAP, 0);
-	spin_unlock(&dev_num_str_lock);
-	if (i >= 0)
-		return i;
-
-	return -1;
+	ibdev->eq_table = NULL;
 }
 
 static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
@@ -2236,42 +2527,55 @@ static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
 	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
 	int err;
 
-	if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) {
-		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
-		immutable->max_mad_size = IB_MGMT_MAD_SIZE;
-	} else {
-		if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
-			immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
-		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCEV2)
-			immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
-				RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
-		immutable->core_cap_flags |= RDMA_CORE_PORT_RAW_PACKET;
-		if (immutable->core_cap_flags & (RDMA_CORE_PORT_IBA_ROCE |
-		    RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP))
-			immutable->max_mad_size = IB_MGMT_MAD_SIZE;
-	}
-
-	err = ib_query_port(ibdev, port_num, &attr);
+	err = mlx4_ib_query_port(ibdev, port_num, &attr);
 	if (err)
 		return err;
 
 	immutable->pkey_tbl_len = attr.pkey_tbl_len;
 	immutable->gid_tbl_len = attr.gid_tbl_len;
 
+	if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) {
+		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
+	} else {
+		if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
+			immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
+		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
+			immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
+				RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+	}
+
+	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+
 	return 0;
 }
 
+static void get_fw_ver_str(struct ib_device *device, char *str,
+			   size_t str_len)
+{
+	struct mlx4_ib_dev *dev =
+		container_of(device, struct mlx4_ib_dev, ib_dev);
+	snprintf(str, str_len, "%d.%d.%d",
+		 (int) (dev->dev->caps.fw_ver >> 32),
+		 (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
+		 (int) dev->dev->caps.fw_ver & 0xffff);
+}
+
 static void *mlx4_ib_add(struct mlx4_dev *dev)
 {
 	struct mlx4_ib_dev *ibdev;
-	int num_ports = 0;
+	int num_ports;
 	int i, j;
 	int err;
 	struct mlx4_ib_iboe *iboe;
-	int dev_idx;
+	int ib_num_ports = 0;
+	int num_req_counters;
+	int allocated;
+	u32 counter_index;
+	struct counter_index *new_counter_index = NULL;
 
-        pr_info_once("%s", mlx4_ib_version);
+	pr_info_once("%s", mlx4_ib_version);
 
+	num_ports = 0;
 	mlx4_foreach_ib_transport_port(i, dev)
 		num_ports++;
 
@@ -2281,7 +2585,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 
 	ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
 	if (!ibdev) {
-		dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
+		dev_err(&dev->persist->pdev->dev,
+			"Device struct alloc failed\n");
 		return NULL;
 	}
 
@@ -2293,29 +2598,27 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
 		goto err_pd;
 
-	ibdev->priv_uar.map = ioremap(ibdev->priv_uar.pfn << PAGE_SHIFT,
-		PAGE_SIZE);
-
-	if (!ibdev->priv_uar.map)
+	ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
+				 PAGE_SIZE);
+	if (!ibdev->uar_map)
 		goto err_uar;
-
 	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
 
 	ibdev->dev = dev;
+	ibdev->bond_next_port	= 0;
 
-	dev_idx = mlx4_ib_dev_idx(dev);
-	if (dev_idx >= 0)
-		sprintf(ibdev->ib_dev.name, "mlx4_%d", dev_idx);
-	else
-		strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
-
+	strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
 	ibdev->ib_dev.owner		= THIS_MODULE;
 	ibdev->ib_dev.node_type		= RDMA_NODE_IB_CA;
 	ibdev->ib_dev.local_dma_lkey	= dev->caps.reserved_lkey;
 	ibdev->num_ports		= num_ports;
-	ibdev->ib_dev.phys_port_cnt     = ibdev->num_ports;
+	ibdev->ib_dev.phys_port_cnt     = mlx4_is_bonded(dev) ?
+						1 : ibdev->num_ports;
 	ibdev->ib_dev.num_comp_vectors	= dev->caps.num_comp_vectors;
-	ibdev->ib_dev.dma_device	= &dev->pdev->dev;
+	ibdev->ib_dev.dma_device	= &dev->persist->pdev->dev;
+	ibdev->ib_dev.get_netdev	= mlx4_ib_get_netdev;
+	ibdev->ib_dev.add_gid		= mlx4_ib_add_gid;
+	ibdev->ib_dev.del_gid		= mlx4_ib_del_gid;
 
 	if (dev->caps.userspace_caps)
 		ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
@@ -2329,6 +2632,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
 		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
 		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
+		(1ull << IB_USER_VERBS_CMD_REREG_MR)		|
 		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
 		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
 		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
@@ -2357,10 +2661,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	ibdev->ib_dev.alloc_ucontext	= mlx4_ib_alloc_ucontext;
 	ibdev->ib_dev.dealloc_ucontext	= mlx4_ib_dealloc_ucontext;
 	ibdev->ib_dev.mmap		= mlx4_ib_mmap;
-/* XXX FBSD has no support for get_unmapped_area function */
-#if 0
-	ibdev->ib_dev.get_unmapped_area = mlx4_ib_get_unmapped_area;
-#endif
 	ibdev->ib_dev.alloc_pd		= mlx4_ib_alloc_pd;
 	ibdev->ib_dev.dealloc_pd	= mlx4_ib_dealloc_pd;
 	ibdev->ib_dev.create_ah		= mlx4_ib_create_ah;
@@ -2385,17 +2685,16 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	ibdev->ib_dev.req_notify_cq	= mlx4_ib_arm_cq;
 	ibdev->ib_dev.get_dma_mr	= mlx4_ib_get_dma_mr;
 	ibdev->ib_dev.reg_user_mr	= mlx4_ib_reg_user_mr;
+	ibdev->ib_dev.rereg_user_mr	= mlx4_ib_rereg_user_mr;
 	ibdev->ib_dev.dereg_mr		= mlx4_ib_dereg_mr;
-	ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr;
-	ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list;
-	ibdev->ib_dev.free_fast_reg_page_list  = mlx4_ib_free_fast_reg_page_list;
+	ibdev->ib_dev.alloc_mr		= mlx4_ib_alloc_mr;
+	ibdev->ib_dev.map_mr_sg		= mlx4_ib_map_mr_sg;
 	ibdev->ib_dev.attach_mcast	= mlx4_ib_mcg_attach;
 	ibdev->ib_dev.detach_mcast	= mlx4_ib_mcg_detach;
 	ibdev->ib_dev.process_mad	= mlx4_ib_process_mad;
 	ibdev->ib_dev.get_port_immutable = mlx4_port_immutable;
-	ibdev->ib_dev.get_netdev	= mlx4_ib_get_netdev;
-	ibdev->ib_dev.ioctl		= mlx4_ib_ioctl;
-	ibdev->ib_dev.query_values	= mlx4_ib_query_values;
+	ibdev->ib_dev.get_dev_fw_str    = get_fw_ver_str;
+	ibdev->ib_dev.disassociate_ucontext = mlx4_ib_disassociate_ucontext;
 
 	if (!mlx4_is_slave(ibdev->dev)) {
 		ibdev->ib_dev.alloc_fmr		= mlx4_ib_fmr_alloc;
@@ -2404,9 +2703,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 		ibdev->ib_dev.dealloc_fmr	= mlx4_ib_fmr_dealloc;
 	}
 
-	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW) {
+	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
+	    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
 		ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw;
-		ibdev->ib_dev.bind_mw = mlx4_ib_bind_mw;
 		ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw;
 
 		ibdev->ib_dev.uverbs_cmd_mask |=
@@ -2422,28 +2721,20 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
 	}
 
-	/*
-	 * Set experimental data
-	 */
-	ibdev->ib_dev.uverbs_exp_cmd_mask	=
-		(1ull << IB_USER_VERBS_EXP_CMD_CREATE_QP)	|
-		(1ull << IB_USER_VERBS_EXP_CMD_MODIFY_CQ)	|
-		(1ull << IB_USER_VERBS_EXP_CMD_QUERY_DEVICE)	|
-		(1ull << IB_USER_VERBS_EXP_CMD_CREATE_CQ);
-	ibdev->ib_dev.exp_create_qp	= mlx4_ib_exp_create_qp;
-	ibdev->ib_dev.exp_query_device	= mlx4_ib_exp_query_device;
 	if (check_flow_steering_support(dev)) {
+		ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
+		ibdev->ib_dev.create_flow	= mlx4_ib_create_flow;
+		ibdev->ib_dev.destroy_flow	= mlx4_ib_destroy_flow;
+
 		ibdev->ib_dev.uverbs_ex_cmd_mask	|=
 			(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
 			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
-		ibdev->ib_dev.create_flow	= mlx4_ib_create_flow;
-		ibdev->ib_dev.destroy_flow	= mlx4_ib_destroy_flow;
-	} else {
-		pr_debug("Device managed flow steering is unavailable for this configuration.\n");
 	}
-	/*
-	 * End of experimental data
-	 */
+
+	ibdev->ib_dev.uverbs_ex_cmd_mask |=
+		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
+		(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
+		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
 
 	mlx4_ib_alloc_eqs(dev, ibdev);
 
@@ -2451,36 +2742,75 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 
 	if (init_node_data(ibdev))
 		goto err_map;
+	mlx4_init_sl2vl_tbl(ibdev);
 
 	for (i = 0; i < ibdev->num_ports; ++i) {
+		mutex_init(&ibdev->counters_table[i].mutex);
+		INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list);
+	}
+
+	num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
+	for (i = 0; i < num_req_counters; ++i) {
+		mutex_init(&ibdev->qp1_proxy_lock[i]);
+		allocated = 0;
 		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
 						IB_LINK_LAYER_ETHERNET) {
-			if (mlx4_is_slave(dev)) {
-				ibdev->counters[i].status = mlx4_counter_alloc(ibdev->dev,
-									       i + 1,
-									       &ibdev->counters[i].counter_index);
-			} else {/* allocating the PF IB default counter indices reserved in mlx4_init_counters_table */
-				ibdev->counters[i].counter_index = ((i + 1) << 1) - 1;
-				ibdev->counters[i].status = 0;
-			}
-
-			dev_info(&dev->pdev->dev,
-				 "%s: allocated counter index %d for port %d\n",
-				 __func__, ibdev->counters[i].counter_index, i+1);
-		} else {
-			ibdev->counters[i].counter_index = MLX4_SINK_COUNTER_INDEX;
-			ibdev->counters[i].status = -ENOSPC;
+			err = mlx4_counter_alloc(ibdev->dev, &counter_index);
+			/* if failed to allocate a new counter, use default */
+			if (err)
+				counter_index =
+					mlx4_get_default_counter_index(dev,
+								       i + 1);
+			else
+				allocated = 1;
+		} else { /* IB_LINK_LAYER_INFINIBAND: use the default counter */
+			counter_index = mlx4_get_default_counter_index(dev,
+								       i + 1);
 		}
+		new_counter_index = kmalloc(sizeof(*new_counter_index),
+					    GFP_KERNEL);
+		if (!new_counter_index) {
+			if (allocated)
+				mlx4_counter_free(ibdev->dev, counter_index);
+			goto err_counter;
+		}
+		new_counter_index->index = counter_index;
+		new_counter_index->allocated = allocated;
+		list_add_tail(&new_counter_index->list,
+			      &ibdev->counters_table[i].counters_list);
+		ibdev->counters_table[i].default_counter = counter_index;
+		pr_info("counter index %d for port %d allocated %d\n",
+			counter_index, i + 1, allocated);
 	}
+	if (mlx4_is_bonded(dev))
+		for (i = 1; i < ibdev->num_ports ; ++i) {
+			new_counter_index =
+					kmalloc(sizeof(struct counter_index),
+						GFP_KERNEL);
+			if (!new_counter_index)
+				goto err_counter;
+			new_counter_index->index = counter_index;
+			new_counter_index->allocated = 0;
+			list_add_tail(&new_counter_index->list,
+				      &ibdev->counters_table[i].counters_list);
+			ibdev->counters_table[i].default_counter =
+								counter_index;
+		}
+
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+		ib_num_ports++;
 
 	spin_lock_init(&ibdev->sm_lock);
 	mutex_init(&ibdev->cap_mask_mutex);
+	INIT_LIST_HEAD(&ibdev->qp_list);
+	spin_lock_init(&ibdev->reset_flow_resource_lock);
 
-	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
-	    !mlx4_is_mfunc(dev)) {
+	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
+	    ib_num_ports) {
 		ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
 		err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
-					    MLX4_IB_UC_STEER_QPN_ALIGN, &ibdev->steer_qpn_base, 0);
+					    MLX4_IB_UC_STEER_QPN_ALIGN,
+					    &ibdev->steer_qpn_base, 0);
 		if (err)
 			goto err_counter;
 
@@ -2489,53 +2819,59 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 				sizeof(long),
 				GFP_KERNEL);
 		if (!ibdev->ib_uc_qpns_bitmap) {
-			dev_err(&dev->pdev->dev, "bit map alloc failed\n");
+			dev_err(&dev->persist->pdev->dev,
+				"bit map alloc failed\n");
 			goto err_steer_qp_release;
 		}
 
 		bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);
 
-		err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(dev, ibdev->steer_qpn_base,
-				ibdev->steer_qpn_base + ibdev->steer_qpn_count - 1);
+		err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
+				dev, ibdev->steer_qpn_base,
+				ibdev->steer_qpn_base +
+				ibdev->steer_qpn_count - 1);
 		if (err)
 			goto err_steer_free_bitmap;
 	}
 
-	if (ib_register_device(&ibdev->ib_dev, NULL))
+	for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
+		atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
+
+	if (mlx4_ib_alloc_diag_counters(ibdev))
 		goto err_steer_free_bitmap;
 
+	if (ib_register_device(&ibdev->ib_dev, NULL))
+		goto err_diag_counters;
+
 	if (mlx4_ib_mad_init(ibdev))
 		goto err_reg;
 
 	if (mlx4_ib_init_sriov(ibdev))
 		goto err_mad;
 
-	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) {
+	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE ||
+	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
 		if (!iboe->nb.notifier_call) {
 			iboe->nb.notifier_call = mlx4_ib_netdev_event;
 			err = register_netdevice_notifier(&iboe->nb);
 			if (err) {
 				iboe->nb.notifier_call = NULL;
-				goto err_notify;
+				goto err_notif;
 			}
 		}
-		if (!iboe->nb_inet.notifier_call) {
-			iboe->nb_inet.notifier_call = mlx4_ib_inet_event;
-			err = register_inetaddr_notifier(&iboe->nb_inet);
+		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
+			err = mlx4_config_roce_v2_port(dev, ROCE_V2_UDP_DPORT);
 			if (err) {
-				iboe->nb_inet.notifier_call = NULL;
-				goto err_notify;
+				goto err_notif;
 			}
 		}
-		mlx4_ib_scan_netdevs(ibdev, NULL, 0);
 	}
+
 	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
 		if (device_create_file(&ibdev->ib_dev.dev,
 				       mlx4_class_attributes[j]))
-			goto err_notify;
+			goto err_notif;
 	}
-	if (sysfs_create_group(&ibdev->ib_dev.dev.kobj, &diag_counters_group))
-		goto err_notify;
 
 	ibdev->ib_active = true;
 
@@ -2553,22 +2889,12 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	}
 	return ibdev;
 
-err_notify:
-	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
-                device_remove_file(&ibdev->ib_dev.dev,
-                        mlx4_class_attributes[j]);
-        }
-
+err_notif:
 	if (ibdev->iboe.nb.notifier_call) {
 		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
 			pr_warn("failure unregistering notifier\n");
 		ibdev->iboe.nb.notifier_call = NULL;
 	}
-	if (ibdev->iboe.nb_inet.notifier_call) {
-		if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
-			pr_warn("failure unregistering notifier\n");
-		ibdev->iboe.nb_inet.notifier_call = NULL;
-	}
 	flush_workqueue(wq);
 
 	mlx4_ib_close_sriov(ibdev);
@@ -2579,26 +2905,22 @@ err_mad:
 err_reg:
 	ib_unregister_device(&ibdev->ib_dev);
 
+err_diag_counters:
+	mlx4_ib_diag_cleanup(ibdev);
+
 err_steer_free_bitmap:
 	kfree(ibdev->ib_uc_qpns_bitmap);
 
 err_steer_qp_release:
-	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED)
+	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
 		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
-				ibdev->steer_qpn_count);
+				      ibdev->steer_qpn_count);
 err_counter:
-	for (; i; --i) {
-		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i) ==
-						IB_LINK_LAYER_ETHERNET) {
-			mlx4_counter_free(ibdev->dev,
-					  i,
-					  ibdev->counters[i - 1].counter_index);
-		}
-	}
+	for (i = 0; i < ibdev->num_ports; ++i)
+		mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
 
 err_map:
-	iounmap(ibdev->priv_uar.map);
-	mlx4_ib_free_eqs(dev, ibdev);
+	iounmap(ibdev->uar_map);
 
 err_uar:
 	mlx4_uar_free(dev, &ibdev->priv_uar);
@@ -2631,13 +2953,14 @@ int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
 void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
 {
 	if (!qpn ||
-	    dev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
+	    dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
 		return;
 
 	BUG_ON(qpn < dev->steer_qpn_base);
 
 	bitmap_release_region(dev->ib_uc_qpns_bitmap,
-			qpn - dev->steer_qpn_base, get_count_order(count));
+			      qpn - dev->steer_qpn_base,
+			      get_count_order(count));
 }
 
 int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
@@ -2660,8 +2983,8 @@ int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
 		ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
 		ib_spec->type = IB_FLOW_SPEC_IB;
 		ib_spec->size = sizeof(struct ib_flow_spec_ib);
-		ib_spec->val.l3_type_qpn = mqp->ibqp.qp_num;
-		ib_spec->mask.l3_type_qpn = MLX4_IB_FLOW_QPN_MASK;
+		/* Add an empty rule for IB L2 */
+		memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));
 
 		err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
 					    IB_FLOW_DOMAIN_NIC,
@@ -2677,60 +3000,31 @@ int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
 static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
 {
 	struct mlx4_ib_dev *ibdev = ibdev_ptr;
-	int p, j;
-	int dev_idx, ret;
+	int p;
 
-	if (ibdev->iboe.nb_inet.notifier_call) {
-		if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
-			pr_warn("failure unregistering notifier\n");
-		ibdev->iboe.nb_inet.notifier_call = NULL;
-	}
+	ibdev->ib_active = false;
+	flush_workqueue(wq);
 
 	mlx4_ib_close_sriov(ibdev);
-	sysfs_remove_group(&ibdev->ib_dev.dev.kobj, &diag_counters_group);
 	mlx4_ib_mad_cleanup(ibdev);
-
-	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
-		device_remove_file(&ibdev->ib_dev.dev,
-			mlx4_class_attributes[j]);
-	}
-
-
-	dev_idx = -1;
-	if (dr_active && !(ibdev->dev->flags & MLX4_FLAG_DEV_NUM_STR)) {
-		ret = sscanf(ibdev->ib_dev.name, "mlx4_%d", &dev_idx);
-		if (ret != 1)
-			dev_idx = -1;
-	}
 	ib_unregister_device(&ibdev->ib_dev);
-	if (dev_idx >= 0) {
-		spin_lock(&dev_num_str_lock);
-		bitmap_release_region(dev_num_str_bitmap, dev_idx, 0);
-		spin_unlock(&dev_num_str_lock);
-	}
-
-	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
-		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
-				ibdev->steer_qpn_count);
-		kfree(ibdev->ib_uc_qpns_bitmap);
-	}
-
+	mlx4_ib_diag_cleanup(ibdev);
 	if (ibdev->iboe.nb.notifier_call) {
 		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
 			pr_warn("failure unregistering notifier\n");
 		ibdev->iboe.nb.notifier_call = NULL;
 	}
-	iounmap(ibdev->priv_uar.map);
 
-	for (p = 0; p < ibdev->num_ports; ++p) {
-		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, p + 1) ==
-						IB_LINK_LAYER_ETHERNET) {
-			mlx4_counter_free(ibdev->dev,
-					  p + 1,
-					  ibdev->counters[p].counter_index);
-		}
+	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
+		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
+				      ibdev->steer_qpn_count);
+		kfree(ibdev->ib_uc_qpns_bitmap);
 	}
 
+	iounmap(ibdev->uar_map);
+	for (p = 0; p < ibdev->num_ports; ++p)
+		mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]);
+
 	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
 		mlx4_CLOSE_PORT(dev, p);
 
@@ -2747,45 +3041,191 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
 	struct mlx4_dev *dev = ibdev->dev;
 	int i;
 	unsigned long flags;
+	struct mlx4_active_ports actv_ports;
+	unsigned int ports;
+	unsigned int first_port;
 
 	if (!mlx4_is_master(dev))
 		return;
 
-	dm = kcalloc(dev->caps.num_ports, sizeof *dm, GFP_ATOMIC);
+	actv_ports = mlx4_get_active_ports(dev, slave);
+	ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
+	first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
+
+	dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
 	if (!dm) {
 		pr_err("failed to allocate memory for tunneling qp update\n");
-		goto out;
+		return;
 	}
 
-	for (i = 0; i < dev->caps.num_ports; i++) {
+	for (i = 0; i < ports; i++) {
 		dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
 		if (!dm[i]) {
 			pr_err("failed to allocate memory for tunneling qp update work struct\n");
-			for (i = 0; i < dev->caps.num_ports; i++) {
-				if (dm[i])
-					kfree(dm[i]);
-			}
+			while (--i >= 0)
+				kfree(dm[i]);
 			goto out;
 		}
-	}
-	/* initialize or tear down tunnel QPs for the slave */
-	for (i = 0; i < dev->caps.num_ports; i++) {
 		INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
-		dm[i]->port = i + 1;
+		dm[i]->port = first_port + i + 1;
 		dm[i]->slave = slave;
 		dm[i]->do_init = do_init;
 		dm[i]->dev = ibdev;
-		spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
-		if (!ibdev->sriov.is_going_down)
+	}
+	/* initialize or tear down tunnel QPs for the slave */
+	spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
+	if (!ibdev->sriov.is_going_down) {
+		for (i = 0; i < ports; i++)
 			queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
 		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
+	} else {
+		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
+		for (i = 0; i < ports; i++)
+			kfree(dm[i]);
 	}
 out:
-	if (dm)
-		kfree(dm);
+	kfree(dm);
 	return;
 }
 
+static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
+{
+	struct mlx4_ib_qp *mqp;
+	unsigned long flags_qp;
+	unsigned long flags_cq;
+	struct mlx4_ib_cq *send_mcq, *recv_mcq;
+	struct list_head    cq_notify_list;
+	struct mlx4_cq *mcq;
+	unsigned long flags;
+
+	pr_warn("mlx4_ib_handle_catas_error was started\n");
+	INIT_LIST_HEAD(&cq_notify_list);
+
+	/* Go over the qp list residing on that ibdev; sync with create/destroy qp. */
+	spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
+
+	list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
+		spin_lock_irqsave(&mqp->sq.lock, flags_qp);
+		if (mqp->sq.tail != mqp->sq.head) {
+			send_mcq = to_mcq(mqp->ibqp.send_cq);
+			spin_lock_irqsave(&send_mcq->lock, flags_cq);
+			if (send_mcq->mcq.comp &&
+			    mqp->ibqp.send_cq->comp_handler) {
+				if (!send_mcq->mcq.reset_notify_added) {
+					send_mcq->mcq.reset_notify_added = 1;
+					list_add_tail(&send_mcq->mcq.reset_notify,
+						      &cq_notify_list);
+				}
+			}
+			spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
+		}
+		spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
+		/* Now, handle the QP's receive queue */
+		spin_lock_irqsave(&mqp->rq.lock, flags_qp);
+		/* no handling is needed for SRQ */
+		if (!mqp->ibqp.srq) {
+			if (mqp->rq.tail != mqp->rq.head) {
+				recv_mcq = to_mcq(mqp->ibqp.recv_cq);
+				spin_lock_irqsave(&recv_mcq->lock, flags_cq);
+				if (recv_mcq->mcq.comp &&
+				    mqp->ibqp.recv_cq->comp_handler) {
+					if (!recv_mcq->mcq.reset_notify_added) {
+						recv_mcq->mcq.reset_notify_added = 1;
+						list_add_tail(&recv_mcq->mcq.reset_notify,
+							      &cq_notify_list);
+					}
+				}
+				spin_unlock_irqrestore(&recv_mcq->lock,
+						       flags_cq);
+			}
+		}
+		spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
+	}
+
+	list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
+		mcq->comp(mcq);
+	}
+	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
+	pr_warn("mlx4_ib_handle_catas_error ended\n");
+}
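
mlx4_ib_handle_catas_error() walks every QP and, for each CQ that still has unreaped work, queues the CQ for a completion callback exactly once, guarded by the reset_notify_added flag so a CQ shared by several QPs is not notified twice. A stripped-down sketch of that collect-once-then-notify pattern (illustrative only, no locking):

#include <stdbool.h>
#include <stdio.h>

struct cq {
	const char *name;
	bool queued;		/* mirrors mcq.reset_notify_added */
};

/* Queue a CQ for notification at most once, even if several QPs share it. */
static void collect(struct cq **list, int *n, struct cq *cq)
{
	if (cq->queued)
		return;
	cq->queued = true;
	list[(*n)++] = cq;
}

int main(void)
{
	struct cq send_cq = { "send", false }, recv_cq = { "recv", false };
	struct cq *notify[4];
	int n = 0;

	collect(notify, &n, &send_cq);
	collect(notify, &n, &recv_cq);
	collect(notify, &n, &send_cq);	/* shared CQ: not queued twice */

	for (int i = 0; i < n; i++)
		printf("notify %s cq\n", notify[i]->name);
	return 0;
}
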
+
+static void handle_bonded_port_state_event(struct work_struct *work)
+{
+	struct ib_event_work *ew =
+		container_of(work, struct ib_event_work, work);
+	struct mlx4_ib_dev *ibdev = ew->ib_dev;
+	enum ib_port_state bonded_port_state = IB_PORT_NOP;
+	int i;
+	struct ib_event ibev;
+
+	kfree(ew);
+	spin_lock_bh(&ibdev->iboe.lock);
+	for (i = 0; i < MLX4_MAX_PORTS; ++i) {
+		struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
+		enum ib_port_state curr_port_state;
+
+		if (!curr_netdev)
+			continue;
+
+		curr_port_state =
+			(netif_running(curr_netdev) &&
+			 netif_carrier_ok(curr_netdev)) ?
+			IB_PORT_ACTIVE : IB_PORT_DOWN;
+
+		bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
+			curr_port_state : IB_PORT_ACTIVE;
+	}
+	spin_unlock_bh(&ibdev->iboe.lock);
+
+	ibev.device = &ibdev->ib_dev;
+	ibev.element.port_num = 1;
+	ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
+		IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
+
+	ib_dispatch_event(&ibev);
+}
+
+void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port)
+{
+	u64 sl2vl;
+	int err;
+
+	err = mlx4_ib_query_sl2vl(&mdev->ib_dev, port, &sl2vl);
+	if (err) {
+		pr_err("Unable to get current sl to vl mapping for port %d.  Using all zeroes (%d)\n",
+		       port, err);
+		sl2vl = 0;
+	}
+	atomic64_set(&mdev->sl2vl[port - 1], sl2vl);
+}
+
+static void ib_sl2vl_update_work(struct work_struct *work)
+{
+	struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
+	struct mlx4_ib_dev *mdev = ew->ib_dev;
+	int port = ew->port;
+
+	mlx4_ib_sl2vl_update(mdev, port);
+
+	kfree(ew);
+}
+
+void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
+				     int port)
+{
+	struct ib_event_work *ew;
+
+	ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
+	if (ew) {
+		INIT_WORK(&ew->work, ib_sl2vl_update_work);
+		ew->port = port;
+		ew->ib_dev = ibdev;
+		queue_work(wq, &ew->work);
+	} else {
+		pr_err("failed to allocate memory for sl2vl update work\n");
+	}
+}
+
 static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
 			  enum mlx4_dev_event event, unsigned long param)
 {
@@ -2795,6 +3235,18 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
 	struct ib_event_work *ew;
 	int p = 0;
 
+	if (mlx4_is_bonded(dev) &&
+	    ((event == MLX4_DEV_EVENT_PORT_UP) ||
+	    (event == MLX4_DEV_EVENT_PORT_DOWN))) {
+		ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
+		if (!ew)
+			return;
+		INIT_WORK(&ew->work, handle_bonded_port_state_event);
+		ew->ib_dev = ibdev;
+		queue_work(wq, &ew->work);
+		return;
+	}
+
 	if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
 		eqe = (struct mlx4_eqe *)param;
 	else
@@ -2804,27 +3256,28 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
 	case MLX4_DEV_EVENT_PORT_UP:
 		if (p > ibdev->num_ports)
 			return;
-		if (mlx4_is_master(dev) &&
+		if (!mlx4_is_slave(dev) &&
 		    rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
 			IB_LINK_LAYER_INFINIBAND) {
-			mlx4_ib_invalidate_all_guid_record(ibdev, p);
+			if (mlx4_is_master(dev))
+				mlx4_ib_invalidate_all_guid_record(ibdev, p);
+			if (ibdev->dev->flags & MLX4_FLAG_SECURE_HOST &&
+			    !(ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT))
+				mlx4_sched_ib_sl2vl_update_work(ibdev, p);
 		}
-		mlx4_ib_info((struct ib_device *) ibdev_ptr,
-			     "Port %d logical link is up\n", p);
 		ibev.event = IB_EVENT_PORT_ACTIVE;
 		break;
 
 	case MLX4_DEV_EVENT_PORT_DOWN:
 		if (p > ibdev->num_ports)
 			return;
-		mlx4_ib_info((struct ib_device *) ibdev_ptr,
-			     "Port %d logical link is down\n", p);
 		ibev.event = IB_EVENT_PORT_ERR;
 		break;
 
 	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
 		ibdev->ib_active = false;
 		ibev.event = IB_EVENT_DEVICE_FATAL;
+		mlx4_ib_handle_catas_error(ibdev);
 		break;
 
 	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
@@ -2847,9 +3300,31 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
 	case MLX4_DEV_EVENT_SLAVE_INIT:
 		/* here, p is the slave id */
 		do_slave_init(ibdev, p, 1);
+		if (mlx4_is_master(dev)) {
+			int i;
+
+			for (i = 1; i <= ibdev->num_ports; i++) {
+				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
+					== IB_LINK_LAYER_INFINIBAND)
+					mlx4_ib_slave_alias_guid_event(ibdev,
+								       p, i,
+								       1);
+			}
+		}
 		return;
 
 	case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
+		if (mlx4_is_master(dev)) {
+			int i;
+
+			for (i = 1; i <= ibdev->num_ports; i++) {
+				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
+					== IB_LINK_LAYER_INFINIBAND)
+					mlx4_ib_slave_alias_guid_event(ibdev,
+								       p, i,
+								       0);
+			}
+		}
 		/* here, p is the slave id */
 		do_slave_init(ibdev, p, 0);
 		return;
@@ -2859,7 +3334,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
 	}
 
 	ibev.device	      = ibdev_ptr;
-	ibev.element.port_num = (u8) p;
+	ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;
 
 	ib_dispatch_event(&ibev);
 }
@@ -2868,22 +3343,21 @@ static struct mlx4_interface mlx4_ib_interface = {
 	.add		= mlx4_ib_add,
 	.remove		= mlx4_ib_remove,
 	.event		= mlx4_ib_event,
-	.protocol	= MLX4_PROT_IB_IPV6
+	.protocol	= MLX4_PROT_IB_IPV6,
+	.flags		= MLX4_INTFF_BONDING
 };
 
 static int __init mlx4_ib_init(void)
 {
 	int err;
 
-	wq = create_singlethread_workqueue("mlx4_ib");
+	wq = alloc_ordered_workqueue("mlx4_ib", WQ_MEM_RECLAIM);
 	if (!wq)
 		return -ENOMEM;
 
 	err = mlx4_ib_mcg_init();
 	if (err)
-		goto clean_proc;
-
-	init_dev_assign();
+		goto clean_wq;
 
 	err = mlx4_register_interface(&mlx4_ib_interface);
 	if (err)
@@ -2894,7 +3368,7 @@ static int __init mlx4_ib_init(void)
 clean_mcg:
 	mlx4_ib_mcg_destroy();
 
-clean_proc:
+clean_wq:
 	destroy_workqueue(wq);
 	return err;
 }
@@ -2904,8 +3378,6 @@ static void __exit mlx4_ib_cleanup(void)
 	mlx4_unregister_interface(&mlx4_ib_interface);
 	mlx4_ib_mcg_destroy();
 	destroy_workqueue(wq);
-
-	kfree(dev_num_str_bitmap);
 }
 
 module_init_order(mlx4_ib_init, SI_ORDER_MIDDLE);
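The new event handlers in this file (handle_bonded_port_state_event(), ib_sl2vl_update_work(), and the tunnel-QP update path) all follow the same deferred-work pattern: the mlx4 event callback may run in atomic context, so it allocates a small work item with GFP_ATOMIC, queues it on the driver's ordered workqueue (now created with alloc_ordered_workqueue(..., WQ_MEM_RECLAIM) instead of create_singlethread_workqueue()), and the work handler frees the item once the sleepable part is done. A minimal sketch of that pattern, with purely illustrative names, is:

#include <linux/workqueue.h>
#include <linux/slab.h>

/* Illustrative only; the driver's real work items carry an ib_dev pointer. */
struct example_event_work {
	struct work_struct work;
	int port;
};

static void example_handle_event(struct work_struct *work)
{
	struct example_event_work *ew =
		container_of(work, struct example_event_work, work);

	/* sleepable processing of ew->port goes here */

	kfree(ew);
}

static void example_schedule_event(struct workqueue_struct *wq, int port)
{
	struct example_event_work *ew;

	ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
	if (!ew)
		return;		/* drop the event, as the driver does on allocation failure */

	INIT_WORK(&ew->work, example_handle_event);
	ew->port = port;
	queue_work(wq, &ew->work);
}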
diff --git a/sys/dev/mlx4/mlx4_ib/mlx4_ib_mcg.c b/sys/dev/mlx4/mlx4_ib/mlx4_ib_mcg.c
index 391c3eeeef4b..45ead56cd410 100644
--- a/sys/dev/mlx4/mlx4_ib/mlx4_ib_mcg.c
+++ b/sys/dev/mlx4/mlx4_ib/mlx4_ib_mcg.c
@@ -51,6 +51,10 @@
 	pr_warn("%s-%d: %16s (port %d): WARNING: " format, __func__, __LINE__,\
 	(group)->name, group->demux->port, ## arg)
 
+#define mcg_debug_group(group, format, arg...) \
+	pr_debug("%s-%d: %16s (port %d): WARNING: " format, __func__, __LINE__,\
+		 (group)->name, (group)->demux->port, ## arg)
+
 #define mcg_error_group(group, format, arg...) \
 	pr_err("  %16s: " format, (group)->name, ## arg)
 
@@ -92,7 +96,7 @@ struct ib_sa_mcmember_data {
 	u8		scope_join_state;
 	u8		proxy_join;
 	u8		reserved[2];
-};
+} __packed __aligned(4);
 
 struct mcast_group {
 	struct ib_sa_mcmember_data rec;
@@ -206,17 +210,19 @@ static int send_mad_to_wire(struct mlx4_ib_demux_ctx *ctx, struct ib_mad *mad)
 {
 	struct mlx4_ib_dev *dev = ctx->dev;
 	struct ib_ah_attr	ah_attr;
+	unsigned long flags;
 
-	spin_lock(&dev->sm_lock);
+	spin_lock_irqsave(&dev->sm_lock, flags);
 	if (!dev->sm_ah[ctx->port - 1]) {
 		/* port is not yet Active, sm_ah not ready */
-		spin_unlock(&dev->sm_lock);
+		spin_unlock_irqrestore(&dev->sm_lock, flags);
 		return -EAGAIN;
 	}
 	mlx4_ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);
-	spin_unlock(&dev->sm_lock);
-	return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev), ctx->port,
-				    IB_QPT_GSI, 0, 1, IB_QP1_QKEY, &ah_attr, 0, mad);
+	spin_unlock_irqrestore(&dev->sm_lock, flags);
+	return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev),
+				    ctx->port, IB_QPT_GSI, 0, 1, IB_QP1_QKEY,
+				    &ah_attr, NULL, 0xffff, mad);
 }
 
 static int send_mad_to_slave(int slave, struct mlx4_ib_demux_ctx *ctx,
@@ -483,7 +489,7 @@ static u8 get_leave_state(struct mcast_group *group)
 		if (!group->members[i])
 			leave_state |= (1 << i);
 
-	return leave_state & (group->rec.scope_join_state & 7);
+	return leave_state & (group->rec.scope_join_state & 0xf);
 }
 
 static int join_group(struct mcast_group *group, int slave, u8 join_mask)
@@ -558,8 +564,8 @@ static void mlx4_ib_mcg_timeout_handler(struct work_struct *work)
 		} else
 			mcg_warn_group(group, "DRIVER BUG\n");
 	} else if (group->state == MCAST_LEAVE_SENT) {
-		if (group->rec.scope_join_state & 7)
-			group->rec.scope_join_state &= 0xf8;
+		if (group->rec.scope_join_state & 0xf)
+			group->rec.scope_join_state &= 0xf0;
 		group->state = MCAST_IDLE;
 		mutex_unlock(&group->lock);
 		if (release_group(group, 1))
@@ -599,7 +605,7 @@ static int handle_leave_req(struct mcast_group *group, u8 leave_mask,
 static int handle_join_req(struct mcast_group *group, u8 join_mask,
 			   struct mcast_req *req)
 {
-	u8 group_join_state = group->rec.scope_join_state & 7;
+	u8 group_join_state = group->rec.scope_join_state & 0xf;
 	int ref = 0;
 	u16 status;
 	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
@@ -685,11 +691,11 @@ static void mlx4_ib_mcg_work_handler(struct work_struct *work)
 			u8 cur_join_state;
 
 			resp_join_state = ((struct ib_sa_mcmember_data *)
-						group->response_sa_mad.data)->scope_join_state & 7;
-			cur_join_state = group->rec.scope_join_state & 7;
+						group->response_sa_mad.data)->scope_join_state & 0xf;
+			cur_join_state = group->rec.scope_join_state & 0xf;
 
 			if (method == IB_MGMT_METHOD_GET_RESP) {
-				/* successful join */
+				/* successful join */
 				if (!cur_join_state && resp_join_state)
 					--rc;
 			} else if (!resp_join_state)
@@ -705,7 +711,7 @@ process_requests:
 		req = list_first_entry(&group->pending_list, struct mcast_req,
 				       group_list);
 		sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
-		req_join_state = sa_data->scope_join_state & 0x7;
+		req_join_state = sa_data->scope_join_state & 0xf;
 
 		/* For a leave request, we will immediately answer the VF, and
 		 * update our internal counters. The actual leave will be sent
@@ -742,14 +748,11 @@ static struct mcast_group *search_relocate_mgid0_group(struct mlx4_ib_demux_ctx
 						       __be64 tid,
 						       union ib_gid *new_mgid)
 {
-	struct mcast_group *group = NULL, *cur_group;
+	struct mcast_group *group = NULL, *cur_group, *n;
 	struct mcast_req *req;
-	struct list_head *pos;
-	struct list_head *n;
 
 	mutex_lock(&ctx->mcg_table_lock);
-	list_for_each_safe(pos, n, &ctx->mcg_mgid0_list) {
-		group = list_entry(pos, struct mcast_group, mgid0_list);
+	list_for_each_entry_safe(group, n, &ctx->mcg_mgid0_list, mgid0_list) {
 		mutex_lock(&group->lock);
 		if (group->last_req_tid == tid) {
 			if (memcmp(new_mgid, &mgid0, sizeof mgid0)) {
@@ -963,8 +966,8 @@ int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port,
 		mutex_lock(&group->lock);
 		if (group->func[slave].num_pend_reqs > MAX_PEND_REQS_PER_FUNC) {
 			mutex_unlock(&group->lock);
-			mcg_warn_group(group, "Port %d, Func %d has too many pending requests (%d), dropping\n",
-				       port, slave, MAX_PEND_REQS_PER_FUNC);
+			mcg_debug_group(group, "Port %d, Func %d has too many pending requests (%d), dropping\n",
+					port, slave, MAX_PEND_REQS_PER_FUNC);
 			release_group(group, 0);
 			kfree(req);
 			return -ENOMEM;
@@ -1046,7 +1049,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
 
 	atomic_set(&ctx->tid, 0);
 	sprintf(name, "mlx4_ib_mcg%d", ctx->port);
-	ctx->mcg_wq = create_singlethread_workqueue(name);
+	ctx->mcg_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
 	if (!ctx->mcg_wq)
 		return -ENOMEM;
 
@@ -1247,7 +1250,7 @@ void clean_vf_mcast(struct mlx4_ib_demux_ctx *ctx, int slave)
 
 int mlx4_ib_mcg_init(void)
 {
-	clean_wq = create_singlethread_workqueue("mlx4_ib_mcg");
+	clean_wq = alloc_ordered_workqueue("mlx4_ib_mcg", WQ_MEM_RECLAIM);
 	if (!clean_wq)
 		return -ENOMEM;
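In the changes above, struct ib_sa_mcmember_data is now declared __packed __aligned(4), and every reader of scope_join_state widens its mask from 0x7 to 0xf. That byte packs the multicast scope in the upper nibble and the join state in the lower nibble; masking with 0xf preserves all four join-state bits (including the send-only full-member bit) instead of silently dropping the fourth one. Illustrative helpers, not part of the driver, showing how the byte splits:

#include <linux/types.h>

static inline u8 mcmember_scope(u8 scope_join_state)
{
	return scope_join_state >> 4;		/* upper nibble: address scope */
}

static inline u8 mcmember_join_state(u8 scope_join_state)
{
	return scope_join_state & 0xf;		/* lower nibble: join-state bits */
}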
 
diff --git a/sys/dev/mlx4/mlx4_ib/mlx4_ib_mr.c b/sys/dev/mlx4/mlx4_ib/mlx4_ib_mr.c
index f531a03a3681..aa0723aa1dd2 100644
--- a/sys/dev/mlx4/mlx4_ib/mlx4_ib_mr.c
+++ b/sys/dev/mlx4/mlx4_ib/mlx4_ib_mr.c
@@ -35,6 +35,8 @@
 #include <linux/module.h>
 #include <linux/sched.h>
 
+#include <asm/atomic64.h>
+
 #include "mlx4_ib.h"
 
 static u32 convert_access(int acc)
@@ -43,71 +45,25 @@ static u32 convert_access(int acc)
 	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX4_PERM_REMOTE_WRITE : 0) |
 	       (acc & IB_ACCESS_REMOTE_READ   ? MLX4_PERM_REMOTE_READ  : 0) |
 	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX4_PERM_LOCAL_WRITE  : 0) |
-	       (acc & IB_ACCESS_MW_BIND       ? MLX4_PERM_BIND_MW      : 0) |
+	       (acc & IB_ACCESS_MW_BIND	      ? MLX4_PERM_BIND_MW      : 0) |
 	       MLX4_PERM_LOCAL_READ;
 }
-/* No suuport for Shared MR feature */
-#if 0
-static ssize_t shared_mr_proc_read(struct file *file,
-			  char __user *buffer,
-			  size_t len,
-			  loff_t *offset)
+
+static enum mlx4_mw_type to_mlx4_type(enum ib_mw_type type)
 {
-
-	return -ENOSYS;
-
+	switch (type) {
+	case IB_MW_TYPE_1:	return MLX4_MW_TYPE_1;
+	case IB_MW_TYPE_2:	return MLX4_MW_TYPE_2;
+	default:		return -1;
+	}
 }
 
-static ssize_t shared_mr_proc_write(struct file *file,
-			   const char __user *buffer,
-			   size_t len,
-			   loff_t *offset)
-{
-
-	return -ENOSYS;
-}
-
-static int shared_mr_mmap(struct file *filep, struct vm_area_struct *vma)
-{
-
-	struct proc_dir_entry *pde = PDE(filep->f_path.dentry->d_inode);
-	struct mlx4_shared_mr_info *smr_info =
-		(struct mlx4_shared_mr_info *)pde->data;
-
-	/* Prevent any mapping not on start of area */
-	if (vma->vm_pgoff != 0)
-		return -EINVAL;
-
-	return ib_umem_map_to_vma(smr_info->umem,
-					vma);
-
-}
-
-static const struct file_operations shared_mr_proc_ops = {
-	.owner	= THIS_MODULE,
-	.read	= shared_mr_proc_read,
-	.write	= shared_mr_proc_write,
-	.mmap	= shared_mr_mmap
-};
-
-static mode_t convert_shared_access(int acc)
-{
-
-	return (acc & IB_ACCESS_SHARED_MR_USER_READ ? S_IRUSR       : 0) |
-	       (acc & IB_ACCESS_SHARED_MR_USER_WRITE  ? S_IWUSR : 0) |
-	       (acc & IB_ACCESS_SHARED_MR_GROUP_READ   ? S_IRGRP  : 0) |
-	       (acc & IB_ACCESS_SHARED_MR_GROUP_WRITE   ? S_IWGRP  : 0) |
-	       (acc & IB_ACCESS_SHARED_MR_OTHER_READ   ? S_IROTH  : 0) |
-	       (acc & IB_ACCESS_SHARED_MR_OTHER_WRITE   ? S_IWOTH  : 0);
-
-}
-#endif
 struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
 {
 	struct mlx4_ib_mr *mr;
 	int err;
 
-	mr = kzalloc(sizeof *mr, GFP_KERNEL);
+	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
 
@@ -134,420 +90,78 @@ err_free:
 	return ERR_PTR(err);
 }
 
-static int mlx4_ib_umem_write_mtt_block(struct mlx4_ib_dev *dev,
-						struct mlx4_mtt *mtt,
-						u64 mtt_size,
-						u64 mtt_shift,
-						u64 len,
-						u64 cur_start_addr,
-						u64 *pages,
-						int *start_index,
-						int *npages)
-{
-	int k;
-	int err = 0;
-	u64 mtt_entries;
-	u64 cur_end_addr = cur_start_addr + len;
-	u64 cur_end_addr_aligned = 0;
-
-	len += (cur_start_addr & (mtt_size-1ULL));
-	cur_end_addr_aligned = round_up(cur_end_addr, mtt_size);
-	len += (cur_end_addr_aligned - cur_end_addr);
-	if (len & (mtt_size-1ULL)) {
-		WARN(1 ,
-		"write_block: len %llx is not aligned to mtt_size %llx\n",
-			(unsigned long long)len, (unsigned long long)mtt_size);
-		return -EINVAL;
-	}
-
-
-	mtt_entries = (len >> mtt_shift);
-
-	/* Align the MTT start address to
-		the mtt_size.
-		Required to handle cases when the MR
-		starts in the middle of an MTT record.
-		Was not required in old code since
-		the physical addresses provided by
-		the dma subsystem were page aligned,
-		which was also the MTT size.
-	*/
-	cur_start_addr = round_down(cur_start_addr, mtt_size);
-	/* A new block is started ...*/
-	for (k = 0; k < mtt_entries; ++k) {
-		pages[*npages] = cur_start_addr + (mtt_size * k);
-		(*npages)++;
-		/*
-		 * Be friendly to mlx4_write_mtt() and
-		 * pass it chunks of appropriate size.
-		 */
-		if (*npages == PAGE_SIZE / sizeof(u64)) {
-			err = mlx4_write_mtt(dev->dev,
-					mtt, *start_index,
-					*npages, pages);
-			if (err)
-				return err;
-
-			(*start_index) += *npages;
-			*npages = 0;
-		}
-	}
-
-	return 0;
-}
-
 int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
 			   struct ib_umem *umem)
 {
 	u64 *pages;
-	u64 len = 0;
+	int i, k, entry;
+	int n;
+	int len;
 	int err = 0;
-	u64 mtt_size;
-	u64 cur_start_addr = 0;
-	u64 mtt_shift;
-	int start_index = 0;
-	int npages = 0;
 	struct scatterlist *sg;
-	int i;
 
 	pages = (u64 *) __get_free_page(GFP_KERNEL);
 	if (!pages)
 		return -ENOMEM;
 
-	mtt_shift = mtt->page_shift;
-	mtt_size = 1ULL << mtt_shift;
+	i = n = 0;
 
-	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
-			if (cur_start_addr + len ==
-			    sg_dma_address(sg)) {
-				/* still the same block */
-				len += sg_dma_len(sg);
-				continue;
+	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+		len = sg_dma_len(sg) >> mtt->page_shift;
+		for (k = 0; k < len; ++k) {
+			pages[i++] = sg_dma_address(sg) +
+				umem->page_size * k;
+			/*
+			 * Be friendly to mlx4_write_mtt() and
+			 * pass it chunks of appropriate size.
+			 */
+			if (i == PAGE_SIZE / sizeof (u64)) {
+				err = mlx4_write_mtt(dev->dev, mtt, n,
+						     i, pages);
+				if (err)
+					goto out;
+				n += i;
+				i = 0;
 			}
-			/* A new block is started ...*/
-			/* If len is malaligned, write an extra mtt entry to
-			    cover the misaligned area (round up the division)
-			*/
-			err = mlx4_ib_umem_write_mtt_block(dev,
-						mtt, mtt_size, mtt_shift,
-						len, cur_start_addr,
-						pages,
-						&start_index,
-						&npages);
-			if (err)
-				goto out;
-
-			cur_start_addr =
-				sg_dma_address(sg);
-			len = sg_dma_len(sg);
+		}
 	}
 
-	/* Handle the last block */
-	if (len > 0) {
-		/*  If len is malaligned, write an extra mtt entry to cover
-		     the misaligned area (round up the division)
-		*/
-		err = mlx4_ib_umem_write_mtt_block(dev,
-						mtt, mtt_size, mtt_shift,
-						len, cur_start_addr,
-						pages,
-						&start_index,
-						&npages);
-			if (err)
-				goto out;
-	}
-
-
-	if (npages)
-		err = mlx4_write_mtt(dev->dev, mtt, start_index, npages, pages);
+	if (i)
+		err = mlx4_write_mtt(dev->dev, mtt, n, i, pages);
 
 out:
 	free_page((unsigned long) pages);
 	return err;
 }
 
-static inline u64 alignment_of(u64 ptr)
-{
-	return ilog2(ptr & (~(ptr-1)));
-}
-
-static int mlx4_ib_umem_calc_block_mtt(u64 next_block_start,
-						u64 current_block_end,
-						u64 block_shift)
-{
-	/* Check whether the alignment of the new block
-	     is aligned as well as the previous block.
-	     Block address must start with zeros till size of entity_size.
-	*/
-	if ((next_block_start & ((1ULL << block_shift) - 1ULL)) != 0)
-		/* It is not as well aligned as the
-		previous block-reduce the mtt size
-		accordingly.
-		Here we take the last right bit
-		which is 1.
-		*/
-		block_shift = alignment_of(next_block_start);
-
-	/*  Check whether the alignment of the
-	     end of previous block - is it aligned
-	     as well as the start of the block
-	*/
-	if (((current_block_end) & ((1ULL << block_shift) - 1ULL)) != 0)
-		/* It is not as well aligned as
-		the start of the block - reduce the
-		mtt size accordingly.
-		*/
-		block_shift = alignment_of(current_block_end);
-
-	return block_shift;
-}
-
-/* Calculate optimal mtt size based on contiguous pages.
-* Function will return also the number of pages that are not aligned to the
-   calculated mtt_size to be added to total number
-    of pages. For that we should check the first chunk length & last chunk
-    length and if not aligned to mtt_size we should increment
-    the non_aligned_pages number.
-    All chunks in the middle already handled as part of mtt shift calculation
-    for both their start & end addresses.
-*/
-int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem,
-						u64 start_va,
-						int *num_of_mtts)
-{
-	u64 block_shift = MLX4_MAX_MTT_SHIFT;
-	u64 current_block_len = 0;
-	u64 current_block_start = 0;
-	u64 misalignment_bits;
-	u64 first_block_start = 0;
-	u64 last_block_end = 0;
-	u64 total_len = 0;
-	u64 last_block_aligned_end = 0;
-	u64 min_shift = ilog2(umem->page_size);
-	struct scatterlist *sg;
-	int i;
-	u64 next_block_start;
-	u64 current_block_end;
-
-	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
-		/* Initialization - save the first chunk start as
-		    the current_block_start - block means contiguous pages.
-		*/
-		if (current_block_len == 0 && current_block_start == 0) {
-			first_block_start = current_block_start =
-				sg_dma_address(sg);
-			/* Find the bits that are different between
-			    the physical address and the virtual
-			    address for the start of the MR.
-			*/
-			/* umem_get aligned the start_va to a page
-			   boundary. Therefore, we need to align the
-			   start va to the same boundary */
-			/* misalignment_bits is needed to handle the
-			   case of a single memory region. In this
-			   case, the rest of the logic will not reduce
-			   the block size.  If we use a block size
-			   which is bigger than the alignment of the
-			   misalignment bits, we might use the virtual
-			   page number instead of the physical page
-			   number, resulting in access to the wrong
-			   data. */
-			misalignment_bits =
-			(start_va & (~(((u64)(umem->page_size))-1ULL)))
-						^ current_block_start;
-			block_shift = min(alignment_of(misalignment_bits)
-				, block_shift);
-		}
-
-		/* Go over the scatter entries and check
-		     if they continue the previous scatter entry.
-		*/
-		next_block_start =
-			sg_dma_address(sg);
-		current_block_end = current_block_start
-			+ current_block_len;
-		/* If we have a split (non-contig.) between two block*/
-		if (current_block_end != next_block_start) {
-			block_shift = mlx4_ib_umem_calc_block_mtt(
-					next_block_start,
-					current_block_end,
-					block_shift);
-
-			/* If we reached the minimum shift for 4k
-			     page we stop the loop.
-			*/
-			if (block_shift <= min_shift)
-				goto end;
-
-			/* If not saved yet we are in first block -
-			     we save the length of first block to
-			     calculate the non_aligned_pages number at
-			*    the end.
-			*/
-			total_len += current_block_len;
-
-			/* Start a new block */
-			current_block_start = next_block_start;
-			current_block_len =
-				sg_dma_len(sg);
-			continue;
-		}
-		/* The scatter entry is another part of
-		     the current block, increase the block size
-		* An entry in the scatter can be larger than
-		4k (page) as of dma mapping
-		which merge some blocks together.
-		*/
-		current_block_len +=
-			sg_dma_len(sg);
-	}
-
-	/* Account for the last block in the total len */
-	total_len += current_block_len;
-	/* Add to the first block the misalignment that it suffers from.*/
-	total_len += (first_block_start & ((1ULL<<block_shift)-1ULL));
-	last_block_end = current_block_start+current_block_len;
-	last_block_aligned_end = round_up(last_block_end, 1<<block_shift);
-	total_len += (last_block_aligned_end - last_block_end);
-
-	WARN((total_len & ((1ULL<<block_shift)-1ULL)),
-		" misaligned total length detected (%llu, %llu)!",
-		(unsigned long long)total_len, (unsigned long long)block_shift);
-
-	*num_of_mtts = total_len >> block_shift;
-end:
-	if (block_shift < min_shift) {
-		/* If shift is less than the min we set a WARN and
-		     return the min shift.
-		*/
-		WARN(1,
-		"mlx4_ib_umem_calc_optimal_mtt_size - unexpected shift %lld\n",
-		(unsigned long long)block_shift);
-
-		block_shift = min_shift;
-	}
-	return block_shift;
-
-}
-
-/* No suuport for Shared MR */
-#if 0
-static int prepare_shared_mr(struct mlx4_ib_mr *mr, int access_flags, int mr_id)
-{
-
-	struct proc_dir_entry *mr_proc_entry;
-	mode_t mode = S_IFREG;
-	char name_buff[16];
-
-	mode |= convert_shared_access(access_flags);
-	sprintf(name_buff, "%X", mr_id);
-	mr->smr_info = kmalloc(sizeof(struct mlx4_shared_mr_info), GFP_KERNEL);
-	mr->smr_info->mr_id = mr_id;
-	mr->smr_info->umem = mr->umem;
-
-	mr_proc_entry = proc_create_data(name_buff, mode,
-				mlx4_mrs_dir_entry,
-				&shared_mr_proc_ops,
-				mr->smr_info);
-
-	if (!mr_proc_entry) {
-		pr_err("prepare_shared_mr failed via proc\n");
-		kfree(mr->smr_info);
-		return -ENODEV;
-	}
-
-	current_uid_gid(&(mr_proc_entry->uid), &(mr_proc_entry->gid));
-	mr_proc_entry->size = mr->umem->length;
-	return 0;
-
-}
-static int is_shared_mr(int access_flags)
-{
-	/* We should check whether IB_ACCESS_SHARED_MR_USER_READ or
-	other shared bits were turned on.
-	*/
-	return !!(access_flags & (IB_ACCESS_SHARED_MR_USER_READ |
-				IB_ACCESS_SHARED_MR_USER_WRITE |
-				IB_ACCESS_SHARED_MR_GROUP_READ |
-				IB_ACCESS_SHARED_MR_GROUP_WRITE |
-				IB_ACCESS_SHARED_MR_OTHER_READ |
-				IB_ACCESS_SHARED_MR_OTHER_WRITE));
-
-}
-
-static void free_smr_info(struct mlx4_ib_mr *mr)
-{
-	/* When master/parent shared mr is dereged there is
-	no ability to share this mr any more - its mr_id will be
-	returned to the kernel as part of ib_uverbs_dereg_mr
-	and may be allocated again as part of other reg_mr.
-	*/
-	char name_buff[16];
-
-	sprintf(name_buff, "%X", mr->smr_info->mr_id);
-	/* Remove proc entry is checking internally that no operation
-	was strated on that proc fs file and if in the middle
-	current process will wait till end of operation.
-	That's why no sync mechanism is needed when we release
-	below the shared umem.
-	*/
-	remove_proc_entry(name_buff, mlx4_mrs_dir_entry);
-	kfree(mr->smr_info);
-	mr->smr_info = NULL;
-}
-#endif
-
-static void mlx4_invalidate_umem(void *invalidation_cookie,
-				struct ib_umem *umem,
-				unsigned long addr, size_t size)
-{
-	struct mlx4_ib_mr *mr = (struct mlx4_ib_mr *)invalidation_cookie;
-
-	/* This function is called under client peer lock so its resources are race protected */
-	if (atomic_inc_return(&mr->invalidated) > 1) {
-		umem->invalidation_ctx->inflight_invalidation = 1;
-		goto end;
-	}
-
-	umem->invalidation_ctx->peer_callback = 1;
-	mlx4_mr_free(to_mdev(mr->ibmr.device)->dev, &mr->mmr);
-	ib_umem_release(umem);
-	complete(&mr->invalidation_comp);
-
-end:
-	return;
-
-}
-
 struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 				  u64 virt_addr, int access_flags,
-				  struct ib_udata *udata,
-				  int mr_id)
+				  struct ib_udata *udata)
 {
 	struct mlx4_ib_dev *dev = to_mdev(pd->device);
 	struct mlx4_ib_mr *mr;
 	int shift;
 	int err;
 	int n;
-	struct ib_peer_memory_client *ib_peer_mem;
 
-	mr = kzalloc(sizeof *mr, GFP_KERNEL);
+	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
 
-	mr->umem = ib_umem_get_ex(pd->uobject->context, start, length,
-			access_flags, 0, 1);
+	/* Force registering the memory as writable. */
+	/* Used for memory re-registration. HCA protects the access. */
+	mr->umem = ib_umem_get(pd->uobject->context, start, length,
+			       access_flags | IB_ACCESS_LOCAL_WRITE, 0);
 	if (IS_ERR(mr->umem)) {
 		err = PTR_ERR(mr->umem);
 		goto err_free;
 	}
 
-	ib_peer_mem = mr->umem->ib_peer_mem;
 	n = ib_umem_page_count(mr->umem);
-	shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start,
-		&n);
+	shift = ilog2(mr->umem->page_size);
+
 	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
-			 convert_access(access_flags), n, shift, &mr->mmr);
+			    convert_access(access_flags), n, shift, &mr->mmr);
 	if (err)
 		goto err_umem;
 
@@ -560,46 +174,9 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		goto err_mr;
 
 	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
-/* No suuport for Shared MR */
-#if 0
-	/* Check whether MR should be shared */
-	if (is_shared_mr(access_flags)) {
-	/* start address and length must be aligned to page size in order
-	    to map a full page and preventing leakage of data */
-		if (mr->umem->offset || (length & ~PAGE_MASK)) {
-		        err = -EINVAL;
-		        goto err_mr;
-		}
 
-		err = prepare_shared_mr(mr, access_flags, mr_id);
-		if (err)
-			goto err_mr;
-	}
-#endif
-	if (ib_peer_mem) {
-		if (access_flags & IB_ACCESS_MW_BIND) {
-			/* Prevent binding MW on peer clients.
-			* mlx4_invalidate_umem must be void,
-			* therefore, mlx4_mr_free should not fail
-			* when using peer clients. */
-			err = -ENOSYS;
-			pr_err("MW is not supported with peer memory client");
-			goto err_smr;
-		}
-		init_completion(&mr->invalidation_comp);
-		ib_umem_activate_invalidation_notifier(mr->umem,
-					mlx4_invalidate_umem, mr);
-	}
-
-	atomic_set(&mr->invalidated, 0);
 	return &mr->ibmr;
 
-err_smr:
-/* No suuport for Shared MR */
-#if 0
-	if (mr->smr_info)
-		free_smr_info(mr);
-#endif
 err_mr:
 	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);
 
@@ -612,44 +189,159 @@ err_free:
 	return ERR_PTR(err);
 }
 
+int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
+			  u64 start, u64 length, u64 virt_addr,
+			  int mr_access_flags, struct ib_pd *pd,
+			  struct ib_udata *udata)
+{
+	struct mlx4_ib_dev *dev = to_mdev(mr->device);
+	struct mlx4_ib_mr *mmr = to_mmr(mr);
+	struct mlx4_mpt_entry *mpt_entry;
+	struct mlx4_mpt_entry **pmpt_entry = &mpt_entry;
+	int err;
+
+	/* Since we synchronize this call and mlx4_ib_dereg_mr via uverbs,
+	 * we assume that the calls can't run concurrently. Otherwise, a
+	 * race exists.
+	 */
+	err =  mlx4_mr_hw_get_mpt(dev->dev, &mmr->mmr, &pmpt_entry);
+
+	if (err)
+		return err;
+
+	if (flags & IB_MR_REREG_PD) {
+		err = mlx4_mr_hw_change_pd(dev->dev, *pmpt_entry,
+					   to_mpd(pd)->pdn);
+
+		if (err)
+			goto release_mpt_entry;
+	}
+
+	if (flags & IB_MR_REREG_ACCESS) {
+		err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
+					       convert_access(mr_access_flags));
+
+		if (err)
+			goto release_mpt_entry;
+	}
+
+	if (flags & IB_MR_REREG_TRANS) {
+		int shift;
+		int n;
+
+		mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
+		ib_umem_release(mmr->umem);
+		mmr->umem = ib_umem_get(mr->uobject->context, start, length,
+					mr_access_flags |
+					IB_ACCESS_LOCAL_WRITE,
+					0);
+		if (IS_ERR(mmr->umem)) {
+			err = PTR_ERR(mmr->umem);
+			/* Prevent mlx4_ib_dereg_mr from freeing an invalid pointer */
+			mmr->umem = NULL;
+			goto release_mpt_entry;
+		}
+		n = ib_umem_page_count(mmr->umem);
+		shift = ilog2(mmr->umem->page_size);
+
+		err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
+					      virt_addr, length, n, shift,
+					      *pmpt_entry);
+		if (err) {
+			ib_umem_release(mmr->umem);
+			goto release_mpt_entry;
+		}
+		mmr->mmr.iova       = virt_addr;
+		mmr->mmr.size       = length;
+
+		err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem);
+		if (err) {
+			mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
+			ib_umem_release(mmr->umem);
+			goto release_mpt_entry;
+		}
+	}
+
+	/* If we couldn't transfer the MR to the HCA, just remember to
+	 * return a failure. But dereg_mr will free the resources.
+	 */
+	err = mlx4_mr_hw_write_mpt(dev->dev, &mmr->mmr, pmpt_entry);
+	if (!err && flags & IB_MR_REREG_ACCESS)
+		mmr->mmr.access = mr_access_flags;
+
+release_mpt_entry:
+	mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry);
+
+	return err;
+}
+
+static int
+mlx4_alloc_priv_pages(struct ib_device *device,
+		      struct mlx4_ib_mr *mr,
+		      int max_pages)
+{
+	int ret;
+
+	/* Ensure that size is aligned to DMA cacheline
+	 * requirements.
+	 * max_pages is limited to MLX4_MAX_FAST_REG_PAGES
+	 * so page_map_size will never cross PAGE_SIZE.
+	 */
+	mr->page_map_size = roundup(max_pages * sizeof(u64),
+				    MLX4_MR_PAGES_ALIGN);
+
+	/* Prevent cross page boundary allocation. */
+	mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL);
+	if (!mr->pages)
+		return -ENOMEM;
+
+	mr->page_map = dma_map_single(device->dma_device, mr->pages,
+				      mr->page_map_size, DMA_TO_DEVICE);
+
+	if (dma_mapping_error(device->dma_device, mr->page_map)) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	return 0;
+
+err:
+	free_page((unsigned long)mr->pages);
+	return ret;
+}
+
+static void
+mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
+{
+	if (mr->pages) {
+		struct ib_device *device = mr->ibmr.device;
+
+		dma_unmap_single(device->dma_device, mr->page_map,
+				 mr->page_map_size, DMA_TO_DEVICE);
+		free_page((unsigned long)mr->pages);
+		mr->pages = NULL;
+	}
+}
+
 int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
 {
 	struct mlx4_ib_mr *mr = to_mmr(ibmr);
-	struct ib_umem *umem = mr->umem;
 	int ret;
 
-/* No suuport for Shared MR */
-#if 0
-	if (mr->smr_info)
-		free_smr_info(mr);
-#endif
-
-	if (atomic_inc_return(&mr->invalidated) > 1) {
-		wait_for_completion(&mr->invalidation_comp);
-		goto end;
-	}
+	mlx4_free_priv_pages(mr);
 
 	ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
-	if (ret) {
-		/* Error is not expected here, except when memory windows
-		* are bound to MR which is not supported with
-		* peer memory clients */
-		atomic_set(&mr->invalidated, 0);
+	if (ret)
 		return ret;
-	}
-
-	if (!umem)
-		goto end;
-
-	ib_umem_release(mr->umem);
-end:
-
+	if (mr->umem)
+		ib_umem_release(mr->umem);
 	kfree(mr);
 
 	return 0;
 }
 
-struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
+struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
+			       struct ib_udata *udata)
 {
 	struct mlx4_ib_dev *dev = to_mdev(pd->device);
 	struct mlx4_ib_mw *mw;
@@ -659,7 +351,8 @@ struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
 	if (!mw)
 		return ERR_PTR(-ENOMEM);
 
-	err = mlx4_mw_alloc(dev->dev, to_mpd(pd)->pdn, (enum mlx4_mw_type)type, &mw->mmw);
+	err = mlx4_mw_alloc(dev->dev, to_mpd(pd)->pdn,
+			    to_mlx4_type(type), &mw->mmw);
 	if (err)
 		goto err_free;
 
@@ -680,28 +373,6 @@ err_free:
 	return ERR_PTR(err);
 }
 
-int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
-		    struct ib_mw_bind *mw_bind)
-{
-	struct ib_send_wr  wr;
-	struct ib_send_wr *bad_wr;
-	int ret;
-
-	memset(&wr, 0, sizeof(wr));
-	wr.opcode               = IB_WR_BIND_MW;
-	wr.wr_id                = mw_bind->wr_id;
-	wr.send_flags           = mw_bind->send_flags;
-	wr.wr.bind_mw.mw        = mw;
-	wr.wr.bind_mw.bind_info = mw_bind->bind_info;
-	wr.wr.bind_mw.rkey      = ib_inc_rkey(mw->rkey);
-
-	ret = mlx4_ib_post_send(qp, &wr, &bad_wr);
-	if (!ret)
-		mw->rkey = wr.wr.bind_mw.rkey;
-
-	return ret;
-}
-
 int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
 {
 	struct mlx4_ib_mw *mw = to_mmw(ibmw);
@@ -712,85 +383,51 @@ int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
 	return 0;
 }
 
-struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
-					int max_page_list_len)
+struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
+			       enum ib_mr_type mr_type,
+			       u32 max_num_sg)
 {
 	struct mlx4_ib_dev *dev = to_mdev(pd->device);
 	struct mlx4_ib_mr *mr;
 	int err;
 
-	mr = kzalloc(sizeof *mr, GFP_KERNEL);
+	if (mr_type != IB_MR_TYPE_MEM_REG ||
+	    max_num_sg > MLX4_MAX_FAST_REG_PAGES)
+		return ERR_PTR(-EINVAL);
+
+	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
 
 	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, 0, 0, 0,
-			    max_page_list_len, 0, &mr->mmr);
+			    max_num_sg, 0, &mr->mmr);
 	if (err)
 		goto err_free;
 
+	err = mlx4_alloc_priv_pages(pd->device, mr, max_num_sg);
+	if (err)
+		goto err_free_mr;
+
+	mr->max_pages = max_num_sg;
+
 	err = mlx4_mr_enable(dev->dev, &mr->mmr);
 	if (err)
-		goto err_mr;
+		goto err_free_pl;
 
 	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
 	mr->umem = NULL;
 
 	return &mr->ibmr;
 
-err_mr:
+err_free_pl:
+	mlx4_free_priv_pages(mr);
+err_free_mr:
 	(void) mlx4_mr_free(dev->dev, &mr->mmr);
-
 err_free:
 	kfree(mr);
 	return ERR_PTR(err);
 }
 
-struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
-							       int page_list_len)
-{
-	struct mlx4_ib_dev *dev = to_mdev(ibdev);
-	struct mlx4_ib_fast_reg_page_list *mfrpl;
-	int size = page_list_len * sizeof (u64);
-
-	if (page_list_len > MLX4_MAX_FAST_REG_PAGES)
-		return ERR_PTR(-EINVAL);
-
-	mfrpl = kmalloc(sizeof *mfrpl, GFP_KERNEL);
-	if (!mfrpl)
-		return ERR_PTR(-ENOMEM);
-
-	mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL);
-	if (!mfrpl->ibfrpl.page_list)
-		goto err_free;
-
-	mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->pdev->dev,
-						     size, &mfrpl->map,
-						     GFP_KERNEL);
-	if (!mfrpl->mapped_page_list)
-		goto err_free;
-
-	WARN_ON(mfrpl->map & 0x3f);
-
-	return &mfrpl->ibfrpl;
-
-err_free:
-	kfree(mfrpl->ibfrpl.page_list);
-	kfree(mfrpl);
-	return ERR_PTR(-ENOMEM);
-}
-
-void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
-{
-	struct mlx4_ib_dev *dev = to_mdev(page_list->device);
-	struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
-	int size = page_list->max_page_list_len * sizeof (u64);
-
-	dma_free_coherent(&dev->dev->pdev->dev, size, mfrpl->mapped_page_list,
-			  mfrpl->map);
-	kfree(mfrpl->ibfrpl.page_list);
-	kfree(mfrpl);
-}
-
 struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
 				 struct ib_fmr_attr *fmr_attr)
 {
@@ -883,3 +520,34 @@ int mlx4_ib_fmr_dealloc(struct ib_fmr *ibfmr)
 
 	return err;
 }
+
+static int mlx4_set_page(struct ib_mr *ibmr, u64 addr)
+{
+	struct mlx4_ib_mr *mr = to_mmr(ibmr);
+
+	if (unlikely(mr->npages == mr->max_pages))
+		return -ENOMEM;
+
+	mr->pages[mr->npages++] = cpu_to_be64(addr | MLX4_MTT_FLAG_PRESENT);
+
+	return 0;
+}
+
+int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+		      unsigned int *sg_offset)
+{
+	struct mlx4_ib_mr *mr = to_mmr(ibmr);
+	int rc;
+
+	mr->npages = 0;
+
+	ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
+				   mr->page_map_size, DMA_TO_DEVICE);
+
+	rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page);
+
+	ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
+				      mr->page_map_size, DMA_TO_DEVICE);
+
+	return rc;
+}
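The registration rewrite above replaces the old fast-register page list with a per-MR page array: mlx4_ib_alloc_mr() takes one zeroed page for the array and DMA-maps it once (mlx4_alloc_priv_pages()), and mlx4_ib_map_mr_sg() syncs that map for the CPU, lets ib_sg_to_pages() invoke mlx4_set_page() for every page-sized chunk of the scatterlist, then syncs it back for the device. A simplified stand-alone model of the page-collection step (illustrative names only, not the kernel API):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* One DMA-contiguous segment, standing in for a scatterlist entry. */
struct seg { uint64_t dma_addr; size_t len; };

struct page_list {
	uint64_t pages[32];
	unsigned int npages;
	unsigned int max_pages;
};

/* Analogue of the set_page callback: append one page address, fail when full. */
static int set_page(struct page_list *pl, uint64_t addr)
{
	if (pl->npages == pl->max_pages)
		return -1;
	pl->pages[pl->npages++] = addr;
	return 0;
}

/* Cut each segment into page_size chunks and feed them to set_page(). */
static int segs_to_pages(struct page_list *pl, const struct seg *segs,
			 unsigned int nsegs, size_t page_size)
{
	unsigned int i;
	size_t off;

	for (i = 0; i < nsegs; i++)
		for (off = 0; off < segs[i].len; off += page_size)
			if (set_page(pl, segs[i].dma_addr + off))
				return -1;
	return 0;
}

int main(void)
{
	struct seg segs[] = { { 0x100000, 8192 }, { 0x300000, 4096 } };
	struct page_list pl = { .max_pages = 32 };

	if (segs_to_pages(&pl, segs, 2, 4096) == 0)
		printf("mapped %u pages\n", pl.npages);
	return 0;
}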
diff --git a/sys/dev/mlx4/mlx4_ib/mlx4_ib_qp.c b/sys/dev/mlx4/mlx4_ib/mlx4_ib_qp.c
index a1887a9c263e..98c1a43cea1d 100644
--- a/sys/dev/mlx4/mlx4_ib/mlx4_ib_qp.c
+++ b/sys/dev/mlx4/mlx4_ib/mlx4_ib_qp.c
@@ -35,20 +35,26 @@
 #include <linux/slab.h>
 #include <linux/netdevice.h>
 #include <linux/bitops.h>
+#include <linux/rcupdate.h>
+#include <linux/etherdevice.h>
 
 #include <rdma/ib_cache.h>
 #include <rdma/ib_pack.h>
 #include <rdma/ib_addr.h>
 #include <rdma/ib_mad.h>
 
+#include <dev/mlx4/cmd.h>
 #include <dev/mlx4/qp.h>
 #include <dev/mlx4/driver.h>
 #include <linux/io.h>
 
 #include "mlx4_ib.h"
-#include "user.h"
+#include <rdma/mlx4-abi.h>
 
-#define asm __asm
+static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq,
+			     struct mlx4_ib_cq *recv_cq);
+static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq,
+			       struct mlx4_ib_cq *recv_cq);
 
 enum {
 	MLX4_IB_ACK_REQ_FREQ	= 8,
@@ -83,6 +89,7 @@ struct mlx4_ib_sqp {
 	u32			send_psn;
 	struct ib_ud_header	ud_header;
 	u8			header_buf[MLX4_IB_UD_HEADER_SIZE];
+	struct ib_qp		*roce_v2_gsi;
 };
 
 enum {
@@ -95,6 +102,10 @@ enum {
 	MLX4_RAW_QP_MSGMAX	= 31,
 };
 
+#ifndef ETH_ALEN
+#define ETH_ALEN        6
+#endif
+
 static const __be32 mlx4_ib_opcode[] = {
 	[IB_WR_SEND]				= cpu_to_be32(MLX4_OPCODE_SEND),
 	[IB_WR_LSO]				= cpu_to_be32(MLX4_OPCODE_LSO),
@@ -106,25 +117,11 @@ static const __be32 mlx4_ib_opcode[] = {
 	[IB_WR_ATOMIC_FETCH_AND_ADD]		= cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
 	[IB_WR_SEND_WITH_INV]			= cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
 	[IB_WR_LOCAL_INV]			= cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
-	[IB_WR_FAST_REG_MR]			= cpu_to_be32(MLX4_OPCODE_FMR),
+	[IB_WR_REG_MR]				= cpu_to_be32(MLX4_OPCODE_FMR),
 	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS),
 	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA),
-	[IB_WR_BIND_MW]				= cpu_to_be32(
-							MLX4_OPCODE_BIND_MW),
 };
 
-#ifndef wc_wmb
-	#if defined(__i386__)
-		#define wc_wmb() asm volatile("lock; addl $0,0(%%esp) " ::: "memory")
-	#elif defined(__x86_64__)
-		#define wc_wmb() asm volatile("sfence" ::: "memory")
-	#elif defined(__ia64__)
-		#define wc_wmb() asm volatile("fwb" ::: "memory")
-	#else
-		#define wc_wmb() wmb()
-	#endif
-#endif
-
 static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
 {
 	return container_of(mqp, struct mlx4_ib_sqp, qp);
@@ -161,7 +158,10 @@ static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
 			}
 		}
 	}
-	return proxy_sqp;
+	if (proxy_sqp)
+		return 1;
+
+	return !!(qp->flags & MLX4_IB_ROCE_V2_GSI_QP);
 }
 
 /* used for INIT/CLOSE port logic */
@@ -421,7 +421,8 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 }
 
 static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
-			      enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp)
+			      enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp,
+			      bool shrink_wqe)
 {
 	int s;
 
@@ -479,7 +480,7 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 	 * We set WQE size to at least 64 bytes, this way stamping
 	 * invalidates each WQE.
 	 */
-	if (dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC &&
+	if (shrink_wqe && dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC &&
 	    qp->sq_signal_bits && BITS_PER_LONG == 64 &&
 	    type != MLX4_IB_QPT_SMI && type != MLX4_IB_QPT_GSI &&
 	    !(type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_SMI |
@@ -529,7 +530,8 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 	cap->max_send_sge = min(qp->sq.max_gs,
 				min(dev->dev->caps.max_sq_sg,
 				    dev->dev->caps.max_rq_sg));
-	qp->max_inline_data = cap->max_inline_data;
+	/* We don't support inline sends for kernel QPs (yet) */
+	cap->max_inline_data = 0;
 
 	return 0;
 }
@@ -573,9 +575,7 @@ static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
 			ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr,
 					  sizeof (struct mlx4_ib_proxy_sqp_hdr),
 					  DMA_FROM_DEVICE);
-		if (unlikely(ib_dma_mapping_error(dev,
-						  qp->sqp_proxy_rcv[i].map))) {
-			pr_warn("ib_dma_map_single failed\n");
+		if (ib_dma_mapping_error(dev, qp->sqp_proxy_rcv[i].map)) {
 			kfree(qp->sqp_proxy_rcv[i].addr);
 			goto err;
 		}
@@ -608,268 +608,49 @@ static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
 	kfree(qp->sqp_proxy_rcv);
 }
 
-static int init_qpg_parent(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *pqp,
-			   struct ib_qp_init_attr *attr, int *qpn)
+static int qp_has_rq(struct ib_qp_init_attr *attr)
 {
-	struct mlx4_ib_qpg_data *qpg_data;
-	int tss_num, rss_num;
-	int tss_align_num, rss_align_num;
-	int tss_base, rss_base = 0;
-	int err;
+	if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT)
+		return 0;
 
-	/* Parent is part of the TSS range (in SW TSS ARP is sent via parent) */
-	tss_num = 1 + attr->parent_attrib.tss_child_count;
-	tss_align_num = roundup_pow_of_two(tss_num);
-	rss_num = attr->parent_attrib.rss_child_count;
-	rss_align_num = roundup_pow_of_two(rss_num);
-
-	if (rss_num > 1) {
-		/* RSS is requested */
-		if (!(dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS))
-			return -ENOSYS;
-		if (rss_align_num > dev->dev->caps.max_rss_tbl_sz)
-			return -EINVAL;
-		/* We must work with power of two */
-		attr->parent_attrib.rss_child_count = rss_align_num;
-	}
-
-	qpg_data = kzalloc(sizeof *qpg_data, GFP_KERNEL);
-	if (!qpg_data)
-		return -ENOMEM;
-
-	if(pqp->flags & MLX4_IB_QP_NETIF)
-		err = mlx4_ib_steer_qp_alloc(dev, tss_align_num, &tss_base);
-	else
-		err = mlx4_qp_reserve_range(dev->dev, tss_align_num,
-				tss_align_num, &tss_base, MLX4_RESERVE_BF_QP);
-	if (err)
-		goto err1;
-
-	if (tss_num > 1) {
-		u32 alloc = BITS_TO_LONGS(tss_align_num)  * sizeof(long);
-		qpg_data->tss_bitmap = kzalloc(alloc, GFP_KERNEL);
-		if (qpg_data->tss_bitmap == NULL) {
-			err = -ENOMEM;
-			goto err2;
-		}
-		bitmap_fill(qpg_data->tss_bitmap, tss_num);
-		/* Note parent takes first index */
-		clear_bit(0, qpg_data->tss_bitmap);
-	}
-
-	if (rss_num > 1) {
-		u32 alloc = BITS_TO_LONGS(rss_align_num) * sizeof(long);
-		err = mlx4_qp_reserve_range(dev->dev, rss_align_num,
-					    1, &rss_base, 0);
-		if (err)
-			goto err3;
-		qpg_data->rss_bitmap = kzalloc(alloc, GFP_KERNEL);
-		if (qpg_data->rss_bitmap == NULL) {
-			err = -ENOMEM;
-			goto err4;
-		}
-		bitmap_fill(qpg_data->rss_bitmap, rss_align_num);
-	}
-
-	qpg_data->tss_child_count = attr->parent_attrib.tss_child_count;
-	qpg_data->rss_child_count = attr->parent_attrib.rss_child_count;
-	qpg_data->qpg_parent = pqp;
-	qpg_data->qpg_tss_mask_sz = ilog2(tss_align_num);
-	qpg_data->tss_qpn_base = tss_base;
-	qpg_data->rss_qpn_base = rss_base;
-
-	pqp->qpg_data = qpg_data;
-	*qpn = tss_base;
-
-	return 0;
-
-err4:
-	mlx4_qp_release_range(dev->dev, rss_base, rss_align_num);
-
-err3:
-	if (tss_num > 1)
-		kfree(qpg_data->tss_bitmap);
-
-err2:
-	if(pqp->flags & MLX4_IB_QP_NETIF)
-		mlx4_ib_steer_qp_free(dev, tss_base, tss_align_num);
-	else
-		mlx4_qp_release_range(dev->dev, tss_base, tss_align_num);
-
-err1:
-	kfree(qpg_data);
-	return err;
+	return !attr->srq;
 }
 
-static void free_qpg_parent(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *pqp)
+static int qp0_enabled_vf(struct mlx4_dev *dev, int qpn)
 {
-	struct mlx4_ib_qpg_data *qpg_data = pqp->qpg_data;
-	int align_num;
-
-	if (qpg_data->tss_child_count > 1)
-		kfree(qpg_data->tss_bitmap);
-
-	align_num = roundup_pow_of_two(1 + qpg_data->tss_child_count);
-	if(pqp->flags & MLX4_IB_QP_NETIF)
-		mlx4_ib_steer_qp_free(dev, qpg_data->tss_qpn_base, align_num);
-	else
-		mlx4_qp_release_range(dev->dev, qpg_data->tss_qpn_base, align_num);
-
-	if (qpg_data->rss_child_count > 1) {
-		kfree(qpg_data->rss_bitmap);
-		align_num = roundup_pow_of_two(qpg_data->rss_child_count);
-		mlx4_qp_release_range(dev->dev, qpg_data->rss_qpn_base,
-					align_num);
+	int i;
+	for (i = 0; i < dev->caps.num_ports; i++) {
+		if (qpn == dev->caps.qp0_proxy[i])
+			return !!dev->caps.qp0_qkey[i];
 	}
-
-	kfree(qpg_data);
-}
-
-static int alloc_qpg_qpn(struct ib_qp_init_attr *init_attr,
-			 struct mlx4_ib_qp *pqp, int *qpn)
-{
-	struct mlx4_ib_qp *mqp = to_mqp(init_attr->qpg_parent);
-	struct mlx4_ib_qpg_data *qpg_data = mqp->qpg_data;
-	u32 idx, old;
-
-	switch (init_attr->qpg_type) {
-	case IB_QPG_CHILD_TX:
-		if (qpg_data->tss_child_count == 0)
-			return -EINVAL;
-		do {
-			/* Parent took index 0 */
-			idx = find_first_bit(qpg_data->tss_bitmap,
-					     qpg_data->tss_child_count + 1);
-			if (idx >= qpg_data->tss_child_count + 1)
-				return -ENOMEM;
-			old = test_and_clear_bit(idx, qpg_data->tss_bitmap);
-		} while (old == 0);
-		idx += qpg_data->tss_qpn_base;
-		break;
-	case IB_QPG_CHILD_RX:
-		if (qpg_data->rss_child_count == 0)
-			return -EINVAL;
-		do {
-			idx = find_first_bit(qpg_data->rss_bitmap,
-					     qpg_data->rss_child_count);
-			if (idx >= qpg_data->rss_child_count)
-				return -ENOMEM;
-			old = test_and_clear_bit(idx, qpg_data->rss_bitmap);
-		} while (old == 0);
-		idx += qpg_data->rss_qpn_base;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	pqp->qpg_data = qpg_data;
-	*qpn = idx;
-
 	return 0;
 }
 
-static void free_qpg_qpn(struct mlx4_ib_qp *mqp, int qpn)
+static void mlx4_ib_free_qp_counter(struct mlx4_ib_dev *dev,
+				    struct mlx4_ib_qp *qp)
 {
-	struct mlx4_ib_qpg_data *qpg_data = mqp->qpg_data;
+	mutex_lock(&dev->counters_table[qp->port - 1].mutex);
+	mlx4_counter_free(dev->dev, qp->counter_index->index);
+	list_del(&qp->counter_index->list);
+	mutex_unlock(&dev->counters_table[qp->port - 1].mutex);
 
-	switch (mqp->qpg_type) {
-	case IB_QPG_CHILD_TX:
-		/* Do range check */
-		qpn -= qpg_data->tss_qpn_base;
-		set_bit(qpn, qpg_data->tss_bitmap);
-		break;
-	case IB_QPG_CHILD_RX:
-		qpn -= qpg_data->rss_qpn_base;
-		set_bit(qpn, qpg_data->rss_bitmap);
-		break;
-	default:
-		/* error */
-		pr_warn("wrong qpg type (%d)\n", mqp->qpg_type);
-		break;
-	}
-}
-
-static int alloc_qpn_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
-			    struct ib_qp_init_attr *attr, int *qpn)
-{
-	int err = 0;
-
-	switch (attr->qpg_type) {
-	case IB_QPG_NONE:
-		/* Raw packet QPNs may not have bits 6,7 set in their qp_num;
-		 * otherwise, the WQE BlueFlame setup flow wrongly causes
-		 * VLAN insertion. */
-		if (attr->qp_type == IB_QPT_RAW_PACKET) {
-			err = mlx4_qp_reserve_range(dev->dev, 1, 1, qpn,
-						    MLX4_RESERVE_BF_QP);
-		} else {
-			if(qp->flags & MLX4_IB_QP_NETIF)
-				err = mlx4_ib_steer_qp_alloc(dev, 1, qpn);
-			else
-				err = mlx4_qp_reserve_range(dev->dev, 1, 1, qpn, 0);
-		}
-		break;
-	case IB_QPG_PARENT:
-		err = init_qpg_parent(dev, qp, attr, qpn);
-		break;
-	case IB_QPG_CHILD_TX:
-	case IB_QPG_CHILD_RX:
-		err = alloc_qpg_qpn(attr, qp, qpn);
-		break;
-	default:
-		qp->qpg_type = IB_QPG_NONE;
-		err = -EINVAL;
-		break;
-	}
-	if (err)
-		return err;
-	qp->qpg_type = attr->qpg_type;
-	return 0;
-}
-
-static void free_qpn_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
-			enum ib_qpg_type qpg_type, int qpn)
-{
-	switch (qpg_type) {
-	case IB_QPG_NONE:
-		if (qp->flags & MLX4_IB_QP_NETIF)
-			mlx4_ib_steer_qp_free(dev, qpn, 1);
-		else
-			mlx4_qp_release_range(dev->dev, qpn, 1);
-		break;
-	case IB_QPG_PARENT:
-		free_qpg_parent(dev, qp);
-		break;
-	case IB_QPG_CHILD_TX:
-	case IB_QPG_CHILD_RX:
-		free_qpg_qpn(qp, qpn);
-		break;
-	default:
-		break;
-	}
-}
-
-/* Revert allocation on create_qp_common */
-static void unalloc_qpn_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
-			       struct ib_qp_init_attr *attr, int qpn)
-{
-	free_qpn_common(dev, qp, attr->qpg_type, qpn);
-}
-
-static void release_qpn_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
-{
-	free_qpn_common(dev, qp, qp->qpg_type, qp->mqp.qpn);
+	kfree(qp->counter_index);
+	qp->counter_index = NULL;
 }
 
 static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 			    struct ib_qp_init_attr *init_attr,
-			    struct ib_udata *udata, int sqpn, struct mlx4_ib_qp **caller_qp)
+			    struct ib_udata *udata, int sqpn, struct mlx4_ib_qp **caller_qp,
+			    gfp_t gfp)
 {
 	int qpn;
 	int err;
+	struct ib_qp_cap backup_cap;
 	struct mlx4_ib_sqp *sqp;
 	struct mlx4_ib_qp *qp;
 	enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
+	struct mlx4_ib_cq *mcq;
+	unsigned long flags;
 
 	/* When tunneling special qps, we use a plain UD qp */
 	if (sqpn) {
@@ -878,10 +659,13 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 		     !(init_attr->create_flags & MLX4_IB_SRIOV_SQP))) {
 			if (init_attr->qp_type == IB_QPT_GSI)
 				qp_type = MLX4_IB_QPT_PROXY_GSI;
-			else if (mlx4_is_master(dev->dev))
-				qp_type = MLX4_IB_QPT_PROXY_SMI_OWNER;
-			else
-				qp_type = MLX4_IB_QPT_PROXY_SMI;
+			else {
+				if (mlx4_is_master(dev->dev) ||
+				    qp0_enabled_vf(dev->dev, sqpn))
+					qp_type = MLX4_IB_QPT_PROXY_SMI_OWNER;
+				else
+					qp_type = MLX4_IB_QPT_PROXY_SMI;
+			}
 		}
 		qpn = sqpn;
 		/* add extra sg entry for tunneling */
@@ -896,7 +680,9 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 			return -EINVAL;
 		if (tnl_init->proxy_qp_type == IB_QPT_GSI)
 			qp_type = MLX4_IB_QPT_TUN_GSI;
-		else if (tnl_init->slave == mlx4_master_func_num(dev->dev))
+		else if (tnl_init->slave == mlx4_master_func_num(dev->dev) ||
+			 mlx4_vf_smi_enabled(dev->dev, tnl_init->slave,
+					     tnl_init->port))
 			qp_type = MLX4_IB_QPT_TUN_SMI_OWNER;
 		else
 			qp_type = MLX4_IB_QPT_TUN_SMI;
@@ -911,73 +697,46 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 		if (qp_type == MLX4_IB_QPT_SMI || qp_type == MLX4_IB_QPT_GSI ||
 		    (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER |
 				MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) {
-			sqp = kzalloc(sizeof (struct mlx4_ib_sqp), GFP_KERNEL);
+			sqp = kzalloc(sizeof (struct mlx4_ib_sqp), gfp);
 			if (!sqp)
 				return -ENOMEM;
 			qp = &sqp->qp;
-			qp->pri.vid = qp->alt.vid = 0xFFFF;
+			qp->pri.vid = 0xFFFF;
+			qp->alt.vid = 0xFFFF;
 		} else {
-			qp = kzalloc(sizeof (struct mlx4_ib_qp), GFP_KERNEL);
+			qp = kzalloc(sizeof (struct mlx4_ib_qp), gfp);
 			if (!qp)
 				return -ENOMEM;
-			qp->pri.vid = qp->alt.vid = 0xFFFF;
+			qp->pri.vid = 0xFFFF;
+			qp->alt.vid = 0xFFFF;
 		}
 	} else
 		qp = *caller_qp;
 
 	qp->mlx4_ib_qp_type = qp_type;
 
-	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
-		qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
-
-	if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
-		qp->flags |= MLX4_IB_QP_LSO;
-
-	if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
-		if (dev->dev->caps.steering_mode ==
-			 MLX4_STEERING_MODE_DEVICE_MANAGED &&
-			!mlx4_is_mfunc(dev->dev))
-				qp->flags |= MLX4_IB_QP_NETIF;
-		else {
-			err = -EINVAL;
-			goto err;
-		}
-	}
-
 	mutex_init(&qp->mutex);
 	spin_lock_init(&qp->sq.lock);
 	spin_lock_init(&qp->rq.lock);
 	INIT_LIST_HEAD(&qp->gid_list);
 	INIT_LIST_HEAD(&qp->steering_rules);
-	INIT_LIST_HEAD(&qp->rules_list);
 
 	qp->state	 = IB_QPS_RESET;
 	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
 		qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
 
-	err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, mlx4_ib_qp_has_rq(init_attr), qp);
+	err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, qp_has_rq(init_attr), qp);
 	if (err)
 		goto err;
 
 	if (pd->uobject) {
 		struct mlx4_ib_create_qp ucmd;
-		int shift;
-		int n;
 
-		if (!udata || ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
+		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
 			err = -EFAULT;
 			goto err;
 		}
 
-		if (init_attr->create_flags & IB_QP_CREATE_CROSS_CHANNEL)
-			qp->flags |= MLX4_IB_QP_CAP_CROSS_CHANNEL;
-
-		if (init_attr->create_flags & IB_QP_CREATE_MANAGED_SEND)
-			qp->flags |= MLX4_IB_QP_CAP_MANAGED_SEND;
-
-		if (init_attr->create_flags & IB_QP_CREATE_MANAGED_RECV)
-			qp->flags |= MLX4_IB_QP_CAP_MANAGED_RECV;
-
 		qp->sq_no_prefetch = ucmd.sq_no_prefetch;
 
 		err = set_user_sq_size(dev, qp, &ucmd);
@@ -991,10 +750,8 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 			goto err;
 		}
 
-		n = ib_umem_page_count(qp->umem);
-		shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
-		err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
-
+		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem),
+				    ilog2(qp->umem->page_size), &qp->mtt);
 		if (err)
 			goto err_buf;
 
@@ -1002,7 +759,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 		if (err)
 			goto err_mtt;
 
-		if (mlx4_ib_qp_has_rq(init_attr)) {
+		if (qp_has_rq(init_attr)) {
 			err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
 						  ucmd.db_addr, &qp->db);
 			if (err)
@@ -1011,31 +768,45 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	} else {
 		qp->sq_no_prefetch = 0;
 
-		err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp);
+		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
+			qp->flags |= MLX4_IB_QP_LSO;
+
+		if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
+			if (dev->steering_support ==
+			    MLX4_STEERING_MODE_DEVICE_MANAGED)
+				qp->flags |= MLX4_IB_QP_NETIF;
+			else
+				goto err;
+		}
+
+		memcpy(&backup_cap, &init_attr->cap, sizeof(backup_cap));
+		err = set_kernel_sq_size(dev, &init_attr->cap,
+					 qp_type, qp, true);
 		if (err)
 			goto err;
 
-		if (mlx4_ib_qp_has_rq(init_attr)) {
-			err = mlx4_db_alloc(dev->dev, &qp->db, 0);
+		if (qp_has_rq(init_attr)) {
+			err = mlx4_db_alloc(dev->dev, &qp->db, 0, gfp);
 			if (err)
 				goto err;
 
 			*qp->db.db = 0;
 		}
 
-		if (qp->max_inline_data) {
-			err = mlx4_bf_alloc(dev->dev, &qp->bf, 0);
-			if (err) {
-				pr_debug("failed to allocate blue flame"
-					 " register (%d)", err);
-				qp->bf.uar = &dev->priv_uar;
-			}
-		} else
-			qp->bf.uar = &dev->priv_uar;
+		if (mlx4_buf_alloc(dev->dev, qp->buf_size, qp->buf_size,
+				   &qp->buf, gfp)) {
+			memcpy(&init_attr->cap, &backup_cap,
+			       sizeof(backup_cap));
+			err = set_kernel_sq_size(dev, &init_attr->cap, qp_type,
+						 qp, false);
+			if (err)
+				goto err_db;
 
-		if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf)) {
-			err = -ENOMEM;
-			goto err_db;
+			if (mlx4_buf_alloc(dev->dev, qp->buf_size,
+					   PAGE_SIZE * 2, &qp->buf, gfp)) {
+				err = -ENOMEM;
+				goto err_db;
+			}
 		}
 
 		err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift,
@@ -1043,13 +814,20 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 		if (err)
 			goto err_buf;
 
-		err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf);
+		err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf, gfp);
 		if (err)
 			goto err_mtt;
 
-		qp->sq.wrid  = kmalloc(qp->sq.wqe_cnt * sizeof (u64), GFP_KERNEL);
-		qp->rq.wrid  = kmalloc(qp->rq.wqe_cnt * sizeof (u64), GFP_KERNEL);
-
+		qp->sq.wrid = kmalloc_array(qp->sq.wqe_cnt, sizeof(u64),
+					gfp | __GFP_NOWARN);
+		if (!qp->sq.wrid)
+			qp->sq.wrid = __vmalloc(qp->sq.wqe_cnt * sizeof(u64),
+						gfp, 0 /*PAGE_KERNEL*/);
+		qp->rq.wrid = kmalloc_array(qp->rq.wqe_cnt, sizeof(u64),
+					gfp | __GFP_NOWARN);
+		if (!qp->rq.wrid)
+			qp->rq.wrid = __vmalloc(qp->rq.wqe_cnt * sizeof(u64),
+						gfp, 0 /*PAGE_KERNEL*/);
 		if (!qp->sq.wrid || !qp->rq.wrid) {
 			err = -ENOMEM;
 			goto err_wrid;
@@ -1065,12 +843,29 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 			}
 		}
 	} else {
-		err = alloc_qpn_common(dev, qp, init_attr, &qpn);
+		/* Raw packet QPNs may not have bits 6,7 set in their qp_num;
+		 * otherwise, the WQE BlueFlame setup flow wrongly causes
+		 * VLAN insertion. */
+		if (init_attr->qp_type == IB_QPT_RAW_PACKET)
+			err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn,
+						    (init_attr->cap.max_send_wr ?
+						     MLX4_RESERVE_ETH_BF_QP : 0) |
+						    (init_attr->cap.max_recv_wr ?
+						     MLX4_RESERVE_A0_QP : 0));
+		else
+			if (qp->flags & MLX4_IB_QP_NETIF)
+				err = mlx4_ib_steer_qp_alloc(dev, 1, &qpn);
+			else
+				err = mlx4_qp_reserve_range(dev->dev, 1, 1,
+							    &qpn, 0);
 		if (err)
 			goto err_proxy;
 	}
 
-	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
+	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
+		qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
+
+	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp, gfp);
 	if (err)
 		goto err_qpn;
 
@@ -1087,21 +882,43 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	qp->mqp.event = mlx4_ib_qp_event;
 	if (!*caller_qp)
 		*caller_qp = qp;
+
+	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+	mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),
+			 to_mcq(init_attr->recv_cq));
+	/* Maintain device to QPs access, needed for further handling
+	 * via reset flow
+	 */
+	list_add_tail(&qp->qps_list, &dev->qp_list);
+	/* Maintain CQ to QPs access, needed for further handling
+	 * via reset flow
+	 */
+	mcq = to_mcq(init_attr->send_cq);
+	list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);
+	mcq = to_mcq(init_attr->recv_cq);
+	list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);
+	mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),
+			   to_mcq(init_attr->recv_cq));
+	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
 	return 0;
 
 err_qpn:
-	unalloc_qpn_common(dev, qp, init_attr, qpn);
-
+	if (!sqpn) {
+		if (qp->flags & MLX4_IB_QP_NETIF)
+			mlx4_ib_steer_qp_free(dev, qpn, 1);
+		else
+			mlx4_qp_release_range(dev->dev, qpn, 1);
+	}
 err_proxy:
 	if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
 		free_proxy_bufs(pd->device, qp);
 err_wrid:
 	if (pd->uobject) {
-		if (mlx4_ib_qp_has_rq(init_attr))
+		if (qp_has_rq(init_attr))
 			mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
 	} else {
-		kfree(qp->sq.wrid);
-		kfree(qp->rq.wrid);
+		kvfree(qp->sq.wrid);
+		kvfree(qp->rq.wrid);
 	}
 
 err_mtt:
@@ -1114,12 +931,9 @@ err_buf:
 		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
 
 err_db:
-	if (!pd->uobject && mlx4_ib_qp_has_rq(init_attr))
+	if (!pd->uobject && qp_has_rq(init_attr))
 		mlx4_db_free(dev->dev, &qp->db);
 
-	if (qp->max_inline_data)
-		mlx4_bf_free(dev->dev, &qp->bf);
-
 err:
 	if (!*caller_qp)
 		kfree(qp);
@@ -1144,13 +958,13 @@ static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv
 	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
 {
 	if (send_cq == recv_cq) {
-		spin_lock_irq(&send_cq->lock);
+		spin_lock(&send_cq->lock);
 		__acquire(&recv_cq->lock);
 	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
-		spin_lock_irq(&send_cq->lock);
+		spin_lock(&send_cq->lock);
 		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
 	} else {
-		spin_lock_irq(&recv_cq->lock);
+		spin_lock(&recv_cq->lock);
 		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
 	}
 }
@@ -1160,13 +974,13 @@ static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *re
 {
 	if (send_cq == recv_cq) {
 		__release(&recv_cq->lock);
-		spin_unlock_irq(&send_cq->lock);
+		spin_unlock(&send_cq->lock);
 	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
 		spin_unlock(&recv_cq->lock);
-		spin_unlock_irq(&send_cq->lock);
+		spin_unlock(&send_cq->lock);
 	} else {
 		spin_unlock(&send_cq->lock);
-		spin_unlock_irq(&recv_cq->lock);
+		spin_unlock(&recv_cq->lock);
 	}
 }
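
[editor's note] mlx4_ib_lock_cqs()/mlx4_ib_unlock_cqs() keep their ordering rule -- the CQ with the smaller CQN is always locked first and released in reverse -- so two paths locking the same pair of CQs cannot deadlock; only the irq-disabling variants are dropped, because the callers above now take dev->reset_flow_resource_lock with spin_lock_irqsave() first. A small pthread sketch of the same ordering discipline (struct cq here is illustrative):

#include <pthread.h>

struct cq {
	unsigned int    cqn;	/* stable, unique id */
	pthread_mutex_t lock;
};

/* Always acquire the lower-numbered CQ first so that any two threads
 * locking the same pair agree on the order and cannot deadlock. */
static void lock_cq_pair(struct cq *send, struct cq *recv)
{
	if (send == recv) {
		pthread_mutex_lock(&send->lock);
	} else if (send->cqn < recv->cqn) {
		pthread_mutex_lock(&send->lock);
		pthread_mutex_lock(&recv->lock);
	} else {
		pthread_mutex_lock(&recv->lock);
		pthread_mutex_lock(&send->lock);
	}
}

static void unlock_cq_pair(struct cq *send, struct cq *recv)
{
	if (send == recv) {
		pthread_mutex_unlock(&send->lock);
	} else if (send->cqn < recv->cqn) {
		pthread_mutex_unlock(&recv->lock);
		pthread_mutex_unlock(&send->lock);
	} else {
		pthread_mutex_unlock(&send->lock);
		pthread_mutex_unlock(&recv->lock);
	}
}
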
 
@@ -1211,15 +1025,17 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
 			      int is_user)
 {
 	struct mlx4_ib_cq *send_cq, *recv_cq;
+	unsigned long flags;
 
 	if (qp->state != IB_QPS_RESET) {
 		if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
 				   MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
 			pr_warn("modify QP %06x to RESET failed.\n",
 			       qp->mqp.qpn);
-		if (qp->pri.smac) {
+		if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
 			mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
 			qp->pri.smac = 0;
+			qp->pri.smac_port = 0;
 		}
 		if (qp->alt.smac) {
 			mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
@@ -1241,8 +1057,13 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
 
 	get_cqs(qp, &send_cq, &recv_cq);
 
+	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
 	mlx4_ib_lock_cqs(send_cq, recv_cq);
 
+	/* del from lists under both locks above to protect reset flow paths */
+	list_del(&qp->qps_list);
+	list_del(&qp->cq_send_list);
+	list_del(&qp->cq_recv_list);
 	if (!is_user) {
 		__mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
 				 qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL);
@@ -1253,11 +1074,16 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
 	mlx4_qp_remove(dev->dev, &qp->mqp);
 
 	mlx4_ib_unlock_cqs(send_cq, recv_cq);
+	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
 
 	mlx4_qp_free(dev->dev, &qp->mqp);
 
-	if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp))
-		release_qpn_common(dev, qp);
+	if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp)) {
+		if (qp->flags & MLX4_IB_QP_NETIF)
+			mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1);
+		else
+			mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
+	}
 
 	mlx4_mtt_cleanup(dev->dev, &qp->mtt);
 
@@ -1267,15 +1093,12 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
 					      &qp->db);
 		ib_umem_release(qp->umem);
 	} else {
-		kfree(qp->sq.wrid);
-		kfree(qp->rq.wrid);
+		kvfree(qp->sq.wrid);
+		kvfree(qp->rq.wrid);
 		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
 		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
 			free_proxy_bufs(&dev->ib_dev, qp);
 		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
-		if (qp->max_inline_data)
-			mlx4_bf_free(dev->dev, &qp->bf);
-
 		if (qp->rq.wqe_cnt)
 			mlx4_db_free(dev->dev, &qp->db);
 	}
@@ -1300,139 +1123,52 @@ static u32 get_sqp_num(struct mlx4_ib_dev *dev, struct ib_qp_init_attr *attr)
 		return dev->dev->caps.qp1_proxy[attr->port_num - 1];
 }
 
-static int check_qpg_attr(struct mlx4_ib_dev *dev,
-			  struct ib_qp_init_attr *attr)
-{
-	if (attr->qpg_type == IB_QPG_NONE)
-		return 0;
-
-	if (attr->qp_type != IB_QPT_UD &&
-	    attr->qp_type != IB_QPT_RAW_PACKET)
-		return -EINVAL;
-
-	if (attr->qpg_type == IB_QPG_PARENT) {
-		if (attr->parent_attrib.tss_child_count == 1)
-			return -EINVAL; /* Doesn't make sense */
-		if (attr->parent_attrib.rss_child_count == 1)
-			return -EINVAL; /* Doesn't make sense */
-		if ((attr->parent_attrib.tss_child_count == 0) &&
-			(attr->parent_attrib.rss_child_count == 0))
-			/* Should be called with IP_QPG_NONE */
-			return -EINVAL;
-		if (attr->parent_attrib.rss_child_count > 1) {
-			int rss_align_num;
-			if (!(dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS))
-				return -ENOSYS;
-			rss_align_num = roundup_pow_of_two(
-					attr->parent_attrib.rss_child_count);
-			if (rss_align_num > dev->dev->caps.max_rss_tbl_sz)
-				return -EINVAL;
-		}
-	} else {
-		struct mlx4_ib_qpg_data *qpg_data;
-		if (attr->qpg_parent == NULL)
-			return -EINVAL;
-		if (IS_ERR(attr->qpg_parent))
-			return -EINVAL;
-		qpg_data = to_mqp(attr->qpg_parent)->qpg_data;
-		if (qpg_data == NULL)
-			return -EINVAL;
-		if (attr->qpg_type == IB_QPG_CHILD_TX &&
-		    !qpg_data->tss_child_count)
-			return -EINVAL;
-		if (attr->qpg_type == IB_QPG_CHILD_RX &&
-		    !qpg_data->rss_child_count)
-			return -EINVAL;
-	}
-	return 0;
-}
-
-#define RESERVED_FLAGS_MASK ((((unsigned int)IB_QP_CREATE_RESERVED_END - 1) | IB_QP_CREATE_RESERVED_END)   \
-							& ~(IB_QP_CREATE_RESERVED_START - 1))
-
-static enum mlx4_ib_qp_flags to_mlx4_ib_qp_flags(enum ib_qp_create_flags ib_qp_flags)
-{
-	enum mlx4_ib_qp_flags mlx4_ib_qp_flags = 0;
-
-	if (ib_qp_flags & IB_QP_CREATE_IPOIB_UD_LSO)
-		mlx4_ib_qp_flags |= MLX4_IB_QP_LSO;
-
-	if (ib_qp_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
-		mlx4_ib_qp_flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
-
-	if (ib_qp_flags & IB_QP_CREATE_NETIF_QP)
-		mlx4_ib_qp_flags |= MLX4_IB_QP_NETIF;
-
-	if (ib_qp_flags & IB_QP_CREATE_CROSS_CHANNEL)
-		mlx4_ib_qp_flags |= MLX4_IB_QP_CAP_CROSS_CHANNEL;
-
-	if (ib_qp_flags & IB_QP_CREATE_MANAGED_SEND)
-		mlx4_ib_qp_flags |= MLX4_IB_QP_CAP_MANAGED_SEND;
-
-	if (ib_qp_flags & IB_QP_CREATE_MANAGED_RECV)
-		mlx4_ib_qp_flags |= MLX4_IB_QP_CAP_MANAGED_RECV;
-
-	/* reserved flags */
-	mlx4_ib_qp_flags |= (ib_qp_flags & RESERVED_FLAGS_MASK);
-
-	return mlx4_ib_qp_flags;
-}
-
-struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
-				struct ib_qp_init_attr *init_attr,
-				struct ib_udata *udata)
+static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
+					struct ib_qp_init_attr *init_attr,
+					struct ib_udata *udata)
 {
 	struct mlx4_ib_qp *qp = NULL;
 	int err;
+	int sup_u_create_flags = MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
 	u16 xrcdn = 0;
-	enum mlx4_ib_qp_flags mlx4_qp_flags = to_mlx4_ib_qp_flags(init_attr->create_flags);
-	struct ib_device *device;
+	gfp_t gfp;
 
-	/* see ib_core::ib_create_qp same handling */
-	device = pd ? pd->device : init_attr->xrcd->device;
+	gfp = (init_attr->create_flags & MLX4_IB_QP_CREATE_USE_GFP_NOIO) ?
+		GFP_NOIO : GFP_KERNEL;
 	/*
 	 * We only support LSO, vendor flag1, and multicast loopback blocking,
 	 * and only for kernel UD QPs.
 	 */
-	if (mlx4_qp_flags & ~(MLX4_IB_QP_LSO |
-					MLX4_IB_QP_CAP_CROSS_CHANNEL |
-					MLX4_IB_QP_CAP_MANAGED_SEND |
-					MLX4_IB_QP_CAP_MANAGED_RECV |
+	if (init_attr->create_flags & ~(MLX4_IB_QP_LSO |
 					MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK |
-					MLX4_IB_SRIOV_TUNNEL_QP | MLX4_IB_SRIOV_SQP |
-					MLX4_IB_QP_NETIF))
+					MLX4_IB_SRIOV_TUNNEL_QP |
+					MLX4_IB_SRIOV_SQP |
+					MLX4_IB_QP_NETIF |
+					MLX4_IB_QP_CREATE_ROCE_V2_GSI |
+					MLX4_IB_QP_CREATE_USE_GFP_NOIO))
 		return ERR_PTR(-EINVAL);
 
 	if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
-	       if (init_attr->qp_type != IB_QPT_UD)
-		       return ERR_PTR(-EINVAL);
+		if (init_attr->qp_type != IB_QPT_UD)
+			return ERR_PTR(-EINVAL);
 	}
 
-	if ((mlx4_qp_flags &
-		(MLX4_IB_QP_CAP_CROSS_CHANNEL |
-		 MLX4_IB_QP_CAP_MANAGED_SEND |
-		 MLX4_IB_QP_CAP_MANAGED_RECV)) &&
-	     !(to_mdev(device)->dev->caps.flags &
-		MLX4_DEV_CAP_FLAG_CROSS_CHANNEL)) {
-		pr_debug("%s Does not support cross-channel operations\n",
-			 to_mdev(device)->ib_dev.name);
-		return ERR_PTR(-EINVAL);
+	if (init_attr->create_flags) {
+		if (udata && init_attr->create_flags & ~(sup_u_create_flags))
+			return ERR_PTR(-EINVAL);
+
+		if ((init_attr->create_flags & ~(MLX4_IB_SRIOV_SQP |
+						 MLX4_IB_QP_CREATE_USE_GFP_NOIO |
+						 MLX4_IB_QP_CREATE_ROCE_V2_GSI  |
+						 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) &&
+		     init_attr->qp_type != IB_QPT_UD) ||
+		    (init_attr->create_flags & MLX4_IB_SRIOV_SQP &&
+		     init_attr->qp_type > IB_QPT_GSI) ||
+		    (init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI &&
+		     init_attr->qp_type != IB_QPT_GSI))
+			return ERR_PTR(-EINVAL);
 	}
 
-	if ((init_attr->create_flags &
-		~(IB_QP_CREATE_CROSS_CHANNEL |
-		  IB_QP_CREATE_MANAGED_SEND |
-		  IB_QP_CREATE_MANAGED_RECV)) &&
-	    (((mlx4_qp_flags & ~MLX4_IB_SRIOV_SQP) &&
-	      init_attr->qp_type != IB_QPT_UD) ||
-	     ((mlx4_qp_flags & MLX4_IB_SRIOV_SQP) &&
-	      init_attr->qp_type > IB_QPT_GSI)))
-		return ERR_PTR(-EINVAL);
-
-	err = check_qpg_attr(to_mdev(device), init_attr);
-	if (err)
-		return ERR_PTR(err);
-
 	switch (init_attr->qp_type) {
 	case IB_QPT_XRC_TGT:
 		pd = to_mxrcd(init_attr->xrcd)->pd;
@@ -1440,21 +1176,23 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
 		init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq;
 		/* fall through */
 	case IB_QPT_XRC_INI:
-		if (!(to_mdev(device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
+		if (!(to_mdev(pd->device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
 			return ERR_PTR(-ENOSYS);
 		init_attr->recv_cq = init_attr->send_cq;
 		/* fall through */
 	case IB_QPT_RC:
 	case IB_QPT_UC:
 	case IB_QPT_RAW_PACKET:
-		qp = kzalloc(sizeof *qp, GFP_KERNEL);
+		qp = kzalloc(sizeof *qp, gfp);
 		if (!qp)
 			return ERR_PTR(-ENOMEM);
-		qp->pri.vid = qp->alt.vid = 0xFFFF;
+		qp->pri.vid = 0xFFFF;
+		qp->alt.vid = 0xFFFF;
 		/* fall through */
 	case IB_QPT_UD:
 	{
-		err = create_qp_common(to_mdev(device), pd, init_attr, udata, 0, &qp);
+		err = create_qp_common(to_mdev(pd->device), pd, init_attr,
+				       udata, 0, &qp, gfp);
 		if (err) {
 			kfree(qp);
 			return ERR_PTR(err);
@@ -1468,19 +1206,29 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
 	case IB_QPT_SMI:
 	case IB_QPT_GSI:
 	{
+		int sqpn;
+
 		/* Userspace is not allowed to create special QPs: */
 		if (udata)
 			return ERR_PTR(-EINVAL);
+		if (init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI) {
+			int res = mlx4_qp_reserve_range(to_mdev(pd->device)->dev, 1, 1, &sqpn, 0);
 
-		err = create_qp_common(to_mdev(device), pd, init_attr, udata,
-				       get_sqp_num(to_mdev(device), init_attr),
-				       &qp);
+			if (res)
+				return ERR_PTR(res);
+		} else {
+			sqpn = get_sqp_num(to_mdev(pd->device), init_attr);
+		}
+
+		err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata,
+				       sqpn,
+				       &qp, gfp);
 		if (err)
 			return ERR_PTR(err);
 
 		qp->port	= init_attr->port_num;
-		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;
-
+		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 :
+			init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI ? sqpn : 1;
 		break;
 	}
 	default:
@@ -1491,7 +1239,41 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
 	return &qp->ibqp;
 }
 
-int mlx4_ib_destroy_qp(struct ib_qp *qp)
+struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
+				struct ib_qp_init_attr *init_attr,
+				struct ib_udata *udata) {
+	struct ib_device *device = pd ? pd->device : init_attr->xrcd->device;
+	struct ib_qp *ibqp;
+	struct mlx4_ib_dev *dev = to_mdev(device);
+
+	ibqp = _mlx4_ib_create_qp(pd, init_attr, udata);
+
+	if (!IS_ERR(ibqp) &&
+	    (init_attr->qp_type == IB_QPT_GSI) &&
+	    !(init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI)) {
+		struct mlx4_ib_sqp *sqp = to_msqp((to_mqp(ibqp)));
+		int is_eth = rdma_cap_eth_ah(&dev->ib_dev, init_attr->port_num);
+
+		if (is_eth &&
+		    dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
+			init_attr->create_flags |= MLX4_IB_QP_CREATE_ROCE_V2_GSI;
+			sqp->roce_v2_gsi = ib_create_qp(pd, init_attr);
+
+			if (IS_ERR(sqp->roce_v2_gsi)) {
+				pr_err("Failed to create GSI QP for RoCEv2 (%ld)\n", PTR_ERR(sqp->roce_v2_gsi));
+				sqp->roce_v2_gsi = NULL;
+			} else {
+				sqp = to_msqp(to_mqp(sqp->roce_v2_gsi));
+				sqp->qp.flags |= MLX4_IB_ROCE_V2_GSI_QP;
+			}
+
+			init_attr->create_flags &= ~MLX4_IB_QP_CREATE_ROCE_V2_GSI;
+		}
+	}
+	return ibqp;
+}
+
+static int _mlx4_ib_destroy_qp(struct ib_qp *qp)
 {
 	struct mlx4_ib_dev *dev = to_mdev(qp->device);
 	struct mlx4_ib_qp *mqp = to_mqp(qp);
@@ -1500,6 +1282,15 @@ int mlx4_ib_destroy_qp(struct ib_qp *qp)
 	if (is_qp0(dev, mqp))
 		mlx4_CLOSE_PORT(dev->dev, mqp->port);
 
+	if (dev->qp1_proxy[mqp->port - 1] == mqp) {
+		mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
+		dev->qp1_proxy[mqp->port - 1] = NULL;
+		mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);
+	}
+
+	if (mqp->counter_index)
+		mlx4_ib_free_qp_counter(dev, mqp);
+
 	pd = get_pd(mqp);
 	destroy_qp_common(dev, mqp, !!pd->ibpd.uobject);
 
@@ -1511,6 +1302,20 @@ int mlx4_ib_destroy_qp(struct ib_qp *qp)
 	return 0;
 }
 
+int mlx4_ib_destroy_qp(struct ib_qp *qp)
+{
+	struct mlx4_ib_qp *mqp = to_mqp(qp);
+
+	if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
+		struct mlx4_ib_sqp *sqp = to_msqp(mqp);
+
+		if (sqp->roce_v2_gsi)
+			ib_destroy_qp(sqp->roce_v2_gsi);
+	}
+
+	return _mlx4_ib_destroy_qp(qp);
+}
+
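
[editor's note] mlx4_ib_create_qp()/mlx4_ib_destroy_qp() are now thin wrappers: for an Ethernet GSI QP they create an extra companion QP for RoCEv2 (sqp->roce_v2_gsi) after the primary one and tear it down before it, and a failed companion is tolerated rather than fatal. A rough sketch of that optional-companion pattern, using made-up stub types rather than the ib_qp structures:

#include <stdlib.h>

/* Illustrative stand-ins; the real objects are struct ib_qp / mlx4_ib_sqp. */
struct qp { struct qp *companion; };

static struct qp *create_qp_core(void)    { return calloc(1, sizeof(struct qp)); }
static void       destroy_qp_core(struct qp *qp) { free(qp); }

/* Wrapper: create the primary QP, then (best effort) a companion.
 * A failed companion is logged, not fatal -- as in the RoCEv2 GSI case. */
static struct qp *create_qp(int want_companion)
{
	struct qp *qp = create_qp_core();

	if (qp && want_companion)
		qp->companion = create_qp_core();	/* may stay NULL */
	return qp;
}

/* Wrapper: tear the companion down first, then the primary QP. */
static void destroy_qp(struct qp *qp)
{
	if (!qp)
		return;
	if (qp->companion)
		destroy_qp_core(qp->companion);
	destroy_qp_core(qp);
}
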
 static int to_mlx4_st(struct mlx4_ib_dev *dev, enum mlx4_ib_qp_type type)
 {
 	switch (type) {
@@ -1581,52 +1386,40 @@ static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
 	path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
 }
 
-static int ib_rate_to_mlx4(struct mlx4_ib_dev *dev, u8 rate)
-{
-	if (rate == IB_RATE_PORT_CURRENT) {
-		return 0;
-	} else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
-		return -EINVAL;
-	} else {
-		while (rate != IB_RATE_2_5_GBPS &&
-		!(1 << (rate + MLX4_STAT_RATE_OFFSET) &
-		dev->dev->caps.stat_rate_support))
-			--rate;
-	}
-
-	return rate + MLX4_STAT_RATE_OFFSET;
-}
-
-static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
-			 u8 *smac, u16 vlan_id, struct mlx4_ib_qp *qp,
-			 struct mlx4_qp_path *path, u8 port, int is_primary)
+static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
+			  u64 smac, u16 vlan_tag, struct mlx4_qp_path *path,
+			  struct mlx4_roce_smac_vlan_info *smac_info, u8 port)
 {
 	int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port) ==
 		IB_LINK_LAYER_ETHERNET;
-	u16 vlan_tag;
 	int vidx;
 	int smac_index;
 	int err;
-	u64 u64_mac;
-	struct mlx4_roce_smac_vlan_info *smac_info;
+
 
 	path->grh_mylmc     = ah->src_path_bits & 0x7f;
 	path->rlid	    = cpu_to_be16(ah->dlid);
-
-	err = ib_rate_to_mlx4(dev, ah->static_rate);
-	if (err < 0)
-		return err;
-	path->static_rate = err;
+	if (ah->static_rate) {
+		path->static_rate = ah->static_rate + MLX4_STAT_RATE_OFFSET;
+		while (path->static_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
+		       !(1 << path->static_rate & dev->dev->caps.stat_rate_support))
+			--path->static_rate;
+	} else
+		path->static_rate = 0;
 
 	if (ah->ah_flags & IB_AH_GRH) {
-		if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len[port]) {
+		int real_sgid_index = mlx4_ib_gid_index_to_real_index(dev,
+								      port,
+								      ah->grh.sgid_index);
+
+		if (real_sgid_index >= dev->dev->caps.gid_table_len[port]) {
 			pr_err("sgid_index (%u) too large. max is %d\n",
-			       ah->grh.sgid_index, dev->dev->caps.gid_table_len[port] - 1);
+			       real_sgid_index, dev->dev->caps.gid_table_len[port] - 1);
 			return -1;
 		}
 
 		path->grh_mylmc |= 1 << 7;
-		path->mgid_index = ah->grh.sgid_index;
+		path->mgid_index = real_sgid_index;
 		path->hop_limit  = ah->grh.hop_limit;
 		path->tclass_flowlabel =
 			cpu_to_be32((ah->grh.traffic_class << 20) |
@@ -1641,12 +1434,7 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
 		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
 			((port - 1) << 6) | ((ah->sl & 7) << 3);
 
-		if (is_primary)
-			smac_info = &qp->pri;
-		else
-			smac_info = &qp->alt;
-
-		vlan_tag = vlan_id;
+		path->feup |= MLX4_FEUP_FORCE_ETH_UP;
 		if (vlan_tag < 0x1000) {
 			if (smac_info->vid < 0x1000) {
 				/* both valid vlan ids */
@@ -1660,10 +1448,8 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
 					smac_info->candidate_vlan_port = port;
 					smac_info->update_vid = 1;
 					path->vlan_index = vidx;
-					path->fl = 1 << 6;
 				} else {
 					path->vlan_index = smac_info->vlan_index;
-					path->fl = 1 << 6;
 				}
 			} else {
 				/* no current vlan tag in qp */
@@ -1675,8 +1461,9 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
 				smac_info->candidate_vlan_port = port;
 				smac_info->update_vid = 1;
 				path->vlan_index = vidx;
-				path->fl = 1 << 6;
 			}
+			path->feup |= MLX4_FVL_FORCE_ETH_VLAN;
+			path->fl = 1 << 6;
 		} else {
 			/* have current vlan tag. unregister it at modify-qp success */
 			if (smac_info->vid < 0x1000) {
@@ -1685,38 +1472,62 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
 			}
 		}
 
-
 		/* get smac_index for RoCE use.
 		 * If no smac was yet assigned, register one.
 		 * If one was already assigned, but the new mac differs,
 		 * unregister the old one and register the new one.
 		*/
-		u64_mac = mlx4_mac_to_u64(smac);
-
-		if (!smac_info->smac || smac_info->smac != u64_mac) {
+		if ((!smac_info->smac && !smac_info->smac_port) ||
+		    smac_info->smac != smac) {
 			/* register candidate now, unreg if needed, after success */
-			smac_index = mlx4_register_mac(dev->dev, port, u64_mac);
+			smac_index = mlx4_register_mac(dev->dev, port, smac);
 			if (smac_index >= 0) {
 				smac_info->candidate_smac_index = smac_index;
-				smac_info->candidate_smac = u64_mac;
+				smac_info->candidate_smac = smac;
 				smac_info->candidate_smac_port = port;
-			} else
+			} else {
 				return -EINVAL;
-		} else
+			}
+		} else {
 			smac_index = smac_info->smac_index;
+		}
 
 		memcpy(path->dmac, ah->dmac, 6);
 		path->ackto = MLX4_IB_LINK_TYPE_ETH;
 		/* put MAC table smac index for IBoE */
-		path->grh_mylmc = (u8) (smac_index) | 0x80 ;
-
-	} else
+		path->grh_mylmc = (u8) (smac_index) | 0x80;
+	} else {
 		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
 			((port - 1) << 6) | ((ah->sl & 0xf) << 2);
+	}
 
 	return 0;
 }
 
+static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp,
+			 enum ib_qp_attr_mask qp_attr_mask,
+			 struct mlx4_ib_qp *mqp,
+			 struct mlx4_qp_path *path, u8 port,
+			 u16 vlan_id, u8 *smac)
+{
+	return _mlx4_set_path(dev, &qp->ah_attr,
+			      mlx4_mac_to_u64(smac),
+			      vlan_id,
+			      path, &mqp->pri, port);
+}
+
+static int mlx4_set_alt_path(struct mlx4_ib_dev *dev,
+			     const struct ib_qp_attr *qp,
+			     enum ib_qp_attr_mask qp_attr_mask,
+			     struct mlx4_ib_qp *mqp,
+			     struct mlx4_qp_path *path, u8 port)
+{
+	return _mlx4_set_path(dev, &qp->alt_ah_attr,
+			      0,
+			      0xffff,
+			      path, &mqp->alt, port);
+}
+
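
[editor's note] _mlx4_set_path() never replaces the SMAC in use directly: it registers the new MAC as a candidate in mlx4_roce_smac_vlan_info, and the caller either promotes it (unregistering the old entry) after the modify-QP command succeeds or rolls it back on failure, as the out: path further down shows. A hedged sketch of that two-phase update with stub register/unregister calls and a fake table index:

#include <stdint.h>

/* Stubs standing in for mlx4_register_mac()/mlx4_unregister_mac(). */
static int  register_mac(uint64_t mac)   { (void)mac; return 7; /* table index */ }
static void unregister_mac(uint64_t mac) { (void)mac; }

struct smac_info {
	uint64_t smac, candidate_smac;
	int      smac_index, candidate_smac_index;
};

/* Phase 1: stage the new MAC without disturbing the one in use. */
static int stage_smac(struct smac_info *info, uint64_t new_mac)
{
	int idx = register_mac(new_mac);

	if (idx < 0)
		return -1;
	info->candidate_smac = new_mac;
	info->candidate_smac_index = idx;
	return 0;
}

/* Phase 2: commit on success (drop the old entry) or roll back on error. */
static void finish_smac(struct smac_info *info, int err)
{
	if (err) {
		unregister_mac(info->candidate_smac);
	} else {
		if (info->smac)
			unregister_mac(info->smac);
		info->smac = info->candidate_smac;
		info->smac_index = info->candidate_smac_index;
	}
	info->candidate_smac = 0;
	info->candidate_smac_index = 0;
}
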
 static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
 {
 	struct mlx4_ib_gid_entry *ge, *tmp;
@@ -1729,35 +1540,82 @@ static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
 	}
 }
 
-static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, const u8 *smac,
+static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev,
+				    struct mlx4_ib_qp *qp,
 				    struct mlx4_qp_context *context)
 {
-	struct net_device *ndev;
 	u64 u64_mac;
 	int smac_index;
 
-
-	ndev = dev->iboe.netdevs[qp->port - 1];
-	if (ndev) {
-                smac = IF_LLADDR(ndev);
-		u64_mac = mlx4_mac_to_u64(smac);
-	} else {
-		u64_mac = dev->dev->caps.def_mac[qp->port];
-	}
+	u64_mac = atomic64_read(&dev->iboe.mac[qp->port - 1]);
 
 	context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6);
-	if (!qp->pri.smac) {
+	if (!qp->pri.smac && !qp->pri.smac_port) {
 		smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac);
 		if (smac_index >= 0) {
 			qp->pri.candidate_smac_index = smac_index;
 			qp->pri.candidate_smac = u64_mac;
 			qp->pri.candidate_smac_port = qp->port;
 			context->pri_path.grh_mylmc = 0x80 | (u8) smac_index;
-		} else
+		} else {
 			return -ENOENT;
+		}
 	}
 	return 0;
 }
+
+static int create_qp_lb_counter(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
+{
+	struct counter_index *new_counter_index;
+	int err;
+	u32 tmp_idx;
+
+	if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) !=
+	    IB_LINK_LAYER_ETHERNET ||
+	    !(qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) ||
+	    !(dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_LB_SRC_CHK))
+		return 0;
+
+	err = mlx4_counter_alloc(dev->dev, &tmp_idx);
+	if (err)
+		return err;
+
+	new_counter_index = kmalloc(sizeof(*new_counter_index), GFP_KERNEL);
+	if (!new_counter_index) {
+		mlx4_counter_free(dev->dev, tmp_idx);
+		return -ENOMEM;
+	}
+
+	new_counter_index->index = tmp_idx;
+	new_counter_index->allocated = 1;
+	qp->counter_index = new_counter_index;
+
+	mutex_lock(&dev->counters_table[qp->port - 1].mutex);
+	list_add_tail(&new_counter_index->list,
+		      &dev->counters_table[qp->port - 1].counters_list);
+	mutex_unlock(&dev->counters_table[qp->port - 1].mutex);
+
+	return 0;
+}
+
+enum {
+	MLX4_QPC_ROCE_MODE_1 = 0,
+	MLX4_QPC_ROCE_MODE_2 = 2,
+	MLX4_QPC_ROCE_MODE_UNDEFINED = 0xff
+};
+
+static u8 gid_type_to_qpc(enum ib_gid_type gid_type)
+{
+	switch (gid_type) {
+	case IB_GID_TYPE_ROCE:
+		return MLX4_QPC_ROCE_MODE_1;
+	case IB_GID_TYPE_ROCE_UDP_ENCAP:
+		return MLX4_QPC_ROCE_MODE_2;
+	default:
+		return MLX4_QPC_ROCE_MODE_UNDEFINED;
+	}
+}
+
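
[editor's note] gid_type_to_qpc() maps the GID type of the chosen address to the 2-bit RoCE mode the QP context expects, and the modify-QP code below shifts it into bits 6-7 of rlkey_roce_mode (bit 4 of the same byte carries the reserved-lkey flag set elsewhere in that function). A small standalone version of the mapping and packing; the names are illustrative, only the bit positions follow the hunks here:

#include <stdint.h>

enum roce_mode { ROCE_MODE_V1 = 0, ROCE_MODE_V2 = 2, ROCE_MODE_UNDEF = 0xff };
enum gid_type  { GID_ROCE, GID_ROCE_UDP_ENCAP, GID_IB };

static uint8_t gid_to_roce_mode(enum gid_type t)
{
	switch (t) {
	case GID_ROCE:           return ROCE_MODE_V1;
	case GID_ROCE_UDP_ENCAP: return ROCE_MODE_V2;
	default:                 return ROCE_MODE_UNDEF;
	}
}

/* Bits 6..7 carry the RoCE mode, bit 4 the reserved-lkey enable.
 * Returns -1 for an unusable GID type, as the driver's -EINVAL check does. */
static int pack_rlkey_roce_mode(int rlkey_en, enum gid_type t)
{
	uint8_t mode = gid_to_roce_mode(t);

	if (mode == ROCE_MODE_UNDEF)
		return -1;
	return (rlkey_en ? 1 << 4 : 0) | (mode << 6);
}
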
 static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 			       const struct ib_qp_attr *attr, int attr_mask,
 			       enum ib_qp_state cur_state, enum ib_qp_state new_state)
@@ -1771,7 +1629,13 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 	int sqd_event;
 	int steer_qp = 0;
 	int err = -EINVAL;
-	int is_eth = -1;
+	int counter_index;
+
+	/* APM is not supported under RoCE */
+	if (attr_mask & IB_QP_ALT_PATH &&
+	    rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
+	    IB_LINK_LAYER_ETHERNET)
+		return -ENOTSUPP;
 
 	context = kzalloc(sizeof *context, GFP_KERNEL);
 	if (!context)
@@ -1797,9 +1661,6 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		}
 	}
 
-	if (qp->max_inlr_data)
-		context->param3 |= cpu_to_be32(1 << 25);
-
 	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
 		context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
 	else if (ibqp->qp_type == IB_QPT_RAW_PACKET)
@@ -1828,16 +1689,23 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3;
 	context->sq_size_stride |= qp->sq.wqe_shift - 4;
 
+	if (new_state == IB_QPS_RESET && qp->counter_index)
+		mlx4_ib_free_qp_counter(dev, qp);
+
 	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
 		context->sq_size_stride |= !!qp->sq_no_prefetch << 7;
 		context->xrcd = cpu_to_be32((u32) qp->xrcdn);
-		context->param3 |= cpu_to_be32(1 << 30);
+		if (ibqp->qp_type == IB_QPT_RAW_PACKET)
+			context->param3 |= cpu_to_be32(1 << 30);
 	}
 
 	if (qp->ibqp.uobject)
-		context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index);
+		context->usr_page = cpu_to_be32(
+			mlx4_to_hw_uar_index(dev->dev,
+					     to_mucontext(ibqp->uobject->context)->uar.index));
 	else
-		context->usr_page = cpu_to_be32(qp->bf.uar->index);
+		context->usr_page = cpu_to_be32(
+			mlx4_to_hw_uar_index(dev->dev, dev->priv_uar.index));
 
 	if (attr_mask & IB_QP_DEST_QPN)
 		context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
@@ -1851,19 +1719,40 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 	}
 
 	if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
-		if (dev->counters[qp->port - 1].counter_index != -1) {
-			context->pri_path.counter_index =
-						dev->counters[qp->port - 1].counter_index;
-			optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX;
-		} else {
-			context->pri_path.counter_index = 0xff;
-		}
+		err = create_qp_lb_counter(dev, qp);
+		if (err)
+			goto out;
 
-		if (qp->flags & MLX4_IB_QP_NETIF &&
-		    (qp->qpg_type == IB_QPG_NONE || qp->qpg_type == IB_QPG_PARENT)) {
+		counter_index =
+			dev->counters_table[qp->port - 1].default_counter;
+		if (qp->counter_index)
+			counter_index = qp->counter_index->index;
+
+		if (counter_index != -1) {
+			context->pri_path.counter_index = counter_index;
+			optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX;
+			if (qp->counter_index) {
+				context->pri_path.fl |=
+					MLX4_FL_ETH_SRC_CHECK_MC_LB;
+				context->pri_path.vlan_control |=
+					MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
+			}
+		} else
+			context->pri_path.counter_index =
+				MLX4_SINK_COUNTER_INDEX(dev->dev);
+
+		if (qp->flags & MLX4_IB_QP_NETIF) {
 			mlx4_ib_steer_qp_reg(dev, qp, 1);
 			steer_qp = 1;
 		}
+
+		if (ibqp->qp_type == IB_QPT_GSI) {
+			enum ib_gid_type gid_type = qp->flags & MLX4_IB_ROCE_V2_GSI_QP ?
+				IB_GID_TYPE_ROCE_UDP_ENCAP : IB_GID_TYPE_ROCE;
+			u8 qpc_roce_mode = gid_type_to_qpc(gid_type);
+
+			context->rlkey_roce_mode |= (qpc_roce_mode << 6);
+		}
 	}
 
 	if (attr_mask & IB_QP_PKEY_INDEX) {
@@ -1873,17 +1762,51 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		optpar |= MLX4_QP_OPTPAR_PKEY_INDEX;
 	}
 
-	if ((attr_mask & IB_QP_AV) && (ibqp->qp_type != IB_QPT_RAW_PACKET)) {
-		if (mlx4_set_path(dev, &attr->ah_attr, (u8 *)attr->smac,
-				  attr_mask & IB_QP_VID ?
-				  attr->vlan_id : 0xffff ,
-				  qp, &context->pri_path,
-				  attr_mask & IB_QP_PORT ?
-				  attr->port_num : qp->port, 1))
+	if (attr_mask & IB_QP_AV) {
+		u8 port_num = mlx4_is_bonded(to_mdev(ibqp->device)->dev) ? 1 :
+			attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
+		union ib_gid gid;
+		struct ib_gid_attr gid_attr;
+		u16 vlan = 0xffff;
+		u8 smac[ETH_ALEN];
+		int status = 0;
+		int is_eth = rdma_cap_eth_ah(&dev->ib_dev, port_num) &&
+			attr->ah_attr.ah_flags & IB_AH_GRH;
+
+		if (is_eth) {
+			int index = attr->ah_attr.grh.sgid_index;
+
+			status = ib_get_cached_gid(ibqp->device, port_num,
+						   index, &gid, &gid_attr);
+			if (!status && !memcmp(&gid, &zgid, sizeof(gid)))
+				status = -ENOENT;
+			if (!status && gid_attr.ndev) {
+				vlan = rdma_vlan_dev_vlan_id(gid_attr.ndev);
+				memcpy(smac, IF_LLADDR(gid_attr.ndev), ETH_ALEN);
+				dev_put(gid_attr.ndev);
+			}
+		}
+		if (status)
+			goto out;
+
+		if (mlx4_set_path(dev, attr, attr_mask, qp, &context->pri_path,
+				  port_num, vlan, smac))
 			goto out;
 
 		optpar |= (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH |
 			   MLX4_QP_OPTPAR_SCHED_QUEUE);
+
+		if (is_eth &&
+		    (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR)) {
+			u8 qpc_roce_mode = gid_type_to_qpc(gid_attr.gid_type);
+
+			if (qpc_roce_mode == MLX4_QPC_ROCE_MODE_UNDEFINED) {
+				err = -EINVAL;
+				goto out;
+			}
+			context->rlkey_roce_mode |= (qpc_roce_mode << 6);
+		}
+
 	}
 
 	if (attr_mask & IB_QP_TIMEOUT) {
@@ -1900,16 +1823,13 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		    dev->dev->caps.pkey_table_len[attr->alt_port_num])
 			goto out;
 
-		if (mlx4_set_path(dev, &attr->alt_ah_attr, (u8 *)attr->smac,
-				  attr_mask & IB_QP_ALT_VID ?
-				  attr->alt_vlan_id : 0xffff,
-				  qp, &context->alt_path,
-				  attr->alt_port_num, 0))
+		if (mlx4_set_alt_path(dev, attr, attr_mask, qp,
+				      &context->alt_path,
+				      attr->alt_port_num))
 			goto out;
 
 		context->alt_path.pkey_index = attr->alt_pkey_index;
 		context->alt_path.ackto = attr->alt_timeout << 3;
-		context->alt_path.counter_index = dev->counters[attr->alt_port_num - 1].counter_index;
 		optpar |= MLX4_QP_OPTPAR_ALT_ADDR_PATH;
 	}
 
@@ -1956,27 +1876,6 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		optpar |= MLX4_QP_OPTPAR_RWE | MLX4_QP_OPTPAR_RRE | MLX4_QP_OPTPAR_RAE;
 	}
 
-	if (attr_mask & IB_M_EXT_CLASS_1)
-		context->params2 |= cpu_to_be32(MLX4_QP_BIT_COLL_MASTER);
-
-	/* for now we enable also sqe on send */
-	if (attr_mask & IB_M_EXT_CLASS_2) {
-		context->params2 |= cpu_to_be32(MLX4_QP_BIT_COLL_SYNC_SQ);
-		context->params2 |= cpu_to_be32(MLX4_QP_BIT_COLL_MASTER);
-	}
-
-	if (attr_mask & IB_M_EXT_CLASS_3)
-		context->params2 |= cpu_to_be32(MLX4_QP_BIT_COLL_SYNC_RQ);
-
-	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
-		context->params2 |= (qp->flags & MLX4_IB_QP_CAP_CROSS_CHANNEL ?
-			cpu_to_be32(MLX4_QP_BIT_COLL_MASTER) : 0);
-		context->params2 |= (qp->flags & MLX4_IB_QP_CAP_MANAGED_SEND ?
-			cpu_to_be32(MLX4_QP_BIT_COLL_MASTER | MLX4_QP_BIT_COLL_SYNC_SQ) : 0);
-		context->params2 |= (qp->flags & MLX4_IB_QP_CAP_MANAGED_RECV ?
-			cpu_to_be32(MLX4_QP_BIT_COLL_MASTER | MLX4_QP_BIT_COLL_SYNC_RQ) : 0);
-	}
-
 	if (ibqp->srq)
 		context->params2 |= cpu_to_be32(MLX4_QP_BIT_RIC);
 
@@ -2031,15 +1930,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 				context->pri_path.fl = 0x80;
 			context->pri_path.sched_queue |= MLX4_IB_DEFAULT_SCHED_QUEUE;
 		}
-		if (ibqp->qp_type == IB_QPT_RAW_PACKET &&
-		    (attr_mask & IB_QP_AV)) {
-			context->pri_path.sched_queue |=
-				((attr->ah_attr.sl & 0xf) << 3);
-			context->pri_path.feup = 1 << 6;
-		}
-		is_eth = rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
-			IB_LINK_LAYER_ETHERNET;
-		if (is_eth) {
+		if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
+		    IB_LINK_LAYER_ETHERNET) {
 			if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI ||
 			    qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI)
 				context->pri_path.feup = 1 << 7; /* don't fsm */
@@ -2047,18 +1939,37 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 			if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_UD ||
 			    qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI ||
 			    qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) {
-				err = handle_eth_ud_smac_index(dev, qp, (const u8 *)attr->smac, context);
-				if (err)
-					return -EINVAL;
+				err = handle_eth_ud_smac_index(dev, qp, context);
+				if (err) {
+					err = -EINVAL;
+					goto out;
+				}
+				if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
+					dev->qp1_proxy[qp->port - 1] = qp;
 			}
 		}
 	}
 
-	if (ibqp->qp_type == IB_QPT_UD)
-		if (is_eth && (new_state == IB_QPS_RTR)) {
+	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
+		context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
+					MLX4_IB_LINK_TYPE_ETH;
+		if (dev->dev->caps.tunnel_offload_mode ==  MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+			/* set QP to receive both tunneled & non-tunneled packets */
+			if (!(context->flags & cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET)))
+				context->srqn = cpu_to_be32(7 << 28);
+		}
+	}
+
+	if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
+		int is_eth = rdma_port_get_link_layer(
+				&dev->ib_dev, qp->port) ==
+				IB_LINK_LAYER_ETHERNET;
+		if (is_eth) {
 			context->pri_path.ackto = MLX4_IB_LINK_TYPE_ETH;
 			optpar |= MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH;
 		}
+	}
+
 
 	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD	&&
 	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
@@ -2067,45 +1978,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		sqd_event = 0;
 
 	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
-		context->rlkey |= (1 << 4);
+		context->rlkey_roce_mode |= (1 << 4);
 
-	if ((attr_mask & IB_QP_GROUP_RSS) &&
-		(qp->qpg_data->rss_child_count > 1)) {
-		struct mlx4_ib_qpg_data *qpg_data = qp->qpg_data;
-		void *rss_context_base = &context->pri_path;
-		struct mlx4_rss_context *rss_context =
-			(struct mlx4_rss_context *) (rss_context_base
-					+ MLX4_RSS_OFFSET_IN_QPC_PRI_PATH);
-
-		context->flags |= cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET);
-
-		/* This should be tbl_sz_base_qpn */
-		rss_context->base_qpn = cpu_to_be32(qpg_data->rss_qpn_base |
-				(ilog2(qpg_data->rss_child_count) << 24));
-		rss_context->default_qpn = cpu_to_be32(qpg_data->rss_qpn_base);
-		/* This should be flags_hash_fn */
-		rss_context->flags = MLX4_RSS_TCP_IPV6 |
-				     MLX4_RSS_TCP_IPV4;
-		if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UDP_RSS) {
-			rss_context->base_qpn_udp = rss_context->default_qpn;
-			rss_context->flags |= MLX4_RSS_IPV6 |
-					MLX4_RSS_IPV4     |
-					MLX4_RSS_UDP_IPV6 |
-					MLX4_RSS_UDP_IPV4;
-		}
-		if (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) {
-			static const u32 rsskey[10] = { 0xD181C62C, 0xF7F4DB5B,
-				0x1983A2FC, 0x943E1ADB, 0xD9389E6B, 0xD1039C2C,
-				0xA74499AD, 0x593D56D9, 0xF3253C06, 0x2ADC1FFC};
-			rss_context->hash_fn = MLX4_RSS_HASH_TOP;
-			memcpy(rss_context->rss_key, rsskey,
-				sizeof(rss_context->rss_key));
-		} else {
-			rss_context->hash_fn = MLX4_RSS_HASH_XOR;
-			memset(rss_context->rss_key, 0,
-				sizeof(rss_context->rss_key));
-		}
-	}
 	/*
 	 * Before passing a kernel QP to the HW, make sure that the
 	 * ownership bits of the send queue are set and the SQ
@@ -2120,17 +1994,13 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 			ctrl = get_send_wqe(qp, i);
 			ctrl->owner_opcode = cpu_to_be32(1 << 31);
 			if (qp->sq_max_wqes_per_wr == 1)
-				ctrl->fence_size = 1 << (qp->sq.wqe_shift - 4);
+				ctrl->fence_size =
+						1 << (qp->sq.wqe_shift - 4);
 
 			stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift);
 		}
 	}
 
-	if ((qp->port && rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
-	    IB_LINK_LAYER_ETHERNET) && (qp->ibqp.qp_type == IB_QPT_RAW_PACKET))
-		context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
-					  MLX4_IB_LINK_TYPE_ETH;
-
 	err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state),
 			     to_mlx4_state(new_state), context, optpar,
 			     sqd_event, &qp->mqp);
@@ -2153,29 +2023,6 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 	if (is_sqp(dev, qp))
 		store_sqp_attrs(to_msqp(qp), attr, attr_mask);
 
-	/* Set 'ignore_cq_overrun' bits for collectives offload */
-	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
-		if (attr_mask & (IB_M_EXT_CLASS_2 | IB_M_EXT_CLASS_3)) {
-			err = mlx4_ib_ignore_overrun_cq(ibqp->send_cq);
-			if (err) {
-				pr_err("Failed to set ignore CQ "
-				       "overrun for QP 0x%x's send CQ\n",
-				       ibqp->qp_num);
-				goto out;
-			}
-
-			if (ibqp->recv_cq != ibqp->send_cq) {
-				err = mlx4_ib_ignore_overrun_cq(ibqp->recv_cq);
-				if (err) {
-					pr_err("Failed to set ignore "
-					       "CQ overrun for QP 0x%x's recv "
-					       "CQ\n", ibqp->qp_num);
-					goto out;
-				}
-			}
-		}
-	}
-
 	/*
 	 * If we moved QP0 to RTR, bring the IB link up; if we moved
 	 * QP0 to RESET or ERROR, bring the link back down.
@@ -2210,14 +2057,13 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 			if (qp->rq.wqe_cnt)
 				*qp->db.db  = 0;
 
-			if (qp->flags & MLX4_IB_QP_NETIF &&
-			    (qp->qpg_type == IB_QPG_NONE ||
-			     qp->qpg_type == IB_QPG_PARENT))
+			if (qp->flags & MLX4_IB_QP_NETIF)
 				mlx4_ib_steer_qp_reg(dev, qp, 0);
 		}
-		if (qp->pri.smac) {
+		if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
 			mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
 			qp->pri.smac = 0;
+			qp->pri.smac_port = 0;
 		}
 		if (qp->alt.smac) {
 			mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
@@ -2237,42 +2083,40 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 			qp->alt.update_vid = 0;
 		}
 	}
-
 out:
+	if (err && qp->counter_index)
+		mlx4_ib_free_qp_counter(dev, qp);
 	if (err && steer_qp)
 		mlx4_ib_steer_qp_reg(dev, qp, 0);
 	kfree(context);
-	if (qp->pri.candidate_smac) {
-		if (err)
+	if (qp->pri.candidate_smac ||
+	    (!qp->pri.candidate_smac && qp->pri.candidate_smac_port)) {
+		if (err) {
 			mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac);
-		else {
-			if (qp->pri.smac) {
+		} else {
+			if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port))
 				mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
-			}
 			qp->pri.smac = qp->pri.candidate_smac;
 			qp->pri.smac_index = qp->pri.candidate_smac_index;
 			qp->pri.smac_port = qp->pri.candidate_smac_port;
-
 		}
 		qp->pri.candidate_smac = 0;
 		qp->pri.candidate_smac_index = 0;
 		qp->pri.candidate_smac_port = 0;
 	}
 	if (qp->alt.candidate_smac) {
-		if (err)
-			mlx4_unregister_mac(dev->dev, qp->alt.candidate_smac_port, qp->pri.candidate_smac);
-		else {
-			if (qp->pri.smac) {
+		if (err) {
+			mlx4_unregister_mac(dev->dev, qp->alt.candidate_smac_port, qp->alt.candidate_smac);
+		} else {
+			if (qp->alt.smac)
 				mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
-			}
 			qp->alt.smac = qp->alt.candidate_smac;
 			qp->alt.smac_index = qp->alt.candidate_smac_index;
 			qp->alt.smac_port = qp->alt.candidate_smac_port;
-
 		}
-		qp->pri.candidate_smac = 0;
-		qp->pri.candidate_smac_index = 0;
-		qp->pri.candidate_smac_port = 0;
+		qp->alt.candidate_smac = 0;
+		qp->alt.candidate_smac_index = 0;
+		qp->alt.candidate_smac_port = 0;
 	}
 
 	if (qp->pri.update_vid) {
@@ -2312,15 +2156,14 @@ out:
 	return err;
 }
 
-int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
-		      int attr_mask, struct ib_udata *udata)
+static int _mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+			      int attr_mask, struct ib_udata *udata)
 {
 	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
 	enum ib_qp_state cur_state, new_state;
 	int err = -EINVAL;
 	int ll;
-
 	mutex_lock(&qp->mutex);
 
 	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
@@ -2334,7 +2177,7 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	}
 
 	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
-				attr_mask & ~IB_M_QP_MOD_VEND_MASK, ll)) {
+				attr_mask, ll)) {
 		pr_debug("qpn 0x%x: invalid attribute mask specified "
 			 "for transition %d to %d. qp_type %d,"
 			 " attr_mask 0x%x\n",
@@ -2343,10 +2186,20 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		goto out;
 	}
 
-	if ((attr_mask & IB_M_QP_MOD_VEND_MASK) && !dev->dev->caps.sync_qp) {
-		pr_err("extended verbs are not supported by %s\n",
-		       dev->ib_dev.name);
-		goto out;
+	if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT)) {
+		if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
+			if ((ibqp->qp_type == IB_QPT_RC) ||
+			    (ibqp->qp_type == IB_QPT_UD) ||
+			    (ibqp->qp_type == IB_QPT_UC) ||
+			    (ibqp->qp_type == IB_QPT_RAW_PACKET) ||
+			    (ibqp->qp_type == IB_QPT_XRC_INI)) {
+				attr->port_num = mlx4_ib_bond_next_port(dev);
+			}
+		} else {
+			/* no sense in changing port_num
+			 * when ports are bonded */
+			attr_mask &= ~IB_QP_PORT;
+		}
 	}
 
 	if ((attr_mask & IB_QP_PORT) &&
@@ -2358,6 +2211,11 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		goto out;
 	}
 
+	if ((attr_mask & IB_QP_PORT) && (ibqp->qp_type == IB_QPT_RAW_PACKET) &&
+	    (rdma_port_get_link_layer(&dev->ib_dev, attr->port_num) !=
+	     IB_LINK_LAYER_ETHERNET))
+		goto out;
+
 	if (attr_mask & IB_QP_PKEY_INDEX) {
 		int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
 		if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p]) {
@@ -2394,20 +2252,57 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 
 	err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
 
+	if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT))
+		attr->port_num = 1;
+
 out:
 	mutex_unlock(&qp->mutex);
 	return err;
 }
 
+int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+		      int attr_mask, struct ib_udata *udata)
+{
+	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
+	int ret;
+
+	ret = _mlx4_ib_modify_qp(ibqp, attr, attr_mask, udata);
+
+	if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
+		struct mlx4_ib_sqp *sqp = to_msqp(mqp);
+		int err = 0;
+
+		if (sqp->roce_v2_gsi)
+			err = ib_modify_qp(sqp->roce_v2_gsi, attr, attr_mask);
+		if (err)
+			pr_err("Failed to modify GSI QP for RoCEv2 (%d)\n",
+			       err);
+	}
+	return ret;
+}
+
+static int vf_get_qp0_qkey(struct mlx4_dev *dev, int qpn, u32 *qkey)
+{
+	int i;
+	for (i = 0; i < dev->caps.num_ports; i++) {
+		if (qpn == dev->caps.qp0_proxy[i] ||
+		    qpn == dev->caps.qp0_tunnel[i]) {
+			*qkey = dev->caps.qp0_qkey[i];
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
 static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
-				  struct ib_send_wr *wr,
+				  struct ib_ud_wr *wr,
 				  void *wqe, unsigned *mlx_seg_len)
 {
 	struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device);
 	struct ib_device *ib_dev = &mdev->ib_dev;
 	struct mlx4_wqe_mlx_seg *mlx = wqe;
 	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
-	struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
+	struct mlx4_ib_ah *ah = to_mah(wr->ah);
 	u16 pkey;
 	u32 qkey;
 	int send_size;
@@ -2415,20 +2310,20 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
 	int spc;
 	int i;
 
-	if (wr->opcode != IB_WR_SEND)
+	if (wr->wr.opcode != IB_WR_SEND)
 		return -EINVAL;
 
 	send_size = 0;
 
-	for (i = 0; i < wr->num_sge; ++i)
-		send_size += wr->sg_list[i].length;
+	for (i = 0; i < wr->wr.num_sge; ++i)
+		send_size += wr->wr.sg_list[i].length;
 
 	/* for proxy-qp0 sends, need to add in size of tunnel header */
 	/* for tunnel-qp0 sends, tunnel header is already in s/g list */
 	if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER)
 		send_size += sizeof (struct mlx4_ib_tunnel_header);
 
-	ib_ud_header_init(send_size, 1, 0, 0, 0, 0, &sqp->ud_header);
+	ib_ud_header_init(send_size, 1, 0, 0, 0, 0, 0, 0, &sqp->ud_header);
 
 	if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) {
 		sqp->ud_header.lrh.service_level =
@@ -2446,18 +2341,23 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
 	mlx->rlid = sqp->ud_header.lrh.destination_lid;
 
 	sqp->ud_header.lrh.virtual_lane    = 0;
-	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
+	sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
 	ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
 	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
 	if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
-		sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
+		sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
 	else
 		sqp->ud_header.bth.destination_qpn =
 			cpu_to_be32(mdev->dev->caps.qp0_tunnel[sqp->qp.port - 1]);
 
 	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
-	if (mlx4_get_parav_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey))
-		return -EINVAL;
+	if (mlx4_is_master(mdev->dev)) {
+		if (mlx4_get_parav_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey))
+			return -EINVAL;
+	} else {
+		if (vf_get_qp0_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey))
+			return -EINVAL;
+	}
 	sqp->ud_header.deth.qkey = cpu_to_be32(qkey);
 	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.mqp.qpn);
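
[editor's note] build_sriov_qp0_header() now distinguishes the PF from a VF when fetching the QP0 qkey: the master uses the para-virtual qkey, while a VF scans its per-port qp0 proxy/tunnel QPNs, which is what vf_get_qp0_qkey() above does. A minimal sketch of that lookup; the table contents are made up, the real values come from dev->caps:

#include <stdint.h>

#define NUM_PORTS 2

/* Hypothetical per-port tables (the driver reads these from dev->caps). */
static const uint32_t qp0_proxy[NUM_PORTS]  = { 0x40, 0x41 };
static const uint32_t qp0_tunnel[NUM_PORTS] = { 0x50, 0x51 };
static const uint32_t qp0_qkey[NUM_PORTS]   = { 0x80010000, 0x80010001 };

/* Return 0 and fill *qkey if the QPN is one of the per-port proxy or
 * tunnel QP0s; -1 otherwise (mirrors vf_get_qp0_qkey()). */
static int lookup_qp0_qkey(uint32_t qpn, uint32_t *qkey)
{
	for (int i = 0; i < NUM_PORTS; i++) {
		if (qpn == qp0_proxy[i] || qpn == qp0_tunnel[i]) {
			*qkey = qp0_qkey[i];
			return 0;
		}
	}
	return -1;
}
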
 
@@ -2475,11 +2375,11 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
 	spc = MLX4_INLINE_ALIGN -
 	      ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
 	if (header_size <= spc) {
-		inl->byte_count = cpu_to_be32(1 << 31 | header_size);
+		inl->byte_count = cpu_to_be32((1U << 31) | header_size);
 		memcpy(inl + 1, sqp->header_buf, header_size);
 		i = 1;
 	} else {
-		inl->byte_count = cpu_to_be32(1 << 31 | spc);
+		inl->byte_count = cpu_to_be32((1U << 31) | spc);
 		memcpy(inl + 1, sqp->header_buf, spc);
 
 		inl = (void *) (inl + 1) + spc;
@@ -2498,7 +2398,7 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
 		 * of 16 mod 64.
 		 */
 		wmb();
-		inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc));
+		inl->byte_count = cpu_to_be32((1U << 31) | (header_size - spc));
 		i = 2;
 	}
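
[editor's note] The UD header is copied into the WQE as inline segments whose byte_count holds the length plus a valid flag in bit 31 (now written as 1U << 31 to avoid shifting into the sign bit). If the header does not fit before the next 64-byte boundary it is split in two, and the second segment's count is only written after the barrier so the HCA prefetcher can never see a valid count paired with stale data. A sketch of just the split arithmetic, assuming MLX4_INLINE_ALIGN is 64 and omitting the big-endian conversion:

#include <stdint.h>
#include <string.h>

#define INLINE_ALIGN 64u	/* assumed value of MLX4_INLINE_ALIGN */

/* Split an inline header at the next 64-byte boundary, as the copy
 * above does: 'spc' is the room left in the current chunk. */
static void split_inline(uint8_t *dst, const uint8_t *hdr, unsigned int len)
{
	uint32_t *byte_count = (uint32_t *)dst;
	uint8_t  *payload    = dst + sizeof(*byte_count);
	unsigned int spc = INLINE_ALIGN -
		((uintptr_t)payload & (INLINE_ALIGN - 1));

	if (len <= spc) {
		*byte_count = (1u << 31) | len;		/* one chunk */
		memcpy(payload, hdr, len);
	} else {
		*byte_count = (1u << 31) | spc;		/* first chunk */
		memcpy(payload, hdr, spc);
		uint32_t *bc2 = (uint32_t *)(payload + spc);
		memcpy(bc2 + 1, hdr + spc, len - spc);
		/* the driver issues wmb() here so the HCA never sees a
		 * valid byte_count for the second chunk before its data */
		*bc2 = (1u << 31) | (len - spc);
	}
}
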
 
@@ -2507,33 +2407,54 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
 	return 0;
 }
 
-static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
+static u8 sl_to_vl(struct mlx4_ib_dev *dev, u8 sl, int port_num)
+{
+	union sl2vl_tbl_to_u64 tmp_vltab;
+	u8 vl;
+
+	if (sl > 15)
+		return 0xf;
+	tmp_vltab.sl64 = atomic64_read(&dev->sl2vl[port_num - 1]);
+	vl = tmp_vltab.sl8[sl >> 1];
+	if (sl & 1)
+		vl &= 0x0f;
+	else
+		vl >>= 4;
+	return vl;
+}
+
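
[editor's note] sl_to_vl() reads the per-port SL-to-VL table that is kept packed in a 64-bit atomic: one byte per SL pair, even SL in the high nibble, odd SL in the low nibble, with 0xf returned for an out-of-range SL. The unpacking on its own, over a plain 8-byte array to sidestep the union and endianness details:

#include <stdint.h>

/* Two 4-bit VL entries per byte: even SL in the high nibble,
 * odd SL in the low nibble (same layout as sl_to_vl() above). */
static uint8_t unpack_sl2vl(const uint8_t tbl[8], unsigned int sl)
{
	uint8_t b;

	if (sl > 15)
		return 0xf;		/* invalid SL */
	b = tbl[sl >> 1];
	return (sl & 1) ? (b & 0x0f) : (b >> 4);
}
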
+#define MLX4_ROCEV2_QP1_SPORT 0xC000
+static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
 			    void *wqe, unsigned *mlx_seg_len)
 {
 	struct ib_device *ib_dev = sqp->qp.ibqp.device;
 	struct mlx4_wqe_mlx_seg *mlx = wqe;
 	struct mlx4_wqe_ctrl_seg *ctrl = wqe;
 	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
-	struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
+	struct mlx4_ib_ah *ah = to_mah(wr->ah);
 	union ib_gid sgid;
 	u16 pkey;
 	int send_size;
 	int header_size;
 	int spc;
 	int i;
-	int is_eth;
-	int is_vlan = 0;
-	int is_grh;
-	u16 uninitialized_var(vlan);
 	int err = 0;
+	u16 vlan = 0xffff;
+	bool is_eth;
+	bool is_vlan = false;
+	bool is_grh;
+	bool is_udp = false;
+	int ip_version = 0;
 
 	send_size = 0;
-	for (i = 0; i < wr->num_sge; ++i)
-		send_size += wr->sg_list[i].length;
+	for (i = 0; i < wr->wr.num_sge; ++i)
+		send_size += wr->wr.sg_list[i].length;
 
 	is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET;
 	is_grh = mlx4_ib_ah_grh_present(ah);
 	if (is_eth) {
+		struct ib_gid_attr gid_attr;
+
 		if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
 			/* When multi-function is enabled, the ib_core gid
 			 * indexes don't necessarily match the hw ones, so
@@ -2546,17 +2467,36 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 		} else  {
 			err = ib_get_cached_gid(ib_dev,
 						be32_to_cpu(ah->av.ib.port_pd) >> 24,
-						ah->av.ib.gid_index, &sgid);
-			if (err)
+						ah->av.ib.gid_index, &sgid,
+						&gid_attr);
+			if (!err) {
+				if (gid_attr.ndev)
+					dev_put(gid_attr.ndev);
+				if (!memcmp(&sgid, &zgid, sizeof(sgid)))
+					err = -ENOENT;
+			}
+			if (!err) {
+				is_udp = gid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
+				if (is_udp) {
+					if (ipv6_addr_v4mapped((struct in6_addr *)&sgid))
+						ip_version = 4;
+					else
+						ip_version = 6;
+					is_grh = false;
+				}
+			} else {
 				return err;
+			}
 		}
-
-		if (is_eth && ah->av.eth.vlan != 0xffff) {
-			vlan = cpu_to_be16(ah->av.eth.vlan) & 0x0fff;
+		if (ah->av.eth.vlan != cpu_to_be16(0xffff)) {
+			vlan = be16_to_cpu(ah->av.eth.vlan) & 0x0fff;
 			is_vlan = 1;
 		}
 	}
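
[editor's note] For a RoCEv2 (UDP-encapsulated) GID the header builder picks an IPv4 or IPv6 outer header by testing whether the source GID is an IPv4-mapped IPv6 address (::ffff:a.b.c.d), which is what the ipv6_addr_v4mapped() call above checks. The same test spelled out over the raw 16-byte GID:

#include <stdint.h>
#include <string.h>

/* True if the 16-byte GID is ::ffff:a.b.c.d, i.e. bytes 0..9 are zero
 * and bytes 10..11 are 0xff -- the IPv4-mapped form. */
static int gid_is_v4_mapped(const uint8_t gid[16])
{
	static const uint8_t prefix[12] =
		{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff };

	return memcmp(gid, prefix, sizeof(prefix)) == 0;
}

/* ip_version selection as in build_mlx_header(): 4 for mapped GIDs,
 * 6 otherwise (only meaningful when the GID type is RoCEv2/UDP). */
static int pick_ip_version(const uint8_t gid[16])
{
	return gid_is_v4_mapped(gid) ? 4 : 6;
}
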
-	ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh, 0, &sqp->ud_header);
+	err = ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh,
+			  ip_version, is_udp, 0, &sqp->ud_header);
+	if (err)
+		return err;
 
 	if (!is_eth) {
 		sqp->ud_header.lrh.service_level =
@@ -2565,35 +2505,57 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 		sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f);
 	}
 
-	if (is_grh) {
+	if (is_grh || (ip_version == 6)) {
 		sqp->ud_header.grh.traffic_class =
 			(be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff;
 		sqp->ud_header.grh.flow_label    =
 			ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
 		sqp->ud_header.grh.hop_limit     = ah->av.ib.hop_limit;
-		if (is_eth)
+		if (is_eth) {
 			memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16);
-		else {
-		if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
-			/* When multi-function is enabled, the ib_core gid
-			 * indexes don't necessarily match the hw ones, so
-			 * we must use our own cache */
-			sqp->ud_header.grh.source_gid.global.subnet_prefix =
-				to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
-						       subnet_prefix;
-			sqp->ud_header.grh.source_gid.global.interface_id =
-				to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
-					       guid_cache[ah->av.ib.gid_index];
-		} else
-			ib_get_cached_gid(ib_dev,
-					  be32_to_cpu(ah->av.ib.port_pd) >> 24,
-					  ah->av.ib.gid_index,
-					  &sqp->ud_header.grh.source_gid);
+		} else {
+			if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
+				/* When multi-function is enabled, the ib_core gid
+				 * indexes don't necessarily match the hw ones, so
+				 * we must use our own cache
+				 */
+				sqp->ud_header.grh.source_gid.global.subnet_prefix =
+					cpu_to_be64(atomic64_read(&(to_mdev(ib_dev)->sriov.
+								    demux[sqp->qp.port - 1].
+								    subnet_prefix)));
+				sqp->ud_header.grh.source_gid.global.interface_id =
+					to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
+						       guid_cache[ah->av.ib.gid_index];
+			} else {
+				ib_get_cached_gid(ib_dev,
+						  be32_to_cpu(ah->av.ib.port_pd) >> 24,
+						  ah->av.ib.gid_index,
+						  &sqp->ud_header.grh.source_gid, NULL);
+			}
 		}
 		memcpy(sqp->ud_header.grh.destination_gid.raw,
 		       ah->av.ib.dgid, 16);
 	}
 
+	if (ip_version == 4) {
+		sqp->ud_header.ip4.tos =
+			(be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff;
+		sqp->ud_header.ip4.id = 0;
+		sqp->ud_header.ip4.frag_off = htons(IP_DF);
+		sqp->ud_header.ip4.ttl = ah->av.eth.hop_limit;
+
+		memcpy(&sqp->ud_header.ip4.saddr,
+		       sgid.raw + 12, 4);
+		memcpy(&sqp->ud_header.ip4.daddr, ah->av.ib.dgid + 12, 4);
+		sqp->ud_header.ip4.check = ib_ud_ip4_csum(&sqp->ud_header);
+	}
+
+	if (is_udp) {
+		sqp->ud_header.udp.dport = htons(ROCE_V2_UDP_DPORT);
+		sqp->ud_header.udp.sport = htons(MLX4_ROCEV2_QP1_SPORT);
+		sqp->ud_header.udp.csum = 0;
+	}
+
 	mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
 
 	if (!is_eth) {
@@ -2606,7 +2568,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 		mlx->rlid = sqp->ud_header.lrh.destination_lid;
 	}
 
-	switch (wr->opcode) {
+	switch (wr->wr.opcode) {
 	case IB_WR_SEND:
 		sqp->ud_header.bth.opcode	 = IB_OPCODE_UD_SEND_ONLY;
 		sqp->ud_header.immediate_present = 0;
@@ -2614,55 +2576,57 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 	case IB_WR_SEND_WITH_IMM:
 		sqp->ud_header.bth.opcode	 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
 		sqp->ud_header.immediate_present = 1;
-		sqp->ud_header.immediate_data    = wr->ex.imm_data;
+		sqp->ud_header.immediate_data    = wr->wr.ex.imm_data;
 		break;
 	default:
 		return -EINVAL;
 	}
 
 	if (is_eth) {
-		u8 *smac;
 		struct in6_addr in6;
-
+		u16 ether_type;
 		u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
 
+		ether_type = (!is_udp) ? MLX4_IB_IBOE_ETHERTYPE :
+			(ip_version == 4 ? ETHERTYPE_IP : ETHERTYPE_IPV6);
+
 		mlx->sched_prio = cpu_to_be16(pcp);
 
+		ether_addr_copy(sqp->ud_header.eth.smac_h, ah->av.eth.s_mac);
 		memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
-		/* FIXME: cache smac value? */
 		memcpy(&ctrl->srcrb_flags16[0], ah->av.eth.mac, 2);
 		memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4);
 		memcpy(&in6, sgid.raw, sizeof(in6));
 
-		if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev))
-                        smac = IF_LLADDR(to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1]);
-		else
-			smac = ah->av.eth.s_mac;	/* use the src mac of the tunnel */
-		memcpy(sqp->ud_header.eth.smac_h, smac, 6);
 
 		if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
 			mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
 		if (!is_vlan) {
-			sqp->ud_header.eth.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
+			sqp->ud_header.eth.type = cpu_to_be16(ether_type);
 		} else {
-			sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
+			sqp->ud_header.vlan.type = cpu_to_be16(ether_type);
 			sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
 		}
 	} else {
-		sqp->ud_header.lrh.virtual_lane    = !sqp->qp.ibqp.qp_num ? 15 : 0;
+		sqp->ud_header.lrh.virtual_lane    = !sqp->qp.ibqp.qp_num ? 15 :
+							sl_to_vl(to_mdev(ib_dev),
+								 sqp->ud_header.lrh.service_level,
+								 sqp->qp.port);
+		if (sqp->qp.ibqp.qp_num && sqp->ud_header.lrh.virtual_lane == 15)
+			return -EINVAL;
 		if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
 			sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
 	}
-	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
+	sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
 	if (!sqp->qp.ibqp.qp_num)
 		ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
 	else
-		ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->wr.ud.pkey_index, &pkey);
+		ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, &pkey);
 	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
-	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
+	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
 	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
-	sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
-					       sqp->qkey : wr->wr.ud.remote_qkey);
+	sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ?
+					       sqp->qkey : wr->remote_qkey);
 	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);
 
 	header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);
@@ -2750,53 +2714,26 @@ static __be32 convert_access(int acc)
 		cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ);
 }
 
-static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg, struct ib_send_wr *wr)
+static void set_reg_seg(struct mlx4_wqe_fmr_seg *fseg,
+			struct ib_reg_wr *wr)
 {
-	struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
-	int i;
+	struct mlx4_ib_mr *mr = to_mmr(wr->mr);
 
-	for (i = 0; i < wr->wr.fast_reg.page_list_len; ++i)
-		mfrpl->mapped_page_list[i] =
-			cpu_to_be64(wr->wr.fast_reg.page_list->page_list[i] |
-				    MLX4_MTT_FLAG_PRESENT);
-
-	fseg->flags		= convert_access(wr->wr.fast_reg.access_flags);
-	fseg->mem_key		= cpu_to_be32(wr->wr.fast_reg.rkey);
-	fseg->buf_list		= cpu_to_be64(mfrpl->map);
-	fseg->start_addr	= cpu_to_be64(wr->wr.fast_reg.iova_start);
-	fseg->reg_len		= cpu_to_be64(wr->wr.fast_reg.length);
+	fseg->flags		= convert_access(wr->access);
+	fseg->mem_key		= cpu_to_be32(wr->key);
+	fseg->buf_list		= cpu_to_be64(mr->page_map);
+	fseg->start_addr	= cpu_to_be64(mr->ibmr.iova);
+	fseg->reg_len		= cpu_to_be64(mr->ibmr.length);
 	fseg->offset		= 0; /* XXX -- is this just for ZBVA? */
-	fseg->page_size		= cpu_to_be32(wr->wr.fast_reg.page_shift);
+	fseg->page_size		= cpu_to_be32(ilog2(mr->ibmr.page_size));
 	fseg->reserved[0]	= 0;
 	fseg->reserved[1]	= 0;
 }
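With the fast-register page-list and memory-window bind paths gone,
set_reg_seg() takes everything it needs (page map, IOVA, length, page
size) from the mlx4 MR itself, and the work request only carries the MR
pointer, the new key and the access flags. A hedged consumer-side sketch
of filling such a registration request (field names follow struct
ib_reg_wr as used above; SGE mapping and error handling omitted):

	#include <linux/string.h>
	#include <rdma/ib_verbs.h>

	static void init_reg_wr(struct ib_reg_wr *rwr, struct ib_mr *mr, u32 rkey)
	{
		memset(rwr, 0, sizeof(*rwr));
		rwr->wr.opcode     = IB_WR_REG_MR;
		rwr->wr.send_flags = IB_SEND_SIGNALED;
		rwr->mr            = mr;
		rwr->key           = rkey;
		rwr->access        = IB_ACCESS_LOCAL_WRITE |
				     IB_ACCESS_REMOTE_WRITE;
	}
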
 
-static void set_bind_seg(struct mlx4_wqe_bind_seg *bseg, struct ib_send_wr *wr)
-{
-	bseg->flags1 =
-		convert_access(wr->wr.bind_mw.bind_info.mw_access_flags) &
-		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ  |
-			    MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE |
-			    MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC);
-	bseg->flags2 = 0;
-	if (wr->wr.bind_mw.mw->type == IB_MW_TYPE_2)
-		bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_TYPE_2);
-	if (wr->wr.bind_mw.bind_info.mw_access_flags & IB_ZERO_BASED)
-		bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_ZERO_BASED);
-	bseg->new_rkey = cpu_to_be32(wr->wr.bind_mw.rkey);
-	bseg->lkey = cpu_to_be32(wr->wr.bind_mw.bind_info.mr->lkey);
-	bseg->addr = cpu_to_be64(wr->wr.bind_mw.bind_info.addr);
-	bseg->length = cpu_to_be64(wr->wr.bind_mw.bind_info.length);
-}
-
 static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey)
 {
+	memset(iseg, 0, sizeof(*iseg));
 	iseg->mem_key = cpu_to_be32(rkey);
-
-	iseg->reserved1    = 0;
-	iseg->reserved2    = 0;
-	iseg->reserved3[0] = 0;
-	iseg->reserved3[1] = 0;
 }
 
 static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
@@ -2807,45 +2744,47 @@ static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
 	rseg->reserved = 0;
 }
 
-static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
+static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg,
+		struct ib_atomic_wr *wr)
 {
-	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
-		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
-		aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add);
-	} else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
-		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
-		aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add_mask);
+	if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
+		aseg->swap_add = cpu_to_be64(wr->swap);
+		aseg->compare  = cpu_to_be64(wr->compare_add);
+	} else if (wr->wr.opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
+		aseg->swap_add = cpu_to_be64(wr->compare_add);
+		aseg->compare  = cpu_to_be64(wr->compare_add_mask);
 	} else {
-		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
+		aseg->swap_add = cpu_to_be64(wr->compare_add);
 		aseg->compare  = 0;
 	}
 
 }
 
 static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
-				  struct ib_send_wr *wr)
+				  struct ib_atomic_wr *wr)
 {
-	aseg->swap_add		= cpu_to_be64(wr->wr.atomic.swap);
-	aseg->swap_add_mask	= cpu_to_be64(wr->wr.atomic.swap_mask);
-	aseg->compare		= cpu_to_be64(wr->wr.atomic.compare_add);
-	aseg->compare_mask	= cpu_to_be64(wr->wr.atomic.compare_add_mask);
+	aseg->swap_add		= cpu_to_be64(wr->swap);
+	aseg->swap_add_mask	= cpu_to_be64(wr->swap_mask);
+	aseg->compare		= cpu_to_be64(wr->compare_add);
+	aseg->compare_mask	= cpu_to_be64(wr->compare_add_mask);
 }
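The only subtle part of the masked atomics is the field mapping:
MASKED_ATOMIC_FETCH_AND_ADD reuses compare_add/compare_add_mask as the
add value and mask (handled by set_atomic_seg() above), while
MASKED_ATOMIC_CMP_AND_SWP carries all four values. A hedged example of
filling the typed request for a masked compare-and-swap (fields as used
above; SGE setup and error handling omitted):

	#include <linux/string.h>
	#include <rdma/ib_verbs.h>

	static void init_masked_cas(struct ib_atomic_wr *awr, u64 remote_addr,
				    u32 rkey, u64 expect, u64 newval, u64 mask)
	{
		memset(awr, 0, sizeof(*awr));
		awr->wr.opcode        = IB_WR_MASKED_ATOMIC_CMP_AND_SWP;
		awr->remote_addr      = remote_addr;
		awr->rkey             = rkey;
		awr->compare_add      = expect;	/* compared under 'mask' */
		awr->compare_add_mask = mask;
		awr->swap             = newval;
		awr->swap_mask        = mask;
	}
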
 
 static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
-			     struct ib_send_wr *wr)
+			     struct ib_ud_wr *wr)
 {
-	memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
-	dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
-	dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
-	dseg->vlan = to_mah(wr->wr.ud.ah)->av.eth.vlan;
-	memcpy(dseg->mac, to_mah(wr->wr.ud.ah)->av.eth.mac, 6);
+	memcpy(dseg->av, &to_mah(wr->ah)->av, sizeof (struct mlx4_av));
+	dseg->dqpn = cpu_to_be32(wr->remote_qpn);
+	dseg->qkey = cpu_to_be32(wr->remote_qkey);
+	dseg->vlan = to_mah(wr->ah)->av.eth.vlan;
+	memcpy(dseg->mac, to_mah(wr->ah)->av.eth.mac, 6);
 }
 
 static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
 				    struct mlx4_wqe_datagram_seg *dseg,
-				    struct ib_send_wr *wr, enum ib_qp_type qpt)
+				    struct ib_ud_wr *wr,
+				    enum mlx4_ib_qp_type qpt)
 {
-	union mlx4_ext_av *av = &to_mah(wr->wr.ud.ah)->av;
+	union mlx4_ext_av *av = &to_mah(wr->ah)->av;
 	struct mlx4_av sqp_av = {0};
 	int port = *((u8 *) &av->ib.port_pd) & 0x3;
 
@@ -2856,43 +2795,45 @@ static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
 			cpu_to_be32(0xf0000000);
 
 	memcpy(dseg->av, &sqp_av, sizeof (struct mlx4_av));
-	/* This function used only for sending on QP1 proxies */
-	dseg->dqpn = cpu_to_be32(dev->dev->caps.qp1_tunnel[port - 1]);
+	if (qpt == MLX4_IB_QPT_PROXY_GSI)
+		dseg->dqpn = cpu_to_be32(dev->dev->caps.qp1_tunnel[port - 1]);
+	else
+		dseg->dqpn = cpu_to_be32(dev->dev->caps.qp0_tunnel[port - 1]);
 	/* Use QKEY from the QP context, which is set by master */
 	dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY);
 }
 
-static void build_tunnel_header(struct ib_send_wr *wr, void *wqe, unsigned *mlx_seg_len)
+static void build_tunnel_header(struct ib_ud_wr *wr, void *wqe, unsigned *mlx_seg_len)
 {
 	struct mlx4_wqe_inline_seg *inl = wqe;
 	struct mlx4_ib_tunnel_header hdr;
-	struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
+	struct mlx4_ib_ah *ah = to_mah(wr->ah);
 	int spc;
 	int i;
 
 	memcpy(&hdr.av, &ah->av, sizeof hdr.av);
-	hdr.remote_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
-	hdr.pkey_index = cpu_to_be16(wr->wr.ud.pkey_index);
-	hdr.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
+	hdr.remote_qpn = cpu_to_be32(wr->remote_qpn);
+	hdr.pkey_index = cpu_to_be16(wr->pkey_index);
+	hdr.qkey = cpu_to_be32(wr->remote_qkey);
 	memcpy(hdr.mac, ah->av.eth.mac, 6);
-	hdr.vlan = cpu_to_be16(ah->av.eth.vlan);
+	hdr.vlan = ah->av.eth.vlan;
 
 	spc = MLX4_INLINE_ALIGN -
 		((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
 	if (sizeof (hdr) <= spc) {
 		memcpy(inl + 1, &hdr, sizeof (hdr));
 		wmb();
-		inl->byte_count = cpu_to_be32(1 << 31 | sizeof (hdr));
+		inl->byte_count = cpu_to_be32((1U << 31) | (u32)sizeof(hdr));
 		i = 1;
 	} else {
 		memcpy(inl + 1, &hdr, spc);
 		wmb();
-		inl->byte_count = cpu_to_be32(1 << 31 | spc);
+		inl->byte_count = cpu_to_be32((1U << 31) | spc);
 
 		inl = (void *) (inl + 1) + spc;
 		memcpy(inl + 1, (void *) &hdr + spc, sizeof (hdr) - spc);
 		wmb();
-		inl->byte_count = cpu_to_be32(1 << 31 | (sizeof (hdr) - spc));
+		inl->byte_count = cpu_to_be32((1U << 31) | (u32)(sizeof (hdr) - spc));
 		i = 2;
 	}
 
@@ -2917,7 +2858,7 @@ static void set_mlx_icrc_seg(void *dseg)
 	 */
 	wmb();
 
-	iseg->byte_count = cpu_to_be32((1 << 31) | 4);
+	iseg->byte_count = cpu_to_be32((1U << 31) | 4);
 }
 
 static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
@@ -2945,23 +2886,22 @@ static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
 	dseg->addr       = cpu_to_be64(sg->addr);
 }
 
-static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
+static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_ud_wr *wr,
 			 struct mlx4_ib_qp *qp, unsigned *lso_seg_len,
 			 __be32 *lso_hdr_sz, __be32 *blh)
 {
-	unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16);
+	unsigned halign = ALIGN(sizeof *wqe + wr->hlen, 16);
 
 	if (unlikely(halign > MLX4_IB_CACHE_LINE_SIZE))
 		*blh = cpu_to_be32(1 << 6);
 
 	if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
-		     wr->num_sge > qp->sq.max_gs - (halign >> 4)))
+		     wr->wr.num_sge > qp->sq.max_gs - (halign >> 4)))
 		return -EINVAL;
 
-	memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);
+	memcpy(wqe->header, wr->header, wr->hlen);
 
-	*lso_hdr_sz  = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
-				   wr->wr.ud.hlen);
+	*lso_hdr_sz  = cpu_to_be32(wr->mss << 16 | wr->hlen);
 	*lso_seg_len = halign;
 	return 0;
 }
@@ -2985,89 +2925,7 @@ static void add_zero_len_inline(void *wqe)
 {
 	struct mlx4_wqe_inline_seg *inl = wqe;
 	memset(wqe, 0, 16);
-	inl->byte_count = cpu_to_be32(1 << 31);
-}
-
-static int lay_inline_data(struct mlx4_ib_qp *qp, struct ib_send_wr *wr,
-			   void *wqe, int *sz)
-{
-	struct mlx4_wqe_inline_seg *seg;
-	void *addr;
-	int len, seg_len;
-	int num_seg;
-	int off, to_copy;
-	int i;
-	int inl = 0;
-
-	seg = wqe;
-	wqe += sizeof *seg;
-	off = ((unsigned long)wqe) & (unsigned long)(MLX4_INLINE_ALIGN - 1);
-	num_seg = 0;
-	seg_len = 0;
-
-	for (i = 0; i < wr->num_sge; ++i) {
-		addr = (void *) (unsigned long)(wr->sg_list[i].addr);
-		len  = wr->sg_list[i].length;
-		inl += len;
-
-		if (inl > qp->max_inline_data) {
-			inl = 0;
-			return -1;
-		}
-
-		while (len >= MLX4_INLINE_ALIGN - off) {
-			to_copy = MLX4_INLINE_ALIGN - off;
-			memcpy(wqe, addr, to_copy);
-			len -= to_copy;
-			wqe += to_copy;
-			addr += to_copy;
-			seg_len += to_copy;
-			wmb(); /* see comment below */
-			seg->byte_count = htonl(MLX4_INLINE_SEG | seg_len);
-			seg_len = 0;
-			seg = wqe;
-			wqe += sizeof *seg;
-			off = sizeof *seg;
-			++num_seg;
-		}
-
-		memcpy(wqe, addr, len);
-		wqe += len;
-		seg_len += len;
-		off += len;
-	}
-
-	if (seg_len) {
-		++num_seg;
-		/*
-		 * Need a barrier here to make sure
-		 * all the data is visible before the
-		 * byte_count field is set.  Otherwise
-		 * the HCA prefetcher could grab the
-		 * 64-byte chunk with this inline
-		 * segment and get a valid (!=
-		 * 0xffffffff) byte count but stale
-		 * data, and end up sending the wrong
-		 * data.
-		 */
-		wmb();
-		seg->byte_count = htonl(MLX4_INLINE_SEG | seg_len);
-	}
-
-	*sz = (inl + num_seg * sizeof *seg + 15) / 16;
-
-	return 0;
-}
-
-/*
- * Avoid using memcpy() to copy to BlueFlame page, since memcpy()
- * implementations may use move-string-buffer assembler instructions,
- * which do not guarantee order of copying.
- */
-static void mlx4_bf_copy(unsigned long *dst, unsigned long *src,
-				unsigned bytecnt)
-{
-	__iowrite64_copy(dst, src, bytecnt / 8);
+	inl->byte_count = cpu_to_be32(1U << 31);
 }
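The 1U << 31 written into byte_count above is the inline-segment marker
(the MLX4_INLINE_SEG flag used elsewhere in this file); the low bits
carry the byte count, and a zero count with the flag set, as produced by
add_zero_len_inline(), serves as padding. Spelling the constant as 1U
keeps the shift in unsigned arithmetic. A tiny illustrative helper (not
part of the driver) showing the encoding:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* bit 31 = inline marker, low bits = byte count (big endian on the wire) */
	static inline __be32 inline_seg_byte_count(u32 len)
	{
		return cpu_to_be32((1U << 31) | len);
	}
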
 
 int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
@@ -3075,7 +2933,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 {
 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
 	void *wqe;
-	struct mlx4_wqe_ctrl_seg *uninitialized_var(ctrl);
+	struct mlx4_wqe_ctrl_seg *ctrl;
 	struct mlx4_wqe_data_seg *dseg;
 	unsigned long flags;
 	int nreq;
@@ -3086,11 +2944,41 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	unsigned uninitialized_var(seglen);
 	__be32 dummy;
 	__be32 *lso_wqe;
-	__be32 uninitialized_var(lso_hdr_sz);
+	__be32 lso_hdr_sz = 0;
 	__be32 blh;
 	int i;
-	int inl = 0;
+	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+
+	if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
+		struct mlx4_ib_sqp *sqp = to_msqp(qp);
+
+		if (sqp->roce_v2_gsi) {
+			struct mlx4_ib_ah *ah = to_mah(ud_wr(wr)->ah);
+			struct ib_gid_attr gid_attr;
+			union ib_gid gid;
+
+			if (!ib_get_cached_gid(ibqp->device,
+					       be32_to_cpu(ah->av.ib.port_pd) >> 24,
+					       ah->av.ib.gid_index, &gid,
+					       &gid_attr)) {
+				if (gid_attr.ndev)
+					dev_put(gid_attr.ndev);
+				qp = (gid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ?
+					to_mqp(sqp->roce_v2_gsi) : qp;
+			} else {
+				pr_err("Failed to get gid at index %d. RoCEv2 will not work properly\n",
+				       ah->av.ib.gid_index);
+			}
+		}
+	}
+
 	spin_lock_irqsave(&qp->sq.lock, flags);
+	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+		err = -EIO;
+		*bad_wr = wr;
+		nreq = 0;
+		goto out;
+	}
 
 	ind = qp->sq_next_wqe;
 
@@ -3111,7 +2999,6 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		}
 
 		ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
-		*((u32 *) (&ctrl->vlan_tag)) = 0;
 		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id;
 
 		ctrl->srcrb_flags =
@@ -3136,11 +3023,11 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			case IB_WR_ATOMIC_CMP_AND_SWP:
 			case IB_WR_ATOMIC_FETCH_AND_ADD:
 			case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
-				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
-					      wr->wr.atomic.rkey);
+				set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
+					      atomic_wr(wr)->rkey);
 				wqe  += sizeof (struct mlx4_wqe_raddr_seg);
 
-				set_atomic_seg(wqe, wr);
+				set_atomic_seg(wqe, atomic_wr(wr));
 				wqe  += sizeof (struct mlx4_wqe_atomic_seg);
 
 				size += (sizeof (struct mlx4_wqe_raddr_seg) +
@@ -3149,11 +3036,11 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 				break;
 
 			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
-				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
-					      wr->wr.atomic.rkey);
+				set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
+					      atomic_wr(wr)->rkey);
 				wqe  += sizeof (struct mlx4_wqe_raddr_seg);
 
-				set_masked_atomic_seg(wqe, wr);
+				set_masked_atomic_seg(wqe, atomic_wr(wr));
 				wqe  += sizeof (struct mlx4_wqe_masked_atomic_seg);
 
 				size += (sizeof (struct mlx4_wqe_raddr_seg) +
@@ -3164,8 +3051,8 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			case IB_WR_RDMA_READ:
 			case IB_WR_RDMA_WRITE:
 			case IB_WR_RDMA_WRITE_WITH_IMM:
-				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
-					      wr->wr.rdma.rkey);
+				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
+					      rdma_wr(wr)->rkey);
 				wqe  += sizeof (struct mlx4_wqe_raddr_seg);
 				size += sizeof (struct mlx4_wqe_raddr_seg) / 16;
 				break;
@@ -3178,20 +3065,14 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 				size += sizeof (struct mlx4_wqe_local_inval_seg) / 16;
 				break;
 
-			case IB_WR_FAST_REG_MR:
+			case IB_WR_REG_MR:
 				ctrl->srcrb_flags |=
 					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
-				set_fmr_seg(wqe, wr);
-				wqe  += sizeof (struct mlx4_wqe_fmr_seg);
-				size += sizeof (struct mlx4_wqe_fmr_seg) / 16;
+				set_reg_seg(wqe, reg_wr(wr));
+				wqe  += sizeof(struct mlx4_wqe_fmr_seg);
+				size += sizeof(struct mlx4_wqe_fmr_seg) / 16;
 				break;
 
-			case IB_WR_BIND_MW:
-				ctrl->srcrb_flags |=
-					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
-				set_bind_seg(wqe, wr);
-				wqe  += sizeof(struct mlx4_wqe_bind_seg);
-				size += sizeof(struct mlx4_wqe_bind_seg) / 16;
 			default:
 				/* No extra segments required for sends */
 				break;
@@ -3199,7 +3080,8 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			break;
 
 		case MLX4_IB_QPT_TUN_SMI_OWNER:
-			err =  build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen);
+			err =  build_sriov_qp0_header(to_msqp(qp), ud_wr(wr),
+					ctrl, &seglen);
 			if (unlikely(err)) {
 				*bad_wr = wr;
 				goto out;
@@ -3210,19 +3092,20 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		case MLX4_IB_QPT_TUN_SMI:
 		case MLX4_IB_QPT_TUN_GSI:
 			/* this is a UD qp used in MAD responses to slaves. */
-			set_datagram_seg(wqe, wr);
+			set_datagram_seg(wqe, ud_wr(wr));
 			/* set the forced-loopback bit in the data seg av */
 			*(__be32 *) wqe |= cpu_to_be32(0x80000000);
 			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
 			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
 			break;
 		case MLX4_IB_QPT_UD:
-			set_datagram_seg(wqe, wr);
+			set_datagram_seg(wqe, ud_wr(wr));
 			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
 			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
 
 			if (wr->opcode == IB_WR_LSO) {
-				err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz, &blh);
+				err = build_lso_seg(wqe, ud_wr(wr), qp, &seglen,
+						&lso_hdr_sz, &blh);
 				if (unlikely(err)) {
 					*bad_wr = wr;
 					goto out;
@@ -3234,12 +3117,8 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			break;
 
 		case MLX4_IB_QPT_PROXY_SMI_OWNER:
-			if (unlikely(!mlx4_is_master(to_mdev(ibqp->device)->dev))) {
-				err = -ENOSYS;
-				*bad_wr = wr;
-				goto out;
-			}
-			err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen);
+			err = build_sriov_qp0_header(to_msqp(qp), ud_wr(wr),
+					ctrl, &seglen);
 			if (unlikely(err)) {
 				*bad_wr = wr;
 				goto out;
@@ -3250,31 +3129,30 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			add_zero_len_inline(wqe);
 			wqe += 16;
 			size++;
-			build_tunnel_header(wr, wqe, &seglen);
+			build_tunnel_header(ud_wr(wr), wqe, &seglen);
 			wqe  += seglen;
 			size += seglen / 16;
 			break;
 		case MLX4_IB_QPT_PROXY_SMI:
-			/* don't allow QP0 sends on guests */
-			err = -ENOSYS;
-			*bad_wr = wr;
-			goto out;
 		case MLX4_IB_QPT_PROXY_GSI:
 			/* If we are tunneling special qps, this is a UD qp.
 			 * In this case we first add a UD segment targeting
 			 * the tunnel qp, and then add a header with address
 			 * information */
-			set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe, wr, ibqp->qp_type);
+			set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe,
+						ud_wr(wr),
+						qp->mlx4_ib_qp_type);
 			wqe  += sizeof (struct mlx4_wqe_datagram_seg);
 			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
-			build_tunnel_header(wr, wqe, &seglen);
+			build_tunnel_header(ud_wr(wr), wqe, &seglen);
 			wqe  += seglen;
 			size += seglen / 16;
 			break;
 
 		case MLX4_IB_QPT_SMI:
 		case MLX4_IB_QPT_GSI:
-			err = build_mlx_header(to_msqp(qp), wr, ctrl, &seglen);
+			err = build_mlx_header(to_msqp(qp), ud_wr(wr), ctrl,
+					&seglen);
 			if (unlikely(err)) {
 				*bad_wr = wr;
 				goto out;
@@ -3293,8 +3171,10 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		 * cacheline.  This avoids issues with WQE
 		 * prefetching.
 		 */
+
 		dseg = wqe;
 		dseg += wr->num_sge - 1;
+		size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16);
 
 		/* Add one more inline data segment for ICRC for MLX sends */
 		if (unlikely(qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI ||
@@ -3305,19 +3185,8 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			size += sizeof (struct mlx4_wqe_data_seg) / 16;
 		}
 
-		if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
-			int sz;
-			err = lay_inline_data(qp, wr, wqe, &sz);
-			if (!err) {
-				inl = 1;
-				size += sz;
-			}
-		} else {
-			size += wr->num_sge *
-				(sizeof(struct mlx4_wqe_data_seg) / 16);
-			for (i = wr->num_sge - 1; i >= 0; --i, --dseg)
-				set_data_seg(dseg, wr->sg_list + i);
-		}
+		for (i = wr->num_sge - 1; i >= 0; --i, --dseg)
+			set_data_seg(dseg, wr->sg_list + i);
 
 		/*
 		 * Possibly overwrite stamping in cacheline with LSO
@@ -3326,8 +3195,9 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		 */
 		wmb();
 		*lso_wqe = lso_hdr_sz;
+
 		ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
-				    MLX4_WQE_CTRL_FENCE : 0) | size;
+					     MLX4_WQE_CTRL_FENCE : 0) | size;
 
 		/*
 		 * Make sure descriptor is fully written before
@@ -3364,27 +3234,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	}
 
 out:
-	if (nreq == 1 && inl && size > 1 && size < qp->bf.buf_size / 16) {
-		ctrl->owner_opcode |= htonl((qp->sq_next_wqe & 0xffff) << 8);
-		/* We set above doorbell_qpn bits to 0 as part of vlan
-		  * tag initialization, so |= should be correct.
-		*/
-		*(u32 *) (&ctrl->vlan_tag) |= qp->doorbell_qpn;
-		/*
-		 * Make sure that descriptor is written to memory
-		 * before writing to BlueFlame page.
-		 */
-		wmb();
-
-		++qp->sq.head;
-
-		mlx4_bf_copy(qp->bf.reg + qp->bf.offset, (unsigned long *) ctrl,
-			     ALIGN(size * 16, 64));
-		wc_wmb();
-
-		qp->bf.offset ^= qp->bf.buf_size;
-
-	} else if (nreq) {
+	if (likely(nreq)) {
 		qp->sq.head += nreq;
 
 		/*
@@ -3393,7 +3243,8 @@ out:
 		 */
 		wmb();
 
-		writel(qp->doorbell_qpn, qp->bf.uar->map + MLX4_SEND_DOORBELL);
+		writel(qp->doorbell_qpn,
+		       to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL);
 
 		/*
 		 * Make sure doorbells don't leak out of SQ spinlock
@@ -3401,10 +3252,8 @@ out:
 		 */
 		mmiowb();
 
-	}
-
-	if (likely(nreq)) {
 		stamp_send_wqe(qp, stamp, size * 16);
+
 		ind = pad_wraparound(qp, ind);
 		qp->sq_next_wqe = ind;
 	}
@@ -3425,10 +3274,18 @@ int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 	int ind;
 	int max_gs;
 	int i;
+	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
 
 	max_gs = qp->rq.max_gs;
 	spin_lock_irqsave(&qp->rq.lock, flags);
 
+	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+		err = -EIO;
+		*bad_wr = wr;
+		nreq = 0;
+		goto out;
+	}
+
 	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
 
 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
@@ -3666,22 +3523,8 @@ done:
 		qp->sq_signal_bits == cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) ?
 		IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
 
-	if (qp->flags & MLX4_IB_QP_CAP_CROSS_CHANNEL)
-		qp_init_attr->create_flags |= IB_QP_CREATE_CROSS_CHANNEL;
-
-	if (qp->flags & MLX4_IB_QP_CAP_MANAGED_SEND)
-		qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_SEND;
-
-	if (qp->flags & MLX4_IB_QP_CAP_MANAGED_RECV)
-		qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_RECV;
-
-	qp_init_attr->qpg_type = ibqp->qpg_type;
-	if (ibqp->qpg_type == IB_QPG_PARENT)
-		qp_init_attr->cap.qpg_tss_mask_sz = qp->qpg_data->qpg_tss_mask_sz;
-	else
-		qp_init_attr->cap.qpg_tss_mask_sz = 0;
-
 out:
 	mutex_unlock(&qp->mutex);
 	return err;
 }
+
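Both post verbs in this file now fail fast with -EIO once the device has
entered MLX4_DEVICE_STATE_INTERNAL_ERROR, leaving *bad_wr pointing at the
first request that was not queued. A hedged sketch of the consumer-side
handling this implies (standard verbs prototypes; the helper name is
invented for illustration):

	#include <linux/kernel.h>
	#include <rdma/ib_verbs.h>

	static int post_send_or_report(struct ib_qp *qp, struct ib_send_wr *first)
	{
		struct ib_send_wr *bad_wr = NULL;
		int err = ib_post_send(qp, first, &bad_wr);

		if (err) {
			/*
			 * Requests ahead of bad_wr were accepted; bad_wr and
			 * the rest of its ->next chain must be completed or
			 * retried by the caller.
			 */
			pr_err("post_send failed: %d (wr_id 0x%llx)\n",
			       err, (unsigned long long)bad_wr->wr_id);
		}
		return err;
	}
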
diff --git a/sys/dev/mlx4/mlx4_ib/mlx4_ib_srq.c b/sys/dev/mlx4/mlx4_ib/mlx4_ib_srq.c
index 971f91933a02..a2e55db37125 100644
--- a/sys/dev/mlx4/mlx4_ib/mlx4_ib_srq.c
+++ b/sys/dev/mlx4/mlx4_ib/mlx4_ib_srq.c
@@ -36,7 +36,7 @@
 #include <linux/slab.h>
 
 #include "mlx4_ib.h"
-#include "user.h"
+#include <rdma/mlx4-abi.h>
 
 static void *get_wqe(struct mlx4_ib_srq *srq, int n)
 {
@@ -134,13 +134,14 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
 		if (err)
 			goto err_mtt;
 	} else {
-		err = mlx4_db_alloc(dev->dev, &srq->db, 0);
+		err = mlx4_db_alloc(dev->dev, &srq->db, 0, GFP_KERNEL);
 		if (err)
 			goto err_srq;
 
 		*srq->db.db = 0;
 
-		if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
+		if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf,
+				   GFP_KERNEL)) {
 			err = -ENOMEM;
 			goto err_db;
 		}
@@ -165,14 +166,19 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
 		if (err)
 			goto err_buf;
 
-		err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf);
+		err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf, GFP_KERNEL);
 		if (err)
 			goto err_mtt;
 
-		srq->wrid = kmalloc(srq->msrq.max * sizeof (u64), GFP_KERNEL);
+		srq->wrid = kmalloc_array(srq->msrq.max, sizeof(u64),
+					GFP_KERNEL | __GFP_NOWARN);
 		if (!srq->wrid) {
-			err = -ENOMEM;
-			goto err_mtt;
+			srq->wrid = __vmalloc(srq->msrq.max * sizeof(u64),
+					      GFP_KERNEL, 0 /*PAGE_KERNEL*/);
+			if (!srq->wrid) {
+				err = -ENOMEM;
+				goto err_mtt;
+			}
 		}
 	}
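The WRID table allocation above first tries a physically contiguous
kmalloc_array() (warnings suppressed with __GFP_NOWARN) and only falls
back to a vmalloc-style allocation when that fails, which is why the
free paths below switch to kvfree(), which copes with either origin. A
generic sketch of the same pattern (the helper name is not part of the
driver):

	#include <linux/slab.h>
	#include <linux/types.h>
	#include <linux/vmalloc.h>

	static u64 *alloc_id_table(unsigned int nelem)
	{
		u64 *tab = kmalloc_array(nelem, sizeof(*tab),
					 GFP_KERNEL | __GFP_NOWARN);

		if (!tab)
			tab = vmalloc((unsigned long)nelem * sizeof(*tab));
		return tab;	/* release with kvfree(tab) */
	}
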
 
@@ -203,7 +209,7 @@ err_wrid:
 	if (pd->uobject)
 		mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
 	else
-		kfree(srq->wrid);
+		kvfree(srq->wrid);
 
 err_mtt:
 	mlx4_mtt_cleanup(dev->dev, &srq->mtt);
@@ -280,7 +286,7 @@ int mlx4_ib_destroy_srq(struct ib_srq *srq)
 		mlx4_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
 		ib_umem_release(msrq->umem);
 	} else {
-		kfree(msrq->wrid);
+		kvfree(msrq->wrid);
 		mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift,
 			      &msrq->buf);
 		mlx4_db_free(dev->dev, &msrq->db);
@@ -315,8 +321,15 @@ int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 	int err = 0;
 	int nreq;
 	int i;
+	struct mlx4_ib_dev *mdev = to_mdev(ibsrq->device);
 
 	spin_lock_irqsave(&srq->lock, flags);
+	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+		err = -EIO;
+		*bad_wr = wr;
+		nreq = 0;
+		goto out;
+	}
 
 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
 		if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
@@ -361,6 +374,7 @@ int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 
 		*srq->db.db = cpu_to_be32(srq->wqe_ctr);
 	}
+out:
 
 	spin_unlock_irqrestore(&srq->lock, flags);
 
diff --git a/sys/dev/mlx4/mlx4_ib/mlx4_ib_sysfs.c b/sys/dev/mlx4/mlx4_ib/mlx4_ib_sysfs.c
index 2a7d4f03f4dc..af192fead7ab 100644
--- a/sys/dev/mlx4/mlx4_ib/mlx4_ib_sysfs.c
+++ b/sys/dev/mlx4/mlx4_ib/mlx4_ib_sysfs.c
@@ -46,21 +46,17 @@
 static ssize_t show_admin_alias_guid(struct device *dev,
 			      struct device_attribute *attr, char *buf)
 {
-	int record_num;/*0-15*/
-	int guid_index_in_rec; /*0 - 7*/
 	struct mlx4_ib_iov_sysfs_attr *mlx4_ib_iov_dentry =
 		container_of(attr, struct mlx4_ib_iov_sysfs_attr, dentry);
 	struct mlx4_ib_iov_port *port = mlx4_ib_iov_dentry->ctx;
 	struct mlx4_ib_dev *mdev = port->dev;
+	__be64 sysadmin_ag_val;
 
-	record_num = mlx4_ib_iov_dentry->entry_num / 8 ;
-	guid_index_in_rec = mlx4_ib_iov_dentry->entry_num % 8 ;
+	sysadmin_ag_val = mlx4_get_admin_guid(mdev->dev,
+					      mlx4_ib_iov_dentry->entry_num,
+					      port->num);
 
-	return sprintf(buf, "%llx\n",
-		       (long long)be64_to_cpu(*(__be64 *)&mdev->sriov.alias_guid.
-				   ports_guid[port->num - 1].
-				   all_rec_per_port[record_num].
-				   all_recs[8 * guid_index_in_rec]));
+	return sprintf(buf, "%llx\n", (long long)be64_to_cpu(sysadmin_ag_val));
 }
 
 /* store_admin_alias_guid stores the (new) administratively assigned value of that GUID.
@@ -80,6 +76,7 @@ static ssize_t store_admin_alias_guid(struct device *dev,
 	struct mlx4_ib_iov_port *port = mlx4_ib_iov_dentry->ctx;
 	struct mlx4_ib_dev *mdev = port->dev;
 	unsigned long long sysadmin_ag_val;
+	unsigned long flags;
 
 	record_num = mlx4_ib_iov_dentry->entry_num / 8;
 	guid_index_in_rec = mlx4_ib_iov_dentry->entry_num % 8;
@@ -87,6 +84,7 @@ static ssize_t store_admin_alias_guid(struct device *dev,
 		pr_err("GUID 0 block 0 is RO\n");
 		return count;
 	}
+	spin_lock_irqsave(&mdev->sriov.alias_guid.ag_work_lock, flags);
 	sscanf(buf, "%llx", &sysadmin_ag_val);
 	*(__be64 *)&mdev->sriov.alias_guid.ports_guid[port->num - 1].
 		all_rec_per_port[record_num].
@@ -96,33 +94,15 @@ static ssize_t store_admin_alias_guid(struct device *dev,
 	/* Change the state to be pending for update */
 	mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].status
 		= MLX4_GUID_INFO_STATUS_IDLE ;
-
-	mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].method
-		= MLX4_GUID_INFO_RECORD_SET;
-
-	switch (sysadmin_ag_val) {
-	case MLX4_GUID_FOR_DELETE_VAL:
-		mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].method
-			= MLX4_GUID_INFO_RECORD_DELETE;
-		mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].ownership
-			= MLX4_GUID_SYSADMIN_ASSIGN;
-		break;
-	/* The sysadmin requests the SM to re-assign */
-	case MLX4_NOT_SET_GUID:
-		mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].ownership
-			= MLX4_GUID_DRIVER_ASSIGN;
-		break;
-	/* The sysadmin requests a specific value.*/
-	default:
-		mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].ownership
-			= MLX4_GUID_SYSADMIN_ASSIGN;
-		break;
-	}
+	mlx4_set_admin_guid(mdev->dev, cpu_to_be64(sysadmin_ag_val),
+			    mlx4_ib_iov_dentry->entry_num,
+			    port->num);
 
 	/* set the record index */
 	mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].guid_indexes
-		= mlx4_ib_get_aguid_comp_mask_from_ix(guid_index_in_rec);
+		|= mlx4_ib_get_aguid_comp_mask_from_ix(guid_index_in_rec);
 
+	spin_unlock_irqrestore(&mdev->sriov.alias_guid.ag_work_lock, flags);
 	mlx4_ib_init_alias_guid_work(mdev, port->num - 1);
 
 	return count;
@@ -375,7 +355,7 @@ static void get_name(struct mlx4_ib_dev *dev, char *name, int i, int max)
 	char base_name[9];
 
 	/* pci_name format is: bus:dev:func -> xxxx:yy:zz.n */
-	strlcpy(name, pci_name(dev->dev->pdev), max);
+	strlcpy(name, pci_name(dev->dev->persist->pdev), max);
 	strncpy(base_name, name, 8); /*till xxxx:yy:*/
 	base_name[8] = '\0';
 	/* with no ARI only 3 last bits are used so when the fn is higher than 8
@@ -389,8 +369,10 @@ struct mlx4_port {
 	struct mlx4_ib_dev    *dev;
 	struct attribute_group pkey_group;
 	struct attribute_group gid_group;
-	u8                     port_num;
+	struct device_attribute	enable_smi_admin;
+	struct device_attribute	smi_enabled;
 	int		       slave;
+	u8                     port_num;
 };
 
 
@@ -558,6 +540,101 @@ err:
 	return NULL;
 }
 
+static ssize_t sysfs_show_smi_enabled(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct mlx4_port *p =
+		container_of(attr, struct mlx4_port, smi_enabled);
+	ssize_t len = 0;
+
+	if (mlx4_vf_smi_enabled(p->dev->dev, p->slave, p->port_num))
+		len = sprintf(buf, "%d\n", 1);
+	else
+		len = sprintf(buf, "%d\n", 0);
+
+	return len;
+}
+
+static ssize_t sysfs_show_enable_smi_admin(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct mlx4_port *p =
+		container_of(attr, struct mlx4_port, enable_smi_admin);
+	ssize_t len = 0;
+
+	if (mlx4_vf_get_enable_smi_admin(p->dev->dev, p->slave, p->port_num))
+		len = sprintf(buf, "%d\n", 1);
+	else
+		len = sprintf(buf, "%d\n", 0);
+
+	return len;
+}
+
+static ssize_t sysfs_store_enable_smi_admin(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct mlx4_port *p =
+		container_of(attr, struct mlx4_port, enable_smi_admin);
+	int enable;
+
+	if (sscanf(buf, "%i", &enable) != 1 ||
+	    enable < 0 || enable > 1)
+		return -EINVAL;
+
+	if (mlx4_vf_set_enable_smi_admin(p->dev->dev, p->slave, p->port_num, enable))
+		return -EINVAL;
+	return count;
+}
+
+static int add_vf_smi_entries(struct mlx4_port *p)
+{
+	int is_eth = rdma_port_get_link_layer(&p->dev->ib_dev, p->port_num) ==
+			IB_LINK_LAYER_ETHERNET;
+	int ret;
+
+	/* do not display entries if eth transport, or if master */
+	if (is_eth || p->slave == mlx4_master_func_num(p->dev->dev))
+		return 0;
+
+	sysfs_attr_init(&p->smi_enabled.attr);
+	p->smi_enabled.show = sysfs_show_smi_enabled;
+	p->smi_enabled.store = NULL;
+	p->smi_enabled.attr.name = "smi_enabled";
+	p->smi_enabled.attr.mode = 0444;
+	ret = sysfs_create_file(&p->kobj, &p->smi_enabled.attr);
+	if (ret) {
+		pr_err("failed to create smi_enabled\n");
+		return ret;
+	}
+
+	sysfs_attr_init(&p->enable_smi_admin.attr);
+	p->enable_smi_admin.show = sysfs_show_enable_smi_admin;
+	p->enable_smi_admin.store = sysfs_store_enable_smi_admin;
+	p->enable_smi_admin.attr.name = "enable_smi_admin";
+	p->enable_smi_admin.attr.mode = 0644;
+	ret = sysfs_create_file(&p->kobj, &p->enable_smi_admin.attr);
+	if (ret) {
+		pr_err("failed to create enable_smi_admin\n");
+		sysfs_remove_file(&p->kobj, &p->smi_enabled.attr);
+		return ret;
+	}
+	return 0;
+}
+
+static void remove_vf_smi_entries(struct mlx4_port *p)
+{
+	int is_eth = rdma_port_get_link_layer(&p->dev->ib_dev, p->port_num) ==
+			IB_LINK_LAYER_ETHERNET;
+
+	if (is_eth || p->slave == mlx4_master_func_num(p->dev->dev))
+		return;
+
+	sysfs_remove_file(&p->kobj, &p->smi_enabled.attr);
+	sysfs_remove_file(&p->kobj, &p->enable_smi_admin.attr);
+}
+
 static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave)
 {
 	struct mlx4_port *p;
@@ -581,16 +658,14 @@ static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave)
 		goto err_alloc;
 
 	p->pkey_group.name  = "pkey_idx";
-	if (is_eth)
-		p->pkey_group.attrs =
-			alloc_group_attrs(show_port_pkey, NULL,
-					  dev->dev->caps.pkey_table_len[port_num]);
-	else
-		p->pkey_group.attrs =
-			alloc_group_attrs(show_port_pkey, store_port_pkey,
-					  dev->dev->caps.pkey_table_len[port_num]);
-	if (!p->pkey_group.attrs)
+	p->pkey_group.attrs =
+		alloc_group_attrs(show_port_pkey,
+				  is_eth ? NULL : store_port_pkey,
+				  dev->dev->caps.pkey_table_len[port_num]);
+	if (!p->pkey_group.attrs) {
+		ret = -ENOMEM;
 		goto err_alloc;
+	}
 
 	ret = sysfs_create_group(&p->kobj, &p->pkey_group);
 	if (ret)
@@ -598,13 +673,19 @@ static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave)
 
 	p->gid_group.name  = "gid_idx";
 	p->gid_group.attrs = alloc_group_attrs(show_port_gid_idx, NULL, 1);
-	if (!p->gid_group.attrs)
+	if (!p->gid_group.attrs) {
+		ret = -ENOMEM;
 		goto err_free_pkey;
+	}
 
 	ret = sysfs_create_group(&p->kobj, &p->gid_group);
 	if (ret)
 		goto err_free_gid;
 
+	ret = add_vf_smi_entries(p);
+	if (ret)
+		goto err_free_gid;
+
 	list_add_tail(&p->kobj.entry, &dev->pkeys.pkey_port_list[slave]);
 	return 0;
 
@@ -630,6 +711,7 @@ static int register_one_pkey_tree(struct mlx4_ib_dev *dev, int slave)
 	int port;
 	struct kobject *p, *t;
 	struct mlx4_port *mport;
+	struct mlx4_active_ports actv_ports;
 
 	get_name(dev, name, slave, sizeof name);
 
@@ -652,7 +734,11 @@ static int register_one_pkey_tree(struct mlx4_ib_dev *dev, int slave)
 		goto err_ports;
 	}
 
+	actv_ports = mlx4_get_active_ports(dev->dev, slave);
+
 	for (port = 1; port <= dev->dev->caps.num_ports; ++port) {
+		if (!test_bit(port - 1, actv_ports.ports))
+			continue;
 		err = add_port(dev, port, slave);
 		if (err)
 			goto err_add;
@@ -667,6 +753,7 @@ err_add:
 		mport = container_of(p, struct mlx4_port, kobj);
 		sysfs_remove_group(p, &mport->pkey_group);
 		sysfs_remove_group(p, &mport->gid_group);
+		remove_vf_smi_entries(mport);
 		kobject_put(p);
 	}
 	kobject_put(dev->dev_ports_parent[slave]);
@@ -688,7 +775,7 @@ static int register_pkey_tree(struct mlx4_ib_dev *device)
 	if (!mlx4_is_master(device->dev))
 		return 0;
 
-	for (i = 0; i <= device->dev->num_vfs; ++i)
+	for (i = 0; i <= device->dev->persist->num_vfs; ++i)
 		register_one_pkey_tree(device, i);
 
 	return 0;
@@ -703,7 +790,7 @@ static void unregister_pkey_tree(struct mlx4_ib_dev *device)
 	if (!mlx4_is_master(device->dev))
 		return;
 
-	for (slave = device->dev->num_vfs; slave >= 0; --slave) {
+	for (slave = device->dev->persist->num_vfs; slave >= 0; --slave) {
 		list_for_each_entry_safe(p, t,
 					 &device->pkeys.pkey_port_list[slave],
 					 entry) {
@@ -711,6 +798,7 @@ static void unregister_pkey_tree(struct mlx4_ib_dev *device)
 			port = container_of(p, struct mlx4_port, kobj);
 			sysfs_remove_group(p, &port->pkey_group);
 			sysfs_remove_group(p, &port->gid_group);
+			remove_vf_smi_entries(port);
 			kobject_put(p);
 			kobject_put(device->dev_ports_parent[slave]);
 		}
@@ -739,7 +827,7 @@ int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *dev)
 	dev->ports_parent =
 		kobject_create_and_add("ports",
 				       kobject_get(dev->iov_parent));
-	if (!dev->iov_parent) {
+	if (!dev->ports_parent) {
 		ret = -ENOMEM;
 		goto err_ports;
 	}
diff --git a/sys/dev/mlx4/mlx4_ib/user.h b/sys/dev/mlx4/mlx4_ib/user.h
deleted file mode 100644
index 07e6769ef43b..000000000000
--- a/sys/dev/mlx4/mlx4_ib/user.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
- * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef MLX4_IB_USER_H
-#define MLX4_IB_USER_H
-
-#include <linux/types.h>
-
-/*
- * Increment this value if any changes that break userspace ABI
- * compatibility are made.
- */
-
-#define MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION	3
-#define MLX4_IB_UVERBS_ABI_VERSION		4
-
-/*
- * Make sure that all structs defined in this file remain laid out so
- * that they pack the same way on 32-bit and 64-bit architectures (to
- * avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
- * instead.
- */
-
-struct mlx4_ib_alloc_ucontext_resp_v3 {
-	__u32	qp_tab_size;
-	__u16	bf_reg_size;
-	__u16	bf_regs_per_page;
-};
-
-struct mlx4_ib_alloc_ucontext_resp {
-	__u32	dev_caps;
-	__u32	qp_tab_size;
-	__u16	bf_reg_size;
-	__u16	bf_regs_per_page;
-	__u32	cqe_size;
-};
-
-struct mlx4_ib_alloc_pd_resp {
-	__u32	pdn;
-	__u32	reserved;
-};
-
-struct mlx4_ib_create_cq {
-	__u64	buf_addr;
-	__u64	db_addr;
-};
-
-struct mlx4_ib_create_cq_resp {
-	__u32	cqn;
-	__u32	reserved;
-};
-
-struct mlx4_ib_resize_cq {
-	__u64	buf_addr;
-};
-
-struct mlx4_ib_create_srq {
-	__u64	buf_addr;
-	__u64	db_addr;
-};
-
-struct mlx4_ib_create_srq_resp {
-	__u32	srqn;
-	__u32	reserved;
-};
-
-struct mlx4_ib_create_qp {
-	__u64	buf_addr;
-	__u64	db_addr;
-	__u8	log_sq_bb_count;
-	__u8	log_sq_stride;
-	__u8	sq_no_prefetch;
-	__u8	reserved[5];
-};
-
-#endif /* MLX4_IB_USER_H */
diff --git a/sys/modules/mlx4ib/Makefile b/sys/modules/mlx4ib/Makefile
index 653b0f6840fc..1d8ebf820490 100644
--- a/sys/modules/mlx4ib/Makefile
+++ b/sys/modules/mlx4ib/Makefile
@@ -9,7 +9,6 @@ SRCS=	device_if.h bus_if.h vnode_if.h pci_if.h \
 	mlx4_ib_sysfs.c \
 	mlx4_ib_ah.c \
 	mlx4_ib_cq.c \
-	mlx4_ib_exp.c \
 	mlx4_ib_doorbell.c \
 	mlx4_ib_mad.c \
 	mlx4_ib_main.c \
@@ -20,6 +19,7 @@ SRCS=	device_if.h bus_if.h vnode_if.h pci_if.h \
 	mlx4_ib_cm.c
 
 CFLAGS+= -I${SRCTOP}/sys/ofed/include
+CFLAGS+= -I${SRCTOP}/sys/ofed/include/uapi
 CFLAGS+= -I${SRCTOP}/sys/compat/linuxkpi/common/include
 CFLAGS+= -DCONFIG_INFINIBAND_USER_MEM
 CFLAGS+= -DINET6 -DINET