irdma(4): Add code for compilation on stable/13 branch

Current content of the irdma(4) driver consists only of code that is
compilable on current 14-CURRENT branch which makes it impossible to
merge into stable/13 branch because of missing dependencies in the ofed
tree.

This patch adds missing code that allows for merging into stable branch.
Once it is there, code relating only to version 14 or higher should be
removed.

Signed-off-by: Eric Joyner <erj@FreeBSD.org>

Reviewed by:	erj@
MFC after:	1 day
Sponsored by:	Intel Corporation
Differential Revision:	https://reviews.freebsd.org/D38170
This commit is contained in:
Bartosz Sobczak 2023-02-06 14:37:39 -08:00 committed by Eric Joyner
parent c0548bfc3a
commit a527c18cd7
No known key found for this signature in database
GPG Key ID: 96F0C6FD61E05DE3
7 changed files with 893 additions and 14 deletions

View File

@ -102,6 +102,9 @@
#define __aligned_u64 uint64_t __aligned(8)
#define VLAN_PRIO_SHIFT 13
#if __FreeBSD_version < 1400000
#define IB_USER_VERBS_EX_CMD_MODIFY_QP IB_USER_VERBS_CMD_MODIFY_QP
#endif
/*
* debug definition section

View File

@ -49,12 +49,14 @@
container_of(callback_tasklet, typeof(*var), tasklet_fieldname)
#endif
#if __FreeBSD_version >= 1400000
#define IRDMA_SET_RDMA_OBJ_SIZE(ib_struct, drv_struct, member) \
(sizeof(struct drv_struct) + \
BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) + \
BUILD_BUG_ON_ZERO( \
!__same_type(((struct drv_struct *)NULL)->member, \
struct ib_struct)))
#endif /* __FreeBSD_version >= 1400000 */
#define set_ibdev_dma_device(ibdev, dev) \
ibdev.dma_device = (dev)
@ -68,11 +70,20 @@
#define kmap_local_page(pg) page_address(pg)
#define kunmap(pg)
#define kunmap_local(pg)
#if __FreeBSD_version >= 1400026
#define kc_free_lsmm_dereg_mr(iwdev, iwqp) \
((iwdev)->ibdev.dereg_mr((iwqp)->lsmm_mr, NULL))
#else
#define kc_free_lsmm_dereg_mr(iwdev, iwqp) \
((iwdev)->ibdev.dereg_mr((iwqp)->lsmm_mr))
#endif
#define IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION IB_CQ_FLAGS_TIMESTAMP_COMPLETION
#if __FreeBSD_version < 1400026
#define kc_irdma_destroy_qp(ibqp, udata) irdma_destroy_qp(ibqp)
#else
#define kc_irdma_destroy_qp(ibqp, udata) irdma_destroy_qp(ibqp, udata)
#endif
#ifndef IB_QP_ATTR_STANDARD_BITS
#define IB_QP_ATTR_STANDARD_BITS GENMASK(20, 0)
#endif
@ -118,24 +129,53 @@ static inline u64 *irdma_next_pbl_addr(u64 *pbl, struct irdma_pble_info **pinfo,
return (*pinfo)->addr;
}
#if __FreeBSD_version < 1400026
struct ib_cq *irdma_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata);
#else
int irdma_create_cq(struct ib_cq *ibcq,
const struct ib_cq_init_attr *attr,
struct ib_udata *udata);
#endif
struct ib_qp *irdma_create_qp(struct ib_pd *ibpd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata);
#if __FreeBSD_version >= 1400026
int irdma_create_ah(struct ib_ah *ib_ah,
struct ib_ah_attr *attr, u32 flags,
struct ib_udata *udata);
int irdma_create_ah_stub(struct ib_ah *ib_ah,
struct ib_ah_attr *attr, u32 flags,
struct ib_udata *udata);
#else
struct ib_ah *irdma_create_ah(struct ib_pd *ibpd,
struct ib_ah_attr *attr,
struct ib_udata *udata);
struct ib_ah *irdma_create_ah_stub(struct ib_pd *ibpd,
struct ib_ah_attr *attr,
struct ib_udata *udata);
#endif
void irdma_ether_copy(u8 *dmac, struct ib_ah_attr *attr);
#if __FreeBSD_version >= 1400026
void irdma_destroy_ah(struct ib_ah *ibah, u32 flags);
void irdma_destroy_ah_stub(struct ib_ah *ibah, u32 flags);
#else
int irdma_destroy_ah(struct ib_ah *ibah);
int irdma_destroy_ah_stub(struct ib_ah *ibah);
#endif
#if __FreeBSD_version < 1400026
int irdma_destroy_qp(struct ib_qp *ibqp);
#else
int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
#endif
#if __FreeBSD_version < 1400026
int irdma_dereg_mr(struct ib_mr *ib_mr);
#else
int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
#endif
void irdma_get_eth_speed_and_width(u32 link_speed, u8 *active_speed,
u8 *active_width);
enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev,
@ -162,6 +202,10 @@ int irdma_register_qset(struct irdma_sc_vsi *vsi,
void irdma_unregister_qset(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *tc_node);
void ib_unregister_device(struct ib_device *ibdev);
#if __FreeBSD_version < 1400026
int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
unsigned long pfn, unsigned long size, pgprot_t prot);
#endif
void irdma_disassociate_ucontext(struct ib_ucontext *context);
int kc_irdma_set_roce_cm_info(struct irdma_qp *iwqp,
struct ib_qp_attr *attr,
@ -195,8 +239,13 @@ int irdma_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, u64 len,
struct irdma_mr;
struct irdma_cq;
struct irdma_cq_buf;
#if __FreeBSD_version < 1400026
struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg);
#else
struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg, struct ib_udata *udata);
#endif
int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
u16 access);
struct ib_mr *irdma_rereg_mr_trans(struct irdma_mr *iwmr, u64 start, u64 len,
@ -229,11 +278,32 @@ void irdma_dealloc_push_page(struct irdma_pci_f *rf,
struct irdma_sc_qp *qp);
int irdma_process_resize_list(struct irdma_cq *iwcq, struct irdma_device *iwdev,
struct irdma_cq_buf *lcqe_buf);
#if __FreeBSD_version < 1400026
int irdma_destroy_cq(struct ib_cq *ib_cq);
#else
void irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
#endif
#if __FreeBSD_version < 1400026
struct ib_ucontext *irdma_alloc_ucontext(struct ib_device *, struct ib_udata *);
#else
int irdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
#endif
#if __FreeBSD_version < 1400026
int irdma_dealloc_ucontext(struct ib_ucontext *);
#else
void irdma_dealloc_ucontext(struct ib_ucontext *context);
#endif
#if __FreeBSD_version < 1400026
struct ib_pd *irdma_alloc_pd(struct ib_device *, struct ib_ucontext *,
struct ib_udata *);
#else
int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
#endif
#if __FreeBSD_version < 1400026
int irdma_dealloc_pd(struct ib_pd *);
#else
void irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
#endif
int irdma_add_gid(struct ib_device *, u8, unsigned int, const union ib_gid *,
const struct ib_gid_attr *, void **);
int irdma_del_gid(struct ib_device *, u8, unsigned int, void **);

View File

@ -97,6 +97,7 @@ irdma_del_gid(struct ib_device *device,
return 0;
}
#if __FreeBSD_version >= 1400026
/**
* irdma_alloc_mr - register stag for fast memory registration
* @pd: ibpd pointer
@ -108,6 +109,18 @@ struct ib_mr *
irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg, struct ib_udata *udata)
{
#else
/**
* irdma_alloc_mr - register stag for fast memory registration
* @pd: ibpd pointer
 * @mr_type: memory for stag registration
 * @max_num_sg: max number of pages
*/
struct ib_mr *
irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg)
{
#endif
struct irdma_device *iwdev = to_iwdev(pd->device);
struct irdma_pble_alloc *palloc;
struct irdma_pbl *iwpbl;
@ -162,6 +175,7 @@ irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
#define IRDMA_ALLOC_UCTX_MIN_REQ_LEN offsetofend(struct irdma_alloc_ucontext_req, rsvd8)
#define IRDMA_ALLOC_UCTX_MIN_RESP_LEN offsetofend(struct irdma_alloc_ucontext_resp, rsvd)
#if __FreeBSD_version >= 1400026
/**
* irdma_alloc_ucontext - Allocate the user context data structure
* @uctx: context
@ -253,7 +267,116 @@ irdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
req.userspace_ver, IRDMA_ABI_VER);
return -EINVAL;
}
#endif
#if __FreeBSD_version < 1400026
/**
 * irdma_alloc_ucontext - Allocate the user context data structure
 * @ibdev: ib device pointer
 * @udata: user data
 *
 * This keeps track of all objects associated with a particular
 * user-mode client.
 *
 * Legacy (pre-1400026) variant: allocates the irdma_ucontext itself and
 * returns a pointer (ERR_PTR on failure) instead of filling in a
 * core-allocated context.
 */
struct ib_ucontext *
irdma_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata)
{
	struct irdma_device *iwdev = to_iwdev(ibdev);
	struct irdma_alloc_ucontext_req req = {0};
	struct irdma_alloc_ucontext_resp uresp = {0};
	struct irdma_ucontext *ucontext;
	struct irdma_uk_attrs *uk_attrs;

	/* reject requests/responses too short to hold the ABI structs */
	if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN ||
	    udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
		return ERR_PTR(-EINVAL);

	if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
		return ERR_PTR(-EINVAL);

	/* only userspace ABI versions 4..IRDMA_ABI_VER are supported */
	if (req.userspace_ver < 4 || req.userspace_ver > IRDMA_ABI_VER)
		goto ver_error;

	ucontext = kzalloc(sizeof(*ucontext), GFP_KERNEL);
	if (!ucontext)
		return ERR_PTR(-ENOMEM);

	ucontext->iwdev = iwdev;
	ucontext->abi_ver = req.userspace_ver;

	uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
	/* GEN_1 legacy support with libi40iw */
	if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) {
		/* minimal-size response identifies an old libi40iw consumer */
		if (uk_attrs->hw_rev != IRDMA_GEN_1) {
			kfree(ucontext);
			return ERR_PTR(-EOPNOTSUPP);
		}

		ucontext->legacy_mode = true;
		uresp.max_qps = iwdev->rf->max_qp;
		uresp.max_pds = iwdev->rf->sc_dev.hw_attrs.max_hw_pds;
		uresp.wq_size = iwdev->rf->sc_dev.hw_attrs.max_qp_wr * 2;
		uresp.kernel_ver = req.userspace_ver;
		if (ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen))) {
			kfree(ucontext);
			return ERR_PTR(-EFAULT);
		}
	} else {
		u64 bar_off;

		/* modern consumer: report full hardware capabilities */
		uresp.kernel_ver = IRDMA_ABI_VER;
		uresp.feature_flags = uk_attrs->feature_flags;
		uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags;
		uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges;
		uresp.max_hw_inline = uk_attrs->max_hw_inline;
		uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta;
		uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta;
		uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk;
		uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size;
		uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
		uresp.hw_rev = uk_attrs->hw_rev;

		/* publish the doorbell BAR page via the mmap hash table */
		bar_off =
		    (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
		spin_lock_init(&ucontext->mmap_tbl_lock);
		ucontext->db_mmap_entry =
		    irdma_user_mmap_entry_add_hash(ucontext, bar_off,
						   IRDMA_MMAP_IO_NC,
						   &uresp.db_mmap_key);
		if (!ucontext->db_mmap_entry) {
			spin_lock_destroy(&ucontext->mmap_tbl_lock);
			kfree(ucontext);
			return ERR_PTR(-ENOMEM);
		}

		if (ib_copy_to_udata(udata, &uresp,
				     min(sizeof(uresp), udata->outlen))) {
			/* undo in reverse order: hash entry, lock, context */
			irdma_user_mmap_entry_del_hash(ucontext->db_mmap_entry);
			spin_lock_destroy(&ucontext->mmap_tbl_lock);
			kfree(ucontext);
			return ERR_PTR(-EFAULT);
		}
	}

	INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
	spin_lock_init(&ucontext->cq_reg_mem_list_lock);
	INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
	spin_lock_init(&ucontext->qp_reg_mem_list_lock);
	INIT_LIST_HEAD(&ucontext->vma_list);
	mutex_init(&ucontext->vma_list_mutex);

	return &ucontext->ibucontext;

ver_error:
	ibdev_err(&iwdev->ibdev,
		  "Invalid userspace driver version detected. Detected version %d, should be %d\n",
		  req.userspace_ver, IRDMA_ABI_VER);
	return ERR_PTR(-EINVAL);
}
#endif
#if __FreeBSD_version >= 1400026
/**
* irdma_dealloc_ucontext - deallocate the user context data structure
* @context: user context created during alloc
@ -267,8 +390,28 @@ irdma_dealloc_ucontext(struct ib_ucontext *context)
return;
}
#endif
#if __FreeBSD_version < 1400026
/**
 * irdma_dealloc_ucontext - deallocate the user context data structure
 * @context: user context created during alloc
 *
 * Legacy (pre-1400026) variant: returns int and frees the context
 * itself.  Teardown order matters: the doorbell mmap entry is removed
 * from the hash before the lock guarding that hash is destroyed and the
 * context memory is released.
 */
int
irdma_dealloc_ucontext(struct ib_ucontext *context)
{
	struct irdma_ucontext *ucontext = to_ucontext(context);

	irdma_user_mmap_entry_del_hash(ucontext->db_mmap_entry);
	spin_lock_destroy(&ucontext->mmap_tbl_lock);
	kfree(ucontext);

	return 0;
}
#endif
#define IRDMA_ALLOC_PD_MIN_RESP_LEN offsetofend(struct irdma_alloc_pd_resp, rsvd)
#if __FreeBSD_version >= 1400026
/**
* irdma_alloc_pd - allocate protection domain
* @pd: protection domain
@ -319,7 +462,67 @@ irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
return err;
}
#endif
#if __FreeBSD_version < 1400026
/**
 * irdma_alloc_pd - allocate protection domain
 * @ibdev: IB device
 * @context: user context
 * @udata: user data
 *
 * Legacy (pre-1400026) variant: allocates the irdma_pd itself and
 * returns a pointer (ERR_PTR on failure).  Reserves a PD id from the
 * driver resource bitmap first, then allocates the PD object; the error
 * paths unwind in the opposite order via the goto labels.
 */
struct ib_pd *
irdma_alloc_pd(struct ib_device *ibdev, struct ib_ucontext *context, struct ib_udata *udata)
{
	struct irdma_pd *iwpd;
	struct irdma_device *iwdev = to_iwdev(ibdev);
	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
	struct irdma_pci_f *rf = iwdev->rf;
	struct irdma_alloc_pd_resp uresp = {0};
	struct irdma_sc_pd *sc_pd;
	u32 pd_id = 0;
	int err;

	/* reserve a PD id before allocating the PD object */
	err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id,
			       &rf->next_pd);
	if (err)
		return ERR_PTR(err);

	iwpd = kzalloc(sizeof(*iwpd), GFP_KERNEL);
	if (!iwpd) {
		err = -ENOMEM;
		goto free_res;
	}

	sc_pd = &iwpd->sc_pd;
	if (udata) {
		/* user-mode PD: honor the ABI version negotiated at
		 * ucontext allocation and report the id back */
		struct irdma_ucontext *ucontext = to_ucontext(context);

		irdma_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
		uresp.pd_id = pd_id;
		if (ib_copy_to_udata(udata, &uresp,
				     min(sizeof(uresp), udata->outlen))) {
			err = -EFAULT;
			goto error;
		}
	} else {
		/* kernel-mode PD uses the driver's own ABI version */
		irdma_sc_pd_init(dev, sc_pd, pd_id, IRDMA_ABI_VER);
	}

	return &iwpd->ibpd;

error:
	kfree(iwpd);
free_res:
	irdma_free_rsrc(rf, rf->allocated_pds, pd_id);
	return ERR_PTR(err);
}
#endif
#if __FreeBSD_version >= 1400026
void
irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
@ -329,6 +532,21 @@ irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id);
}
#endif
#if __FreeBSD_version < 1400026
/**
 * irdma_dealloc_pd - deallocate protection domain
 * @ibpd: PD to free
 *
 * Legacy (pre-1400026) variant: returns the PD id to the driver
 * resource bitmap, then frees the irdma_pd object itself.
 */
int
irdma_dealloc_pd(struct ib_pd *ibpd)
{
	struct irdma_pd *iwpd = to_iwpd(ibpd);
	struct irdma_device *iwdev = to_iwdev(ibpd->device);

	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id);
	kfree(iwpd);

	return 0;
}
#endif
static void
irdma_fill_ah_info(struct vnet *vnet, struct irdma_ah_info *ah_info,
const struct ib_gid_attr *sgid_attr,
@ -416,6 +634,7 @@ irdma_create_ah_wait(struct irdma_pci_f *rf,
#define IRDMA_CREATE_AH_MIN_RESP_LEN offsetofend(struct irdma_create_ah_resp, rsvd)
#if __FreeBSD_version >= 1400026
/**
* irdma_create_ah - create address handle
* @ib_ah: ptr to AH
@ -539,6 +758,7 @@ irdma_create_ah(struct ib_ah *ib_ah,
return err;
}
#endif
void
irdma_ether_copy(u8 *dmac, struct ib_ah_attr *attr)
@ -546,19 +766,172 @@ irdma_ether_copy(u8 *dmac, struct ib_ah_attr *attr)
ether_addr_copy(dmac, attr->dmac);
}
#if __FreeBSD_version < 1400026
/**
 * irdma_create_ah_stub - placeholder AH create (operation unsupported)
 * @ibpd: protection domain (unused)
 * @attr: address handle attributes (unused)
 * @udata: user data (unused)
 */
struct ib_ah *
irdma_create_ah_stub(struct ib_pd *ibpd,
		     struct ib_ah_attr *attr,
		     struct ib_udata *udata)
{
	return ERR_PTR(-ENOSYS);
}
#else
/**
 * irdma_create_ah_stub - placeholder AH create (operation unsupported)
 * @ib_ah: address handle (unused)
 * @attr: address handle attributes (unused)
 * @flags: create flags (unused)
 * @udata: user data (unused)
 */
int
irdma_create_ah_stub(struct ib_ah *ib_ah,
		     struct ib_ah_attr *attr, u32 flags,
		     struct ib_udata *udata)
{
	return -ENOSYS;
}
#endif
#if __FreeBSD_version < 1400026
/**
 * irdma_destroy_ah_stub - placeholder AH destroy (operation unsupported)
 * @ibah: address handle (unused)
 */
int
irdma_destroy_ah_stub(struct ib_ah *ibah)
{
	return -ENOSYS;
}
#else
/**
 * irdma_destroy_ah_stub - placeholder AH destroy (no-op)
 * @ibah: address handle (unused)
 * @flags: destroy flags (unused)
 */
void
irdma_destroy_ah_stub(struct ib_ah *ibah, u32 flags)
{
}
#endif
#if __FreeBSD_version < 1400026
/**
* irdma_create_ah - create address handle
* @ibpd: ptr to pd
* @attr: address handle attributes
* @udata: user data
*
* returns a pointer to an address handle
*/
struct ib_ah *
irdma_create_ah(struct ib_pd *ibpd,
struct ib_ah_attr *attr,
struct ib_udata *udata)
{
struct irdma_pd *pd = to_iwpd(ibpd);
struct irdma_device *iwdev = to_iwdev(ibpd->device);
struct irdma_ah *ah;
union ib_gid sgid;
struct ib_gid_attr sgid_attr;
struct irdma_pci_f *rf = iwdev->rf;
struct irdma_sc_ah *sc_ah;
u32 ah_id = 0;
struct irdma_ah_info *ah_info;
struct irdma_create_ah_resp uresp;
union {
struct sockaddr saddr;
struct sockaddr_in saddr_in;
struct sockaddr_in6 saddr_in6;
} sgid_addr, dgid_addr;
int err;
u8 dmac[ETH_ALEN];
bool sleep = udata ? true : false;
if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN)
return ERR_PTR(-EINVAL);
err = irdma_alloc_rsrc(rf, rf->allocated_ahs,
rf->max_ah, &ah_id, &rf->next_ah);
if (err)
return ERR_PTR(err);
ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
if (!ah) {
irdma_free_rsrc(rf, rf->allocated_ahs, ah_id);
return ERR_PTR(-ENOMEM);
}
ah->pd = pd;
sc_ah = &ah->sc_ah;
sc_ah->ah_info.ah_idx = ah_id;
sc_ah->ah_info.vsi = &iwdev->vsi;
irdma_sc_init_ah(&rf->sc_dev, sc_ah);
ah->sgid_index = attr->grh.sgid_index;
memcpy(&ah->dgid, &attr->grh.dgid, sizeof(ah->dgid));
rcu_read_lock();
err = ib_get_cached_gid(&iwdev->ibdev, attr->port_num,
attr->grh.sgid_index, &sgid, &sgid_attr);
rcu_read_unlock();
if (err) {
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
"GID lookup at idx=%d with port=%d failed\n",
attr->grh.sgid_index, attr->port_num);
err = -EINVAL;
goto err_gid_l2;
}
rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid);
rdma_gid2ip((struct sockaddr *)&dgid_addr, &attr->grh.dgid);
ah->av.attrs = *attr;
ah->av.net_type = kc_rdma_gid_attr_network_type(sgid_attr,
sgid_attr.gid_type,
&sgid);
if (sgid_attr.ndev)
dev_put(sgid_attr.ndev);
ah->av.sgid_addr.saddr = sgid_addr.saddr;
ah->av.dgid_addr.saddr = dgid_addr.saddr;
ah_info = &sc_ah->ah_info;
ah_info->ah_idx = ah_id;
ah_info->pd_idx = pd->sc_pd.pd_id;
ether_addr_copy(ah_info->mac_addr, IF_LLADDR(iwdev->netdev));
if (attr->ah_flags & IB_AH_GRH) {
ah_info->flow_label = attr->grh.flow_label;
ah_info->hop_ttl = attr->grh.hop_limit;
ah_info->tc_tos = attr->grh.traffic_class;
}
if (udata)
ib_resolve_eth_dmac(ibpd->device, attr);
irdma_ether_copy(dmac, attr);
irdma_fill_ah_info(iwdev->netdev->if_vnet, ah_info, &sgid_attr, &sgid_addr.saddr, &dgid_addr.saddr,
dmac, ah->av.net_type);
err = irdma_create_ah_vlan_tag(iwdev, ah_info, &sgid_attr, dmac);
if (err)
goto err_gid_l2;
err = irdma_ah_cqp_op(iwdev->rf, sc_ah, IRDMA_OP_AH_CREATE,
sleep, irdma_gsi_ud_qp_ah_cb, sc_ah);
if (err) {
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
"CQP-OP Create AH fail");
goto err_gid_l2;
}
err = irdma_create_ah_wait(rf, sc_ah, sleep);
if (err) {
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
"CQP create AH timed out");
goto err_gid_l2;
}
if (udata) {
uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
err = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (err) {
irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah,
IRDMA_OP_AH_DESTROY, false, NULL, ah);
goto err_gid_l2;
}
}
return &ah->ibah;
err_gid_l2:
kfree(ah);
irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah_id);
return ERR_PTR(err);
}
#endif
/**
* irdma_free_qp_rsrc - free up memory resources for qp
@ -797,8 +1170,13 @@ irdma_create_qp(struct ib_pd *ibpd,
* @ibqp: qp's ib pointer also to get to device's qp address
* @udata: user data
*/
#if __FreeBSD_version >= 1400026
int
irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
#else
int
irdma_destroy_qp(struct ib_qp *ibqp)
#endif
{
struct irdma_qp *iwqp = to_iwqp(ibqp);
struct irdma_device *iwdev = iwqp->iwdev;
@ -835,17 +1213,31 @@ irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
* @attr: attributes for cq
* @udata: user data
*/
#if __FreeBSD_version >= 1400026
int
irdma_create_cq(struct ib_cq *ibcq,
const struct ib_cq_init_attr *attr,
struct ib_udata *udata)
#else
struct ib_cq *
irdma_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata)
#endif
{
#define IRDMA_CREATE_CQ_MIN_REQ_LEN offsetofend(struct irdma_create_cq_req, user_cq_buf)
#define IRDMA_CREATE_CQ_MIN_RESP_LEN offsetofend(struct irdma_create_cq_resp, cq_size)
#if __FreeBSD_version >= 1400026
struct ib_device *ibdev = ibcq->device;
#endif
struct irdma_device *iwdev = to_iwdev(ibdev);
struct irdma_pci_f *rf = iwdev->rf;
#if __FreeBSD_version >= 1400026
struct irdma_cq *iwcq = to_iwcq(ibcq);
#else
struct irdma_cq *iwcq;
#endif
u32 cq_num = 0;
struct irdma_sc_cq *cq;
struct irdma_sc_dev *dev = &rf->sc_dev;
@ -859,6 +1251,7 @@ irdma_create_cq(struct ib_cq *ibcq,
int entries = attr->cqe;
bool cqe_64byte_ena;
#if __FreeBSD_version >= 1400026
err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
if (err_code)
return err_code;
@ -866,10 +1259,27 @@ irdma_create_cq(struct ib_cq *ibcq,
if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN ||
udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN))
return -EINVAL;
#else
err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
if (err_code)
return ERR_PTR(err_code);
if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN ||
udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN))
return ERR_PTR(-EINVAL);
iwcq = kzalloc(sizeof(*iwcq), GFP_KERNEL);
if (!iwcq)
return ERR_PTR(-ENOMEM);
#endif
err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num,
&rf->next_cq);
if (err_code)
#if __FreeBSD_version >= 1400026
return err_code;
#else
goto error;
#endif
cq = &iwcq->sc_cq;
cq->back_cq = iwcq;
atomic_set(&iwcq->refcnt, 1);
@ -898,7 +1308,11 @@ irdma_create_cq(struct ib_cq *ibcq,
struct irdma_cq_mr *cqmr_shadow;
iwcq->user_mode = true;
#if __FreeBSD_version >= 1400026
ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
#else
ucontext = to_ucontext(context);
#endif
if (ib_copy_from_udata(&req, udata,
min(sizeof(req), udata->inlen))) {
@ -1030,12 +1444,22 @@ irdma_create_cq(struct ib_cq *ibcq,
rf->cq_table[cq_num] = iwcq;
init_completion(&iwcq->free_cq);
#if __FreeBSD_version >= 1400026
return 0;
#else
return &iwcq->ibcq;
#endif
cq_destroy:
irdma_cq_wq_destroy(rf, cq);
cq_free_rsrc:
irdma_cq_free_rsrc(rf, iwcq);
#if __FreeBSD_version >= 1400026
return err_code;
#else
error:
kfree(iwcq);
return ERR_PTR(err_code);
#endif
}
/**
@ -1085,6 +1509,7 @@ irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl,
* @ah_flags: destroy flags
*/
#if __FreeBSD_version >= 1400026
void
irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags)
{
@ -1097,9 +1522,33 @@ irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags)
irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs,
ah->sc_ah.ah_info.ah_idx);
}
#endif
#if __FreeBSD_version < 1400026
/**
 * irdma_destroy_ah - destroy address handle
 * @ibah: AH to destroy
 *
 * Legacy (pre-1400026) variant: issues IRDMA_OP_AH_DESTROY through the
 * CQP (non-sleeping), returns the AH id to the resource bitmap, then
 * frees the irdma_ah object itself.
 */
int
irdma_destroy_ah(struct ib_ah *ibah)
{
	struct irdma_device *iwdev = to_iwdev(ibah->device);
	struct irdma_ah *ah = to_iwah(ibah);

	irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY,
			false, NULL, ah);

	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs,
			ah->sc_ah.ah_info.ah_idx);

	kfree(ah);

	return 0;
}
#endif
#if __FreeBSD_version >= 1400026
int
irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
#else
int
irdma_dereg_mr(struct ib_mr *ib_mr)
#endif
{
struct irdma_mr *iwmr = to_iwmr(ib_mr);
struct irdma_device *iwdev = to_iwdev(ib_mr->device);
@ -1109,10 +1558,15 @@ irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) {
if (iwmr->region) {
struct irdma_ucontext *ucontext;
#if __FreeBSD_version >= 1400026
ucontext = rdma_udata_to_drv_context(udata,
struct irdma_ucontext,
ibucontext);
ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
#else
struct ib_pd *ibpd = ib_mr->pd;
ucontext = to_ucontext(ibpd->uobject->context);
#endif
irdma_del_memlist(iwmr, ucontext);
}
goto done;
@ -1219,6 +1673,7 @@ kc_irdma_set_roce_cm_info(struct irdma_qp *iwqp, struct ib_qp_attr *attr,
return 0;
}
#if __FreeBSD_version >= 1400026
/**
* irdma_destroy_cq - destroy cq
* @ib_cq: cq pointer
@ -1253,6 +1708,46 @@ irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
irdma_cq_free_rsrc(iwdev->rf, iwcq);
}
#endif
#if __FreeBSD_version < 1400026
/**
 * irdma_destroy_cq - destroy cq
 * @ib_cq: cq pointer
 *
 * Legacy (pre-1400026) variant: returns int and frees the irdma_cq
 * itself.  Flushes pending completion/resize work under the CQ lock,
 * drops the reference and waits for all users to finish, destroys the
 * hardware WQ, scrubs leftover CEQEs under the CEQ lock, then releases
 * resources and the object.
 */
int
irdma_destroy_cq(struct ib_cq *ib_cq)
{
	struct irdma_device *iwdev = to_iwdev(ib_cq->device);
	struct irdma_cq *iwcq = to_iwcq(ib_cq);
	struct irdma_sc_cq *cq = &iwcq->sc_cq;
	struct irdma_sc_dev *dev = cq->dev;
	struct irdma_sc_ceq *ceq = dev->ceq[cq->ceq_id];
	struct irdma_ceq *iwceq = container_of(ceq, struct irdma_ceq, sc_ceq);
	unsigned long flags;

	/* drain software-generated completions and pending resizes */
	spin_lock_irqsave(&iwcq->lock, flags);
	if (!list_empty(&iwcq->cmpl_generated))
		irdma_remove_cmpls_list(iwcq);
	if (!list_empty(&iwcq->resize_list))
		irdma_process_resize_list(iwcq, iwdev, NULL);
	spin_unlock_irqrestore(&iwcq->lock, flags);

	/* wait until the last reference is gone before tearing down */
	irdma_cq_rem_ref(ib_cq);
	wait_for_completion(&iwcq->free_cq);

	irdma_cq_wq_destroy(iwdev->rf, cq);

	/* clean out CEQ entries still pointing at this CQ */
	spin_lock_irqsave(&iwceq->ce_lock, flags);
	irdma_sc_cleanup_ceqes(cq, ceq);
	spin_unlock_irqrestore(&iwceq->ce_lock, flags);

	irdma_cq_free_rsrc(iwdev->rf, iwcq);
	kfree(iwcq);

	return 0;
}
#endif
/**
* irdma_alloc_mw - Allocate memory window
* @pd: Protection domain
@ -1316,6 +1811,113 @@ kc_set_loc_seq_num_mss(struct irdma_cm_node *cm_node)
(cm_node->iwdev->vsi.mtu - IRDMA_MTU_TO_MSS_IPV6);
}
#if __FreeBSD_version < 1400026
/* Per-mapping bookkeeping so user VMAs can be found and zapped when the
 * ucontext is disassociated (pre-1400026 compat path). */
struct irdma_vma_data {
	struct list_head list;		/* entry in irdma_ucontext vma_list */
	struct vm_area_struct *vma;	/* the user VMA being tracked */
	struct mutex *vma_list_mutex; /* protect the vma_list */
};
/**
 * irdma_vma_open -
 * @vma: User VMA
 *
 * NOTE(review): clearing vm_ops here presumably keeps duplicated VMAs
 * (fork/split) from invoking irdma_vma_close on bookkeeping they do not
 * own — confirm against the vm_operations_struct contract.
 */
static void
irdma_vma_open(struct vm_area_struct *vma)
{
	vma->vm_ops = NULL;
}
/**
 * irdma_vma_close - Remove vma data from vma list
 * @vma: User VMA
 *
 * Detaches the tracking entry from the VMA, unlinks it from the owning
 * ucontext's list under the list mutex, and frees it.
 */
static void
irdma_vma_close(struct vm_area_struct *vma)
{
	struct irdma_vma_data *vma_data;

	vma_data = vma->vm_private_data;
	vma->vm_private_data = NULL;
	vma_data->vma = NULL;
	mutex_lock(vma_data->vma_list_mutex);
	list_del(&vma_data->list);
	mutex_unlock(vma_data->vma_list_mutex);
	kfree(vma_data);
}
/* VMA callbacks installed on mappings tracked in the ucontext vma_list */
static const struct vm_operations_struct irdma_vm_ops = {
	.open = irdma_vma_open,
	.close = irdma_vma_close
};
/**
 * irdma_set_vma_data - Save vma data in context list
 * @vma: User VMA
 * @context: ib user context
 *
 * Allocates a tracking entry, hooks irdma_vm_ops onto the VMA, and adds
 * the entry to the ucontext's vma_list under the list mutex.
 *
 * Returns: 0 on success, -ENOMEM if the entry cannot be allocated.
 */
static int
irdma_set_vma_data(struct vm_area_struct *vma,
		   struct irdma_ucontext *context)
{
	struct list_head *vma_head = &context->vma_list;
	struct irdma_vma_data *vma_entry;

	vma_entry = kzalloc(sizeof(*vma_entry), GFP_KERNEL);
	if (!vma_entry)
		return -ENOMEM;

	/* wire the VMA to its tracking entry and our open/close ops */
	vma->vm_private_data = vma_entry;
	vma->vm_ops = &irdma_vm_ops;

	vma_entry->vma = vma;
	vma_entry->vma_list_mutex = &context->vma_list_mutex;

	mutex_lock(&context->vma_list_mutex);
	list_add(&vma_entry->list, vma_head);
	mutex_unlock(&context->vma_list_mutex);

	return 0;
}
/**
 * irdma_disassociate_ucontext - Disassociate user context
 * @context: ib user context
 *
 * Legacy (pre-1400026) variant: walks every VMA tracked for this
 * ucontext, zaps its page-table entries so userspace faults instead of
 * touching device memory, clears vm_ops (so irdma_vma_close will not
 * run on the freed entry), and frees the tracking entries.
 */
void
irdma_disassociate_ucontext(struct ib_ucontext *context)
{
	struct irdma_ucontext *ucontext = to_ucontext(context);

	struct irdma_vma_data *vma_data, *n;
	struct vm_area_struct *vma;

	mutex_lock(&ucontext->vma_list_mutex);
	list_for_each_entry_safe(vma_data, n, &ucontext->vma_list, list) {
		vma = vma_data->vma;
		zap_vma_ptes(vma, vma->vm_start, PAGE_SIZE);

		vma->vm_ops = NULL;
		list_del(&vma_data->list);
		kfree(vma_data);
	}
	mutex_unlock(&ucontext->vma_list_mutex);
}
/**
 * rdma_user_mmap_io - compat shim for the post-1400026 OFED helper
 * @context: ib user context owning the mapping
 * @vma: user VMA to populate
 * @pfn: page frame number of the I/O region
 * @size: size of the mapping in bytes
 * @prot: page protection for the mapping
 *
 * Remaps the I/O range into the VMA and registers the VMA in the
 * ucontext's tracking list so it can be zapped on disassociate.
 *
 * Returns: 0 on success, -EAGAIN if the remap fails, -ENOMEM if the
 * tracking entry cannot be allocated.
 */
int
rdma_user_mmap_io(struct ib_ucontext *context, struct vm_area_struct *vma,
		  unsigned long pfn, unsigned long size, pgprot_t prot)
{
	if (io_remap_pfn_range(vma,
			       vma->vm_start,
			       pfn,
			       size,
			       prot))
		return -EAGAIN;

	return irdma_set_vma_data(vma, to_ucontext(context));
}
#else
/**
* irdma_disassociate_ucontext - Disassociate user context
* @context: ib user context
@ -1324,6 +1926,7 @@ void
irdma_disassociate_ucontext(struct ib_ucontext *context)
{
}
#endif
struct ib_device *
ib_device_get_by_netdev(struct ifnet *netdev, int driver_id)

View File

@ -44,7 +44,9 @@
#include <netinet/if_ether.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#if __FreeBSD_version >= 1400000
#include <rdma/uverbs_ioctl.h>
#endif
#include <rdma/ib_smi.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>
@ -408,6 +410,7 @@ static inline struct irdma_ucontext *to_ucontext(struct ib_ucontext *ibucontext)
return container_of(ibucontext, struct irdma_ucontext, ibucontext);
}
#if __FreeBSD_version >= 1400026
static inline struct irdma_user_mmap_entry *
to_irdma_mmap_entry(struct rdma_user_mmap_entry *rdma_entry)
{
@ -415,6 +418,7 @@ to_irdma_mmap_entry(struct rdma_user_mmap_entry *rdma_entry)
rdma_entry);
}
#endif
static inline struct irdma_pd *to_iwpd(struct ib_pd *ibpd)
{
return container_of(ibpd, struct irdma_pd, ibpd);

View File

@ -107,10 +107,16 @@ irdma_mmap_legacy(struct irdma_ucontext *ucontext,
pfn = ((uintptr_t)ucontext->iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET] +
pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
#if __FreeBSD_version >= 1400026
return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE,
pgprot_noncached(vma->vm_page_prot), NULL);
#else
return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE,
pgprot_noncached(vma->vm_page_prot));
#endif
}
#if __FreeBSD_version >= 1400026
static void
irdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
@ -143,6 +149,103 @@ irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset,
return &entry->rdma_entry;
}
#else
/*
 * find_key_in_mmap_tbl - check whether a pgoff key is already in use.
 * Called with mmap_tbl_lock held (see irdma_user_mmap_entry_add_hash).
 */
static inline bool
find_key_in_mmap_tbl(struct irdma_ucontext *ucontext, u64 key)
{
	struct irdma_user_mmap_entry *entry;

	HASH_FOR_EACH_POSSIBLE(ucontext->mmap_hash_tbl, entry, hlist, key) {
		if (entry->pgoff_key == key)
			return true;
	}

	return false;
}
/*
 * irdma_user_mmap_entry_add_hash - register a BAR offset for user mmap.
 * Generates a random page-offset key (retrying up to 10 times on hash
 * collision), inserts the entry into the ucontext mmap hash, and
 * returns the byte offset userspace must pass to mmap(2) via
 * *mmap_offset.  Returns NULL on allocation failure or if no unique key
 * could be found.
 */
struct irdma_user_mmap_entry *
irdma_user_mmap_entry_add_hash(struct irdma_ucontext *ucontext, u64 bar_offset,
			       enum irdma_mmap_flag mmap_flag, u64 *mmap_offset)
{
	struct irdma_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	unsigned long flags;
	int retry_cnt = 0;

	if (!entry)
		return NULL;

	entry->bar_offset = bar_offset;
	entry->mmap_flag = mmap_flag;
	entry->ucontext = ucontext;
	do {
		get_random_bytes(&entry->pgoff_key, sizeof(entry->pgoff_key));

		/* The key is a page offset */
		entry->pgoff_key >>= PAGE_SHIFT;

		/* In the event of a collision in the hash table, retry a new key */
		spin_lock_irqsave(&ucontext->mmap_tbl_lock, flags);
		if (!find_key_in_mmap_tbl(ucontext, entry->pgoff_key)) {
			HASH_ADD(ucontext->mmap_hash_tbl, &entry->hlist, entry->pgoff_key);
			spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags);
			goto hash_add_done;
		}
		spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags);
	} while (retry_cnt++ < 10);

	irdma_debug(iwdev_to_idev(ucontext->iwdev), IRDMA_DEBUG_VERBS, "mmap table add failed: Cannot find a unique key\n");
	kfree(entry);

	return NULL;

hash_add_done:
	/* libc mmap uses a byte offset */
	*mmap_offset = entry->pgoff_key << PAGE_SHIFT;

	return entry;
}
/*
 * irdma_find_user_mmap_entry - look up the entry matching a VMA's pgoff.
 * Only exact one-page mappings are accepted.  Returns NULL when the
 * size is wrong or no entry with that key exists.
 */
static struct irdma_user_mmap_entry *
irdma_find_user_mmap_entry(struct irdma_ucontext *ucontext,
			   struct vm_area_struct *vma)
{
	struct irdma_user_mmap_entry *entry;
	unsigned long flags;

	/* all entries registered here describe single-page BAR windows */
	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return NULL;

	spin_lock_irqsave(&ucontext->mmap_tbl_lock, flags);
	HASH_FOR_EACH_POSSIBLE(ucontext->mmap_hash_tbl, entry, hlist, vma->vm_pgoff) {
		if (entry->pgoff_key == vma->vm_pgoff) {
			spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags);
			return entry;
		}
	}

	spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags);

	return NULL;
}
/*
 * irdma_user_mmap_entry_del_hash - remove and free an mmap hash entry.
 * Tolerates a NULL entry so callers can pass never-initialized slots
 * on error paths.
 */
void
irdma_user_mmap_entry_del_hash(struct irdma_user_mmap_entry *entry)
{
	struct irdma_ucontext *ucontext;
	unsigned long flags;

	if (!entry)
		return;

	ucontext = entry->ucontext;

	spin_lock_irqsave(&ucontext->mmap_tbl_lock, flags);
	HASH_DEL(ucontext->mmap_hash_tbl, &entry->hlist);
	spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags);

	kfree(entry);
}
#endif
/**
* irdma_mmap - user memory map
* @context: context created during alloc
@ -151,7 +254,9 @@ irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset,
static int
irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
#if __FreeBSD_version >= 1400026
struct rdma_user_mmap_entry *rdma_entry;
#endif
struct irdma_user_mmap_entry *entry;
struct irdma_ucontext *ucontext;
u64 pfn;
@ -163,6 +268,7 @@ irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
if (ucontext->legacy_mode)
return irdma_mmap_legacy(ucontext, vma);
#if __FreeBSD_version >= 1400026
rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
if (!rdma_entry) {
irdma_debug(iwdev_to_idev(ucontext->iwdev), IRDMA_DEBUG_VERBS,
@ -172,6 +278,15 @@ irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
}
entry = to_irdma_mmap_entry(rdma_entry);
#else
entry = irdma_find_user_mmap_entry(ucontext, vma);
if (!entry) {
irdma_debug(iwdev_to_idev(ucontext->iwdev), IRDMA_DEBUG_VERBS,
"pgoff[0x%lx] does not have valid entry\n",
vma->vm_pgoff);
return -EINVAL;
}
#endif
irdma_debug(iwdev_to_idev(ucontext->iwdev), IRDMA_DEBUG_VERBS,
"bar_offset [0x%lx] mmap_flag [%d]\n", entry->bar_offset,
entry->mmap_flag);
@ -181,14 +296,24 @@ irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
switch (entry->mmap_flag) {
case IRDMA_MMAP_IO_NC:
#if __FreeBSD_version >= 1400026
ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
pgprot_noncached(vma->vm_page_prot),
rdma_entry);
#else
ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
pgprot_noncached(vma->vm_page_prot));
#endif
break;
case IRDMA_MMAP_IO_WC:
#if __FreeBSD_version >= 1400026
ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
pgprot_writecombine(vma->vm_page_prot),
rdma_entry);
#else
ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
pgprot_writecombine(vma->vm_page_prot));
#endif
break;
default:
ret = -EINVAL;
@ -198,7 +323,9 @@ irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
irdma_debug(iwdev_to_idev(ucontext->iwdev), IRDMA_DEBUG_VERBS,
"bar_offset [0x%lx] mmap_flag[%d] err[%d]\n",
entry->bar_offset, entry->mmap_flag, ret);
#if __FreeBSD_version >= 1400026
rdma_user_mmap_entry_put(rdma_entry);
#endif
return ret;
}
@ -300,11 +427,19 @@ void
irdma_remove_push_mmap_entries(struct irdma_qp *iwqp)
{
/* Drop the push doorbell mapping, if one was set up for this QP. */
if (iwqp->push_db_mmap_entry) {
#if __FreeBSD_version >= 1400026
/* >= 1400026: entry is managed by the ofed rdma_user_mmap_entry API */
rdma_user_mmap_entry_remove(iwqp->push_db_mmap_entry);
#else
/* legacy path: unlink from the ucontext hash table and free */
irdma_user_mmap_entry_del_hash(iwqp->push_db_mmap_entry);
#endif
/* clear the pointer so a repeated call is a harmless no-op */
iwqp->push_db_mmap_entry = NULL;
}
/* Drop the push WQE page mapping, if one was set up. */
if (iwqp->push_wqe_mmap_entry) {
#if __FreeBSD_version >= 1400026
rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
#else
irdma_user_mmap_entry_del_hash(iwqp->push_wqe_mmap_entry);
#endif
iwqp->push_wqe_mmap_entry = NULL;
}
}
@ -322,19 +457,34 @@ irdma_setup_push_mmap_entries(struct irdma_ucontext *ucontext,
bar_off = irdma_compute_push_wqe_offset(iwdev, iwqp->sc_qp.push_idx);
#if __FreeBSD_version >= 1400026
iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
bar_off, IRDMA_MMAP_IO_WC,
push_wqe_mmap_key);
#else
iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_add_hash(ucontext, bar_off,
IRDMA_MMAP_IO_WC, push_wqe_mmap_key);
#endif
if (!iwqp->push_wqe_mmap_entry)
return -ENOMEM;
/* push doorbell page */
bar_off += IRDMA_HW_PAGE_SIZE;
#if __FreeBSD_version >= 1400026
iwqp->push_db_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
bar_off, IRDMA_MMAP_IO_NC,
push_db_mmap_key);
#else
iwqp->push_db_mmap_entry = irdma_user_mmap_entry_add_hash(ucontext, bar_off,
IRDMA_MMAP_IO_NC, push_db_mmap_key);
#endif
if (!iwqp->push_db_mmap_entry) {
#if __FreeBSD_version >= 1400026
rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
#else
irdma_user_mmap_entry_del_hash(iwqp->push_wqe_mmap_entry);
#endif
return -ENOMEM;
}
@ -398,8 +548,11 @@ irdma_setup_umode_qp(struct ib_udata *udata,
iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
iwqp->user_mode = 1;
if (req.user_wqe_bufs) {
#if __FreeBSD_version >= 1400026
struct irdma_ucontext *ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
#else
struct irdma_ucontext *ucontext = to_ucontext(iwqp->iwpd->ibpd.uobject->context);
#endif
info->qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
@ -1089,8 +1242,11 @@ irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (udata && udata->outlen && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
struct irdma_ucontext *ucontext;
ucontext = rdma_udata_to_drv_context(udata,
struct irdma_ucontext, ibucontext);
#if __FreeBSD_version >= 1400026
ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
#else
ucontext = to_ucontext(ibqp->uobject->context);
#endif
if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
!iwqp->push_wqe_mmap_entry &&
!irdma_setup_push_mmap_entries(ucontext, iwqp,
@ -1336,8 +1492,11 @@ irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
struct irdma_ucontext *ucontext;
ucontext = rdma_udata_to_drv_context(udata,
struct irdma_ucontext, ibucontext);
#if __FreeBSD_version >= 1400026
ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
#else
ucontext = to_ucontext(ibqp->uobject->context);
#endif
if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
!iwqp->push_wqe_mmap_entry &&
!irdma_setup_push_mmap_entries(ucontext, iwqp,
@ -1476,8 +1635,11 @@ irdma_resize_cq(struct ib_cq *ibcq, int entries,
if (udata) {
struct irdma_resize_cq_req req = {};
struct irdma_ucontext *ucontext =
rdma_udata_to_drv_context(udata, struct irdma_ucontext,
ibucontext);
#if __FreeBSD_version >= 1400026
rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
#else
to_ucontext(ibcq->uobject->context);
#endif
/* CQ resize not supported with legacy GEN_1 libi40iw */
if (ucontext->legacy_mode)
@ -2129,8 +2291,11 @@ irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
if (err)
goto error;
ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
ibucontext);
#if __FreeBSD_version >= 1400026
ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
#else
ucontext = to_ucontext(pd->uobject->context);
#endif
spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
iwpbl->on_list = true;
@ -2150,8 +2315,11 @@ irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
if (err)
goto error;
ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
ibucontext);
#if __FreeBSD_version >= 1400026
ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
#else
ucontext = to_ucontext(pd->uobject->context);
#endif
spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
iwpbl->on_list = true;
@ -3363,6 +3531,7 @@ irdma_set_device_ops(struct ib_device *ibdev)
{
struct ib_device *dev_ops = ibdev;
#if __FreeBSD_version >= 1400000
dev_ops->ops.driver_id = RDMA_DRIVER_I40IW;
dev_ops->ops.size_ib_ah = IRDMA_SET_RDMA_OBJ_SIZE(ib_ah, irdma_ah, ibah);
dev_ops->ops.size_ib_cq = IRDMA_SET_RDMA_OBJ_SIZE(ib_cq, irdma_cq, ibcq);
@ -3371,6 +3540,7 @@ irdma_set_device_ops(struct ib_device *ibdev)
irdma_ucontext,
ibucontext);
#endif /* __FreeBSD_version >= 1400000 */
dev_ops->alloc_hw_stats = irdma_alloc_hw_stats;
dev_ops->alloc_mr = irdma_alloc_mr;
dev_ops->alloc_mw = irdma_alloc_mw;
@ -3391,7 +3561,9 @@ irdma_set_device_ops(struct ib_device *ibdev)
dev_ops->get_netdev = irdma_get_netdev;
dev_ops->map_mr_sg = irdma_map_mr_sg;
dev_ops->mmap = irdma_mmap;
#if __FreeBSD_version >= 1400026
dev_ops->mmap_free = irdma_mmap_free;
#endif
dev_ops->poll_cq = irdma_poll_cq;
dev_ops->post_recv = irdma_post_recv;
dev_ops->post_send = irdma_post_send;

View File

@ -47,7 +47,13 @@
struct irdma_ucontext {
struct ib_ucontext ibucontext;
struct irdma_device *iwdev;
#if __FreeBSD_version >= 1400026
/* doorbell mmap entry, managed by the ofed rdma_user_mmap_entry API */
struct rdma_user_mmap_entry *db_mmap_entry;
#else
/* pre-1400026: driver tracks mmap entries in a local hash table */
struct irdma_user_mmap_entry *db_mmap_entry;
DECLARE_HASHTABLE(mmap_hash_tbl, 6);
spinlock_t mmap_tbl_lock; /* protect mmap hash table entries */
#endif
struct list_head cq_reg_mem_list;
spinlock_t cq_reg_mem_list_lock; /* protect CQ memory list */
struct list_head qp_reg_mem_list;
@ -194,8 +200,13 @@ struct irdma_qp {
struct irdma_cq *iwscq;
struct irdma_cq *iwrcq;
struct irdma_pd *iwpd;
/*
 * Push WQE page and push doorbell mmap entries; the entry type depends
 * on whether the ofed rdma_user_mmap_entry framework is available.
 * Removed via irdma_remove_push_mmap_entries().
 */
#if __FreeBSD_version >= 1400026
struct rdma_user_mmap_entry *push_wqe_mmap_entry;
struct rdma_user_mmap_entry *push_db_mmap_entry;
#else
struct irdma_user_mmap_entry *push_wqe_mmap_entry;
struct irdma_user_mmap_entry *push_db_mmap_entry;
#endif
struct irdma_qp_host_ctx_info ctx_info;
union {
struct irdma_iwarp_offload_info iwarp_info;
@ -256,7 +267,13 @@ enum irdma_mmap_flag {
};
/*
 * Per-mapping bookkeeping for user mmap()s of device BAR regions.
 * On FreeBSD >= 1400026 it embeds the ofed rdma_user_mmap_entry and is
 * managed by that framework; on older branches the driver keeps its own
 * per-ucontext hash table keyed by pgoff_key.
 */
struct irdma_user_mmap_entry {
#if __FreeBSD_version >= 1400026
struct rdma_user_mmap_entry rdma_entry; /* embedded ofed entry */
#else
struct irdma_ucontext *ucontext; /* owning context, needed for hash removal */
struct hlist_node hlist; /* link in ucontext->mmap_hash_tbl */
u64 pgoff_key; /* Used to compute offset (in bytes) returned to user libc's mmap */
#endif
u64 bar_offset; /* offset of the mapped region within the PCI BAR */
u8 mmap_flag; /* caching mode: IRDMA_MMAP_IO_NC or IRDMA_MMAP_IO_WC */
};
@ -300,9 +317,16 @@ static inline void irdma_mcast_mac_v6(u32 *ip_addr, u8 *mac)
ether_addr_copy(mac, mac6);
}
/*
 * mmap-entry helpers: the >= 1400026 path uses the ofed
 * rdma_user_mmap_entry framework; older branches use a driver-local
 * hash table (add_hash/del_hash) instead.
 */
#if __FreeBSD_version >= 1400026
struct rdma_user_mmap_entry*
irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset,
enum irdma_mmap_flag mmap_flag, u64 *mmap_offset);
#else
struct irdma_user_mmap_entry *
irdma_user_mmap_entry_add_hash(struct irdma_ucontext *ucontext, u64 bar_offset,
enum irdma_mmap_flag mmap_flag, u64 *mmap_offset);
void irdma_user_mmap_entry_del_hash(struct irdma_user_mmap_entry *entry);
#endif
int irdma_ib_register_device(struct irdma_device *iwdev);
void irdma_ib_unregister_device(struct irdma_device *iwdev);
void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event);

View File

@ -101,6 +101,9 @@
#define __aligned_u64 uint64_t __aligned(8)
#define VLAN_PRIO_SHIFT 13
/*
 * Pre-14 ofed headers lack the extended modify-QP command ID; alias it
 * to the base command so the driver builds on the stable/13 branch.
 */
#if __FreeBSD_version < 1400000
#define IB_USER_VERBS_EX_CMD_MODIFY_QP IB_USER_VERBS_CMD_MODIFY_QP
#endif
/*
* debug definition section