irdma(4): Upgrade the driver to 1.1.11-k

Summary of changes:
- postpone mtu size assignment during load to avoid race condition
- refactor some of the debug prints
- add request reset handler
- refactor flush scheduler to increase efficiency and avoid racing
- put correct vlan_tag for UD traffic with PFC
- suspend QP before going to ERROR state to avoid CQP timeout
- fix arithmetic error in irdma_debug_buf
- allow debug flag to be settable during driver load
- introduce meaningful default values for DCQCN algorithm
- interrupt naming convention improvements
- skip unsignaled completions in poll_cmpl

Signed-off-by: Bartosz Sobczak <bartosz.sobczak@intel.com>
Signed-off-by: Eric Joyner <erj@FreeBSD.org>

Reviewed by:	hselasky@
MFC after:	1 week
Sponsored by:	Intel Corporation
Differential Revision:	https://reviews.freebsd.org/D39173
Authored by Bartosz Sobczak on 2023-03-28 14:15:15 -07:00; committed by Eric Joyner
parent b674303707
commit 35105900c6
GPG Key ID: 96F0C6FD61E05DE3
37 changed files with 994 additions and 641 deletions

View File

@ -115,6 +115,7 @@ struct irdma_get_context {
__u32 rsvd32;
__u8 userspace_ver;
__u8 rsvd8[3];
__aligned_u64 comp_mask;
};
struct irdma_get_context_resp {
@ -136,6 +137,7 @@ struct irdma_get_context_resp {
__u16 max_hw_sq_chunk;
__u8 hw_rev;
__u8 rsvd2;
__aligned_u64 comp_mask;
};
struct irdma_ureg_mr {

View File

@ -41,8 +41,8 @@ enum i40iw_device_caps_const {
I40IW_MAX_SGE_RD = 1,
I40IW_MAX_PUSH_PAGE_COUNT = 0,
I40IW_MAX_INLINE_DATA_SIZE = 48,
I40IW_MAX_IRD_SIZE = 63,
I40IW_MAX_ORD_SIZE = 127,
I40IW_MAX_IRD_SIZE = 64,
I40IW_MAX_ORD_SIZE = 64,
I40IW_MAX_WQ_ENTRIES = 2048,
I40IW_MAX_WQE_SIZE_RQ = 128,
I40IW_MAX_PDS = 32768,

View File

@ -2,7 +2,7 @@
* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB)
*
*
* Copyright (c) 2006 - 2021 Intel Corporation. All rights reserved.
* Copyright (c) 2006 - 2022 Intel Corporation. All rights reserved.
* Copyright (c) 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
@ -53,10 +53,15 @@ enum irdma_memreg_type {
IRDMA_MEMREG_TYPE_CQ = 2,
};
enum {
IRDMA_ALLOC_UCTX_USE_RAW_ATTR = 1 << 0,
};
struct irdma_alloc_ucontext_req {
__u32 rsvd32;
__u8 userspace_ver;
__u8 rsvd8[3];
__aligned_u64 comp_mask;
};
struct irdma_alloc_ucontext_resp {
@ -77,6 +82,7 @@ struct irdma_alloc_ucontext_resp {
__u16 max_hw_sq_chunk;
__u8 hw_rev;
__u8 rsvd2;
__aligned_u64 comp_mask;
};
struct irdma_alloc_pd_resp {

View File

@ -75,6 +75,7 @@
#define IRDMA_CQE_QTYPE_RQ 0
#define IRDMA_CQE_QTYPE_SQ 1
#define IRDMA_QP_SW_MIN_WQSIZE 8 /* in WRs*/
#define IRDMA_QP_WQE_MIN_SIZE 32
#define IRDMA_QP_WQE_MAX_SIZE 256
#define IRDMA_QP_WQE_MIN_QUANTA 1
@ -304,6 +305,17 @@
#define IRDMAPFINT_OICR_PE_PUSH_M BIT(27)
#define IRDMAPFINT_OICR_PE_CRITERR_M BIT(28)
#define IRDMA_GET_RING_OFFSET(_ring, _i) \
( \
((_ring).head + (_i)) % (_ring).size \
)
#define IRDMA_GET_CQ_ELEM_AT_OFFSET(_cq, _i, _cqe) \
{ \
register __u32 offset; \
offset = IRDMA_GET_RING_OFFSET((_cq)->cq_ring, _i); \
(_cqe) = (_cq)->cq_base[offset].buf; \
}
#define IRDMA_GET_CURRENT_CQ_ELEM(_cq) \
( \
(_cq)->cq_base[IRDMA_RING_CURRENT_HEAD((_cq)->cq_ring)].buf \
@ -437,12 +449,6 @@
IRDMA_RING_MOVE_HEAD(_ring, _retcode); \
}
enum irdma_protocol_used {
IRDMA_ANY_PROTOCOL = 0,
IRDMA_IWARP_PROTOCOL_ONLY = 1,
IRDMA_ROCE_PROTOCOL_ONLY = 2,
};
enum irdma_qp_wqe_size {
IRDMA_WQE_SIZE_32 = 32,
IRDMA_WQE_SIZE_64 = 64,
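
The IRDMA_GET_RING_OFFSET()/IRDMA_GET_CQ_ELEM_AT_OFFSET() macros added above let the poller peek at a CQE an arbitrary number of slots ahead of the ring head instead of only at the head itself, presumably in support of the flush-scheduler rework noted in the summary. A minimal standalone sketch of the wraparound arithmetic, with made-up ring geometry:

#include <stdio.h>

int
main(void)
{
	unsigned int head = 6, size = 8;	/* assumed ring state */
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("peek %u -> slot %u\n", i, (head + i) % size);
	/* prints slots 6, 7, 0, 1: peeking past the end wraps back to 0 */
	return (0);
}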

View File

@ -641,7 +641,7 @@ irdma_copy_inline_data_gen_1(u8 *wqe, struct irdma_sge *sge_list,
sge_len -= bytes_copied;
if (!quanta_bytes_remaining) {
/* Remaining inline bytes reside after the hdr */
/* Remaining inline bytes reside after hdr */
wqe += 16;
quanta_bytes_remaining = 32;
}
@ -710,7 +710,7 @@ irdma_copy_inline_data(u8 *wqe, struct irdma_sge *sge_list, u32 num_sges,
if (!quanta_bytes_remaining) {
quanta_bytes_remaining = 31;
/* Remaining inline bytes reside after the hdr */
/* Remaining inline bytes reside after hdr */
if (first_quanta) {
first_quanta = false;
wqe += 16;
@ -1111,7 +1111,6 @@ irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
u8 arm_next = 0;
u8 arm_seq_num;
cq->armed = true;
get_64bit_val(cq->shadow_area, IRDMA_BYTE_32, &temp_val);
arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
arm_seq_num++;
@ -1338,6 +1337,8 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
info->push_dropped = (bool)FIELD_GET(IRDMACQ_PSHDROP, qword3);
info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
get_64bit_val(cqe, IRDMA_BYTE_8, &comp_ctx);
qp = (struct irdma_qp_uk *)(irdma_uintptr) comp_ctx;
if (info->error) {
info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3);
@ -1366,10 +1367,7 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2);
get_64bit_val(cqe, IRDMA_BYTE_8, &comp_ctx);
info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3);
qp = (struct irdma_qp_uk *)(irdma_uintptr) comp_ctx;
if (!qp || qp->destroy_pending) {
ret_code = EFAULT;
goto exit;
@ -1493,7 +1491,8 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
}
} while (1);
if (info->op_type == IRDMA_OP_TYPE_BIND_MW && info->minor_err == FLUSH_PROT_ERR)
if (info->op_type == IRDMA_OP_TYPE_BIND_MW &&
info->minor_err == FLUSH_PROT_ERR)
info->minor_err = FLUSH_MW_BIND_ERR;
qp->sq_flush_seen = true;
if (!IRDMA_RING_MORE_WORK(qp->sq_ring))

View File

@ -49,7 +49,7 @@
/**
* Driver version
*/
char libirdma_version[] = "1.1.5-k";
char libirdma_version[] = "1.1.11-k";
unsigned int irdma_dbg;
@ -170,7 +170,7 @@ irdma_init_context(struct verbs_device *vdev,
iwvctx->uk_attrs.max_hw_sq_chunk = resp.max_hw_sq_chunk;
iwvctx->uk_attrs.max_hw_cq_size = resp.max_hw_cq_size;
iwvctx->uk_attrs.min_hw_cq_size = resp.min_hw_cq_size;
iwvctx->uk_attrs.min_hw_wq_size = IRDMA_MIN_WQ_SIZE_GEN2;
iwvctx->uk_attrs.min_hw_wq_size = IRDMA_QP_SW_MIN_WQSIZE;
iwvctx->abi_ver = IRDMA_ABI_VER;
mmap_key = resp.db_mmap_key;

View File

@ -87,7 +87,8 @@ struct irdma_uvcontext {
struct irdma_uk_attrs uk_attrs;
void *db;
int abi_ver;
bool legacy_mode;
bool legacy_mode:1;
bool use_raw_attrs:1;
};
struct irdma_uqp;

View File

@ -211,7 +211,6 @@ enum irdma_device_caps_const {
IRDMA_Q2_BUF_SIZE = 256,
IRDMA_QP_CTX_SIZE = 256,
IRDMA_MAX_PDS = 262144,
IRDMA_MIN_WQ_SIZE_GEN2 = 8,
};
enum irdma_addressing_type {
@ -521,7 +520,6 @@ struct irdma_cq_uk {
u32 cq_size;
struct irdma_ring cq_ring;
u8 polarity;
bool armed:1;
bool avoid_mem_cflct:1;
};

View File

@ -1566,11 +1566,10 @@ irdma_ucreate_qp(struct ibv_pd *pd,
info.sq_size = info.sq_depth >> info.sq_shift;
info.rq_size = info.rq_depth >> info.rq_shift;
/**
* For older ABI version (less than 6) passes raw sq and rq
* quanta in cap.max_send_wr and cap.max_recv_wr.
* But then kernel had no way of calculating the actual qp size.
* Maintain backward compatibility with older ABI which pass sq
* and rq depth (in quanta) in cap.max_send_wr a cap.max_recv_wr
*/
if (iwvctx->abi_ver <= 5) {
if (!iwvctx->use_raw_attrs) {
attr->cap.max_send_wr = info.sq_size;
attr->cap.max_recv_wr = info.rq_size;
}

View File

@ -119,7 +119,7 @@ do { \
irdma_debug(dev, mask, "%s\n", desc); \
irdma_debug(dev, mask, "starting address virt=%p phy=%lxh\n", buf, irdma_get_virt_to_phy(buf)); \
for (i = 0; i < size ; i += 8) \
irdma_debug(dev, mask, "index %03d val: %016lx\n", i, ((unsigned long *)buf)[i / 8]); \
irdma_debug(dev, mask, "index %03d val: %016lx\n", i, ((unsigned long *)(buf))[i / 8]); \
} while(0)
#define irdma_debug(h, m, s, ...) \
@ -137,11 +137,12 @@ do { \
if (irdma_dbg) \
printf("libirdma-%s: " fmt, __func__, ##args); \
} while (0)
#define irdma_dev_err(a, b, ...) printf(b, ##__VA_ARGS__)
#define irdma_dev_warn(a, b, ...) printf(b, ##__VA_ARGS__) /*dev_warn(a, b)*/
#define irdma_dev_err(ibdev, fmt, ...) \
pr_err("%s:%s:%d ERR "fmt, (ibdev)->name, __func__, __LINE__, ##__VA_ARGS__)
#define irdma_dev_warn(ibdev, fmt, ...) \
pr_warn("%s:%s:%d WARN "fmt, (ibdev)->name, __func__, __LINE__, ##__VA_ARGS__)
#define irdma_dev_info(a, b, ...) printf(b, ##__VA_ARGS__)
#define irdma_pr_warn printf
#define ibdev_err(ibdev, fmt, ...) printf("%s:"fmt, (ibdev)->name, ##__VA_ARGS__)
#define dump_struct(s, sz, name) \
do { \

View File

@ -93,6 +93,18 @@ irdma_wr64(struct irdma_dev_ctx *dev_ctx, u32 reg, u64 value)
}
void
irdma_request_reset(struct irdma_pci_f *rf)
{
struct ice_rdma_peer *peer = rf->peer_info;
struct ice_rdma_request req = {0};
req.type = ICE_RDMA_EVENT_RESET;
printf("%s:%d requesting pf-reset\n", __func__, __LINE__);
IRDMA_DI_REQ_HANDLER(peer, &req);
}
int
irdma_register_qset(struct irdma_sc_vsi *vsi, struct irdma_ws_node *tc_node)
{
@ -611,32 +623,38 @@ irdma_dcqcn_tunables_init(struct irdma_pci_f *rf)
&rf->dcqcn_params.min_rate, 0,
"set minimum rate limit value, in MBits per second, default=0");
rf->dcqcn_params.dcqcn_f = 5;
SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
OID_AUTO, "dcqcn_F", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_f, 0,
"set number of times to stay in each stage of bandwidth recovery, default=0");
"set number of times to stay in each stage of bandwidth recovery, default=5");
rf->dcqcn_params.dcqcn_t = 0x37;
SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
OID_AUTO, "dcqcn_T", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_t, 0,
"set number of usecs that should elapse before increasing the CWND in DCQCN mode, default=0");
"set number of usecs that should elapse before increasing the CWND in DCQCN mode, default=0x37");
rf->dcqcn_params.dcqcn_b = 0x249f0;
SYSCTL_ADD_U32(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
OID_AUTO, "dcqcn_B", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_b, 0,
"set number of MSS to add to the congestion window in additive increase mode, default=0");
"set number of MSS to add to the congestion window in additive increase mode, default=0x249f0");
rf->dcqcn_params.rai_factor = 1;
SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
OID_AUTO, "dcqcn_rai_factor", CTLFLAG_RDTUN,
&rf->dcqcn_params.rai_factor, 0,
"set number of MSS to add to the congestion window in additive increase mode, default=0");
"set number of MSS to add to the congestion window in additive increase mode, default=1");
rf->dcqcn_params.hai_factor = 5;
SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
OID_AUTO, "dcqcn_hai_factor", CTLFLAG_RDTUN,
&rf->dcqcn_params.hai_factor, 0,
"set number of MSS to add to the congestion window in hyperactive increase mode, default=0");
"set number of MSS to add to the congestion window in hyperactive increase mode, default=5");
rf->dcqcn_params.rreduce_mperiod = 50;
SYSCTL_ADD_U32(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
OID_AUTO, "dcqcn_rreduce_mperiod", CTLFLAG_RDTUN,
&rf->dcqcn_params.rreduce_mperiod, 0,
"set minimum time between 2 consecutive rate reductions for a single flow, default=0");
"set minimum time between 2 consecutive rate reductions for a single flow, default=50");
}
/**
@ -743,3 +761,31 @@ irdma_prm_rem_bitmapmem(struct irdma_hw *hw, struct irdma_chunk *chunk)
{
kfree(chunk->bitmapmem.va);
}
void
irdma_cleanup_dead_qps(struct irdma_sc_vsi *vsi)
{
struct irdma_sc_qp *qp = NULL;
struct irdma_qp *iwqp;
struct irdma_pci_f *rf;
u8 i;
for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
while (qp) {
if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_UDA) {
qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
continue;
}
iwqp = qp->qp_uk.back_qp;
rf = iwqp->iwdev->rf;
irdma_free_dma_mem(rf->sc_dev.hw, &iwqp->q2_ctx_mem);
irdma_free_dma_mem(rf->sc_dev.hw, &iwqp->kqp.dma_mem);
kfree(iwqp->kqp.sq_wrid_mem);
kfree(iwqp->kqp.rq_wrid_mem);
qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
kfree(iwqp);
}
}
}
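
The corrected sysctl descriptions above now quote the defaults that irdma_dcqcn_tunables_init() actually programs. Purely as an illustrative restatement (this struct is hypothetical, not driver code; values copied from the hunk above):

/* Hypothetical one-place summary of the DCQCN defaults seeded above. */
static const struct {
	unsigned char	dcqcn_f;	/* times to stay in each recovery stage */
	unsigned short	dcqcn_t;	/* usecs before CWND increase */
	unsigned int	dcqcn_b;
	unsigned short	rai_factor;	/* MSS added, additive increase */
	unsigned short	hai_factor;	/* MSS added, hyperactive increase */
	unsigned int	rreduce_mperiod; /* min gap between rate reductions */
} dcqcn_defaults = {
	.dcqcn_f = 5,
	.dcqcn_t = 0x37,	/* 55 usecs */
	.dcqcn_b = 0x249f0,	/* 150000 */
	.rai_factor = 1,
	.hai_factor = 5,
	.rreduce_mperiod = 50,
};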

View File

@ -40,10 +40,11 @@
#define TASKLET_DATA_TYPE unsigned long
#define TASKLET_FUNC_TYPE void (*)(TASKLET_DATA_TYPE)
#ifndef tasklet_setup
#define tasklet_setup(tasklet, callback) \
tasklet_init((tasklet), (TASKLET_FUNC_TYPE)(callback), \
(TASKLET_DATA_TYPE)(tasklet))
#endif
#ifndef from_tasklet
#define from_tasklet(var, callback_tasklet, tasklet_fieldname) \
container_of(callback_tasklet, typeof(*var), tasklet_fieldname)
@ -176,8 +177,7 @@ int irdma_dereg_mr(struct ib_mr *ib_mr);
#else
int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
#endif
void irdma_get_eth_speed_and_width(u32 link_speed, u8 *active_speed,
u8 *active_width);
int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u8 *speed, u8 *width);
enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev,
u8 port_num);
int irdma_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
@ -197,6 +197,7 @@ int irdma_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *stats, u8 port_num,
int index);
void irdma_request_reset(struct irdma_pci_f *rf);
int irdma_register_qset(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *tc_node);
void irdma_unregister_qset(struct irdma_sc_vsi *vsi,
@ -337,4 +338,13 @@ static inline size_t irdma_ib_umem_num_dma_blocks(struct ib_umem *umem, unsigned
ALIGN_DOWN(iova, pgsz))) / pgsz;
}
static inline void addrconf_addr_eui48(u8 *deui, const char *const addr)
{
memcpy(deui, addr, 3);
deui[3] = 0xFF;
deui[4] = 0xFE;
memcpy(deui + 5, addr + 3, 3);
deui[0] ^= 2;
}
#endif /* FBSD_KCOMPAT_H */
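
The addrconf_addr_eui48() compat helper added above expands a 48-bit MAC into a modified EUI-64 interface identifier: copy the OUI, insert 0xFF 0xFE in the middle, copy the remaining three bytes, and flip the universal/local bit. A standalone sketch with a made-up MAC address:

#include <stdio.h>
#include <string.h>

/* Same transform as the addrconf_addr_eui48() helper above. */
static void
eui48_to_eui64(unsigned char *deui, const unsigned char *addr)
{
	memcpy(deui, addr, 3);
	deui[3] = 0xFF;
	deui[4] = 0xFE;
	memcpy(deui + 5, addr + 3, 3);
	deui[0] ^= 2;			/* flip the universal/local bit */
}

int
main(void)
{
	const unsigned char mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
	unsigned char id[8];
	int i;

	eui48_to_eui64(id, mac);
	for (i = 0; i < 8; i++)
		printf("%02x%s", id[i], i == 7 ? "\n" : ":");
	/* prints 02:1b:21:ff:fe:aa:bb:cc */
	return (0);
}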

View File

@ -53,7 +53,7 @@
/**
* Driver version
*/
char irdma_driver_version[] = "1.1.5-k";
char irdma_driver_version[] = "1.1.11-k";
#define pf_if_d(peer) peer->ifp->if_dunit
@ -223,9 +223,13 @@ static void
irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev)
{
if (mtu < IRDMA_MIN_MTU_IPV4)
irdma_dev_warn(dev, "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 576 for IPv4\n", mtu);
irdma_dev_warn(to_ibdev(dev),
"MTU setting [%d] too low for RDMA traffic. Minimum MTU is 576 for IPv4\n",
mtu);
else if (mtu < IRDMA_MIN_MTU_IPV6)
irdma_dev_warn(dev, "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 1280 for IPv6\\n", mtu);
irdma_dev_warn(to_ibdev(dev),
"MTU setting [%d] too low for RDMA traffic. Minimum MTU is 1280 for IPv6\\n",
mtu);
}
/**
@ -336,22 +340,25 @@ irdma_finalize_task(void *context, int pending)
int status = 0;
if (iwdev->iw_status) {
irdma_debug(&rf->sc_dev, IRDMA_DEBUG_INIT, "Starting deferred closing %d (%d)\n",
irdma_debug(&rf->sc_dev, IRDMA_DEBUG_INIT,
"Starting deferred closing %d (%d)\n",
rf->peer_info->pf_id, pf_if_d(peer));
irdma_dereg_ipaddr_event_cb(rf);
irdma_ib_unregister_device(iwdev);
req.type = ICE_RDMA_EVENT_VSI_FILTER_UPDATE;
req.enable_filter = false;
IRDMA_DI_REQ_HANDLER(peer, &req);
irdma_cleanup_dead_qps(&iwdev->vsi);
irdma_rt_deinit_hw(iwdev);
} else {
irdma_debug(&rf->sc_dev, IRDMA_DEBUG_INIT, "Starting deferred opening %d (%d)\n",
irdma_debug(&rf->sc_dev, IRDMA_DEBUG_INIT,
"Starting deferred opening %d (%d)\n",
rf->peer_info->pf_id, pf_if_d(peer));
l2params.mtu = peer->mtu;
irdma_get_qos_info(&l2params, &peer->initial_qos_info);
if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
iwdev->dcb_vlan_mode = l2params.num_tc > 1 && !l2params.dscp_mode;
l2params.mtu = peer->mtu;
status = irdma_rt_init_hw(iwdev, &l2params);
if (status) {
irdma_pr_err("RT init failed %d\n", status);
@ -368,7 +375,8 @@ irdma_finalize_task(void *context, int pending)
req.enable_filter = true;
IRDMA_DI_REQ_HANDLER(peer, &req);
irdma_reg_ipaddr_event_cb(rf);
irdma_debug(&rf->sc_dev, IRDMA_DEBUG_INIT, "Deferred opening finished %d (%d)\n",
irdma_debug(&rf->sc_dev, IRDMA_DEBUG_INIT,
"Deferred opening finished %d (%d)\n",
rf->peer_info->pf_id, pf_if_d(peer));
}
}
@ -459,6 +467,7 @@ irdma_fill_device_info(struct irdma_device *iwdev,
rf->rsrc_profile = IRDMA_HMC_PROFILE_DEFAULT;
rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
rf->check_fc = irdma_check_fc_for_qp;
rf->gen_ops.request_reset = irdma_request_reset;
irdma_set_rf_user_cfg_params(rf);
rf->default_vsi.vsi_idx = peer->pf_vsi_num;
@ -483,6 +492,7 @@ irdma_fill_device_info(struct irdma_device *iwdev,
iwdev->rcv_wscale = IRDMA_CM_DEFAULT_RCV_WND_SCALE;
iwdev->roce_cwnd = IRDMA_ROCE_CWND_DEFAULT;
iwdev->roce_ackcreds = IRDMA_ROCE_ACKCREDS_DEFAULT;
iwdev->roce_rtomin = 5;
if (rf->protocol_used == IRDMA_ROCE_PROTOCOL_ONLY) {
iwdev->roce_mode = true;
@ -583,7 +593,8 @@ irdma_remove(struct ice_rdma_peer *peer)
struct irdma_handler *hdl;
struct irdma_device *iwdev;
irdma_debug((struct irdma_sc_dev *)NULL, IRDMA_DEBUG_INIT, "removing %s\n", __FUNCTION__);
irdma_debug((struct irdma_sc_dev *)NULL, IRDMA_DEBUG_INIT,
"removing %s irdma%d\n", __func__, pf_if_d(peer));
hdl = irdma_find_handler(peer);
if (!hdl)
@ -614,7 +625,8 @@ irdma_remove(struct ice_rdma_peer *peer)
kfree(iwdev->hdl);
kfree(iwdev->rf);
ib_dealloc_device(&iwdev->ibdev);
irdma_pr_info("IRDMA hardware deinitialization complete\n");
irdma_pr_info("IRDMA hardware deinitialization complete irdma%d\n",
pf_if_d(peer));
return 0;
}

View File

@ -122,7 +122,7 @@ enum icrdma_device_caps_const {
ICRDMA_MAX_STATS_COUNT = 128,
ICRDMA_MAX_IRD_SIZE = 32,
ICRDMA_MAX_ORD_SIZE = 64,
ICRDMA_MAX_ORD_SIZE = 32,
ICRDMA_MIN_WQ_SIZE = 8 /* WQEs */,
};

View File

@ -53,10 +53,15 @@ enum irdma_memreg_type {
IRDMA_MEMREG_TYPE_CQ = 2,
};
enum {
IRDMA_ALLOC_UCTX_USE_RAW_ATTR = 1 << 0,
};
struct irdma_alloc_ucontext_req {
__u32 rsvd32;
__u8 userspace_ver;
__u8 rsvd8[3];
__aligned_u64 comp_mask;
};
struct irdma_alloc_ucontext_resp {
@ -77,6 +82,7 @@ struct irdma_alloc_ucontext_resp {
__u16 max_hw_sq_chunk;
__u8 hw_rev;
__u8 rsvd2;
__aligned_u64 comp_mask;
};
struct irdma_alloc_pd_resp {

View File

@ -206,9 +206,10 @@ irdma_send_cm_event(struct irdma_cm_node *cm_node,
event.event = type;
event.status = status;
irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"cm_node %p cm_id=%p state=%d accel=%d event_type=%d status=%d\n",
cm_node, cm_id, cm_node->accelerated, cm_node->state, type, status);
cm_node, cm_id, cm_node->accelerated, cm_node->state, type,
status);
switch (type) {
case IW_CM_EVENT_CONNECT_REQUEST:
@ -288,8 +289,9 @@ irdma_create_event(struct irdma_cm_node *cm_node,
event->cm_info.rem_port = cm_node->rem_port;
event->cm_info.loc_port = cm_node->loc_port;
event->cm_info.cm_id = cm_node->cm_id;
irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
"node=%p event=%p type=%u dst=%pI4 src=%pI4\n", cm_node,
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"node=%p event=%p type=%u dst=%pI4 src=%pI4\n",
cm_node,
event, type, event->cm_info.loc_addr,
event->cm_info.rem_addr);
irdma_cm_post_event(event);
@ -356,15 +358,13 @@ irdma_form_ah_cm_frame(struct irdma_cm_node *cm_node,
u32 hdr_len = 0;
if (!cm_node->ah || !cm_node->ah->ah_info.ah_valid) {
irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
"AH invalid\n");
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM, "AH invalid\n");
return NULL;
}
sqbuf = irdma_puda_get_bufpool(vsi->ilq);
if (!sqbuf) {
irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
"SQ buf NULL\n");
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM, "SQ buf NULL\n");
return NULL;
}
@ -645,7 +645,7 @@ irdma_send_reset(struct irdma_cm_node *cm_node)
if (!sqbuf)
return -ENOMEM;
irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"caller: %pS cm_node %p cm_id=%p accel=%d state=%d rem_port=0x%04x, loc_port=0x%04x rem_addr=%pI4 loc_addr=%pI4\n",
__builtin_return_address(0), cm_node, cm_node->cm_id,
cm_node->accelerated, cm_node->state, cm_node->rem_port,
@ -666,8 +666,9 @@ irdma_active_open_err(struct irdma_cm_node *cm_node, bool reset)
irdma_cleanup_retrans_entry(cm_node);
cm_node->cm_core->stats_connect_errs++;
if (reset) {
irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
"cm_node=%p state=%d\n", cm_node, cm_node->state);
irdma_debug(&cm_node->iwdev->rf->sc_dev,
IRDMA_DEBUG_CM, "cm_node=%p state=%d\n", cm_node,
cm_node->state);
atomic_inc(&cm_node->refcnt);
irdma_send_reset(cm_node);
}
@ -687,8 +688,9 @@ irdma_passive_open_err(struct irdma_cm_node *cm_node, bool reset)
irdma_cleanup_retrans_entry(cm_node);
cm_node->cm_core->stats_passive_errs++;
cm_node->state = IRDMA_CM_STATE_CLOSED;
irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
"cm_node=%p state =%d\n", cm_node, cm_node->state);
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"cm_node=%p state =%d\n",
cm_node, cm_node->state);
if (reset)
irdma_send_reset(cm_node);
else
@ -747,8 +749,7 @@ irdma_process_options(struct irdma_cm_node *cm_node, u8 *optionsloc,
offset += 1;
continue;
case OPTION_NUM_MSS:
irdma_debug(iwdev_to_idev(cm_node->iwdev),
IRDMA_DEBUG_CM,
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"MSS Length: %d Offset: %d Size: %d\n",
all_options->mss.len, offset, optionsize);
got_mss_option = 1;
@ -768,8 +769,7 @@ irdma_process_options(struct irdma_cm_node *cm_node, u8 *optionsloc,
all_options->windowscale.shiftcount;
break;
default:
irdma_debug(iwdev_to_idev(cm_node->iwdev),
IRDMA_DEBUG_CM,
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"Unsupported TCP Option: %x\n",
all_options->base.optionnum);
break;
@ -801,9 +801,9 @@ irdma_handle_tcp_options(struct irdma_cm_node *cm_node,
ret = irdma_process_options(cm_node, optionsloc, optionsize,
(u32)tcph->th_flags & TH_SYN);
if (ret) {
irdma_debug(iwdev_to_idev(cm_node->iwdev),
IRDMA_DEBUG_CM,
"Node %p, Sending Reset\n", cm_node);
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"Node %p, Sending Reset\n",
cm_node);
if (passive)
irdma_passive_open_err(cm_node, true);
else
@ -950,8 +950,9 @@ irdma_send_mpa_request(struct irdma_cm_node *cm_node)
&cm_node->mpa_hdr,
MPA_KEY_REQUEST);
if (!cm_node->mpa_hdr.size) {
irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
"mpa size = %d\n", cm_node->mpa_hdr.size);
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"mpa size = %d\n",
cm_node->mpa_hdr.size);
return -EINVAL;
}
@ -1061,9 +1062,9 @@ irdma_negotiate_mpa_v2_ird_ord(struct irdma_cm_node *cm_node,
/* Not supported RDMA0 operation */
return -EOPNOTSUPP;
irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
"MPAV2 Negotiated ORD: %d, IRD: %d\n", cm_node->ord_size,
cm_node->ird_size);
irdma_debug(&cm_node->iwdev->rf->sc_dev,
IRDMA_DEBUG_CM, "MPAV2 Negotiated ORD: %d, IRD: %d\n",
cm_node->ord_size, cm_node->ird_size);
return 0;
}
@ -1084,8 +1085,8 @@ irdma_parse_mpa(struct irdma_cm_node *cm_node, u8 *buf, u32 *type,
*type = IRDMA_MPA_REQUEST_ACCEPT;
if (len < sizeof(struct ietf_mpa_v1)) {
irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
"ietf buffer small (%x)\n", len);
irdma_debug(&cm_node->iwdev->rf->sc_dev,
IRDMA_DEBUG_CM, "ietf buffer small (%x)\n", len);
return -EINVAL;
}
@ -1094,20 +1095,23 @@ irdma_parse_mpa(struct irdma_cm_node *cm_node, u8 *buf, u32 *type,
priv_data_len = ntohs(mpa_frame->priv_data_len);
if (priv_data_len > IETF_MAX_PRIV_DATA_LEN) {
irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
"private_data too big %d\n", priv_data_len);
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"private_data too big %d\n",
priv_data_len);
return -EOVERFLOW;
}
if (mpa_frame->rev != IETF_MPA_V1 && mpa_frame->rev != IETF_MPA_V2) {
irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
"unsupported mpa rev = %d\n", mpa_frame->rev);
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"unsupported mpa rev = %d\n",
mpa_frame->rev);
return -EINVAL;
}
if (mpa_frame->rev > cm_node->mpa_frame_rev) {
irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
"rev %d\n", mpa_frame->rev);
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"rev %d\n",
mpa_frame->rev);
return -EINVAL;
}
@ -1115,31 +1119,29 @@ irdma_parse_mpa(struct irdma_cm_node *cm_node, u8 *buf, u32 *type,
if (cm_node->state != IRDMA_CM_STATE_MPAREQ_SENT) {
if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ,
IETF_MPA_KEY_SIZE)) {
irdma_debug(iwdev_to_idev(cm_node->iwdev),
IRDMA_DEBUG_CM,
"Unexpected MPA Key received\n");
irdma_debug(&cm_node->iwdev->rf->sc_dev,
IRDMA_DEBUG_CM, "Unexpected MPA Key received\n");
return -EINVAL;
}
} else {
if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REP,
IETF_MPA_KEY_SIZE)) {
irdma_debug(iwdev_to_idev(cm_node->iwdev),
IRDMA_DEBUG_CM,
"Unexpected MPA Key received\n");
irdma_debug(&cm_node->iwdev->rf->sc_dev,
IRDMA_DEBUG_CM, "Unexpected MPA Key received\n");
return -EINVAL;
}
}
if (priv_data_len + mpa_hdr_len > len) {
irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
"ietf buffer len(%x + %x != %x)\n", priv_data_len,
mpa_hdr_len, len);
irdma_debug(&cm_node->iwdev->rf->sc_dev,
IRDMA_DEBUG_CM, "ietf buffer len(%x + %x != %x)\n",
priv_data_len, mpa_hdr_len, len);
return -EOVERFLOW;
}
if (len > IRDMA_MAX_CM_BUF) {
irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
"ietf buffer large len = %d\n", len);
irdma_debug(&cm_node->iwdev->rf->sc_dev,
IRDMA_DEBUG_CM, "ietf buffer large len = %d\n", len);
return -EOVERFLOW;
}
@ -1211,7 +1213,7 @@ irdma_schedule_cm_timer(struct irdma_cm_node *cm_node,
new_send->timetosend += (HZ / 10);
if (cm_node->close_entry) {
kfree(new_send);
irdma_debug(iwdev_to_idev(cm_node->iwdev),
irdma_debug(&cm_node->iwdev->rf->sc_dev,
IRDMA_DEBUG_CM, "already close entry\n");
return -EINVAL;
}
@ -1520,12 +1522,13 @@ irdma_send_fin(struct irdma_cm_node *cm_node)
* irdma_find_listener - find a cm node listening on this addr-port pair
* @cm_core: cm's core
* @dst_addr: listener ip addr
* @ipv4: flag indicating IPv4 when true
* @dst_port: listener tcp port num
* @vlan_id: virtual LAN ID
* @listener_state: state to match with listen node's
*/
static struct irdma_cm_listener *
irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, u16 dst_port,
irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, bool ipv4, u16 dst_port,
u16 vlan_id, enum irdma_cm_listener_state listener_state)
{
struct irdma_cm_listener *listen_node;
@ -1539,7 +1542,7 @@ irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, u16 dst_port,
list_for_each_entry(listen_node, &cm_core->listen_list, list) {
memcpy(listen_addr, listen_node->loc_addr, sizeof(listen_addr));
listen_port = listen_node->loc_port;
if (listen_port != dst_port ||
if (listen_node->ipv4 != ipv4 || listen_port != dst_port ||
!(listener_state & listen_node->listener_state))
continue;
/* compare node pair, return node handle if a match */
@ -1579,13 +1582,13 @@ irdma_del_multiple_qhash(struct irdma_device *iwdev,
child_listen_node = list_entry(pos, struct irdma_cm_listener,
child_listen_list);
if (child_listen_node->ipv4)
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"removing child listen for IP=%pI4, port=%d, vlan=%d\n",
child_listen_node->loc_addr,
child_listen_node->loc_port,
child_listen_node->vlan_id);
else
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"removing child listen for IP=%pI6, port=%d, vlan=%d\n",
child_listen_node->loc_addr,
child_listen_node->loc_port,
@ -1603,8 +1606,8 @@ irdma_del_multiple_qhash(struct irdma_device *iwdev,
} else {
ret = 0;
}
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
"Child listen node freed = %p\n",
irdma_debug(&iwdev->rf->sc_dev,
IRDMA_DEBUG_CM, "Child listen node freed = %p\n",
child_listen_node);
kfree(child_listen_node);
cm_parent_listen_node->cm_core->stats_listen_nodes_destroyed++;
@ -1614,6 +1617,10 @@ irdma_del_multiple_qhash(struct irdma_device *iwdev,
return ret;
}
static u8 irdma_get_egress_vlan_prio(u32 *loc_addr, u8 prio, bool ipv4){
return prio;
}
/**
* irdma_netdev_vlan_ipv6 - Gets the netdev and mac
* @addr: local IPv6 address
@ -1702,20 +1709,18 @@ irdma_add_mqh_6(struct irdma_device *iwdev,
if_addr_rlock(ip_dev);
IRDMA_TAILQ_FOREACH(ifp, &ip_dev->if_addrhead, ifa_link) {
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"IP=%pI6, vlan_id=%d, MAC=%pM\n",
&((struct sockaddr_in6 *)ifp->ifa_addr)->sin6_addr, rdma_vlan_dev_vlan_id(ip_dev),
IF_LLADDR(ip_dev));
if (((struct sockaddr_in6 *)ifp->ifa_addr)->sin6_family != AF_INET6)
continue;
child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_KERNEL);
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"Allocating child listener %p\n",
child_listen_node);
if (!child_listen_node) {
irdma_debug(iwdev_to_idev(iwdev),
IRDMA_DEBUG_CM,
"listener memory allocation\n");
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM, "listener memory allocation\n");
ret = -ENOMEM;
if_addr_runlock(ip_dev);
goto exit;
@ -1729,6 +1734,11 @@ irdma_add_mqh_6(struct irdma_device *iwdev,
((struct sockaddr_in6 *)ifp->ifa_addr)->sin6_addr.__u6_addr.__u6_addr32);
memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
sizeof(cm_info->loc_addr));
if (!iwdev->vsi.dscp_mode)
cm_info->user_pri =
irdma_get_egress_vlan_prio(child_listen_node->loc_addr,
cm_info->user_pri,
false);
ret = irdma_manage_qhash(iwdev, cm_info,
IRDMA_QHASH_TYPE_TCP_SYN,
IRDMA_QHASH_MANAGE_TYPE_ADD,
@ -1785,20 +1795,19 @@ irdma_add_mqh_4(struct irdma_device *iwdev,
if_addr_rlock(ip_dev);
IRDMA_TAILQ_FOREACH(ifa, &ip_dev->if_addrhead, ifa_link) {
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"Allocating child CM Listener forIP=%pI4, vlan_id=%d, MAC=%pM\n",
&ifa->ifa_addr,
rdma_vlan_dev_vlan_id(ip_dev), IF_LLADDR(ip_dev));
&ifa->ifa_addr, rdma_vlan_dev_vlan_id(ip_dev),
IF_LLADDR(ip_dev));
if (((struct sockaddr_in *)ifa->ifa_addr)->sin_family != AF_INET)
continue;
child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_KERNEL);
cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"Allocating child listener %p\n",
child_listen_node);
if (!child_listen_node) {
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
"listener memory allocation\n");
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM, "listener memory allocation\n");
if_addr_runlock(ip_dev);
ret = -ENOMEM;
goto exit;
@ -1812,6 +1821,11 @@ irdma_add_mqh_4(struct irdma_device *iwdev,
ntohl(((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr);
memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
sizeof(cm_info->loc_addr));
if (!iwdev->vsi.dscp_mode)
cm_info->user_pri =
irdma_get_egress_vlan_prio(child_listen_node->loc_addr,
cm_info->user_pri,
true);
ret = irdma_manage_qhash(iwdev, cm_info,
IRDMA_QHASH_TYPE_TCP_SYN,
IRDMA_QHASH_MANAGE_TYPE_ADD,
@ -1932,7 +1946,7 @@ irdma_dec_refcnt_listen(struct irdma_cm_core *cm_core,
err = irdma_send_reset(cm_node);
if (err) {
cm_node->state = IRDMA_CM_STATE_CLOSED;
irdma_debug(iwdev_to_idev(cm_node->iwdev),
irdma_debug(&cm_node->iwdev->rf->sc_dev,
IRDMA_DEBUG_CM, "send reset failed\n");
} else {
old_state = cm_node->state;
@ -1971,9 +1985,10 @@ irdma_dec_refcnt_listen(struct irdma_cm_core *cm_core,
cm_core->stats_listen_destroyed++;
cm_core->stats_listen_nodes_destroyed++;
irdma_debug(iwdev_to_idev(listener->iwdev), IRDMA_DEBUG_CM,
"loc_port=0x%04x loc_addr=%pI4 cm_listen_node=%p cm_id=%p qhash_set=%d vlan_id=%d apbvt_del=%d\n",
listener->loc_port, listener->loc_addr, listener, listener->cm_id, listener->qhash_set,
irdma_debug(&listener->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"loc_port=0x%04x loc_addr=%pI4 cm_listen_node=%p cm_id=%p qhash_set=%d vlan_id=%d apbvt_del=%d\n",
listener->loc_port, listener->loc_addr, listener,
listener->cm_id, listener->qhash_set,
listener->vlan_id, apbvt_del);
kfree(listener);
listener = NULL;
@ -2105,12 +2120,10 @@ irdma_cm_create_ah(struct irdma_cm_node *cm_node, bool wait)
ah_info.ipv4_valid = true;
ah_info.dest_ip_addr[0] = cm_node->rem_addr[0];
ah_info.src_ip_addr[0] = cm_node->loc_addr[0];
#ifdef VIMAGE
CURVNET_SET_QUIET(vnet);
ah_info.do_lpbk = irdma_ipv4_is_lpb(ah_info.src_ip_addr[0],
ah_info.dest_ip_addr[0]);
CURVNET_RESTORE();
#endif
} else {
memcpy(ah_info.dest_ip_addr, cm_node->rem_addr,
sizeof(ah_info.dest_ip_addr));
@ -2181,8 +2194,7 @@ irdma_make_cm_node(struct irdma_cm_core *cm_core, struct irdma_device *iwdev,
cm_node->user_pri = cm_info->user_pri;
if (listener) {
if (listener->tos != cm_info->tos)
irdma_dev_warn(
&iwdev->rf->sc_dev,
irdma_dev_warn(&iwdev->ibdev,
"application TOS[%d] and remote client TOS[%d] mismatch\n",
listener->tos, cm_info->tos);
if (iwdev->vsi.dscp_mode) {
@ -2190,9 +2202,14 @@ irdma_make_cm_node(struct irdma_cm_core *cm_core, struct irdma_device *iwdev,
} else {
cm_node->tos = max(listener->tos, cm_info->tos);
cm_node->user_pri = rt_tos2priority(cm_node->tos);
cm_node->user_pri =
irdma_get_egress_vlan_prio(cm_info->loc_addr,
cm_node->user_pri,
cm_info->ipv4);
}
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_DCB,
"listener: TOS:[%d] UP:[%d]\n", cm_node->tos,
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_DCB,
"listener: TOS:[%d] UP:[%d]\n",
cm_node->tos,
cm_node->user_pri);
}
memcpy(cm_node->loc_addr, cm_info->loc_addr, sizeof(cm_node->loc_addr));
@ -2247,8 +2264,8 @@ irdma_destroy_connection(struct irdma_cm_node *cm_node)
/* if the node is destroyed before connection was accelerated */
if (!cm_node->accelerated && cm_node->accept_pend) {
irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
"node destroyed before established\n");
irdma_debug(&cm_node->iwdev->rf->sc_dev,
IRDMA_DEBUG_CM, "node destroyed before established\n");
atomic_dec(&cm_node->listener->pend_accepts_cnt);
}
if (cm_node->close_entry)
@ -2371,8 +2388,9 @@ irdma_handle_fin_pkt(struct irdma_cm_node *cm_node)
break;
case IRDMA_CM_STATE_OFFLOADED:
default:
irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
"bad state node state = %d\n", cm_node->state);
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"bad state node state = %d\n",
cm_node->state);
break;
}
}
@ -2386,10 +2404,11 @@ static void
irdma_handle_rst_pkt(struct irdma_cm_node *cm_node,
struct irdma_puda_buf *rbuf)
{
irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"caller: %pS cm_node=%p state=%d rem_port=0x%04x loc_port=0x%04x rem_addr=%pI4 loc_addr=%pI4\n",
__builtin_return_address(0), cm_node, cm_node->state,
cm_node->rem_port, cm_node->loc_port, cm_node->rem_addr, cm_node->loc_addr);
cm_node->rem_port, cm_node->loc_port, cm_node->rem_addr,
cm_node->loc_addr);
irdma_cleanup_retrans_entry(cm_node);
switch (cm_node->state) {
@ -2463,7 +2482,7 @@ irdma_handle_rcv_mpa(struct irdma_cm_node *cm_node,
switch (cm_node->state) {
case IRDMA_CM_STATE_ESTABLISHED:
if (res_type == IRDMA_MPA_REQUEST_REJECT)
irdma_debug(iwdev_to_idev(cm_node->iwdev),
irdma_debug(&cm_node->iwdev->rf->sc_dev,
IRDMA_DEBUG_CM, "state for reject\n");
cm_node->state = IRDMA_CM_STATE_MPAREQ_RCVD;
type = IRDMA_CM_EVENT_MPA_REQ;
@ -2483,8 +2502,9 @@ irdma_handle_rcv_mpa(struct irdma_cm_node *cm_node,
irdma_send_ack(cm_node);
break;
default:
irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
"wrong cm_node state =%d\n", cm_node->state);
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"wrong cm_node state =%d\n",
cm_node->state);
break;
}
irdma_create_event(cm_node, type);
@ -2528,8 +2548,8 @@ irdma_check_seq(struct irdma_cm_node *cm_node, struct tcphdr *tcph)
!between(seq, rcv_nxt, (rcv_nxt + rcv_wnd)))
err = -1;
if (err)
irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
"seq number err\n");
irdma_debug(&cm_node->iwdev->rf->sc_dev,
IRDMA_DEBUG_CM, "seq number err\n");
return err;
}
@ -2636,7 +2656,7 @@ irdma_handle_synack_pkt(struct irdma_cm_node *cm_node,
irdma_cleanup_retrans_entry(cm_node);
/* active open */
if (irdma_check_syn(cm_node, tcph)) {
irdma_debug(iwdev_to_idev(cm_node->iwdev),
irdma_debug(&cm_node->iwdev->rf->sc_dev,
IRDMA_DEBUG_CM, "check syn fail\n");
return;
}
@ -2644,8 +2664,7 @@ irdma_handle_synack_pkt(struct irdma_cm_node *cm_node,
/* setup options */
err = irdma_handle_tcp_options(cm_node, tcph, optionsize, 0);
if (err) {
irdma_debug(iwdev_to_idev(cm_node->iwdev),
IRDMA_DEBUG_CM,
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"cm_node=%p tcp_options failed\n",
cm_node);
break;
@ -2655,8 +2674,7 @@ irdma_handle_synack_pkt(struct irdma_cm_node *cm_node,
irdma_send_ack(cm_node); /* ACK for the syn_ack */
err = irdma_send_mpa_request(cm_node);
if (err) {
irdma_debug(iwdev_to_idev(cm_node->iwdev),
IRDMA_DEBUG_CM,
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"cm_node=%p irdma_send_mpa_request failed\n",
cm_node);
break;
@ -2839,7 +2857,7 @@ irdma_make_listen_node(struct irdma_cm_core *cm_core,
unsigned long flags;
/* cannot have multiple matching listeners */
listener = irdma_find_listener(cm_core, cm_info->loc_addr,
listener = irdma_find_listener(cm_core, cm_info->loc_addr, cm_info->ipv4,
cm_info->loc_port, cm_info->vlan_id,
IRDMA_CM_LISTENER_EITHER_STATE);
if (listener &&
@ -2961,8 +2979,8 @@ irdma_cm_reject(struct irdma_cm_node *cm_node, const void *pdata,
cm_node->state = IRDMA_CM_STATE_CLOSED;
if (irdma_send_reset(cm_node))
irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
"send reset failed\n");
irdma_debug(&cm_node->iwdev->rf->sc_dev,
IRDMA_DEBUG_CM, "send reset failed\n");
return ret;
}
@ -3008,9 +3026,8 @@ irdma_cm_close(struct irdma_cm_node *cm_node)
break;
case IRDMA_CM_STATE_OFFLOADED:
if (cm_node->send_entry)
irdma_debug(iwdev_to_idev(cm_node->iwdev),
IRDMA_DEBUG_CM,
"CM send_entry in OFFLOADED state\n");
irdma_debug(&cm_node->iwdev->rf->sc_dev,
IRDMA_DEBUG_CM, "CM send_entry in OFFLOADED state\n");
irdma_rem_ref_cm_node(cm_node);
break;
}
@ -3059,8 +3076,8 @@ irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf)
cm_info.user_pri = (vtag & EVL_PRI_MASK) >>
VLAN_PRIO_SHIFT;
cm_info.vlan_id = vtag & EVL_VLID_MASK;
irdma_debug(iwdev_to_idev(cm_core->iwdev),
IRDMA_DEBUG_CM, "vlan_id=%d\n",
irdma_debug(&cm_core->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"vlan_id=%d\n",
cm_info.vlan_id);
} else {
cm_info.vlan_id = 0xFFFF;
@ -3096,12 +3113,13 @@ irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf)
listener = irdma_find_listener(cm_core,
cm_info.loc_addr,
cm_info.ipv4,
cm_info.loc_port,
cm_info.vlan_id,
IRDMA_CM_LISTENER_ACTIVE_STATE);
if (!listener) {
cm_info.cm_id = NULL;
irdma_debug(iwdev_to_idev(cm_core->iwdev),
irdma_debug(&cm_core->iwdev->rf->sc_dev,
IRDMA_DEBUG_CM, "no listener found\n");
return;
}
@ -3110,7 +3128,7 @@ irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf)
cm_node = irdma_make_cm_node(cm_core, iwdev, &cm_info,
listener);
if (!cm_node) {
irdma_debug(iwdev_to_idev(cm_core->iwdev),
irdma_debug(&cm_core->iwdev->rf->sc_dev,
IRDMA_DEBUG_CM, "allocate node failed\n");
atomic_dec(&listener->refcnt);
return;
@ -3195,15 +3213,10 @@ irdma_setup_cm_core(struct irdma_device *iwdev, u8 rdma_ver)
void
irdma_cleanup_cm_core(struct irdma_cm_core *cm_core)
{
unsigned long flags;
if (!cm_core)
return;
spin_lock_irqsave(&cm_core->ht_lock, flags);
if (timer_pending(&cm_core->tcp_timer))
del_timer_sync(&cm_core->tcp_timer);
spin_unlock_irqrestore(&cm_core->ht_lock, flags);
del_timer_sync(&cm_core->tcp_timer);
destroy_workqueue(cm_core->event_wq);
cm_core->dev->ws_reset(&cm_core->iwdev->vsi);
@ -3344,8 +3357,9 @@ irdma_cm_disconn(struct irdma_qp *iwqp)
spin_lock_irqsave(&iwdev->rf->qptable_lock, flags);
if (!iwdev->rf->qp_table[iwqp->ibqp.qp_num]) {
spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
"qp_id %d is already freed\n", iwqp->ibqp.qp_num);
irdma_debug(&iwdev->rf->sc_dev,
IRDMA_DEBUG_CM, "qp_id %d is already freed\n",
iwqp->ibqp.qp_num);
kfree(work);
return;
}
@ -3368,8 +3382,7 @@ irdma_qp_disconnect(struct irdma_qp *iwqp)
iwqp->active_conn = 0;
/* close the CM node down if it is still active */
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
"Call close API\n");
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM, "Call close API\n");
irdma_cm_close(iwqp->cm_node);
}
@ -3473,7 +3486,7 @@ irdma_cm_disconn_true(struct irdma_qp *iwqp)
IW_CM_EVENT_DISCONNECT,
disconn_status);
if (err)
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"disconnect event failed: - cm_id = %p\n",
cm_id);
}
@ -3482,7 +3495,7 @@ irdma_cm_disconn_true(struct irdma_qp *iwqp)
err = irdma_send_cm_event(iwqp->cm_node, cm_id,
IW_CM_EVENT_CLOSE, 0);
if (err)
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"close event failed: - cm_id = %p\n",
cm_id);
irdma_qp_disconnect(iwqp);
@ -3565,8 +3578,8 @@ irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
irdma_netdev_vlan_ipv6(cm_node->loc_addr, &cm_node->vlan_id,
NULL);
}
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
"Accept vlan_id=%d\n", cm_node->vlan_id);
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM, "Accept vlan_id=%d\n",
cm_node->vlan_id);
if (cm_node->state == IRDMA_CM_STATE_LISTENER_DESTROYED) {
ret = -EINVAL;
@ -3646,7 +3659,7 @@ irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
iwqp->rts_ae_rcvd,
IRDMA_MAX_TIMEOUT);
if (!wait_ret) {
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"Slow Connection: cm_node=%p, loc_port=%d, rem_port=%d, cm_id=%p\n",
cm_node, cm_node->loc_port,
cm_node->rem_port, cm_node->cm_id);
@ -3664,7 +3677,7 @@ irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_node->accept_pend = 0;
}
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"rem_port=0x%04x, loc_port=0x%04x rem_addr=%pI4 loc_addr=%pI4 cm_node=%p cm_id=%p qp_id = %d\n\n",
cm_node->rem_port, cm_node->loc_port, cm_node->rem_addr,
cm_node->loc_addr, cm_node, cm_id, ibqp->qp_num);
@ -3770,17 +3783,20 @@ irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
raddr6->sin6_addr.__u6_addr.__u6_addr32);
cm_info.loc_port = ntohs(laddr6->sin6_port);
cm_info.rem_port = ntohs(raddr6->sin6_port);
irdma_netdev_vlan_ipv6(cm_info.loc_addr, &cm_info.vlan_id,
NULL);
irdma_netdev_vlan_ipv6(cm_info.loc_addr, &cm_info.vlan_id, NULL);
}
cm_info.cm_id = cm_id;
cm_info.qh_qpid = iwdev->vsi.ilq->qp_id;
cm_info.tos = cm_id->tos;
if (iwdev->vsi.dscp_mode)
if (iwdev->vsi.dscp_mode) {
cm_info.user_pri =
iwqp->sc_qp.vsi->dscp_map[irdma_tos2dscp(cm_info.tos)];
else
} else {
cm_info.user_pri = rt_tos2priority(cm_id->tos);
cm_info.user_pri = irdma_get_egress_vlan_prio(cm_info.loc_addr,
cm_info.user_pri,
cm_info.ipv4);
}
if (iwqp->sc_qp.dev->ws_add(iwqp->sc_qp.vsi, cm_info.user_pri))
return -ENOMEM;
@ -3788,8 +3804,9 @@ irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
irdma_qp_add_qos(&iwqp->sc_qp);
if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2)
iwdev->rf->check_fc(&iwdev->vsi, &iwqp->sc_qp);
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_DCB,
"TOS:[%d] UP:[%d]\n", cm_id->tos, cm_info.user_pri);
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_DCB,
"TOS:[%d] UP:[%d]\n", cm_id->tos,
cm_info.user_pri);
ret = irdma_create_cm_node(&iwdev->cm_core, iwdev, conn_param, &cm_info,
&cm_node);
@ -3826,7 +3843,7 @@ irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
goto err;
}
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"rem_port=0x%04x, loc_port=0x%04x rem_addr=%pI4 loc_addr=%pI4 cm_node=%p cm_id=%p qp_id = %d\n\n",
cm_node->rem_port, cm_node->loc_port, cm_node->rem_addr,
cm_node->loc_addr, cm_node, cm_id, ibqp->qp_num);
@ -3835,12 +3852,12 @@ irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
err:
if (cm_info.ipv4)
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
"connect() FAILED: dest addr=%pI4",
irdma_debug(&iwdev->rf->sc_dev,
IRDMA_DEBUG_CM, "connect() FAILED: dest addr=%pI4",
cm_info.rem_addr);
else
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
"connect() FAILED: dest addr=%pI6",
irdma_debug(&iwdev->rf->sc_dev,
IRDMA_DEBUG_CM, "connect() FAILED: dest addr=%pI6",
cm_info.rem_addr);
irdma_rem_ref_cm_node(cm_node);
iwdev->cm_core.stats_connect_errs++;
@ -3911,8 +3928,8 @@ irdma_create_listen(struct iw_cm_id *cm_id, int backlog)
cm_listen_node = irdma_make_listen_node(&iwdev->cm_core, iwdev,
&cm_info);
if (!cm_listen_node) {
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
"cm_listen_node == NULL\n");
irdma_debug(&iwdev->rf->sc_dev,
IRDMA_DEBUG_CM, "cm_listen_node == NULL\n");
return -ENOMEM;
}
@ -3931,6 +3948,11 @@ irdma_create_listen(struct iw_cm_id *cm_id, int backlog)
if (err)
goto error;
} else {
if (!iwdev->vsi.dscp_mode)
cm_info.user_pri = cm_listen_node->user_pri =
irdma_get_egress_vlan_prio(cm_info.loc_addr,
cm_info.user_pri,
cm_info.ipv4);
err = irdma_manage_qhash(iwdev, &cm_info,
IRDMA_QHASH_TYPE_TCP_SYN,
IRDMA_QHASH_MANAGE_TYPE_ADD,
@ -3948,9 +3970,10 @@ irdma_create_listen(struct iw_cm_id *cm_id, int backlog)
}
cm_id->add_ref(cm_id);
cm_listen_node->cm_core->stats_listen_created++;
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"loc_port=0x%04x loc_addr=%pI4 cm_listen_node=%p cm_id=%p qhash_set=%d vlan_id=%d\n",
cm_listen_node->loc_port, cm_listen_node->loc_addr, cm_listen_node, cm_listen_node->cm_id,
cm_listen_node->loc_port, cm_listen_node->loc_addr,
cm_listen_node, cm_listen_node->cm_id,
cm_listen_node->qhash_set, cm_listen_node->vlan_id);
return 0;
@ -3976,8 +3999,8 @@ irdma_destroy_listen(struct iw_cm_id *cm_id)
irdma_cm_del_listen(&iwdev->cm_core, cm_id->provider_data,
true);
else
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
"cm_id->provider_data was NULL\n");
irdma_debug(&iwdev->rf->sc_dev,
IRDMA_DEBUG_CM, "cm_id->provider_data was NULL\n");
cm_id->rem_ref(cm_id);
@ -3985,7 +4008,8 @@ irdma_destroy_listen(struct iw_cm_id *cm_id)
}
/**
* irdma_teardown_list_prep - add conn nodes slated for tear down to list
* irdma_iw_teardown_list_prep - add conn nodes slated for tear
* down to list
* @cm_core: cm's core
* @teardown_list: a list to which cm_node will be selected
* @ipaddr: pointer to ip address
@ -3993,11 +4017,11 @@ irdma_destroy_listen(struct iw_cm_id *cm_id)
* @disconnect_all: flag indicating disconnect all QPs
*/
static void
irdma_teardown_list_prep(struct irdma_cm_core *cm_core,
struct list_head *teardown_list,
u32 *ipaddr,
struct irdma_cm_info *nfo,
bool disconnect_all)
irdma_iw_teardown_list_prep(struct irdma_cm_core *cm_core,
struct list_head *teardown_list,
u32 *ipaddr,
struct irdma_cm_info *nfo,
bool disconnect_all)
{
struct irdma_cm_node *cm_node;
int bkt;
@ -4011,6 +4035,75 @@ irdma_teardown_list_prep(struct irdma_cm_core *cm_core,
}
}
static inline bool
irdma_ip_vlan_match(u32 *ip1, u16 vlan_id1,
bool check_vlan, u32 *ip2,
u16 vlan_id2, bool ipv4)
{
return (!check_vlan || vlan_id1 == vlan_id2) &&
!memcmp(ip1, ip2, ipv4 ? 4 : 16);
}
/**
* irdma_roce_teardown_list_prep - add conn nodes slated for
* tear down to list
* @iwdev: RDMA device
* @teardown_list: a list to which cm_node will be selected
* @ipaddr: pointer to ip address
* @nfo: pointer to cm_info structure instance
* @disconnect_all: flag indicating disconnect all QPs
*/
static void
irdma_roce_teardown_list_prep(struct irdma_device *iwdev,
struct list_head *teardown_list,
u32 *ipaddr,
struct irdma_cm_info *nfo,
bool disconnect_all)
{
struct irdma_sc_vsi *vsi = &iwdev->vsi;
struct irdma_sc_qp *sc_qp;
struct list_head *list_node;
struct irdma_qp *qp;
unsigned long flags;
int i;
for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
mutex_lock(&vsi->qos[i].qos_mutex);
list_for_each(list_node, &vsi->qos[i].qplist) {
u32 qp_ip[4];
sc_qp = container_of(list_node, struct irdma_sc_qp,
list);
if (sc_qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_RC)
continue;
qp = sc_qp->qp_uk.back_qp;
if (!disconnect_all) {
if (nfo->ipv4)
qp_ip[0] = qp->udp_info.local_ipaddr[3];
else
memcpy(qp_ip,
&qp->udp_info.local_ipaddr[0],
sizeof(qp_ip));
}
if (disconnect_all ||
irdma_ip_vlan_match(qp_ip,
qp->udp_info.vlan_tag & EVL_VLID_MASK,
qp->udp_info.insert_vlan_tag,
ipaddr, nfo->vlan_id, nfo->ipv4)) {
spin_lock_irqsave(&iwdev->rf->qptable_lock, flags);
if (iwdev->rf->qp_table[sc_qp->qp_uk.qp_id]) {
irdma_qp_add_ref(&qp->ibqp);
list_add(&qp->teardown_entry, teardown_list);
}
spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
}
}
mutex_unlock(&vsi->qos[i].qos_mutex);
}
}
/**
* irdma_cm_event_connected - handle connected active node
* @event: the info for cm_node of connection
@ -4054,7 +4147,7 @@ irdma_cm_event_connected(struct irdma_cm_event *event)
iwqp->rts_ae_rcvd,
IRDMA_MAX_TIMEOUT);
if (!wait_ret)
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_CM,
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"Slow Connection: cm_node=%p, loc_port=%d, rem_port=%d, cm_id=%p\n",
cm_node, cm_node->loc_port,
cm_node->rem_port, cm_node->cm_id);
@ -4092,8 +4185,9 @@ irdma_cm_event_reset(struct irdma_cm_event *event)
if (!iwqp)
return;
irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
"reset event %p - cm_id = %p\n", event->cm_node, cm_id);
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"reset event %p - cm_id = %p\n",
event->cm_node, cm_id);
iwqp->cm_id = NULL;
irdma_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_DISCONNECT,
@ -4144,8 +4238,9 @@ irdma_cm_event_handler(struct work_struct *work)
irdma_event_connect_error(event);
break;
default:
irdma_debug(iwdev_to_idev(cm_node->iwdev), IRDMA_DEBUG_CM,
"bad event type = %d\n", event->type);
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"bad event type = %d\n",
event->type);
break;
}
@ -4174,8 +4269,9 @@ irdma_cm_post_event(struct irdma_cm_event *event)
*
* teardown QPs where source or destination addr matches ip addr
*/
void
irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
static void
irdma_cm_teardown_connections(struct irdma_device *iwdev,
u32 *ipaddr,
struct irdma_cm_info *nfo,
bool disconnect_all)
{
@ -4185,20 +4281,34 @@ irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
struct irdma_cm_node *cm_node;
struct list_head teardown_list;
struct ib_qp_attr attr;
struct irdma_qp *qp;
INIT_LIST_HEAD(&teardown_list);
rcu_read_lock();
irdma_teardown_list_prep(cm_core, &teardown_list, ipaddr, nfo, disconnect_all);
irdma_iw_teardown_list_prep(cm_core, &teardown_list, ipaddr, nfo, disconnect_all);
rcu_read_unlock();
attr.qp_state = IB_QPS_ERR;
list_for_each_safe(list_node, list_core_temp, &teardown_list) {
cm_node = container_of(list_node, struct irdma_cm_node,
teardown_entry);
attr.qp_state = IB_QPS_ERR;
irdma_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
if (iwdev->rf->reset)
irdma_cm_disconn(cm_node->iwqp);
irdma_rem_ref_cm_node(cm_node);
}
if (!rdma_protocol_roce(&iwdev->ibdev, 1))
return;
INIT_LIST_HEAD(&teardown_list);
irdma_roce_teardown_list_prep(iwdev, &teardown_list, ipaddr, nfo, disconnect_all);
list_for_each_safe(list_node, list_core_temp, &teardown_list) {
qp = container_of(list_node, struct irdma_qp, teardown_entry);
irdma_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL);
irdma_ib_qp_event(qp, IRDMA_QP_EVENT_CATASTROPHIC);
irdma_qp_rem_ref(&qp->ibqp);
}
}
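
The new irdma_ip_vlan_match() helper above compares only the first 4 bytes of the address array for IPv4 and all 16 for IPv6, and checks the VLAN IDs only when the QP actually inserts a VLAN tag. A toy standalone check (addresses and VLAN IDs made up):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same comparison as the irdma_ip_vlan_match() helper above. */
static bool
ip_vlan_match(uint32_t *ip1, uint16_t vlan_id1, bool check_vlan,
    uint32_t *ip2, uint16_t vlan_id2, bool ipv4)
{
	return (!check_vlan || vlan_id1 == vlan_id2) &&
	    !memcmp(ip1, ip2, ipv4 ? 4 : 16);
}

int
main(void)
{
	uint32_t a[4] = { 0xc0a80101, 0, 0, 0 };
	uint32_t b[4] = { 0xc0a80101, 9, 9, 9 };	/* differs past word 0 */

	/* IPv4 compares a single 32-bit word, so later words are ignored. */
	printf("%d\n", ip_vlan_match(a, 100, true, b, 100, true));	/* 1 */
	printf("%d\n", ip_vlan_match(a, 100, true, b, 200, true));	/* 0 */
	/* IPv6 compares the full 16 bytes, so these no longer match. */
	printf("%d\n", ip_vlan_match(a, 100, true, b, 100, false));	/* 0 */
	return (0);
}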

View File

@ -428,9 +428,6 @@ int irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int irdma_create_listen(struct iw_cm_id *cm_id, int backlog);
int irdma_destroy_listen(struct iw_cm_id *cm_id);
int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, const u8 *mac);
void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
struct irdma_cm_info *nfo,
bool disconnect_all);
int irdma_cm_start(struct irdma_device *dev);
int irdma_cm_stop(struct irdma_device *dev);
bool irdma_ipv4_is_lpb(u32 loc_addr, u32 rem_addr);

View File

@ -2035,7 +2035,7 @@ irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
* irdma_get_stats_idx - Return stats index
* @vsi: pointer to the vsi
*/
static u8 irdma_get_stats_idx(struct irdma_sc_vsi *vsi){
static u16 irdma_get_stats_idx(struct irdma_sc_vsi *vsi){
struct irdma_stats_inst_info stats_info = {0};
struct irdma_sc_dev *dev = vsi->dev;
@ -2081,7 +2081,7 @@ irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
/* when stat allocation is not required default to fcn_id. */
vsi->stats_idx = info->fcn_id;
if (info->alloc_stats_inst) {
u8 stats_idx = irdma_get_stats_idx(vsi);
u16 stats_idx = irdma_get_stats_idx(vsi);
if (stats_idx != IRDMA_INVALID_STATS_IDX) {
vsi->stats_inst_alloc = true;
@ -2368,7 +2368,6 @@ irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE,
info->ae_src) : 0;
set_64bit_val(wqe, IRDMA_BYTE_8, temp);
hdr = qp->qp_uk.qp_id |
FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_FLUSH_WQES) |
FIELD_PREP(IRDMA_CQPSQ_FWQE_GENERATE_AE, info->generate_ae) |
@ -2944,7 +2943,7 @@ static u64 irdma_sc_decode_fpm_commit(struct irdma_sc_dev *dev, __le64 * buf,
* parses fpm commit info and copy base value
* of hmc objects in hmc_info
*/
static int
static void
irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 * buf,
struct irdma_hmc_obj_info *info,
u32 *sd)
@ -3015,7 +3014,6 @@ irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 * buf,
else
*sd = (u32)(size >> 21);
return 0;
}
/**
@ -3986,10 +3984,8 @@ irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq)
ceq->polarity ^= 1;
} while (cq_idx == IRDMA_INVALID_CQ_IDX);
if (cq) {
cq->cq_uk.armed = false;
if (cq)
irdma_sc_cq_ack(cq);
}
return cq;
}
@ -4216,12 +4212,12 @@ irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
case IRDMA_AE_LLP_TOO_MANY_RETRIES:
case IRDMA_AE_LCE_QP_CATASTROPHIC:
case IRDMA_AE_LLP_DOUBT_REACHABILITY:
case IRDMA_AE_LLP_CONNECTION_ESTABLISHED:
case IRDMA_AE_RESET_SENT:
case IRDMA_AE_TERMINATE_SENT:
case IRDMA_AE_RESET_NOT_SENT:
case IRDMA_AE_LCE_QP_CATASTROPHIC:
case IRDMA_AE_QP_SUSPEND_COMPLETE:
case IRDMA_AE_UDA_L4LEN_INVALID:
info->qp = true;
@ -4247,6 +4243,13 @@ irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
info->qp = true;
info->compl_ctx = compl_ctx;
break;
case IRDMA_AE_RESOURCE_EXHAUSTION:
/*
* ae_src contains the exhausted resource with a unique decoding. Set RSVD here to prevent matching
* with a CQ or QP.
*/
ae_src = IRDMA_AE_SOURCE_RSVD;
break;
default:
break;
}
@ -4257,6 +4260,7 @@ irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
info->qp = true;
info->rq = true;
info->compl_ctx = compl_ctx;
info->err_rq_idx_valid = true;
break;
case IRDMA_AE_SOURCE_CQ:
case IRDMA_AE_SOURCE_CQ_0110:
@ -4272,6 +4276,10 @@ irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
info->compl_ctx = compl_ctx;
break;
case IRDMA_AE_SOURCE_IN_WR:
info->qp = true;
info->compl_ctx = compl_ctx;
info->in_rdrsp_wr = true;
break;
case IRDMA_AE_SOURCE_IN_RR:
info->qp = true;
info->compl_ctx = compl_ctx;
@ -4300,12 +4308,11 @@ irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
* @dev: sc device struct
* @count: allocate count
*/
int
void
irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count)
{
db_wr32(count, dev->aeq_alloc_db);
return 0;
}
/**
@ -4547,9 +4554,9 @@ irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev, u16 hmc_fn_id)
ret_code = irdma_sc_commit_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id,
&commit_fpm_mem, true, wait_type);
if (!ret_code)
ret_code = irdma_sc_parse_fpm_commit_buf(dev, dev->fpm_commit_buf,
hmc_info->hmc_obj,
&hmc_info->sd_table.sd_cnt);
irdma_sc_parse_fpm_commit_buf(dev, dev->fpm_commit_buf,
hmc_info->hmc_obj,
&hmc_info->sd_table.sd_cnt);
irdma_debug_buf(dev, IRDMA_DEBUG_HMC, "COMMIT FPM BUFFER",
commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE);
@ -4915,12 +4922,14 @@ cfg_fpm_value_gen_2(struct irdma_sc_dev *dev,
hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].cnt =
hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt /
hmc_fpm_misc->rrf_block_size;
if (hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].max_cnt)
hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].cnt = 32 * qpwanted;
if (hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].max_cnt)
hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].cnt =
hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].cnt /
hmc_fpm_misc->ooiscf_block_size;
if (dev->cqp->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) {
if (hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].max_cnt)
hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].cnt = 32 * qpwanted;
if (hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].max_cnt)
hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].cnt =
hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].cnt /
hmc_fpm_misc->ooiscf_block_size;
}
}
/**

View File

@ -138,6 +138,9 @@
#define IRDMA_AE_SOURCE_OUT_RR 0xd
#define IRDMA_AE_SOURCE_OUT_RR_1111 0xf
#define IRDMA_AE_SOURCE_RSRC_EXHT_Q1 0x1
#define IRDMA_AE_SOURCE_RSRC_EXHT_XT_RR 0x5
#define IRDMA_TCP_STATE_NON_EXISTENT 0
#define IRDMA_TCP_STATE_CLOSED 1
#define IRDMA_TCP_STATE_LISTEN 2
@ -193,6 +196,7 @@
#define IRDMA_CQE_QTYPE_RQ 0
#define IRDMA_CQE_QTYPE_SQ 1
#define IRDMA_QP_SW_MIN_WQSIZE 8 /* in WRs*/
#define IRDMA_QP_WQE_MIN_SIZE 32
#define IRDMA_QP_WQE_MAX_SIZE 256
#define IRDMA_QP_WQE_MIN_QUANTA 1
@ -1392,6 +1396,17 @@ enum irdma_cqp_op_type {
((_ring).tail + (_idx)) % (_ring).size \
)
#define IRDMA_GET_RING_OFFSET(_ring, _i) \
( \
((_ring).head + (_i)) % (_ring).size \
)
#define IRDMA_GET_CQ_ELEM_AT_OFFSET(_cq, _i, _cqe) \
{ \
register __u32 offset; \
offset = IRDMA_GET_RING_OFFSET((_cq)->cq_ring, _i); \
(_cqe) = (_cq)->cq_base[offset].buf; \
}
#define IRDMA_GET_CURRENT_CQ_ELEM(_cq) \
( \
(_cq)->cq_base[IRDMA_RING_CURRENT_HEAD((_cq)->cq_ring)].buf \
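The two macros added above let the CQE dump code index the completion ring at an arbitrary distance from the current head, with wraparound handled by a modulo on the ring size. A minimal userland sketch of the same arithmetic; the struct and names here are illustrative, not the driver's types:

#include <stdio.h>
#include <stdint.h>

/* Minimal stand-in for the ring bookkeeping the macros operate on. */
struct ring {
	uint32_t head;
	uint32_t size;
};

/* Same arithmetic as IRDMA_GET_RING_OFFSET: wrap (head + i) into [0, size). */
static uint32_t ring_offset(const struct ring *r, uint32_t i)
{
	return (r->head + i) % r->size;
}

int main(void)
{
	struct ring cq_ring = { .head = 126, .size = 128 };

	/* Walk a window around the current head; callers pass i + size so
	 * that negative offsets stay non-negative before the modulo.
	 */
	for (int i = -2; i <= 2; i++)
		printf("i=%2d -> slot %u\n", i,
		       (unsigned)ring_offset(&cq_ring, (uint32_t)(i + (int)cq_ring.size)));
	return 0;
}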

View File

@ -119,13 +119,13 @@ irdma_puda_ce_handler(struct irdma_pci_f *rf,
if (status == -ENOENT)
break;
if (status) {
irdma_debug(dev, IRDMA_DEBUG_ERR, "puda status = %d\n",
status);
irdma_debug(dev, IRDMA_DEBUG_ERR, "puda status = %d\n", status);
break;
}
if (compl_error) {
irdma_debug(dev, IRDMA_DEBUG_ERR,
"puda compl_err =0x%x\n", compl_error);
"puda compl_err =0x%x\n",
compl_error);
break;
}
} while (1);
@ -176,7 +176,6 @@ irdma_set_flush_fields(struct irdma_sc_qp *qp,
qp->sq_flush_code = info->sq;
qp->rq_flush_code = info->rq;
qp_err = irdma_ae_to_qp_err_code(info->ae_id);
qp->flush_code = qp_err.flush_code;
@ -220,6 +219,7 @@ irdma_process_aeq(struct irdma_pci_f *rf)
struct irdma_qp *iwqp = NULL;
struct irdma_cq *iwcq = NULL;
struct irdma_sc_qp *qp = NULL;
struct irdma_device *iwdev = rf->iwdev;
struct irdma_qp_host_ctx_info *ctx_info = NULL;
unsigned long flags;
@ -235,9 +235,10 @@ irdma_process_aeq(struct irdma_pci_f *rf)
break;
aeqcnt++;
irdma_debug(dev, IRDMA_DEBUG_AEQ,
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_AEQ,
"ae_id = 0x%x bool qp=%d qp_id = %d tcp_state=%d iwarp_state=%d ae_src=%d\n",
info->ae_id, info->qp, info->qp_cq_id, info->tcp_state, info->iwarp_state, info->ae_src);
info->ae_id, info->qp, info->qp_cq_id, info->tcp_state,
info->iwarp_state, info->ae_src);
if (info->qp) {
spin_lock_irqsave(&rf->qptable_lock, flags);
@ -248,11 +249,14 @@ irdma_process_aeq(struct irdma_pci_f *rf)
if (info->ae_id == IRDMA_AE_QP_SUSPEND_COMPLETE) {
struct irdma_device *iwdev = rf->iwdev;
if (!iwdev->vsi.tc_change_pending)
continue;
atomic_dec(&iwdev->vsi.qp_suspend_reqs);
wake_up(&iwdev->suspend_wq);
continue;
}
irdma_debug(dev, IRDMA_DEBUG_AEQ,
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_AEQ,
"qp_id %d is already freed\n",
info->qp_cq_id);
continue;
@ -329,8 +333,9 @@ irdma_process_aeq(struct irdma_pci_f *rf)
case IRDMA_AE_LLP_TERMINATE_RECEIVED:
irdma_terminate_received(qp, info);
break;
case IRDMA_AE_LCE_CQ_CATASTROPHIC:
case IRDMA_AE_CQ_OPERATION_ERROR:
irdma_dev_err(dev,
irdma_dev_err(&iwdev->ibdev,
"Processing CQ[0x%x] op error, AE 0x%04X\n",
info->qp_cq_id, info->ae_id);
spin_lock_irqsave(&rf->cqtable_lock, flags);
@ -338,7 +343,7 @@ irdma_process_aeq(struct irdma_pci_f *rf)
if (!iwcq) {
spin_unlock_irqrestore(&rf->cqtable_lock,
flags);
irdma_debug(dev, IRDMA_DEBUG_AEQ,
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_AEQ,
"cq_id %d is already freed\n",
info->qp_cq_id);
continue;
@ -358,7 +363,12 @@ irdma_process_aeq(struct irdma_pci_f *rf)
break;
case IRDMA_AE_RESET_NOT_SENT:
case IRDMA_AE_LLP_DOUBT_REACHABILITY:
break;
case IRDMA_AE_RESOURCE_EXHAUSTION:
irdma_dev_err(&iwdev->ibdev,
"Resource exhaustion reason: q1 = %d xmit or rreq = %d\n",
info->ae_src == IRDMA_AE_SOURCE_RSRC_EXHT_Q1,
info->ae_src == IRDMA_AE_SOURCE_RSRC_EXHT_XT_RR);
break;
case IRDMA_AE_PRIV_OPERATION_DENIED:
case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE:
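The new resource-exhaustion AE is reported by comparing ae_src against the two source codes added to the header earlier in this commit; each comparison collapses to 0 or 1 in the log line. A standalone sketch of the same decode, with the constants copied from that hunk (illustrative only, not driver code):

#include <stdio.h>
#include <stdint.h>

#define IRDMA_AE_SOURCE_RSRC_EXHT_Q1	0x1
#define IRDMA_AE_SOURCE_RSRC_EXHT_XT_RR	0x5

/* Mirror of the log line added to irdma_process_aeq(): each comparison
 * yields 0 or 1, so exactly one of the two columns identifies the cause.
 */
static void report_exhaustion(uint8_t ae_src)
{
	printf("Resource exhaustion reason: q1 = %d xmit or rreq = %d\n",
	       ae_src == IRDMA_AE_SOURCE_RSRC_EXHT_Q1,
	       ae_src == IRDMA_AE_SOURCE_RSRC_EXHT_XT_RR);
}

int main(void)
{
	report_exhaustion(IRDMA_AE_SOURCE_RSRC_EXHT_Q1);
	report_exhaustion(IRDMA_AE_SOURCE_RSRC_EXHT_XT_RR);
	return 0;
}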
@ -381,13 +391,12 @@ irdma_process_aeq(struct irdma_pci_f *rf)
case IRDMA_AE_LLP_TOO_MANY_RETRIES:
case IRDMA_AE_LCE_QP_CATASTROPHIC:
case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
case IRDMA_AE_LCE_CQ_CATASTROPHIC:
case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
default:
irdma_dev_err(dev, "abnormal ae_id = 0x%x bool qp=%d qp_id = %d, ae_source=%d\n",
irdma_dev_err(&iwdev->ibdev, "abnormal ae_id = 0x%x bool qp=%d qp_id = %d ae_source=%d\n",
info->ae_id, info->qp, info->qp_cq_id, info->ae_src);
if (rdma_protocol_roce(&iwqp->iwdev->ibdev, 1)) {
ctx_info->roce_info->err_rq_idx_valid = info->rq;
ctx_info->roce_info->err_rq_idx_valid = info->err_rq_idx_valid;
if (info->rq) {
ctx_info->roce_info->err_rq_idx = info->wqe_idx;
irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va,
@ -397,7 +406,7 @@ irdma_process_aeq(struct irdma_pci_f *rf)
irdma_cm_disconn(iwqp);
break;
}
ctx_info->iwarp_info->err_rq_idx_valid = info->rq;
ctx_info->iwarp_info->err_rq_idx_valid = info->err_rq_idx_valid;
if (info->rq) {
ctx_info->iwarp_info->err_rq_idx = info->wqe_idx;
ctx_info->tcp_info_valid = false;
@ -438,9 +447,10 @@ irdma_ena_intr(struct irdma_sc_dev *dev, u32 msix_id)
* @t: tasklet_struct ptr
*/
static void
irdma_dpc(struct tasklet_struct *t)
irdma_dpc(unsigned long t)
{
struct irdma_pci_f *rf = from_tasklet(rf, t, dpc_tasklet);
struct irdma_pci_f *rf = from_tasklet(rf, (struct tasklet_struct *)t,
dpc_tasklet);
if (rf->msix_shared)
irdma_process_ceq(rf, rf->ceqlist);
@ -453,9 +463,10 @@ irdma_dpc(struct tasklet_struct *t)
* @t: tasklet_struct ptr
*/
static void
irdma_ceq_dpc(struct tasklet_struct *t)
irdma_ceq_dpc(unsigned long t)
{
struct irdma_ceq *iwceq = from_tasklet(iwceq, t, dpc_tasklet);
struct irdma_ceq *iwceq = from_tasklet(iwceq, (struct tasklet_struct *)t,
dpc_tasklet);
struct irdma_pci_f *rf = iwceq->rf;
irdma_process_ceq(rf, iwceq);
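The DPC handlers now take the tasklet argument as an opaque unsigned long and cast it back before from_tasklet(), which is a container_of()-style lookup of the enclosing structure. A minimal sketch of that recovery pattern; the struct names are invented for the example and container_of is spelled out in its usual offsetof form rather than taken from the driver's headers:

#include <stdio.h>
#include <stddef.h>

/* container_of(): recover the parent structure from a pointer to one of
 * its members - the pattern from_tasklet() is built on.
 */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct tasklet { int pending; };

struct ceq {			/* stand-in for struct irdma_ceq */
	int id;
	struct tasklet dpc_tasklet;
};

static void ceq_dpc(unsigned long arg)	/* handler receives an opaque word */
{
	struct tasklet *t = (struct tasklet *)arg;
	struct ceq *ceq = container_of(t, struct ceq, dpc_tasklet);

	printf("servicing CEQ %d\n", ceq->id);
}

int main(void)
{
	struct ceq ceq = { .id = 3 };

	ceq_dpc((unsigned long)&ceq.dpc_tasklet);
	return 0;
}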
@ -479,7 +490,7 @@ irdma_save_msix_info(struct irdma_pci_f *rf)
u32 size;
if (!rf->msix_count) {
irdma_dev_err(&rf->sc_dev, "No MSI-X vectors reserved for RDMA.\n");
irdma_dev_err(to_ibdev(&rf->sc_dev), "No MSI-X vectors reserved for RDMA.\n");
return -EINVAL;
}
@ -605,8 +616,7 @@ irdma_destroy_cqp(struct irdma_pci_f *rf, bool free_hwcqp)
destroy_workqueue(rf->cqp_cmpl_wq);
status = irdma_sc_cqp_destroy(dev->cqp, free_hwcqp);
if (status)
irdma_debug(dev, IRDMA_DEBUG_ERR, "Destroy CQP failed %d\n",
status);
irdma_debug(dev, IRDMA_DEBUG_ERR, "Destroy CQP failed %d\n", status);
irdma_cleanup_pending_cqp_op(rf);
irdma_free_dma_mem(dev->hw, &cqp->sq);
@ -653,8 +663,7 @@ irdma_destroy_aeq(struct irdma_pci_f *rf)
aeq->sc_aeq.size = 0;
status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_DESTROY);
if (status)
irdma_debug(dev, IRDMA_DEBUG_ERR, "Destroy AEQ failed %d\n",
status);
irdma_debug(dev, IRDMA_DEBUG_ERR, "Destroy AEQ failed %d\n", status);
exit:
if (aeq->virtual_map)
@ -682,15 +691,15 @@ irdma_destroy_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq)
status = irdma_sc_ceq_destroy(&iwceq->sc_ceq, 0, 1);
if (status) {
irdma_debug(dev, IRDMA_DEBUG_ERR,
"CEQ destroy command failed %d\n", status);
irdma_debug(dev, IRDMA_DEBUG_ERR, "CEQ destroy command failed %d\n", status);
goto exit;
}
status = irdma_sc_cceq_destroy_done(&iwceq->sc_ceq);
if (status)
irdma_debug(dev, IRDMA_DEBUG_ERR,
"CEQ destroy completion failed %d\n", status);
"CEQ destroy completion failed %d\n",
status);
exit:
spin_lock_destroy(&iwceq->ce_lock);
spin_lock_destroy(&iwceq->sc_ceq.req_cq_lock);
@ -776,8 +785,7 @@ irdma_destroy_ccq(struct irdma_pci_f *rf)
if (!rf->reset)
status = irdma_sc_ccq_destroy(dev->ccq, 0, true);
if (status)
irdma_debug(dev, IRDMA_DEBUG_ERR, "CCQ destroy failed %d\n",
status);
irdma_debug(dev, IRDMA_DEBUG_ERR, "CCQ destroy failed %d\n", status);
irdma_free_dma_mem(dev->hw, &ccq->mem_cq);
}
@ -803,7 +811,8 @@ irdma_close_hmc_objects_type(struct irdma_sc_dev *dev,
info.privileged = privileged;
if (irdma_sc_del_hmc_obj(dev, &info, reset))
irdma_debug(dev, IRDMA_DEBUG_ERR,
"del HMC obj of type %d failed\n", obj_type);
"del HMC obj of type %d failed\n",
obj_type);
}
/**
@ -1005,8 +1014,7 @@ irdma_create_cqp(struct irdma_pci_f *rf)
}
status = irdma_sc_cqp_init(dev->cqp, &cqp_init_info);
if (status) {
irdma_debug(dev, IRDMA_DEBUG_ERR, "cqp init status %d\n",
status);
irdma_debug(dev, IRDMA_DEBUG_ERR, "cqp init status %d\n", status);
goto err_ctx;
}
@ -1114,7 +1122,7 @@ irdma_alloc_set_mac(struct irdma_device *iwdev)
&iwdev->mac_ip_table_idx);
if (!status) {
status = irdma_add_local_mac_entry(iwdev->rf,
(u8 *)IF_LLADDR(iwdev->netdev),
(const u8 *)IF_LLADDR(iwdev->netdev),
(u8)iwdev->mac_ip_table_idx);
if (status)
irdma_del_local_mac_entry(iwdev->rf,
@ -1147,14 +1155,16 @@ irdma_irq_request(struct irdma_pci_f *rf,
msix_vec->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
if (!msix_vec->res) {
irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR,
"Unable to allocate bus resource int[%d]\n", rid);
"Unable to allocate bus resource int[%d]\n",
rid);
return -EINVAL;
}
err = bus_setup_intr(dev, msix_vec->res, INTR_TYPE_NET | INTR_MPSAFE,
NULL, handler, argument, &msix_vec->tag);
if (err) {
irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR,
"Unable to register handler with %x status\n", err);
"Unable to register handler with %x status\n",
err);
status = -EINVAL;
goto fail_intr;
}
@ -1185,20 +1195,24 @@ irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
int status;
if (rf->msix_shared && !ceq_id) {
snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
"irdma-%s-AEQCEQ-0", dev_name(&rf->pcidev->dev));
tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
status = irdma_irq_request(rf, msix_vec, irdma_irq_handler, rf);
if (status)
return status;
bus_describe_intr(rf->dev_ctx.dev, msix_vec->res, msix_vec->tag, "AEQCEQ");
bus_describe_intr(rf->dev_ctx.dev, msix_vec->res, msix_vec->tag, "%s", msix_vec->name);
} else {
snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
"irdma-%s-CEQ-%d",
dev_name(&rf->pcidev->dev), ceq_id);
tasklet_setup(&iwceq->dpc_tasklet, irdma_ceq_dpc);
status = irdma_irq_request(rf, msix_vec, irdma_ceq_handler, iwceq);
if (status)
return status;
bus_describe_intr(rf->dev_ctx.dev, msix_vec->res, msix_vec->tag, "CEQ");
bus_describe_intr(rf->dev_ctx.dev, msix_vec->res, msix_vec->tag, "%s", msix_vec->name);
}
msix_vec->ceq_id = ceq_id;
rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, ceq_id, msix_vec->idx, true);
@ -1219,15 +1233,16 @@ irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
u32 ret = 0;
if (!rf->msix_shared) {
snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
"irdma-%s-AEQ", dev_name(&rf->pcidev->dev));
tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
ret = irdma_irq_request(rf, msix_vec, irdma_irq_handler, rf);
if (ret)
return ret;
bus_describe_intr(rf->dev_ctx.dev, msix_vec->res, msix_vec->tag, "irdma");
bus_describe_intr(rf->dev_ctx.dev, msix_vec->res, msix_vec->tag, "%s", msix_vec->name);
}
if (ret) {
irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR,
"aeq irq config fail\n");
irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR, "aeq irq config fail\n");
return -EINVAL;
}
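Part of the interrupt naming improvements: each MSI-X vector now keeps a persistent name built from the PCI device name and its role (AEQCEQ-0, CEQ-n, AEQ), and that same string is handed to bus_describe_intr() through a "%s" format. A small sketch of just the formatting, with a placeholder device name:

#include <stdio.h>

#define IRDMA_IRQ_NAME_STR_LEN 64

int main(void)
{
	char aeq_name[IRDMA_IRQ_NAME_STR_LEN];
	char ceq_name[IRDMA_IRQ_NAME_STR_LEN];
	const char *pci_name = "pci0:1:0:0";	/* placeholder device name */
	int ceq_id = 2;

	/* Same patterns used for the shared AEQ/CEQ vector and per-CEQ vectors. */
	snprintf(aeq_name, sizeof(aeq_name) - 1, "irdma-%s-AEQCEQ-0", pci_name);
	snprintf(ceq_name, sizeof(ceq_name) - 1, "irdma-%s-CEQ-%d", pci_name, ceq_id);

	printf("%s\n%s\n", aeq_name, ceq_name);
	return 0;
}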
@ -1323,7 +1338,8 @@ irdma_setup_ceq_0(struct irdma_pci_f *rf)
status = irdma_create_ceq(rf, iwceq, 0, &rf->default_vsi);
if (status) {
irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR,
"create ceq status = %d\n", status);
"create ceq status = %d\n",
status);
goto exit;
}
@ -1378,7 +1394,8 @@ irdma_setup_ceqs(struct irdma_pci_f *rf, struct irdma_sc_vsi *vsi)
status = irdma_create_ceq(rf, iwceq, ceq_id, vsi);
if (status) {
irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR,
"create ceq status = %d\n", status);
"create ceq status = %d\n",
status);
goto del_ceqs;
}
spin_lock_init(&iwceq->ce_lock);
@ -1555,8 +1572,7 @@ irdma_initialize_ilq(struct irdma_device *iwdev)
info.xmit_complete = irdma_free_sqbuf;
status = irdma_puda_create_rsrc(&iwdev->vsi, &info);
if (status)
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_ERR,
"ilq create fail\n");
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_ERR, "ilq create fail\n");
return status;
}
@ -1585,8 +1601,7 @@ irdma_initialize_ieq(struct irdma_device *iwdev)
info.tx_buf_cnt = 4096;
status = irdma_puda_create_rsrc(&iwdev->vsi, &info);
if (status)
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_ERR,
"ieq create fail\n");
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_ERR, "ieq create fail\n");
return status;
}
@ -1709,6 +1724,10 @@ irdma_initialize_dev(struct irdma_pci_f *rf)
info.bar0 = rf->hw.hw_addr;
info.hmc_fn_id = rf->peer_info->pf_id;
/*
* the debug_mask is already assigned at this point through sysctl and so the value shouldn't be overwritten
*/
info.debug_mask = rf->sc_dev.debug_mask;
info.hw = &rf->hw;
status = irdma_sc_dev_init(&rf->sc_dev, &info);
if (status)
@ -1733,8 +1752,7 @@ void
irdma_rt_deinit_hw(struct irdma_device *iwdev)
{
struct irdma_sc_qp qp = {{0}};
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_INIT, "state = %d\n",
iwdev->init_state);
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_INIT, "state = %d\n", iwdev->init_state);
switch (iwdev->init_state) {
case IP_ADDR_REGISTERED:
@ -1765,8 +1783,7 @@ irdma_rt_deinit_hw(struct irdma_device *iwdev)
iwdev->rf->reset);
break;
default:
irdma_dev_warn(&iwdev->rf->sc_dev, "bad init_state = %d\n",
iwdev->init_state);
irdma_dev_warn(&iwdev->ibdev, "bad init_state = %d\n", iwdev->init_state);
break;
}
@ -1821,14 +1838,14 @@ irdma_setup_init_state(struct irdma_pci_f *rf)
static void
irdma_get_used_rsrc(struct irdma_device *iwdev)
{
iwdev->rf->used_pds = find_next_zero_bit(iwdev->rf->allocated_pds,
iwdev->rf->max_pd, 0);
iwdev->rf->used_qps = find_next_zero_bit(iwdev->rf->allocated_qps,
iwdev->rf->max_qp, 0);
iwdev->rf->used_cqs = find_next_zero_bit(iwdev->rf->allocated_cqs,
iwdev->rf->max_cq, 0);
iwdev->rf->used_mrs = find_next_zero_bit(iwdev->rf->allocated_mrs,
iwdev->rf->max_mr, 0);
iwdev->rf->used_pds = find_first_zero_bit(iwdev->rf->allocated_pds,
iwdev->rf->max_pd);
iwdev->rf->used_qps = find_first_zero_bit(iwdev->rf->allocated_qps,
iwdev->rf->max_qp);
iwdev->rf->used_cqs = find_first_zero_bit(iwdev->rf->allocated_cqs,
iwdev->rf->max_cq);
iwdev->rf->used_mrs = find_first_zero_bit(iwdev->rf->allocated_mrs,
iwdev->rf->max_mr);
}
void
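find_first_zero_bit(map, size) is equivalent to the former find_next_zero_bit(map, size, 0): it returns the index of the first clear bit, which doubles as the count of in-use objects when allocations are packed from the bottom of the bitmap. A userland model of that scan (not the kernel implementation):

#include <stdio.h>
#include <stdint.h>

/* Userland model of find_first_zero_bit(): index of the first clear bit,
 * or nbits if every bit below nbits is set.
 */
static unsigned int first_zero_bit(const uint64_t *map, unsigned int nbits)
{
	for (unsigned int i = 0; i < nbits; i++)
		if (!(map[i / 64] & (1ULL << (i % 64))))
			return i;
	return nbits;
}

int main(void)
{
	uint64_t allocated_qps[2] = { 0x1f, 0 };	/* QPs 0..4 in use */

	/* With allocations packed from the bottom, the first clear bit is also
	 * the number of objects currently in use - which is how
	 * irdma_get_used_rsrc() derives used_qps/used_cqs/used_mrs/used_pds.
	 */
	printf("used_qps = %u\n", first_zero_bit(allocated_qps, 128));
	return 0;
}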
@ -1864,7 +1881,7 @@ irdma_ctrl_deinit_hw(struct irdma_pci_f *rf)
break;
case INVALID_STATE:
default:
irdma_pr_warn("bad init_state = %d\n", rf->init_state);
irdma_dev_warn(&rf->iwdev->ibdev, "bad init_state = %d\n", rf->init_state);
break;
}
}
@ -1978,8 +1995,8 @@ irdma_rt_init_hw(struct irdma_device *iwdev,
return 0;
} while (0);
irdma_dev_err(idev_to_dev(dev), "HW runtime init FAIL status = %d last cmpl = %d\n",
status, iwdev->init_state);
dev_err(&rf->pcidev->dev, "HW runtime init FAIL status = %d last cmpl = %d\n",
status, iwdev->init_state);
irdma_rt_deinit_hw(iwdev);
return status;
@ -2197,10 +2214,8 @@ irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
if (info.error && irdma_cqp_crit_err(dev, cqp_request->info.cqp_cmd,
info.maj_err_code,
info.min_err_code))
irdma_dev_err(dev, "cqp opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
info.op_code, info.maj_err_code,
info.min_err_code);
irdma_dev_err(&rf->iwdev->ibdev, "cqp opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
info.op_code, info.maj_err_code, info.min_err_code);
if (cqp_request) {
cqp_request->compl_info.maj_err_code = info.maj_err_code;
cqp_request->compl_info.min_err_code = info.min_err_code;
@ -2323,7 +2338,7 @@ irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx)
* @idx: the index of the mac ip address to add
*/
int
irdma_add_local_mac_entry(struct irdma_pci_f *rf, u8 *mac_addr, u16 idx)
irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx)
{
struct irdma_local_mac_entry_info *info;
struct irdma_cqp *iwcqp = &rf->cqp;
@ -2414,7 +2429,7 @@ irdma_cqp_manage_apbvt_cmd(struct irdma_device *iwdev,
cqp_info->post_sq = 1;
cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->rf->cqp.sc_cqp;
cqp_info->in.u.manage_apbvt_entry.scratch = (uintptr_t)cqp_request;
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_DEV, "%s: port=0x%04x\n",
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_DEV, "%s: port=0x%04x\n",
(!add_port) ? "DELETE" : "ADD", accel_local_port);
status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
@ -2566,7 +2581,6 @@ irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
bool wait)
{
struct irdma_qhash_table_info *info;
struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
struct irdma_cqp *iwcqp = &iwdev->rf->cqp;
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
@ -2616,17 +2630,21 @@ irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
atomic_inc(&cm_node->refcnt);
}
if (info->ipv4_valid)
irdma_debug(dev, IRDMA_DEBUG_CM,
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"%s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%pI4 rem_addr=%pI4 mac=%pM, vlan_id=%d cm_node=%p\n",
(!mtype) ? "DELETE" : "ADD", __builtin_return_address(0),
info->dest_port, info->src_port, info->dest_ip, info->src_ip,
info->mac_addr, cminfo->vlan_id, cmnode ? cmnode : NULL);
(!mtype) ? "DELETE" : "ADD",
__builtin_return_address(0), info->dest_port,
info->src_port, info->dest_ip, info->src_ip,
info->mac_addr, cminfo->vlan_id,
cmnode ? cmnode : NULL);
else
irdma_debug(dev, IRDMA_DEBUG_CM,
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"%s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%pI6 rem_addr=%pI6 mac=%pM, vlan_id=%d cm_node=%p\n",
(!mtype) ? "DELETE" : "ADD", __builtin_return_address(0),
info->dest_port, info->src_port, info->dest_ip, info->src_ip,
info->mac_addr, cminfo->vlan_id, cmnode ? cmnode : NULL);
(!mtype) ? "DELETE" : "ADD",
__builtin_return_address(0), info->dest_port,
info->src_port, info->dest_ip, info->src_ip,
info->mac_addr, cminfo->vlan_id,
cmnode ? cmnode : NULL);
cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->rf->cqp.sc_cqp;
cqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request;
@ -2694,8 +2712,9 @@ irdma_hw_flush_wqes(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
qp->qp_uk.sq_flush_complete = true;
}
}
irdma_debug(&rf->sc_dev, IRDMA_DEBUG_VERBS,
"qp_id=%d qp_type=%d qpstate=%d ibqpstate=%d last_aeq=%d hw_iw_state=%d maj_err_code=%d min_err_code=%d\n",
"qp_id=%d qp_type=%d qpstate=%d ibqpstate=%d last_aeq=%d hw_iw_state=%d maj_err_code=%d min_err_code=%d\n",
iwqp->ibqp.qp_num, rf->protocol_used, iwqp->iwarp_state,
iwqp->ibqp_state, iwqp->last_aeq, iwqp->hw_iwarp_state,
cqp_request->compl_info.maj_err_code, cqp_request->compl_info.min_err_code);
@ -2770,7 +2789,7 @@ irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask)
info.rq_minor_code = flush_code;
}
if (irdma_upload_context && irdma_upload_qp_context(iwqp, 0, 1))
irdma_print("failed to upload QP context\n");
irdma_dev_warn(&iwqp->iwdev->ibdev, "failed to upload QP context\n");
if (!iwqp->user_mode)
irdma_sched_qp_flush_work(iwqp);
}

View File

@ -192,7 +192,7 @@ irdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
struct irdma_alloc_ucontext_req req = {0};
struct irdma_alloc_ucontext_resp uresp = {0};
struct irdma_ucontext *ucontext = to_ucontext(uctx);
struct irdma_uk_attrs *uk_attrs;
struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN ||
udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
@ -207,7 +207,9 @@ irdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
ucontext->iwdev = iwdev;
ucontext->abi_ver = req.userspace_ver;
uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
if (req.comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR)
ucontext->use_raw_attrs = true;
/* GEN_1 support for libi40iw */
if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) {
if (uk_attrs->hw_rev != IRDMA_GEN_1)
@ -234,6 +236,7 @@ irdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size;
uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
uresp.hw_rev = uk_attrs->hw_rev;
uresp.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR;
bar_off =
(uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
@ -262,7 +265,7 @@ irdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
return 0;
ver_error:
irdma_dev_err(&iwdev->rf->sc_dev,
irdma_dev_err(&iwdev->ibdev,
"Invalid userspace driver version detected. Detected version %d, should be %d\n",
req.userspace_ver, IRDMA_ABI_VER);
return -EINVAL;
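comp_mask gives the user-space provider and the kernel a forward-compatible way to negotiate optional behavior such as IRDMA_ALLOC_UCTX_USE_RAW_ATTR: old consumers leave the field zeroed, new ones set the bit and look for it in the response. A trimmed-down sketch of the handshake with only the negotiation field shown (struct names are illustrative):

#include <stdio.h>
#include <stdint.h>

#define IRDMA_ALLOC_UCTX_USE_RAW_ATTR	(1u << 0)

/* Trimmed-down request/response pair; only the negotiation field is shown. */
struct alloc_uctx_req  { uint64_t comp_mask; };
struct alloc_uctx_resp { uint64_t comp_mask; };

int main(void)
{
	struct alloc_uctx_req req = { .comp_mask = IRDMA_ALLOC_UCTX_USE_RAW_ATTR };
	struct alloc_uctx_resp resp = { 0 };
	int use_raw_attrs = 0;

	/* Kernel side: honor the flag only if the provider requested it,
	 * and advertise support back in the response.
	 */
	if (req.comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR)
		use_raw_attrs = 1;
	resp.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR;

	printf("use_raw_attrs=%d resp.comp_mask=0x%llx\n",
	       use_raw_attrs, (unsigned long long)resp.comp_mask);
	return 0;
}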
@ -285,7 +288,7 @@ irdma_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata)
struct irdma_alloc_ucontext_req req = {0};
struct irdma_alloc_ucontext_resp uresp = {0};
struct irdma_ucontext *ucontext;
struct irdma_uk_attrs *uk_attrs;
struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN ||
udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
@ -304,7 +307,9 @@ irdma_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata)
ucontext->iwdev = iwdev;
ucontext->abi_ver = req.userspace_ver;
uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
if (req.comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR)
ucontext->use_raw_attrs = true;
/* GEN_1 legacy support with libi40iw */
if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) {
if (uk_attrs->hw_rev != IRDMA_GEN_1) {
@ -335,6 +340,7 @@ irdma_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata)
uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size;
uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
uresp.hw_rev = uk_attrs->hw_rev;
uresp.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR;
bar_off =
(uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
@ -369,9 +375,9 @@ irdma_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata)
return &ucontext->ibucontext;
ver_error:
ibdev_err(&iwdev->ibdev,
"Invalid userspace driver version detected. Detected version %d, should be %d\n",
req.userspace_ver, IRDMA_ABI_VER);
irdma_dev_err(&iwdev->ibdev,
"Invalid userspace driver version detected. Detected version %d, should be %d\n",
req.userspace_ver, IRDMA_ABI_VER);
return ERR_PTR(-EINVAL);
}
#endif
@ -440,8 +446,7 @@ irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
sc_pd = &iwpd->sc_pd;
if (udata) {
struct irdma_ucontext *ucontext =
rdma_udata_to_drv_context(udata, struct irdma_ucontext,
ibucontext);
rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
irdma_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
uresp.pd_id = pd_id;
@ -454,6 +459,9 @@ irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
irdma_sc_pd_init(dev, sc_pd, pd_id, IRDMA_ABI_VER);
}
spin_lock_init(&iwpd->udqp_list_lock);
INIT_LIST_HEAD(&iwpd->udqp_list);
return 0;
error:
@ -509,6 +517,9 @@ irdma_alloc_pd(struct ib_device *ibdev, struct ib_ucontext *context, struct ib_u
irdma_sc_pd_init(dev, sc_pd, pd_id, IRDMA_ABI_VER);
}
spin_lock_init(&iwpd->udqp_list_lock);
INIT_LIST_HEAD(&iwpd->udqp_list);
return &iwpd->ibpd;
error:
@ -547,6 +558,54 @@ irdma_dealloc_pd(struct ib_pd *ibpd)
}
#endif
/**
* irdma_find_qp_update_qs - update QS handle for UD QPs
* @rf: RDMA PCI function
* @pd: protection domain object
* @user_pri: selected user priority
*/
static void
irdma_find_qp_update_qs(struct irdma_pci_f *rf,
struct irdma_pd *pd, u8 user_pri)
{
struct irdma_qp *iwqp;
struct list_head *tmp_node, *list_node;
struct irdma_udqs_work *work;
unsigned long flags;
bool qs_change;
spin_lock_irqsave(&pd->udqp_list_lock, flags);
list_for_each_safe(list_node, tmp_node, &pd->udqp_list) {
qs_change = true;
iwqp = list_entry(list_node, struct irdma_qp, ud_list_elem);
irdma_qp_add_ref(&iwqp->ibqp);
/* check if qs_handle needs to be changed */
if (iwqp->sc_qp.qs_handle == iwqp->sc_qp.vsi->qos[user_pri].qs_handle) {
if (iwqp->ctx_info.user_pri == user_pri) {
/* qs_handle and user_pri don't change */
irdma_qp_rem_ref(&iwqp->ibqp);
continue;
}
qs_change = false;
}
/* perform qp qos change */
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work) {
irdma_qp_rem_ref(&iwqp->ibqp);
spin_unlock_irqrestore(&pd->udqp_list_lock, flags);
return;
}
work->iwqp = iwqp;
work->user_prio = user_pri;
work->qs_change = qs_change;
INIT_WORK(&work->work, irdma_udqp_qs_worker);
if (qs_change)
irdma_cqp_qp_suspend_resume(&iwqp->sc_qp, IRDMA_OP_SUSPEND);
queue_work(rf->iwdev->cleanup_wq, &work->work);
}
spin_unlock_irqrestore(&pd->udqp_list_lock, flags);
}
static void
irdma_fill_ah_info(struct vnet *vnet, struct irdma_ah_info *ah_info,
const struct ib_gid_attr *sgid_attr,
@ -559,12 +618,10 @@ irdma_fill_ah_info(struct vnet *vnet, struct irdma_ah_info *ah_info,
ntohl(((struct sockaddr_in *)dgid_addr)->sin_addr.s_addr);
ah_info->src_ip_addr[0] =
ntohl(((struct sockaddr_in *)sgid_addr)->sin_addr.s_addr);
#ifdef VIMAGE
CURVNET_SET_QUIET(vnet);
ah_info->do_lpbk = irdma_ipv4_is_lpb(ah_info->src_ip_addr[0],
ah_info->dest_ip_addr[0]);
CURVNET_RESTORE();
#endif
if (ipv4_is_multicast(((struct sockaddr_in *)dgid_addr)->sin_addr.s_addr)) {
irdma_mcast_mac_v4(ah_info->dest_ip_addr, dmac);
}
@ -581,12 +638,19 @@ irdma_fill_ah_info(struct vnet *vnet, struct irdma_ah_info *ah_info,
}
}
static inline u8 irdma_get_vlan_ndev_prio(struct ifnet *ndev, u8 prio){
return prio;
}
static int
irdma_create_ah_vlan_tag(struct irdma_device *iwdev,
struct irdma_pd *pd,
struct irdma_ah_info *ah_info,
const struct ib_gid_attr *sgid_attr,
u8 *dmac)
{
u16 vlan_prio;
if (sgid_attr->ndev && is_vlan_dev(sgid_attr->ndev))
ah_info->vlan_tag = vlan_dev_vlan_id(sgid_attr->ndev);
else
@ -601,9 +665,12 @@ irdma_create_ah_vlan_tag(struct irdma_device *iwdev,
ah_info->vlan_tag = 0;
if (ah_info->vlan_tag < VLAN_N_VID) {
struct ifnet *ndev = sgid_attr->ndev;
ah_info->insert_vlan_tag = true;
ah_info->vlan_tag |=
(u16)rt_tos2priority(ah_info->tc_tos) << VLAN_PRIO_SHIFT;
vlan_prio = (u16)irdma_get_vlan_ndev_prio(ndev, rt_tos2priority(ah_info->tc_tos));
ah_info->vlan_tag |= vlan_prio << VLAN_PRIO_SHIFT;
irdma_find_qp_update_qs(iwdev->rf, pd, vlan_prio);
}
if (iwdev->roce_dcqcn_en) {
ah_info->tc_tos &= ~ECN_CODE_PT_MASK;
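For UD traffic with PFC, the priority derived from the ToS now lands in the PCP bits of the VLAN tag and also drives the QS update for UD QPs on the PD. A minimal sketch of the tag composition, assuming the conventional VLAN_PRIO_SHIFT of 13 (PCP in TCI bits 15:13):

#include <stdio.h>
#include <stdint.h>

#define VLAN_PRIO_SHIFT	13	/* PCP field of the 802.1Q TCI */

int main(void)
{
	uint16_t vlan_id = 100;		/* from the VLAN interface */
	uint8_t prio = 5;		/* priority derived from the ToS */
	uint16_t vlan_tag;

	/* Same composition as irdma_create_ah_vlan_tag(): VID in the low
	 * 12 bits, user priority in the top 3 (PCP) bits.
	 */
	vlan_tag = vlan_id | (uint16_t)((uint16_t)prio << VLAN_PRIO_SHIFT);

	printf("vlan_tag = 0x%04x (vid %u, prio %u)\n",
	       vlan_tag, vlan_tag & 0xfff, vlan_tag >> VLAN_PRIO_SHIFT);
	return 0;
}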
@ -689,7 +756,7 @@ irdma_create_ah(struct ib_ah *ib_ah,
attr->grh.sgid_index, &sgid, &sgid_attr);
rcu_read_unlock();
if (err) {
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
"GID lookup at idx=%d with port=%d failed\n",
attr->grh.sgid_index, attr->port_num);
err = -EINVAL;
@ -723,22 +790,20 @@ irdma_create_ah(struct ib_ah *ib_ah,
irdma_fill_ah_info(iwdev->netdev->if_vnet, ah_info, &sgid_attr, &sgid_addr.saddr, &dgid_addr.saddr,
dmac, ah->av.net_type);
err = irdma_create_ah_vlan_tag(iwdev, ah_info, &sgid_attr, dmac);
err = irdma_create_ah_vlan_tag(iwdev, pd, ah_info, &sgid_attr, dmac);
if (err)
goto err_gid_l2;
err = irdma_ah_cqp_op(iwdev->rf, sc_ah, IRDMA_OP_AH_CREATE,
sleep, irdma_gsi_ud_qp_ah_cb, sc_ah);
if (err) {
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
"CQP-OP Create AH fail");
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_DEV, "CQP-OP Create AH fail");
goto err_gid_l2;
}
err = irdma_create_ah_wait(rf, sc_ah, sleep);
if (err) {
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
"CQP create AH timed out");
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_DEV, "CQP create AH timed out");
goto err_gid_l2;
}
@ -859,7 +924,7 @@ irdma_create_ah(struct ib_pd *ibpd,
attr->grh.sgid_index, &sgid, &sgid_attr);
rcu_read_unlock();
if (err) {
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
"GID lookup at idx=%d with port=%d failed\n",
attr->grh.sgid_index, attr->port_num);
err = -EINVAL;
@ -895,22 +960,20 @@ irdma_create_ah(struct ib_pd *ibpd,
irdma_fill_ah_info(iwdev->netdev->if_vnet, ah_info, &sgid_attr, &sgid_addr.saddr, &dgid_addr.saddr,
dmac, ah->av.net_type);
err = irdma_create_ah_vlan_tag(iwdev, ah_info, &sgid_attr, dmac);
err = irdma_create_ah_vlan_tag(iwdev, pd, ah_info, &sgid_attr, dmac);
if (err)
goto err_gid_l2;
err = irdma_ah_cqp_op(iwdev->rf, sc_ah, IRDMA_OP_AH_CREATE,
sleep, irdma_gsi_ud_qp_ah_cb, sc_ah);
if (err) {
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
"CQP-OP Create AH fail");
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "CQP-OP Create AH fail");
goto err_gid_l2;
}
err = irdma_create_ah_wait(rf, sc_ah, sleep);
if (err) {
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
"CQP create AH timed out");
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_DEV, "CQP create AH timed out");
goto err_gid_l2;
}
@ -990,6 +1053,7 @@ irdma_create_qp(struct ib_pd *ibpd,
struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
struct irdma_qp_init_info init_info = {{0}};
struct irdma_qp_host_ctx_info *ctx_info;
unsigned long flags;
err_code = irdma_validate_qp_attrs(init_attr, iwdev);
if (err_code)
@ -1074,8 +1138,7 @@ irdma_create_qp(struct ib_pd *ibpd,
}
if (err_code) {
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
"setup qp failed\n");
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "setup qp failed\n");
goto error;
}
@ -1098,8 +1161,7 @@ irdma_create_qp(struct ib_pd *ibpd,
ret = irdma_sc_qp_init(qp, &init_info);
if (ret) {
err_code = -EPROTO;
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
"qp_init fail\n");
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "qp_init fail\n");
goto error;
}
@ -1130,6 +1192,10 @@ irdma_create_qp(struct ib_pd *ibpd,
}
irdma_qp_add_qos(&iwqp->sc_qp);
spin_lock_irqsave(&iwpd->udqp_list_lock, flags);
if (iwqp->sc_qp.qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD)
list_add_tail(&iwqp->ud_list_elem, &iwpd->udqp_list);
spin_unlock_irqrestore(&iwpd->udqp_list_lock, flags);
}
if (udata) {
@ -1149,8 +1215,7 @@ irdma_create_qp(struct ib_pd *ibpd,
err_code = ib_copy_to_udata(udata, &uresp,
min(sizeof(uresp), udata->outlen));
if (err_code) {
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
"copy_to_udata failed\n");
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "copy_to_udata failed\n");
kc_irdma_destroy_qp(&iwqp->ibqp, udata);
return ERR_PTR(err_code);
}
@ -1180,19 +1245,25 @@ irdma_destroy_qp(struct ib_qp *ibqp)
{
struct irdma_qp *iwqp = to_iwqp(ibqp);
struct irdma_device *iwdev = iwqp->iwdev;
unsigned long flags;
if (iwqp->sc_qp.qp_uk.destroy_pending)
goto free_rsrc;
iwqp->sc_qp.qp_uk.destroy_pending = true;
spin_lock_irqsave(&iwqp->iwpd->udqp_list_lock, flags);
if (iwqp->sc_qp.qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD)
list_del(&iwqp->ud_list_elem);
spin_unlock_irqrestore(&iwqp->iwpd->udqp_list_lock, flags);
if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS)
irdma_modify_qp_to_err(&iwqp->sc_qp);
irdma_qp_rem_ref(&iwqp->ibqp);
wait_for_completion(&iwqp->free_qp);
irdma_free_lsmm_rsrc(iwqp);
if (!iwdev->rf->reset &&
irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp))
return -ENOTRECOVERABLE;
if (!iwdev->rf->reset && irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp))
return (iwdev->rf->rdma_ver <= IRDMA_GEN_2 && !iwqp->user_mode) ? 0 : -ENOTRECOVERABLE;
free_rsrc:
if (!iwqp->user_mode) {
if (iwqp->iwscq) {
@ -1292,6 +1363,7 @@ irdma_create_cq(struct ib_device *ibdev,
cqe_64byte_ena = (dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_64_BYTE_CQE) ? true : false;
ukinfo->avoid_mem_cflct = cqe_64byte_ena;
iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
atomic_set(&iwcq->armed, 0);
if (attr->comp_vector < rf->ceqs_count)
info.ceq_id = attr->comp_vector;
info.ceq_id_valid = true;
@ -1403,8 +1475,7 @@ irdma_create_cq(struct ib_device *ibdev,
info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
(u32)IRDMA_MAX_CQ_READ_THRESH);
if (irdma_sc_cq_init(cq, &info)) {
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
"init cq fail\n");
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "init cq fail\n");
err_code = -EPROTO;
goto cq_free_rsrc;
}
@ -1434,8 +1505,7 @@ irdma_create_cq(struct ib_device *ibdev,
resp.cq_size = info.cq_uk_init_info.cq_size;
if (ib_copy_to_udata(udata, &resp,
min(sizeof(resp), udata->outlen))) {
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
"copy to user data\n");
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "copy to user data\n");
err_code = -EPROTO;
goto cq_destroy;
}
@ -1666,6 +1736,9 @@ kc_irdma_set_roce_cm_info(struct irdma_qp *iwqp, struct ib_qp_attr *attr,
ether_addr_copy(iwqp->ctx_info.roce_info->mac_addr, IF_LLADDR(sgid_attr.ndev));
}
av->net_type = kc_rdma_gid_attr_network_type(sgid_attr,
sgid_attr.gid_type,
&sgid);
rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid);
dev_put(sgid_attr.ndev);
iwqp->sc_qp.user_pri = iwqp->ctx_info.user_pri;
@ -2047,37 +2120,6 @@ irdma_iw_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
/**
* irdma_get_eth_speed_and_width - Get IB port speed and width from netdev speed
* @link_speed: netdev phy link speed
* @active_speed: IB port speed
* @active_width: IB port width
*/
void
irdma_get_eth_speed_and_width(u32 link_speed, u8 *active_speed,
u8 *active_width)
{
if (link_speed <= SPEED_1000) {
*active_width = IB_WIDTH_1X;
*active_speed = IB_SPEED_SDR;
} else if (link_speed <= SPEED_10000) {
*active_width = IB_WIDTH_1X;
*active_speed = IB_SPEED_FDR10;
} else if (link_speed <= SPEED_20000) {
*active_width = IB_WIDTH_4X;
*active_speed = IB_SPEED_DDR;
} else if (link_speed <= SPEED_25000) {
*active_width = IB_WIDTH_1X;
*active_speed = IB_SPEED_EDR;
} else if (link_speed <= SPEED_40000) {
*active_width = IB_WIDTH_4X;
*active_speed = IB_SPEED_FDR10;
} else {
*active_width = IB_WIDTH_4X;
*active_speed = IB_SPEED_EDR;
}
}
/**
* irdma_query_port - get port attributes
* @ibdev: device pointer from stack
@ -2106,8 +2148,7 @@ irdma_query_port(struct ib_device *ibdev, u8 port,
props->state = IB_PORT_DOWN;
props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
}
irdma_get_eth_speed_and_width(SPEED_100000, &props->active_speed,
&props->active_width);
ib_get_eth_speed(ibdev, port, &props->active_speed, &props->active_width);
if (rdma_protocol_roce(ibdev, 1)) {
props->gid_tbl_len = 32;
@ -2304,3 +2345,37 @@ kc_set_rdma_uverbs_cmd_mask(struct irdma_device *iwdev)
if (iwdev->rf->rdma_ver >= IRDMA_GEN_2)
iwdev->ibdev.uverbs_ex_cmd_mask |= BIT_ULL(IB_USER_VERBS_EX_CMD_CREATE_CQ);
}
int
ib_get_eth_speed(struct ib_device *ibdev, u32 port_num, u8 *speed, u8 *width)
{
struct ifnet *netdev = ibdev->get_netdev(ibdev, port_num);
u32 netdev_speed;
if (!netdev)
return -ENODEV;
netdev_speed = netdev->if_baudrate;
dev_put(netdev);
if (netdev_speed <= SPEED_1000) {
*width = IB_WIDTH_1X;
*speed = IB_SPEED_SDR;
} else if (netdev_speed <= SPEED_10000) {
*width = IB_WIDTH_1X;
*speed = IB_SPEED_FDR10;
} else if (netdev_speed <= SPEED_20000) {
*width = IB_WIDTH_4X;
*speed = IB_SPEED_DDR;
} else if (netdev_speed <= SPEED_25000) {
*width = IB_WIDTH_1X;
*speed = IB_SPEED_EDR;
} else if (netdev_speed <= SPEED_40000) {
*width = IB_WIDTH_4X;
*speed = IB_SPEED_FDR10;
} else {
*width = IB_WIDTH_4X;
*speed = IB_SPEED_EDR;
}
return 0;
}
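ib_get_eth_speed() above replaces the removed driver-local helper and maps the interface baudrate onto IB speed/width pairs by simple thresholds. A standalone table of the same mapping, with SPEED_* taken as the conventional Mb/s constants (illustrative, not the kernel definitions):

#include <stdio.h>
#include <stdint.h>

#define SPEED_1000	1000u	/* conventional Mb/s constants */
#define SPEED_10000	10000u
#define SPEED_20000	20000u
#define SPEED_25000	25000u
#define SPEED_40000	40000u

/* Same threshold mapping as ib_get_eth_speed(), reduced to strings. */
static const char *ib_rate(uint32_t speed)
{
	if (speed <= SPEED_1000)  return "1X SDR";
	if (speed <= SPEED_10000) return "1X FDR10";
	if (speed <= SPEED_20000) return "4X DDR";
	if (speed <= SPEED_25000) return "1X EDR";
	if (speed <= SPEED_40000) return "4X FDR10";
	return "4X EDR";
}

int main(void)
{
	uint32_t rates[] = { 1000, 10000, 25000, 40000, 100000 };

	for (unsigned int i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("%6u -> %s\n", (unsigned)rates[i], ib_rate(rates[i]));
	return 0;
}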

View File

@ -133,6 +133,8 @@ extern bool irdma_upload_context;
#define IRDMA_REFLUSH BIT(2)
#define IRDMA_FLUSH_WAIT BIT(3)
#define IRDMA_IRQ_NAME_STR_LEN 64
enum init_completion_state {
INVALID_STATE = 0,
INITIAL_STATE,
@ -230,6 +232,7 @@ struct irdma_msix_vector {
u32 irq;
u32 cpu_affinity;
u32 ceq_id;
char name[IRDMA_IRQ_NAME_STR_LEN];
struct resource *res;
void *tag;
};
@ -376,6 +379,7 @@ struct irdma_device {
u16 vsi_num;
u8 rcv_wscale;
u8 iw_status;
u8 roce_rtomin;
u8 rd_fence_rate;
bool override_rcv_wnd:1;
bool override_cwnd:1;
@ -529,7 +533,7 @@ void irdma_free_cqp_request(struct irdma_cqp *cqp,
void irdma_put_cqp_request(struct irdma_cqp *cqp,
struct irdma_cqp_request *cqp_request);
int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx);
int irdma_add_local_mac_entry(struct irdma_pci_f *rf, u8 *mac_addr, u16 idx);
int irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx);
void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx);
u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf);
@ -589,6 +593,7 @@ int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
void (*callback_fcn)(struct irdma_cqp_request *cqp_request),
void *cb_param);
void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request);
void irdma_udqp_qs_worker(struct work_struct *work);
bool irdma_cq_empty(struct irdma_cq *iwcq);
int irdma_netdevice_event(struct notifier_block *notifier, unsigned long event,
void *ptr);
@ -599,4 +604,5 @@ void irdma_add_ip(struct irdma_device *iwdev);
void irdma_add_handler(struct irdma_handler *hdl);
void irdma_del_handler(struct irdma_handler *hdl);
void cqp_compl_worker(struct work_struct *work);
void irdma_cleanup_dead_qps(struct irdma_sc_vsi *vsi);
#endif /* IRDMA_MAIN_H */

View File

@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2015 - 2021 Intel Corporation
* Copyright (c) 2015 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -468,16 +468,16 @@ get_lvl1_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
* get_lvl1_lvl2_pble - calls get_lvl1 and get_lvl2 pble routine
* @pble_rsrc: pble resources
* @palloc: contains all information regarding pble (idx + pble addr)
* @level1_only: flag for a level 1 PBLE
* @lvl: Bitmask for requested pble level
*/
static int
get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_alloc *palloc, bool level1_only)
struct irdma_pble_alloc *palloc, u8 lvl)
{
int status = 0;
status = get_lvl1_pble(pble_rsrc, palloc);
if (!status || level1_only || palloc->total_cnt <= PBLE_PER_PAGE)
if (!status || lvl == PBLE_LEVEL_1 || palloc->total_cnt <= PBLE_PER_PAGE)
return status;
status = get_lvl2_pble(pble_rsrc, palloc);
@ -490,12 +490,12 @@ get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
* @pble_rsrc: pble resources
* @palloc: contains all information regarding pble (idx + pble addr)
* @pble_cnt: #of pbles requested
* @level1_only: true if only pble level 1 to acquire
* @lvl: requested pble level mask
*/
int
irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_alloc *palloc, u32 pble_cnt,
bool level1_only)
u8 lvl)
{
int status = 0;
int max_sds = 0;
@ -509,7 +509,7 @@ irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
/*
* check first to see if we can get pble's without acquiring additional sd's
*/
status = get_lvl1_lvl2_pble(pble_rsrc, palloc, level1_only);
status = get_lvl1_lvl2_pble(pble_rsrc, palloc, lvl);
if (!status)
goto exit;
@ -519,9 +519,9 @@ irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
if (status)
break;
status = get_lvl1_lvl2_pble(pble_rsrc, palloc, level1_only);
status = get_lvl1_lvl2_pble(pble_rsrc, palloc, lvl);
/* if level1_only, only go through it once */
if (!status || level1_only)
if (!status || lvl == PBLE_LEVEL_1)
break;
}
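With the level1_only bool replaced by a level selector, the helper still attempts a level-1 allocation first and only falls back to level 2 when the caller did not restrict the request to PBLE_LEVEL_1 and more than one page worth of PBLEs is needed. A control-flow sketch with stubbed allocators; PBLE_PER_PAGE is assumed to be 512 here purely for illustration:

#include <stdio.h>

#define PBLE_LEVEL_1	1
#define PBLE_LEVEL_2	2
#define PBLE_PER_PAGE	512	/* assumed page capacity for the sketch */

/* Stubbed allocators standing in for get_lvl1_pble()/get_lvl2_pble(). */
static int get_lvl1(unsigned int cnt) { return cnt <= PBLE_PER_PAGE ? 0 : -1; }
static int get_lvl2(unsigned int cnt) { (void)cnt; return 0; }

/* Same decision shape as get_lvl1_lvl2_pble() after the lvl rework. */
static int get_pble(unsigned int total_cnt, int lvl)
{
	int status = get_lvl1(total_cnt);

	if (!status || lvl == PBLE_LEVEL_1 || total_cnt <= PBLE_PER_PAGE)
		return status;
	return get_lvl2(total_cnt);
}

int main(void)
{
	printf("small, any level : %d\n", get_pble(128, PBLE_LEVEL_2));
	printf("large, any level : %d\n", get_pble(4096, PBLE_LEVEL_2));
	printf("large, lvl1 only : %d\n", get_pble(4096, PBLE_LEVEL_1));
	return 0;
}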

View File

@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2015 - 2021 Intel Corporation
* Copyright (c) 2015 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -148,7 +148,7 @@ void irdma_free_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_alloc *palloc);
int irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_alloc *palloc, u32 pble_cnt,
bool level1_only);
u8 lvl);
int irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
struct irdma_chunk *pchunk);
int irdma_prm_get_pbles(struct irdma_pble_prm *pprm,

View File

@ -69,8 +69,6 @@ int irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_aeq *sc_aeq,
u8 op);
int irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd,
struct irdma_stats_inst_info *stats_info);
u16 irdma_alloc_ws_node_id(struct irdma_sc_dev *dev);
void irdma_free_ws_node_id(struct irdma_sc_dev *dev, u16 node_id);
void irdma_update_stats(struct irdma_dev_hw_stats *hw_stats,
struct irdma_gather_stats *gather_stats,
struct irdma_gather_stats *last_gather_stats,

View File

@ -230,7 +230,6 @@ irdma_puda_dele_buf(struct irdma_sc_dev *dev,
*/
static __le64 * irdma_puda_get_next_send_wqe(struct irdma_qp_uk *qp,
u32 *wqe_idx){
__le64 *wqe = NULL;
int ret_code = 0;
*wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
@ -238,11 +237,9 @@ static __le64 * irdma_puda_get_next_send_wqe(struct irdma_qp_uk *qp,
qp->swqe_polarity = !qp->swqe_polarity;
IRDMA_RING_MOVE_HEAD(qp->sq_ring, ret_code);
if (ret_code)
return wqe;
return NULL;
wqe = qp->sq_base[*wqe_idx].elem;
return wqe;
return qp->sq_base[*wqe_idx].elem;
}
/**
@ -1516,8 +1513,7 @@ irdma_ieq_handle_partial(struct irdma_puda_rsrc *ieq,
error:
while (!list_empty(&pbufl)) {
buf = (struct irdma_puda_buf *)(&pbufl)->prev;
list_del(&buf->list);
list_add(&buf->list, rxlist);
list_move(&buf->list, rxlist);
}
if (txbuf)
irdma_puda_ret_bufpool(ieq, txbuf);

View File

@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2015 - 2021 Intel Corporation
* Copyright (c) 2015 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -126,7 +126,7 @@ struct irdma_puda_rsrc_info {
u32 tx_buf_cnt; /* total bufs allocated will be rq_size + tx_buf_cnt */
u16 buf_size;
u16 mss; /* FIXME: Windows driver still using this */
u8 stats_idx;
u16 stats_idx;
bool stats_idx_valid:1;
int abi_ver;
};
@ -177,7 +177,7 @@ struct irdma_puda_rsrc {
u64 pmode_count;
u64 partials_handled;
u16 mss; /* FIXME: Windows driver still using this */
u8 stats_idx;
u16 stats_idx;
bool check_crc:1;
bool stats_idx_valid:1;
};

View File

@ -564,7 +564,6 @@ struct irdma_hmc_fpm_misc {
struct irdma_qos {
struct list_head qplist;
struct mutex qos_mutex; /* protect QoS attributes per QoS level */
u64 lan_qos_handle;
u32 l2_sched_node_id;
u16 qs_handle;
u8 traffic_class;
@ -990,6 +989,11 @@ struct irdma_aeqe_info {
bool in_rdrsp_wr:1;
bool out_rdrsp:1;
bool aeqe_overflow:1;
/* This flag is used to determine if we should pass the rq tail
* in the QP context for FW/HW. It is set when ae_src is rq for GEN1/GEN2
* and additionally set for inbound atomic, read and write for GEN3
*/
bool err_rq_idx_valid:1;
u8 q2_data_written;
u8 ae_src;
};
@ -1217,7 +1221,7 @@ int irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
struct irdma_aeq_init_info *info);
int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
struct irdma_aeqe_info *info);
int irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count);
void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count);
void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
int abi_ver);

View File

@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2016 - 2021 Intel Corporation
* Copyright (c) 2016 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -122,7 +122,7 @@ irdma_sc_access_ah(struct irdma_sc_cqp *cqp, struct irdma_ah_info *info,
* irdma_create_mg_ctx() - create a mcg context
* @info: multicast group context info
*/
static int
static void
irdma_create_mg_ctx(struct irdma_mcast_grp_info *info)
{
struct irdma_mcast_grp_ctx_entry_info *entry_info = NULL;
@ -142,8 +142,6 @@ irdma_create_mg_ctx(struct irdma_mcast_grp_info *info)
ctx_idx++;
}
}
return 0;
}
/**
@ -159,7 +157,6 @@ irdma_access_mcast_grp(struct irdma_sc_cqp *cqp,
u64 scratch)
{
__le64 *wqe;
int ret_code = 0;
if (info->mg_id >= IRDMA_UDA_MAX_FSI_MGS) {
irdma_debug(cqp->dev, IRDMA_DEBUG_WQE, "mg_id out of range\n");
@ -172,9 +169,7 @@ irdma_access_mcast_grp(struct irdma_sc_cqp *cqp,
return -ENOSPC;
}
ret_code = irdma_create_mg_ctx(info);
if (ret_code)
return ret_code;
irdma_create_mg_ctx(info);
set_64bit_val(wqe, IRDMA_BYTE_32, info->dma_mem_mc.pa);
set_64bit_val(wqe, IRDMA_BYTE_16,

View File

@ -641,7 +641,7 @@ irdma_copy_inline_data_gen_1(u8 *wqe, struct irdma_sge *sge_list,
sge_len -= bytes_copied;
if (!quanta_bytes_remaining) {
/* Remaining inline bytes reside after the hdr */
/* Remaining inline bytes reside after hdr */
wqe += 16;
quanta_bytes_remaining = 32;
}
@ -683,8 +683,8 @@ irdma_set_mw_bind_wqe(__le64 * wqe,
* @polarity: polarity of wqe valid bit
*/
static void
irdma_copy_inline_data(u8 *wqe, struct irdma_sge *sge_list, u32 num_sges,
u8 polarity)
irdma_copy_inline_data(u8 *wqe, struct irdma_sge *sge_list,
u32 num_sges, u8 polarity)
{
u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
u32 quanta_bytes_remaining = 8;
@ -710,7 +710,7 @@ irdma_copy_inline_data(u8 *wqe, struct irdma_sge *sge_list, u32 num_sges,
if (!quanta_bytes_remaining) {
quanta_bytes_remaining = 31;
/* Remaining inline bytes reside after the hdr */
/* Remaining inline bytes reside after hdr */
if (first_quanta) {
first_quanta = false;
wqe += 16;
@ -946,58 +946,6 @@ irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
return 0;
}
/**
* irdma_uk_mw_bind - bind Memory Window
* @qp: hw qp ptr
* @info: post sq information
* @post_sq: flag to post sq
*/
int
irdma_uk_mw_bind(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
bool post_sq)
{
__le64 *wqe;
struct irdma_bind_window *op_info;
u64 hdr;
u32 wqe_idx;
bool local_fence;
u16 quanta = IRDMA_QP_WQE_MIN_QUANTA;
info->push_wqe = qp->push_db ? true : false;
op_info = &info->op.bind_window;
local_fence = info->local_fence;
wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, 0, info);
if (!wqe)
return -ENOSPC;
qp->wqe_ops.iw_set_mw_bind_wqe(wqe, op_info);
hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_BIND_MW) |
FIELD_PREP(IRDMAQPSQ_STAGRIGHTS,
((op_info->ena_reads << 2) | (op_info->ena_writes << 3))) |
FIELD_PREP(IRDMAQPSQ_VABASEDTO,
(op_info->addressing_type == IRDMA_ADDR_TYPE_VA_BASED ? 1 : 0)) |
FIELD_PREP(IRDMAQPSQ_MEMWINDOWTYPE,
(op_info->mem_window_type_1 ? 1 : 0)) |
FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
irdma_wmb(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
if (info->push_wqe)
irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
else if (post_sq)
irdma_uk_qp_post_wr(qp);
return 0;
}
/**
* irdma_uk_post_receive - post receive wqe
* @qp: hw qp ptr
@ -1111,7 +1059,6 @@ irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
u8 arm_next = 0;
u8 arm_seq_num;
cq->armed = true;
get_64bit_val(cq->shadow_area, IRDMA_BYTE_32, &temp_val);
arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
arm_seq_num++;
@ -1173,6 +1120,58 @@ irdma_skip_duplicate_flush_cmpl(struct irdma_ring ring, u8 flush_seen,
return 0;
}
/**
* irdma_detect_unsignaled_cmpls - check if unsignaled cmpl is to be reported
* @cq: hw cq
* @qp: hw qp
* @info: cq poll information collected
* @wqe_idx: index of the WR in the SQ ring
*/
static int
irdma_detect_unsignaled_cmpls(struct irdma_cq_uk *cq,
struct irdma_qp_uk *qp,
struct irdma_cq_poll_info *info,
u32 wqe_idx)
{
u64 qword0, qword1, qword2, qword3;
__le64 *cqe, *wqe;
int i;
u32 widx;
if (qp->sq_wrtrk_array[wqe_idx].signaled == 0) {
cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);
irdma_pr_err("%p %d %d\n", cqe, cq->cq_ring.head, wqe_idx);
for (i = -10; i <= 10; i++) {
IRDMA_GET_CQ_ELEM_AT_OFFSET(cq, i + cq->cq_ring.size, cqe);
get_64bit_val(cqe, IRDMA_BYTE_0, &qword0);
get_64bit_val(cqe, IRDMA_BYTE_8, &qword1);
get_64bit_val(cqe, IRDMA_BYTE_16, &qword2);
get_64bit_val(cqe, IRDMA_BYTE_24, &qword3);
widx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
irdma_pr_err("%d %04x %p %016lx %016lx %016lx %016lx ",
i, widx, cqe, qword0, qword1, qword2, qword3);
if ((u8)FIELD_GET(IRDMA_CQ_SQ, qword3)) {
irdma_pr_err("%lx %x %x %x ",
qp->sq_wrtrk_array[widx].wrid, qp->sq_wrtrk_array[widx].wr_len,
qp->sq_wrtrk_array[widx].quanta, qp->sq_wrtrk_array[widx].signaled);
wqe = qp->sq_base[widx].elem;
get_64bit_val(wqe, IRDMA_BYTE_0, &qword0);
get_64bit_val(wqe, IRDMA_BYTE_8, &qword1);
get_64bit_val(wqe, IRDMA_BYTE_16, &qword2);
get_64bit_val(wqe, IRDMA_BYTE_24, &qword3);
irdma_pr_err("%016lx %016lx %016lx %016lx \n",
qword0, qword1, qword2, qword3);
} else {
irdma_pr_err("\n");
}
}
return -ENOENT;
}
return 0;
}
/**
* irdma_uk_cq_poll_cmpl - get cq completion info
* @cq: hw cq
@ -1261,6 +1260,8 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
info->push_dropped = (bool)FIELD_GET(IRDMACQ_PSHDROP, qword3);
info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
get_64bit_val(cqe, IRDMA_BYTE_8, &comp_ctx);
qp = (struct irdma_qp_uk *)(irdma_uintptr) comp_ctx;
if (info->error) {
info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3);
@ -1289,10 +1290,7 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2);
get_64bit_val(cqe, IRDMA_BYTE_8, &comp_ctx);
info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3);
qp = (struct irdma_qp_uk *)(irdma_uintptr) comp_ctx;
if (!qp || qp->destroy_pending) {
ret_code = -EFAULT;
goto exit;
@ -1382,6 +1380,9 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
info->signaled = qp->sq_wrtrk_array[wqe_idx].signaled;
if (!info->comp_status)
info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
ret_code = irdma_detect_unsignaled_cmpls(cq, qp, info, wqe_idx);
if (ret_code != 0)
goto exit;
info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
IRDMA_RING_SET_TAIL(qp->sq_ring,
wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);
@ -1404,7 +1405,8 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
sw_wqe = qp->sq_base[tail].elem;
get_64bit_val(sw_wqe, IRDMA_BYTE_24,
&wqe_qword);
info->op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword);
info->op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE,
wqe_qword);
IRDMA_RING_SET_TAIL(qp->sq_ring,
tail + qp->sq_wrtrk_array[tail].quanta);
if (info->op_type != IRDMAQP_OP_NOP) {
@ -1415,7 +1417,8 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
}
} while (1);
if (info->op_type == IRDMA_OP_TYPE_BIND_MW && info->minor_err == FLUSH_PROT_ERR)
if (info->op_type == IRDMA_OP_TYPE_BIND_MW &&
info->minor_err == FLUSH_PROT_ERR)
info->minor_err = FLUSH_MW_BIND_ERR;
qp->sq_flush_seen = true;
if (!IRDMA_RING_MORE_WORK(qp->sq_ring))

View File

@ -209,7 +209,6 @@ enum irdma_device_caps_const {
IRDMA_Q2_BUF_SIZE = 256,
IRDMA_QP_CTX_SIZE = 256,
IRDMA_MAX_PDS = 262144,
IRDMA_MIN_WQ_SIZE_GEN2 = 8,
};
enum irdma_addressing_type {
@ -408,8 +407,6 @@ int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
struct irdma_post_sq_info *info, bool post_sq);
int irdma_uk_inline_send(struct irdma_qp_uk *qp,
struct irdma_post_sq_info *info, bool post_sq);
int irdma_uk_mw_bind(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
bool post_sq);
int irdma_uk_post_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled,
bool post_sq);
int irdma_uk_post_receive(struct irdma_qp_uk *qp,
@ -519,7 +516,6 @@ struct irdma_cq_uk {
u32 cq_size;
struct irdma_ring cq_ring;
u8 polarity;
bool armed:1;
bool avoid_mem_cflct:1;
};

View File

@ -174,7 +174,7 @@ irdma_register_notifiers(struct irdma_device *iwdev)
iwdev->nb_netdevice_event.notifier_call = irdma_netdevice_event;
ret = register_netdevice_notifier(&iwdev->nb_netdevice_event);
if (ret) {
ibdev_err(&iwdev->ibdev, "register_netdevice_notifier failed\n");
irdma_dev_err(&iwdev->ibdev, "register_netdevice_notifier failed\n");
return ret;
}
return ret;
@ -207,8 +207,7 @@ irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp,
}
}
if (!cqp_request) {
irdma_debug(cqp->sc_cqp.dev, IRDMA_DEBUG_ERR,
"CQP Request Fail: No Memory");
irdma_debug(cqp->sc_cqp.dev, IRDMA_DEBUG_ERR, "CQP Request Fail: No Memory");
return NULL;
}
@ -413,6 +412,8 @@ static const char *const irdma_cqp_cmd_names[IRDMA_MAX_CQP_OPS] = {
[IRDMA_OP_WS_ADD_NODE] = "Add Work Scheduler Node Cmd",
[IRDMA_OP_WS_MODIFY_NODE] = "Modify Work Scheduler Node Cmd",
[IRDMA_OP_WS_DELETE_NODE] = "Delete Work Scheduler Node Cmd",
[IRDMA_OP_WS_FAILOVER_START] = "Failover Start Cmd",
[IRDMA_OP_WS_FAILOVER_COMPLETE] = "Failover Complete Cmd",
[IRDMA_OP_SET_UP_MAP] = "Set UP-UP Mapping Cmd",
[IRDMA_OP_GEN_AE] = "Generate AE Cmd",
[IRDMA_OP_QUERY_RDMA_FEATURES] = "RDMA Get Features Cmd",
@ -450,8 +451,7 @@ irdma_cqp_crit_err(struct irdma_sc_dev *dev, u8 cqp_cmd,
irdma_debug(dev, IRDMA_DEBUG_CQP,
"[%s Error][%s] maj=0x%x min=0x%x\n",
irdma_noncrit_err_list[i].desc,
irdma_cqp_cmd_names[cqp_cmd],
maj_err_code,
irdma_cqp_cmd_names[cqp_cmd], maj_err_code,
min_err_code);
return false;
}
@ -474,7 +474,7 @@ irdma_handle_cqp_op(struct irdma_pci_f *rf,
bool put_cqp_request = true;
if (rf->reset)
return -EBUSY;
return 0;
irdma_get_cqp_request(cqp_request);
status = irdma_process_cqp_cmd(dev, info);
@ -494,10 +494,11 @@ irdma_handle_cqp_op(struct irdma_pci_f *rf,
if (irdma_cqp_crit_err(dev, info->cqp_cmd,
cqp_request->compl_info.maj_err_code,
cqp_request->compl_info.min_err_code))
irdma_dev_err(dev,
irdma_dev_err(&rf->iwdev->ibdev,
"[%s Error][op_code=%d] status=%d waiting=%d completion_err=%d maj=0x%x min=0x%x\n",
irdma_cqp_cmd_names[info->cqp_cmd], info->cqp_cmd, status, cqp_request->waiting,
cqp_request->compl_info.error, cqp_request->compl_info.maj_err_code,
irdma_cqp_cmd_names[info->cqp_cmd], info->cqp_cmd, status,
cqp_request->waiting, cqp_request->compl_info.error,
cqp_request->compl_info.maj_err_code,
cqp_request->compl_info.min_err_code);
if (put_cqp_request)
@ -559,7 +560,7 @@ irdma_cq_rem_ref(struct ib_cq *ibcq)
}
struct ib_device *
irdma_get_ibdev(struct irdma_sc_dev *dev)
to_ibdev(struct irdma_sc_dev *dev)
{
return &(container_of(dev, struct irdma_pci_f, sc_dev))->iwdev->ibdev;
}
@ -1102,7 +1103,7 @@ irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
struct irdma_gen_ae_info info = {0};
struct irdma_pci_f *rf = dev_to_rf(dev);
irdma_debug(dev, IRDMA_DEBUG_AEQ, "Generate MPA CRC AE\n");
irdma_debug(&rf->sc_dev, IRDMA_DEBUG_AEQ, "Generate MPA CRC AE\n");
info.ae_code = IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR;
info.ae_src = IRDMA_AE_SOURCE_RQ;
irdma_gen_ae(rf, qp, &info, false);
@ -1606,7 +1607,7 @@ irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd,
status = irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_WORK_SCHED_NODE,
&compl_info);
node_info->qs_handle = compl_info.op_ret_val;
irdma_debug(cqp->dev, IRDMA_DEBUG_DCB,
irdma_debug(&rf->sc_dev, IRDMA_DEBUG_DCB,
"opcode=%d, compl_info.retval=%d\n",
compl_info.op_code, compl_info.op_ret_val);
} else {
@ -2190,7 +2191,6 @@ irdma_upload_qp_context(struct irdma_qp *iwqp, bool freeze, bool raw)
ret = irdma_handle_cqp_op(rf, cqp_request);
if (ret)
goto error;
irdma_debug(dev, IRDMA_DEBUG_QP, "PRINT CONTXT QP [%d]\n", info->qp_id);
{
u32 i, j;
@ -2252,7 +2252,8 @@ irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_
irdma_debug(iwcq->sc_cq.dev, IRDMA_DEBUG_VERBS,
"%s: Poll artificially generated completion for QP 0x%X, op %u, wr_id=0x%lx\n",
__func__, cq_poll_info->qp_id, cq_poll_info->op_type, cq_poll_info->wr_id);
__func__, cq_poll_info->qp_id, cq_poll_info->op_type,
cq_poll_info->wr_id);
return 0;
}
@ -2299,14 +2300,10 @@ irdma_generate_flush_completions(struct irdma_qp *iwqp)
__le64 *sw_wqe;
u64 wqe_qword;
u32 wqe_idx;
u8 compl_generated = 0;
unsigned long flags;
bool reschedule = false;
bool compl_generated = false;
unsigned long flags1;
#define SQ_COMPL_GENERATED (0x01)
#define RQ_COMPL_GENERATED (0x02)
spin_lock_irqsave(&iwqp->iwscq->lock, flags);
spin_lock_irqsave(&iwqp->iwscq->lock, flags1);
if (irdma_cq_empty(iwqp->iwscq)) {
unsigned long flags2;
@ -2315,7 +2312,7 @@ irdma_generate_flush_completions(struct irdma_qp *iwqp)
cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC);
if (!cmpl) {
spin_unlock_irqrestore(&iwqp->lock, flags2);
spin_unlock_irqrestore(&iwqp->iwscq->lock, flags);
spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
return;
}
@ -2329,21 +2326,31 @@ irdma_generate_flush_completions(struct irdma_qp *iwqp)
cmpl->cpi.op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword);
cmpl->cpi.q_type = IRDMA_CQE_QTYPE_SQ;
/* remove the SQ WR by moving SQ tail */
IRDMA_RING_SET_TAIL(*sq_ring, sq_ring->tail + qp->sq_wrtrk_array[sq_ring->tail].quanta);
IRDMA_RING_SET_TAIL(*sq_ring,
sq_ring->tail + qp->sq_wrtrk_array[sq_ring->tail].quanta);
if (cmpl->cpi.op_type == IRDMAQP_OP_NOP) {
kfree(cmpl);
continue;
}
irdma_debug(iwqp->sc_qp.dev, IRDMA_DEBUG_DEV,
"%s: adding wr_id = 0x%lx SQ Completion to list qp_id=%d\n", __func__, cmpl->cpi.wr_id, qp->qp_id);
"%s: adding wr_id = 0x%lx SQ Completion to list qp_id=%d\n",
__func__, cmpl->cpi.wr_id, qp->qp_id);
list_add_tail(&cmpl->list, &iwqp->iwscq->cmpl_generated);
compl_generated |= SQ_COMPL_GENERATED;
compl_generated = true;
}
spin_unlock_irqrestore(&iwqp->lock, flags2);
spin_unlock_irqrestore(&iwqp->iwscq->lock, flags);
spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
if (compl_generated) {
irdma_comp_handler(iwqp->iwscq);
compl_generated = false;
}
} else {
spin_unlock_irqrestore(&iwqp->iwscq->lock, flags);
reschedule = true;
spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
irdma_sched_qp_flush_work(iwqp);
}
spin_lock_irqsave(&iwqp->iwrcq->lock, flags);
spin_lock_irqsave(&iwqp->iwrcq->lock, flags1);
if (irdma_cq_empty(iwqp->iwrcq)) {
unsigned long flags2;
@ -2352,7 +2359,7 @@ irdma_generate_flush_completions(struct irdma_qp *iwqp)
cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC);
if (!cmpl) {
spin_unlock_irqrestore(&iwqp->lock, flags2);
spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags);
spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
return;
}
@ -2367,31 +2374,55 @@ irdma_generate_flush_completions(struct irdma_qp *iwqp)
IRDMA_RING_SET_TAIL(*rq_ring, rq_ring->tail + 1);
irdma_debug(iwqp->sc_qp.dev, IRDMA_DEBUG_DEV,
"%s: adding wr_id = 0x%lx RQ Completion to list qp_id=%d, wqe_idx=%d\n",
__func__, cmpl->cpi.wr_id, qp->qp_id, wqe_idx);
__func__, cmpl->cpi.wr_id, qp->qp_id,
wqe_idx);
list_add_tail(&cmpl->list, &iwqp->iwrcq->cmpl_generated);
compl_generated |= RQ_COMPL_GENERATED;
compl_generated = true;
}
spin_unlock_irqrestore(&iwqp->lock, flags2);
spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags);
spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
if (compl_generated)
irdma_comp_handler(iwqp->iwrcq);
} else {
spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags);
reschedule = true;
}
if (reschedule)
spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
irdma_sched_qp_flush_work(iwqp);
if (compl_generated) {
if (iwqp->iwscq == iwqp->iwrcq) {
irdma_comp_handler(iwqp->iwscq);
} else {
if (compl_generated & SQ_COMPL_GENERATED)
irdma_comp_handler(iwqp->iwscq);
if (compl_generated & RQ_COMPL_GENERATED)
irdma_comp_handler(iwqp->iwrcq);
}
irdma_debug(iwqp->sc_qp.dev, IRDMA_DEBUG_VERBS,
"0x%X (SQ 0x1, RQ 0x2, both 0x3) completions generated for QP %d\n",
compl_generated, iwqp->ibqp.qp_num);
}
}
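
The reworked flush path walks each ring from tail to head, turns every pending WR into a software completion, skips NOPs, and calls the completion handler only after the CQ lock is dropped. A minimal user-space sketch of the SQ tail walk, using simplified ring and tracker structs rather than the driver's types:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Simplified stand-ins for the driver's ring and SQ tracking array. */
struct sim_ring {
	uint32_t head;
	uint32_t tail;
	uint32_t size;
};

struct sim_wrtrk {
	uint64_t wr_id;
	uint32_t quanta;	/* WQEs consumed by this WR */
};

/* Walk pending SQ entries from tail to head, "flushing" each one. */
static void flush_pending_sq(struct sim_ring *sq, struct sim_wrtrk *trk)
{
	while (sq->tail != sq->head) {
		struct sim_wrtrk *wr = &trk[sq->tail];

		printf("flush completion: wr_id=0x%" PRIx64 " tail=%u quanta=%u\n",
		       wr->wr_id, sq->tail, wr->quanta);
		/* advance the tail by the quanta this WR occupied, modulo ring size */
		sq->tail = (sq->tail + wr->quanta) % sq->size;
	}
}

int main(void)
{
	struct sim_wrtrk trk[8] = {
		[0] = { .wr_id = 0x100, .quanta = 2 },
		[2] = { .wr_id = 0x101, .quanta = 1 },
		[3] = { .wr_id = 0x102, .quanta = 3 },
	};
	struct sim_ring sq = { .head = 6, .tail = 0, .size = 8 };

	flush_pending_sq(&sq, trk);
	return 0;
}
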
/**
* irdma_udqp_qs_change - change qs for UD QP in a worker thread
* @iwqp: QP pointer
* @user_prio: new user priority value
* @qs_change: when false, only the user priority changes and the QS handle does not need to change
*/
static void
irdma_udqp_qs_change(struct irdma_qp *iwqp, u8 user_prio, bool qs_change)
{
irdma_qp_rem_qos(&iwqp->sc_qp);
if (qs_change)
iwqp->sc_qp.dev->ws_remove(iwqp->sc_qp.vsi, iwqp->ctx_info.user_pri);
iwqp->ctx_info.user_pri = user_prio;
iwqp->sc_qp.user_pri = user_prio;
if (qs_change)
if (iwqp->sc_qp.dev->ws_add(iwqp->sc_qp.vsi, user_prio))
irdma_dev_warn(&iwqp->iwdev->ibdev,
"WS add failed during %s, qp_id: %x user_pri: %x",
__func__, iwqp->ibqp.qp_num, user_prio);
irdma_qp_add_qos(&iwqp->sc_qp);
}
void
irdma_udqp_qs_worker(struct work_struct *work)
{
struct irdma_udqs_work *udqs_work = container_of(work, struct irdma_udqs_work, work);
irdma_udqp_qs_change(udqs_work->iwqp, udqs_work->user_prio, udqs_work->qs_change);
if (udqs_work->qs_change)
irdma_cqp_qp_suspend_resume(&udqs_work->iwqp->sc_qp, IRDMA_OP_RESUME);
irdma_qp_rem_ref(&udqs_work->iwqp->ibqp);
kfree(udqs_work);
}
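
irdma_udqp_qs_worker() recovers its work item with container_of(), applies the deferred priority/QS change, resumes the QP when needed, drops the QP reference and frees the item. A user-space sketch of the container_of pattern it relies on; the struct and field names below are illustrative, not the driver's:

#include <stdio.h>
#include <stddef.h>
#include <stdlib.h>

/* user-space stand-in for the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct {
	void (*func)(struct work_struct *w);
};

/* illustrative work item carrying the parameters for the deferred change */
struct udqs_work {
	struct work_struct work;
	unsigned int qp_id;
	unsigned char user_prio;
	int qs_change;
};

static void udqs_worker(struct work_struct *w)
{
	struct udqs_work *uw = container_of(w, struct udqs_work, work);

	printf("qp %u: apply user_prio %u (qs_change=%d)\n",
	       uw->qp_id, uw->user_prio, uw->qs_change);
	free(uw);	/* the worker owns and releases the work item */
}

int main(void)
{
	struct udqs_work *uw = calloc(1, sizeof(*uw));

	uw->work.func = udqs_worker;
	uw->qp_id = 42;
	uw->user_prio = 3;
	uw->qs_change = 1;
	/* a real workqueue would invoke this later from its own thread */
	uw->work.func(&uw->work);
	return 0;
}
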


@ -55,7 +55,8 @@ irdma_query_device(struct ib_device *ibdev,
return -EINVAL;
memset(props, 0, sizeof(*props));
ether_addr_copy((u8 *)&props->sys_image_guid, IF_LLADDR(iwdev->netdev));
addrconf_addr_eui48((u8 *)&props->sys_image_guid,
IF_LLADDR(iwdev->netdev));
props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 |
irdma_fw_minor_ver(&rf->sc_dev);
props->device_cap_flags = IB_DEVICE_MEM_WINDOW |
@ -193,7 +194,8 @@ irdma_user_mmap_entry_add_hash(struct irdma_ucontext *ucontext, u64 bar_offset,
spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags);
} while (retry_cnt++ < 10);
irdma_debug(iwdev_to_idev(ucontext->iwdev), IRDMA_DEBUG_VERBS, "mmap table add failed: Cannot find a unique key\n");
irdma_debug(&ucontext->iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
"mmap table add failed: Cannot find a unique key\n");
kfree(entry);
return NULL;
@ -271,7 +273,7 @@ irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
#if __FreeBSD_version >= 1400026
rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
if (!rdma_entry) {
irdma_debug(iwdev_to_idev(ucontext->iwdev), IRDMA_DEBUG_VERBS,
irdma_debug(&ucontext->iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
"pgoff[0x%lx] does not have valid entry\n",
vma->vm_pgoff);
return -EINVAL;
@ -281,15 +283,15 @@ irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
#else
entry = irdma_find_user_mmap_entry(ucontext, vma);
if (!entry) {
irdma_debug(iwdev_to_idev(ucontext->iwdev), IRDMA_DEBUG_VERBS,
irdma_debug(&ucontext->iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
"pgoff[0x%lx] does not have valid entry\n",
vma->vm_pgoff);
return -EINVAL;
}
#endif
irdma_debug(iwdev_to_idev(ucontext->iwdev), IRDMA_DEBUG_VERBS,
"bar_offset [0x%lx] mmap_flag [%d]\n", entry->bar_offset,
entry->mmap_flag);
irdma_debug(&ucontext->iwdev->rf->sc_dev,
IRDMA_DEBUG_VERBS, "bar_offset [0x%lx] mmap_flag [%d]\n",
entry->bar_offset, entry->mmap_flag);
pfn = (entry->bar_offset +
pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
@ -320,7 +322,7 @@ irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
}
if (ret)
irdma_debug(iwdev_to_idev(ucontext->iwdev), IRDMA_DEBUG_VERBS,
irdma_debug(&ucontext->iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
"bar_offset [0x%lx] mmap_flag[%d] err[%d]\n",
entry->bar_offset, entry->mmap_flag, ret);
#if __FreeBSD_version >= 1400026
@ -463,7 +465,8 @@ irdma_setup_push_mmap_entries(struct irdma_ucontext *ucontext,
push_wqe_mmap_key);
#else
iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_add_hash(ucontext, bar_off,
IRDMA_MMAP_IO_WC, push_wqe_mmap_key);
IRDMA_MMAP_IO_WC,
push_wqe_mmap_key);
#endif
if (!iwqp->push_wqe_mmap_entry)
return -ENOMEM;
@ -477,7 +480,8 @@ irdma_setup_push_mmap_entries(struct irdma_ucontext *ucontext,
#else
iwqp->push_db_mmap_entry = irdma_user_mmap_entry_add_hash(ucontext, bar_off,
IRDMA_MMAP_IO_NC, push_db_mmap_key);
IRDMA_MMAP_IO_NC,
push_db_mmap_key);
#endif
if (!iwqp->push_db_mmap_entry) {
#if __FreeBSD_version >= 1400026
@ -532,6 +536,11 @@ irdma_setup_umode_qp(struct ib_udata *udata,
struct irdma_qp_init_info *info,
struct ib_qp_init_attr *init_attr)
{
#if __FreeBSD_version >= 1400026
struct irdma_ucontext *ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
#else
struct irdma_ucontext *ucontext = to_ucontext(iwqp->iwpd->ibpd.uobject->context);
#endif
struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
struct irdma_create_qp_req req = {0};
unsigned long flags;
@ -540,7 +549,7 @@ irdma_setup_umode_qp(struct ib_udata *udata,
ret = ib_copy_from_udata(&req, udata,
min(sizeof(req), udata->inlen));
if (ret) {
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
"ib_copy_from_data fail\n");
return ret;
}
@ -548,11 +557,6 @@ irdma_setup_umode_qp(struct ib_udata *udata,
iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
iwqp->user_mode = 1;
if (req.user_wqe_bufs) {
#if __FreeBSD_version >= 1400026
struct irdma_ucontext *ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
#else
struct irdma_ucontext *ucontext = to_ucontext(iwqp->iwpd->ibpd.uobject->context);
#endif
info->qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
@ -561,16 +565,18 @@ irdma_setup_umode_qp(struct ib_udata *udata,
if (!iwqp->iwpbl) {
ret = -ENODATA;
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
"no pbl info\n");
return ret;
}
}
if (ukinfo->abi_ver <= 5) {
if (!ucontext->use_raw_attrs) {
/**
* For ABI version less than 6 passes raw sq and rq
* quanta in cap.max_send_wr and cap.max_recv_wr.
* Maintain backward compat with older ABI which passes sq and
* rq depth in quanta in cap.max_send_wr and cap.max_recv_wr.
* There is no way to compute the correct value of
* iwqp->max_send_wr/max_recv_wr in the kernel.
*/
iwqp->max_send_wr = init_attr->cap.max_send_wr;
iwqp->max_recv_wr = init_attr->cap.max_recv_wr;
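
The fallback above exists because a provider that did not negotiate raw attributes reports SQ/RQ sizes already converted to quanta, and the kernel cannot recover the application's original caps from them. A hedged sketch of that kind of gate, with hypothetical names that are not part of the driver ABI:

#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_USE_RAW_ATTRS (1u << 0)	/* hypothetical negotiation bit */

struct example_caps {
	uint32_t max_send_wr;
	uint32_t max_recv_wr;
};

/*
 * If the provider understands raw attributes, report limits derived from
 * the actual ring depth; otherwise echo back what the application asked
 * for, since the old quanta conversion cannot be reversed in the kernel.
 */
static struct example_caps report_caps(uint64_t comp_mask,
				       struct example_caps asked,
				       uint32_t sq_depth, uint32_t rq_depth)
{
	struct example_caps out = asked;

	if (comp_mask & EXAMPLE_USE_RAW_ATTRS) {
		out.max_send_wr = sq_depth;
		out.max_recv_wr = rq_depth;
	}
	return out;
}

int main(void)
{
	struct example_caps asked = { .max_send_wr = 64, .max_recv_wr = 128 };
	struct example_caps legacy = report_caps(0, asked, 256, 512);
	struct example_caps raw = report_caps(EXAMPLE_USE_RAW_ATTRS, asked, 256, 512);

	printf("legacy provider:   send=%u recv=%u\n", legacy.max_send_wr, legacy.max_recv_wr);
	printf("raw-attr provider: send=%u recv=%u\n", raw.max_send_wr, raw.max_recv_wr);
	return 0;
}
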
@ -737,7 +743,7 @@ irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
roce_info->wr_rdresp_en = true;
roce_info->bind_en = true;
roce_info->dcqcn_en = false;
roce_info->rtomin = 5;
roce_info->rtomin = iwdev->roce_rtomin;
roce_info->ack_credits = iwdev->roce_ackcreds;
roce_info->ird_size = dev->hw_attrs.max_hw_ird;
@ -817,6 +823,8 @@ irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
void
irdma_sched_qp_flush_work(struct irdma_qp *iwqp)
{
if (iwqp->sc_qp.qp_uk.destroy_pending)
return;
irdma_qp_add_ref(&iwqp->ibqp);
if (mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS)))
@ -1026,7 +1034,7 @@ irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
av->attrs = attr->ah_attr;
rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid);
if (av->sgid_addr.saddr.sa_family == AF_INET6) {
if (av->net_type == RDMA_NETWORK_IPV6) {
__be32 *daddr =
av->dgid_addr.saddr_in6.sin6_addr.__u6_addr.__u6_addr32;
__be32 *saddr =
@ -1037,10 +1045,7 @@ irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
udp_info->ipv4 = false;
irdma_copy_ip_ntohl(local_ip, daddr);
udp_info->arp_idx = irdma_arp_table(iwdev->rf, local_ip,
NULL, IRDMA_ARP_RESOLVE);
} else {
} else if (av->net_type == RDMA_NETWORK_IPV4) {
__be32 saddr = av->sgid_addr.saddr_in.sin_addr.s_addr;
__be32 daddr = av->dgid_addr.saddr_in.sin_addr.s_addr;
@ -1056,6 +1061,8 @@ irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
udp_info->local_ipaddr[1] = 0;
udp_info->local_ipaddr[2] = 0;
udp_info->local_ipaddr[3] = ntohl(saddr);
} else {
return -EINVAL;
}
udp_info->arp_idx =
irdma_add_arp(iwdev->rf, local_ip,
@ -1064,10 +1071,10 @@ irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
if (attr->max_rd_atomic > dev->hw_attrs.max_hw_ord) {
ibdev_err(&iwdev->ibdev,
"rd_atomic = %d, above max_hw_ord=%d\n",
attr->max_rd_atomic,
dev->hw_attrs.max_hw_ord);
irdma_dev_err(&iwdev->ibdev,
"rd_atomic = %d, above max_hw_ord=%d\n",
attr->max_rd_atomic,
dev->hw_attrs.max_hw_ord);
return -EINVAL;
}
if (attr->max_rd_atomic)
@ -1077,10 +1084,10 @@ irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) {
ibdev_err(&iwdev->ibdev,
"rd_atomic = %d, above max_hw_ird=%d\n",
attr->max_rd_atomic,
dev->hw_attrs.max_hw_ird);
irdma_dev_err(&iwdev->ibdev,
"rd_atomic = %d, above max_hw_ird=%d\n",
attr->max_rd_atomic,
dev->hw_attrs.max_hw_ird);
return -EINVAL;
}
if (attr->max_dest_rd_atomic)
@ -1098,19 +1105,20 @@ irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
irdma_debug(dev, IRDMA_DEBUG_VERBS,
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
"caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d attr_mask=0x%x\n",
__builtin_return_address(0), ibqp->qp_num, attr->qp_state, iwqp->ibqp_state,
iwqp->iwarp_state, attr_mask);
__builtin_return_address(0), ibqp->qp_num, attr->qp_state,
iwqp->ibqp_state, iwqp->iwarp_state, attr_mask);
spin_lock_irqsave(&iwqp->lock, flags);
if (attr_mask & IB_QP_STATE) {
if (!kc_ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state,
iwqp->ibqp.qp_type, attr_mask,
IB_LINK_LAYER_ETHERNET)) {
irdma_print("modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\n",
iwqp->ibqp.qp_num, iwqp->ibqp_state,
attr->qp_state);
irdma_dev_warn(&iwdev->ibdev,
"modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\n",
iwqp->ibqp.qp_num, iwqp->ibqp_state,
attr->qp_state);
ret = -EINVAL;
goto exit;
}
@ -1178,6 +1186,8 @@ irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
case IB_QPS_ERR:
case IB_QPS_RESET:
if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS) {
if (dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2)
irdma_cqp_qp_suspend_resume(&iwqp->sc_qp, IRDMA_OP_SUSPEND);
spin_unlock_irqrestore(&iwqp->lock, flags);
info.next_iwarp_state = IRDMA_QP_STATE_SQD;
irdma_hw_modify_qp(iwdev, iwqp, &info, true);
@ -1259,8 +1269,7 @@ irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
udata->outlen));
if (ret) {
irdma_remove_push_mmap_entries(iwqp);
irdma_debug(iwdev_to_idev(iwdev),
IRDMA_DEBUG_VERBS,
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
"copy_to_udata failed\n");
return ret;
}
@ -1314,10 +1323,11 @@ irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
offload_info = &iwqp->iwarp_info;
tcp_info = &iwqp->tcp_info;
wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
irdma_debug(dev, IRDMA_DEBUG_VERBS,
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
"caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d last_aeq=%d hw_tcp_state=%d hw_iwarp_state=%d attr_mask=0x%x\n",
__builtin_return_address(0), ibqp->qp_num, attr->qp_state, iwqp->ibqp_state, iwqp->iwarp_state,
iwqp->last_aeq, iwqp->hw_tcp_state, iwqp->hw_iwarp_state, attr_mask);
__builtin_return_address(0), ibqp->qp_num, attr->qp_state,
iwqp->ibqp_state, iwqp->iwarp_state, iwqp->last_aeq,
iwqp->hw_tcp_state, iwqp->hw_iwarp_state, attr_mask);
spin_lock_irqsave(&iwqp->lock, flags);
if (attr_mask & IB_QP_STATE) {
@ -1510,8 +1520,8 @@ irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
udata->outlen));
if (err) {
irdma_remove_push_mmap_entries(iwqp);
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
"copy_to_udata failed\n");
irdma_debug(&iwdev->rf->sc_dev,
IRDMA_DEBUG_VERBS, "copy_to_udata failed\n");
return err;
}
}
@ -1861,12 +1871,11 @@ irdma_check_mr_contiguous(struct irdma_pble_alloc *palloc,
* irdma_setup_pbles - copy user pg address to pble's
* @rf: RDMA PCI function
* @iwmr: mr pointer for this memory registration
* @use_pbles: flag if to use pble's
* @lvl_1_only: request only level 1 pble if true
* @lvl: requested pble levels
*/
static int
irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
bool use_pbles, bool lvl_1_only)
u8 lvl)
{
struct irdma_pbl *iwpbl = &iwmr->iwpbl;
struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
@ -1875,9 +1884,9 @@ irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
int status;
enum irdma_pble_level level = PBLE_LEVEL_1;
if (use_pbles) {
if (lvl) {
status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt,
lvl_1_only);
lvl);
if (status)
return status;
@ -1892,7 +1901,7 @@ irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
irdma_copy_user_pgaddrs(iwmr, pbl, level);
if (use_pbles)
if (lvl)
iwmr->pgaddrmem[0] = *pbl;
return 0;
@ -1903,12 +1912,12 @@ irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
* @iwdev: irdma device
* @req: information for q memory management
* @iwpbl: pble struct
* @use_pbles: flag to use pble
* @lvl: pble level mask
*/
static int
irdma_handle_q_mem(struct irdma_device *iwdev,
struct irdma_mem_reg_req *req,
struct irdma_pbl *iwpbl, bool use_pbles)
struct irdma_pbl *iwpbl, u8 lvl)
{
struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
struct irdma_mr *iwmr = iwpbl->iwmr;
@ -1921,11 +1930,11 @@ irdma_handle_q_mem(struct irdma_device *iwdev,
bool ret = true;
pg_size = iwmr->page_size;
err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, true);
err = irdma_setup_pbles(iwdev->rf, iwmr, lvl);
if (err)
return err;
if (use_pbles)
if (lvl)
arr = palloc->level1.addr;
switch (iwmr->type) {
@ -1933,7 +1942,7 @@ irdma_handle_q_mem(struct irdma_device *iwdev,
total = req->sq_pages + req->rq_pages;
hmc_p = &qpmr->sq_pbl;
qpmr->shadow = (dma_addr_t) arr[total];
if (use_pbles) {
if (lvl) {
ret = irdma_check_mem_contiguous(arr, req->sq_pages,
pg_size);
if (ret)
@ -1958,7 +1967,7 @@ irdma_handle_q_mem(struct irdma_device *iwdev,
if (!cqmr->split)
cqmr->shadow = (dma_addr_t) arr[req->cq_pages];
if (use_pbles)
if (lvl)
ret = irdma_check_mem_contiguous(arr, req->cq_pages,
pg_size);
@ -1968,12 +1977,11 @@ irdma_handle_q_mem(struct irdma_device *iwdev,
hmc_p->addr = arr[0];
break;
default:
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
"MR type error\n");
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "MR type error\n");
err = -EINVAL;
}
if (use_pbles && ret) {
if (lvl && ret) {
irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
iwpbl->pbl_allocated = false;
}
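
irdma_setup_pbles() and irdma_handle_q_mem() now take a requested level mask instead of the old use_pbles/lvl_1_only pair; the registration paths further below select it (level 1 only for queue memory, level 1 or 2 for ordinary MRs, none when a single page suffices). A small sketch of that selection, assuming PBLE_LEVEL_0/1/2 keep their usual values of 0, 1 and 2 from the driver headers:

#include <stdio.h>
#include <stdint.h>

/* assumed to mirror enum irdma_pble_level in the driver headers */
enum pble_level {
	PBLE_LEVEL_0 = 0,	/* no PBLE needed, pages fit inline */
	PBLE_LEVEL_1 = 1,
	PBLE_LEVEL_2 = 2,
};

/* queue memory (QP rings): either no PBLE or a level-1 list only */
static uint8_t q_mem_lvl(uint32_t total_pages)
{
	return total_pages > 2 ? PBLE_LEVEL_1 : PBLE_LEVEL_0;
}

/* ordinary memory regions: a single page avoids PBLEs, otherwise allow level 1 or 2 */
static uint8_t mr_lvl(uint32_t page_cnt)
{
	return page_cnt != 1 ? (PBLE_LEVEL_1 | PBLE_LEVEL_2) : PBLE_LEVEL_0;
}

int main(void)
{
	printf("qp with 6 pages   -> lvl %u\n", q_mem_lvl(6));
	printf("qp with 2 pages   -> lvl %u\n", q_mem_lvl(2));
	printf("mr with 1 page    -> lvl %u\n", mr_lvl(1));
	printf("mr with 300 pages -> lvl %u\n", mr_lvl(300));
	return 0;
}
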
@ -2233,9 +2241,9 @@ irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
struct irdma_mem_reg_req req = {};
u32 total, stag = 0;
u8 shadow_pgcnt = 1;
bool use_pbles = false;
unsigned long flags;
int err = -EINVAL;
u8 lvl;
int ret;
if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
@ -2247,7 +2255,7 @@ irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
region = ib_umem_get(pd->uobject->context, start, len, access, 0);
if (IS_ERR(region)) {
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
"Failed to create ib_umem region\n");
return (struct ib_mr *)region;
}
@ -2286,8 +2294,8 @@ irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
goto error;
}
total = req.sq_pages + req.rq_pages;
use_pbles = (total > 2);
err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
lvl = total > 2 ? PBLE_LEVEL_1 : PBLE_LEVEL_0;
err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl);
if (err)
goto error;
@ -2310,8 +2318,8 @@ irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
goto error;
}
use_pbles = (req.cq_pages > 1);
err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
lvl = req.cq_pages > 1 ? PBLE_LEVEL_1 : PBLE_LEVEL_0;
err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl);
if (err)
goto error;
@ -2326,13 +2334,12 @@ irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
break;
case IRDMA_MEMREG_TYPE_MEM:
use_pbles = (iwmr->page_cnt != 1);
err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, false);
lvl = iwmr->page_cnt != 1 ? PBLE_LEVEL_1 | PBLE_LEVEL_2 : PBLE_LEVEL_0;
err = irdma_setup_pbles(iwdev->rf, iwmr, lvl);
if (err)
goto error;
if (use_pbles) {
if (lvl) {
ret = irdma_check_mr_contiguous(palloc,
iwmr->page_size);
if (ret) {
@ -2436,13 +2443,13 @@ irdma_rereg_mr_trans(struct irdma_mr *iwmr, u64 start, u64 len,
struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
struct ib_pd *pd = iwmr->ibmr.pd;
struct ib_umem *region;
bool use_pbles;
u8 lvl;
int err;
region = ib_umem_get(pd->uobject->context, start, len, iwmr->access, 0);
if (IS_ERR(region)) {
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
"Failed to create ib_umem region\n");
return (struct ib_mr *)region;
}
@ -2457,13 +2464,13 @@ irdma_rereg_mr_trans(struct irdma_mr *iwmr, u64 start, u64 len,
iwmr->page_cnt = irdma_ib_umem_num_dma_blocks(region, iwmr->page_size,
virt);
use_pbles = (iwmr->page_cnt != 1);
lvl = iwmr->page_cnt != 1 ? PBLE_LEVEL_1 | PBLE_LEVEL_2 : PBLE_LEVEL_0;
err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, false);
err = irdma_setup_pbles(iwdev->rf, iwmr, lvl);
if (err)
goto error;
if (use_pbles) {
if (lvl) {
err = irdma_check_mr_contiguous(palloc,
iwmr->page_size);
if (err) {
@ -2727,6 +2734,7 @@ irdma_post_send(struct ib_qp *ibqp,
break;
case IB_WR_LOCAL_INV:
info.op_type = IRDMA_OP_TYPE_INV_STAG;
info.local_fence = info.read_fence;
info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
err = irdma_uk_stag_local_invalidate(ukqp, &info, true);
break;
@ -2759,8 +2767,7 @@ irdma_post_send(struct ib_qp *ibqp,
}
default:
err = -EINVAL;
irdma_debug(iwdev_to_idev(iwqp->iwdev),
IRDMA_DEBUG_VERBS,
irdma_debug(&iwqp->iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
"upost_send bad opcode = 0x%x\n",
ib_wr->opcode);
break;
@ -2779,6 +2786,7 @@ irdma_post_send(struct ib_qp *ibqp,
spin_unlock_irqrestore(&iwqp->lock, flags);
irdma_sched_qp_flush_work(iwqp);
}
if (err)
*bad_wr = ib_wr;
@ -2816,8 +2824,8 @@ irdma_post_recv(struct ib_qp *ibqp,
post_recv.sg_list = sg_list;
err = irdma_uk_post_receive(ukqp, &post_recv);
if (err) {
irdma_debug(iwdev_to_idev(iwqp->iwdev),
IRDMA_DEBUG_VERBS, "post_recv err %d\n",
irdma_debug(&iwqp->iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
"post_recv err %d\n",
err);
goto out;
}
@ -2829,6 +2837,7 @@ irdma_post_recv(struct ib_qp *ibqp,
spin_unlock_irqrestore(&iwqp->lock, flags);
if (iwqp->flush_issued)
irdma_sched_qp_flush_work(iwqp);
if (err)
*bad_wr = ib_wr;
@ -2896,8 +2905,8 @@ set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
break;
default:
qp = cq_poll_info->qp_handle;
ibdev_err(irdma_get_ibdev(qp->dev), "Invalid opcode = %d in CQE\n",
cq_poll_info->op_type);
irdma_dev_err(to_ibdev(qp->dev), "Invalid opcode = %d in CQE\n",
cq_poll_info->op_type);
entry->status = IB_WC_GENERAL_ERR;
}
}
@ -3109,8 +3118,9 @@ __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc *entry)
return npolled;
error:
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
"%s: Error polling CQ, irdma_err: %d\n", __func__, ret);
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
"%s: Error polling CQ, irdma_err: %d\n",
__func__, ret);
return ret;
}
@ -3292,8 +3302,9 @@ irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
sgid_addr.saddr_in6.sin6_addr.__u6_addr.__u6_addr32);
irdma_netdev_vlan_ipv6(ip_addr, &vlan_id, NULL);
ipv4 = false;
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
"qp_id=%d, IP6address=%pI6\n", ibqp->qp_num,
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
"qp_id=%d, IP6address=%pI6\n",
ibqp->qp_num,
ip_addr);
irdma_mcast_mac_v6(ip_addr, dmac);
} else {
@ -3301,7 +3312,7 @@ irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
ipv4 = true;
vlan_id = irdma_get_vlan_ipv4(ip_addr);
irdma_mcast_mac_v4(ip_addr, dmac);
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
"qp_id=%d, IP4address=%pI4, MAC=%pM\n",
ibqp->qp_num, ip_addr, dmac);
}
@ -3431,8 +3442,8 @@ irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
if (!mc_qht_elem) {
spin_unlock_irqrestore(&rf->qh_list_lock, flags);
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
"address not found MCG\n");
irdma_debug(&iwdev->rf->sc_dev,
IRDMA_DEBUG_VERBS, "address not found MCG\n");
return 0;
}
@ -3444,8 +3455,8 @@ irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
IRDMA_OP_MC_DESTROY);
if (ret) {
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
"failed MC_DESTROY MCG\n");
irdma_debug(&iwdev->rf->sc_dev,
IRDMA_DEBUG_VERBS, "failed MC_DESTROY MCG\n");
spin_lock_irqsave(&rf->qh_list_lock, flags);
mcast_list_add(rf, mc_qht_elem);
spin_unlock_irqrestore(&rf->qh_list_lock, flags);
@ -3462,8 +3473,8 @@ irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
IRDMA_OP_MC_MODIFY);
if (ret) {
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
"failed Modify MCG\n");
irdma_debug(&iwdev->rf->sc_dev,
IRDMA_DEBUG_VERBS, "failed Modify MCG\n");
return ret;
}
}
@ -3496,23 +3507,6 @@ irdma_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
return 0;
}
static __be64 irdma_mac_to_guid(struct ifnet *ndev){
const unsigned char *mac = IF_LLADDR(ndev);
__be64 guid;
unsigned char *dst = (unsigned char *)&guid;
dst[0] = mac[0] ^ 2;
dst[1] = mac[1];
dst[2] = mac[2];
dst[3] = 0xff;
dst[4] = 0xfe;
dst[5] = mac[3];
dst[6] = mac[4];
dst[7] = mac[5];
return guid;
}
static struct ifnet *
irdma_get_netdev(struct ib_device *ibdev, u8 port_num)
{
@ -3632,7 +3626,8 @@ irdma_init_roce_device(struct irdma_device *iwdev)
{
kc_set_roce_uverbs_cmd_mask(iwdev);
iwdev->ibdev.node_type = RDMA_NODE_IB_CA;
iwdev->ibdev.node_guid = irdma_mac_to_guid(iwdev->netdev);
addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
IF_LLADDR(iwdev->netdev));
irdma_set_device_roce_ops(&iwdev->ibdev);
if (iwdev->rf->rdma_ver == IRDMA_GEN_2)
irdma_set_device_mcast_ops(&iwdev->ibdev);
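
Both node_guid paths (RoCE here and iWARP just below) and the sys_image_guid in irdma_query_device now use addrconf_addr_eui48(), which builds the same modified EUI-64 that the removed irdma_mac_to_guid() assembled by hand: flip the universal/local bit in the first MAC octet and splice ff:fe into the middle. A stand-alone sketch of that transformation:

#include <stdio.h>
#include <stdint.h>

/*
 * Build a modified EUI-64 from a 6-byte MAC: flip the universal/local
 * bit in the first octet and insert ff:fe between the two halves.
 */
static void mac_to_eui64(uint8_t eui[8], const uint8_t mac[6])
{
	eui[0] = mac[0] ^ 2;
	eui[1] = mac[1];
	eui[2] = mac[2];
	eui[3] = 0xff;
	eui[4] = 0xfe;
	eui[5] = mac[3];
	eui[6] = mac[4];
	eui[7] = mac[5];
}

int main(void)
{
	const uint8_t mac[6] = { 0x40, 0xa6, 0xb7, 0x12, 0x34, 0x56 };
	uint8_t guid[8];
	int i;

	mac_to_eui64(guid, mac);
	for (i = 0; i < 8; i++)
		printf("%02x%s", guid[i], i == 7 ? "\n" : ":");
	return 0;
}
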
@ -3648,7 +3643,8 @@ irdma_init_iw_device(struct irdma_device *iwdev)
struct ifnet *netdev = iwdev->netdev;
iwdev->ibdev.node_type = RDMA_NODE_RNIC;
ether_addr_copy((u8 *)&iwdev->ibdev.node_guid, IF_LLADDR(netdev));
addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
IF_LLADDR(netdev));
iwdev->ibdev.iwcm = kzalloc(sizeof(*iwdev->ibdev.iwcm), GFP_KERNEL);
if (!iwdev->ibdev.iwcm)
return -ENOMEM;
@ -3675,7 +3671,6 @@ irdma_init_iw_device(struct irdma_device *iwdev)
static int
irdma_init_rdma_device(struct irdma_device *iwdev)
{
struct pci_dev *pcidev = iwdev->rf->pcidev;
int ret;
iwdev->ibdev.owner = THIS_MODULE;
@ -3693,7 +3688,7 @@ irdma_init_rdma_device(struct irdma_device *iwdev)
iwdev->ibdev.phys_port_cnt = 1;
iwdev->ibdev.num_comp_vectors = iwdev->rf->ceqs_count;
iwdev->ibdev.dev.parent = iwdev->rf->dev_ctx.dev;
set_ibdev_dma_device(iwdev->ibdev, &pcidev->dev);
set_ibdev_dma_device(iwdev->ibdev, &iwdev->rf->pcidev->dev);
irdma_set_device_ops(&iwdev->ibdev);
if (iwdev->rf->rdma_ver == IRDMA_GEN_1)
irdma_set_device_gen1_ops(&iwdev->ibdev);


@ -62,12 +62,15 @@ struct irdma_ucontext {
struct list_head vma_list;
struct mutex vma_list_mutex; /* protect the vma_list */
int abi_ver;
bool legacy_mode;
bool legacy_mode:1;
bool use_raw_attrs:1;
};
struct irdma_pd {
struct ib_pd ibpd;
struct irdma_sc_pd sc_pd;
struct list_head udqp_list;
spinlock_t udqp_list_lock;
};
struct irdma_av {
@ -184,6 +187,15 @@ struct disconn_work {
struct irdma_qp *iwqp;
};
struct if_notify_work {
struct work_struct work;
struct irdma_device *iwdev;
u32 ipaddr[4];
u16 vlan_id;
bool ipv4:1;
bool ifup:1;
};
struct iw_cm_id;
struct irdma_qp_kmode {
@ -220,6 +232,7 @@ struct irdma_qp {
struct irdma_ah roce_ah;
struct list_head teardown_entry;
struct list_head ud_list_elem;
atomic_t refcnt;
struct iw_cm_id *cm_id;
struct irdma_cm_node *cm_node;
@ -261,6 +274,13 @@ struct irdma_qp {
u8 pau_mode : 1;
};
struct irdma_udqs_work {
struct work_struct work;
struct irdma_qp *iwqp;
u8 user_prio;
bool qs_change:1;
};
enum irdma_mmap_flag {
IRDMA_MMAP_IO_NC,
IRDMA_MMAP_IO_WC,


@ -83,7 +83,6 @@ irdma_alloc_node(struct irdma_sc_vsi *vsi,
if (!node->rel_bw)
node->rel_bw = 1;
node->lan_qs_handle = vsi->qos[user_pri].lan_qos_handle;
node->prio_type = IRDMA_PRIO_WEIGHTED_RR;
} else {
node->rel_bw = 1;
@ -383,7 +382,6 @@ irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
if (vsi->qos[i].traffic_class == traffic_class) {
vsi->qos[i].qs_handle = tc_node->qs_handle;
vsi->qos[i].lan_qos_handle = tc_node->lan_qs_handle;
vsi->qos[i].l2_sched_node_id = tc_node->l2_sched_node_id;
vsi->qos[i].valid = true;
}


@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2015 - 2021 Intel Corporation
* Copyright (c) 2015 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -52,7 +52,6 @@ struct irdma_ws_node {
struct list_head siblings;
struct list_head child_list_head;
struct irdma_ws_node *parent;
u64 lan_qs_handle; /* opaque handle used by LAN */
u32 l2_sched_node_id;
u16 index;
u16 qs_handle;


@ -118,7 +118,7 @@ do { \
irdma_debug(dev, mask, "%s\n", desc); \
irdma_debug(dev, mask, "starting address virt=%p phy=%lxh\n", buf, irdma_get_virt_to_phy(buf)); \
for (i = 0; i < size ; i += 8) \
irdma_debug(dev, mask, "index %03d val: %016lx\n", i, ((unsigned long *)buf)[i / 8]); \
irdma_debug(dev, mask, "index %03d val: %016lx\n", i, ((unsigned long *)(buf))[i / 8]); \
} while(0)
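
The only change to the dump loop above is the extra parentheses around buf inside the cast. If a caller ever passes a pointer expression rather than a plain variable, the unparenthesized form binds the cast to only part of the expression and indexes from the wrong base. A small stand-alone illustration (the printed values assume an 8-byte unsigned long):

#include <stdio.h>

/* without inner parentheses the cast binds to 'p' alone, then + 8 adds
 * eight unsigned longs (64 bytes), not eight bytes */
#define VAL_BAD(buf, i)  (((unsigned long *)buf)[(i) / 8])
/* with parentheses the whole expression is cast, so + 8 stays a byte offset */
#define VAL_GOOD(buf, i) (((unsigned long *)(buf))[(i) / 8])

int main(void)
{
	unsigned long data[16];
	char *p = (char *)data;
	int i;

	for (i = 0; i < 16; i++)
		data[i] = i;

	/* the caller passes an expression, not a plain pointer */
	printf("good: %lu\n", VAL_GOOD(p + 8, 0));	/* byte offset 8 -> data[1] = 1 */
	printf("bad:  %lu\n", VAL_BAD(p + 8, 0));	/* 8 longs in    -> data[8] = 8 */
	return 0;
}
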
#define irdma_debug(h, m, s, ...) \
@ -130,11 +130,12 @@ do { \
printf("irdma " s, ##__VA_ARGS__); \
} \
} while (0)
#define irdma_dev_err(a, b, ...) printf(b, ##__VA_ARGS__)
#define irdma_dev_warn(a, b, ...) printf(b, ##__VA_ARGS__) /*dev_warn(a, b)*/
#define irdma_dev_err(ibdev, fmt, ...) \
pr_err("%s:%s:%d ERR "fmt, (ibdev)->name, __func__, __LINE__, ##__VA_ARGS__)
#define irdma_dev_warn(ibdev, fmt, ...) \
pr_warn("%s:%s:%d WARN "fmt, (ibdev)->name, __func__, __LINE__, ##__VA_ARGS__)
#define irdma_dev_info(a, b, ...) printf(b, ##__VA_ARGS__)
#define irdma_pr_warn printf
#define ibdev_err(ibdev, fmt, ...) printf("%s:"fmt, (ibdev)->name, ##__VA_ARGS__)
#define dump_struct(s, sz, name) \
do { \
@ -245,6 +246,6 @@ void irdma_unmap_vm_page_list(struct irdma_hw *hw, u64 *pg_arr, u32 pg_cnt);
int irdma_map_vm_page_list(struct irdma_hw *hw, void *va,
u64 *pg_arr, u32 pg_cnt);
struct ib_device *irdma_get_ibdev(struct irdma_sc_dev *dev);
struct ib_device *to_ibdev(struct irdma_sc_dev *dev);
#endif /* _ICRDMA_OSDEP_H_ */
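
For reference, the reworked irdma_dev_err()/irdma_dev_warn() macros lean on compile-time string-literal concatenation to prefix the device name, function and line number. A user-space approximation with printf standing in for pr_err()/pr_warn(); the struct below is a stand-in, not the driver's ib_device:

#include <stdio.h>

struct example_ibdev {
	const char *name;
};

/* printf stands in for the kernel's pr_warn(); "WARN " fmt concatenates at
 * compile time, so the caller's format string must be a literal */
#define example_dev_warn(ibdev, fmt, ...) \
	printf("%s:%s:%d WARN " fmt, (ibdev)->name, __func__, __LINE__, ##__VA_ARGS__)

int main(void)
{
	struct example_ibdev dev = { .name = "irdma0" };

	example_dev_warn(&dev, "WS add failed, qp_id: %x user_pri: %x\n", 42, 3);
	return 0;
}
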