irdma(4): Upgrade driver to 1.1.5-k

Upgrade the irdma driver (which provides RDMA support on the Intel(R)
Ethernet Controller E810) to version 1.1.5-k.

Change summary:
- refactor defines for hardware registers (see the sketch below)
- add rereg_mr verb in libirdma
- fix print warnings during compilation
- fix the rt_tos2priority macro
- validate irdma.4 with mandoc
- fix nd6_resolve usage
- add libirdma_query_device
- add a sysctl for the irdma version
- fix aeq_alloc_db
- protect dwork_flush with a qp refcount
- PFC fixes
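
The bulk of the mechanical churn is the register-define refactor: paired _S/_M
shift-and-mask macros consumed through LS_64()/RS_64() become single
GENMASK()/GENMASK_ULL()/BIT_ULL() masks consumed through FIELD_PREP()/FIELD_GET().
A minimal before/after sketch using the IRDMA_CQ_MAJERR field (the fallback
helpers are copied from the diff below; the CQE word value is made up):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* Fallback helpers as added by this change (copied from the diff below). */
#define GENMASK_ULL(high, low) \
	((0xFFFFFFFFFFFFFFFFULL >> (64ULL - ((high) - (low) + 1ULL))) << (low))
#define FIELD_GET(mask, val) (((val) & mask) >> mask##_S)

/* Old style: explicit shift (_S) plus a hand-built mask (_M). */
#define IRDMA_CQ_MAJERR_S 16
#define IRDMA_CQ_MAJERR_M (0xffffULL << IRDMA_CQ_MAJERR_S)

/*
 * New style: the mask is a single GENMASK_ULL(); the _S macro is kept
 * because the FIELD_GET() fallback above still expands mask##_S.
 */
#define IRDMA_CQ_MAJERR GENMASK_ULL(31, 16)

int main(void)
{
	u64 qword3 = 0x00120034ULL;	/* made-up CQE word: majerr=0x12, minerr=0x34 */
	u64 old_way = (qword3 & IRDMA_CQ_MAJERR_M) >> IRDMA_CQ_MAJERR_S;
	u64 new_way = FIELD_GET(IRDMA_CQ_MAJERR, qword3);

	printf("major error code: old=0x%llx new=0x%llx\n",
	    (unsigned long long)old_way, (unsigned long long)new_way);
	return 0;
}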

Signed-off-by: Eric Joyner <erj@FreeBSD.org>

Reviewed by:	erj@
Sponsored by:	Intel Corporation
MFC after:	2 weeks
Differential Revision:	https://reviews.freebsd.org/D36944
Author: Bartosz Sobczak, 2022-12-21 17:10:15 -08:00 (committed by Eric Joyner)
Commit: 777e472cd8 (parent: 3fe0cb6695)
42 changed files with 4308 additions and 4272 deletions


@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (C) 2019 - 2020 Intel Corporation
* Copyright (C) 2019 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -145,6 +145,14 @@ struct irdma_ureg_mr {
__u16 rq_pages;
__u16 sq_pages;
};
struct irdma_urereg_mr {
struct ibv_rereg_mr ibv_cmd;
__u16 reg_type; /* enum irdma_memreg_type */
__u16 cq_pages;
__u16 rq_pages;
__u16 sq_pages;
};
struct irdma_ucreate_ah_resp {
struct ibv_create_ah_resp ibv_resp;


@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2015 - 2020 Intel Corporation
* Copyright (c) 2015 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -50,11 +50,11 @@ enum i40iw_device_caps_const {
I40IW_MAX_CQ_SIZE = 1048575,
I40IW_MAX_OUTBOUND_MSG_SIZE = 2147483647,
I40IW_MAX_INBOUND_MSG_SIZE = 2147483647,
I40IW_MIN_WQ_SIZE = 4 /* WQEs */,
};
#define I40IW_QP_WQE_MIN_SIZE 32
#define I40IW_QP_WQE_MAX_SIZE 128
#define I40IW_QP_SW_MIN_WQSIZE 4
#define I40IW_MAX_RQ_WQE_SHIFT 2
#define I40IW_MAX_QUANTA_PER_WR 2


@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2017 - 2021 Intel Corporation
* Copyright (c) 2017 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -39,13 +39,15 @@
#define RDMA_BIT2(type, a) ((u##type) 1UL << a)
#define RDMA_MASK3(type, mask, shift) ((u##type) mask << shift)
#define MAKEMASK(m, s) ((m) << (s))
#define IRDMA_WQEALLOC_WQE_DESC_INDEX_S 20
#define IRDMA_WQEALLOC_WQE_DESC_INDEX_M (0xfff << IRDMA_WQEALLOC_WQE_DESC_INDEX_S)
#define IRDMA_WQEALLOC_WQE_DESC_INDEX_S 20
#define IRDMA_WQEALLOC_WQE_DESC_INDEX GENMASK(31, 20)
enum irdma_vers {
IRDMA_GEN_RSVD,
IRDMA_GEN_1,
IRDMA_GEN_2,
IRDMA_GEN_RSVD = 0,
IRDMA_GEN_1 = 1,
IRDMA_GEN_2 = 2,
IRDMA_GEN_MAX = 2,
};
struct irdma_uk_attrs {
@ -58,8 +60,7 @@ struct irdma_uk_attrs {
u32 min_hw_cq_size;
u32 max_hw_cq_size;
u16 max_hw_sq_chunk;
u16 max_hw_wq_size;
u16 min_sw_wq_size;
u16 min_hw_wq_size;
u8 hw_rev;
};
@ -68,6 +69,7 @@ struct irdma_hw_attrs {
u64 max_hw_outbound_msg_size;
u64 max_hw_inbound_msg_size;
u64 max_mr_size;
u64 page_size_cap;
u32 min_hw_qp_id;
u32 min_hw_aeq_size;
u32 max_hw_aeq_size;


@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2015 - 2021 Intel Corporation
* Copyright (c) 2015 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -75,7 +75,6 @@
#define IRDMA_CQE_QTYPE_RQ 0
#define IRDMA_CQE_QTYPE_SQ 1
#define IRDMA_QP_SW_MIN_WQSIZE 8u /* in WRs*/
#define IRDMA_QP_WQE_MIN_SIZE 32
#define IRDMA_QP_WQE_MAX_SIZE 256
#define IRDMA_QP_WQE_MIN_QUANTA 1
@ -85,9 +84,11 @@
#define IRDMA_SQ_RSVD 258
#define IRDMA_RQ_RSVD 1
#define IRDMA_FEATURE_RTS_AE 1ULL
#define IRDMA_FEATURE_CQ_RESIZE 2ULL
#define IRDMA_FEATURE_RELAX_RQ_ORDER 4ULL
#define IRDMA_FEATURE_RTS_AE BIT_ULL(0)
#define IRDMA_FEATURE_CQ_RESIZE BIT_ULL(1)
#define IRDMA_FEATURE_RELAX_RQ_ORDER BIT_ULL(2)
#define IRDMA_FEATURE_64_BYTE_CQE BIT_ULL(5)
#define IRDMAQP_OP_RDMA_WRITE 0x00
#define IRDMAQP_OP_RDMA_READ 0x01
#define IRDMAQP_OP_RDMA_SEND 0x03
@ -106,262 +107,198 @@
#define LS_32_1(val, bits) ((u32)((val) << (bits)))
#define RS_32_1(val, bits) ((u32)((val) >> (bits)))
#endif
#define LS_64(val, field) (((u64)(val) << field ## _S) & (field ## _M))
#define RS_64(val, field) ((u64)((val) & field ## _M) >> field ## _S)
#define LS_32(val, field) (((val) << field ## _S) & (field ## _M))
#define RS_32(val, field) (((val) & field ## _M) >> field ## _S)
#ifndef GENMASK_ULL
#define GENMASK_ULL(high, low) ((0xFFFFFFFFFFFFFFFFULL >> (64ULL - ((high) - (low) + 1ULL))) << (low))
#endif /* GENMASK_ULL */
#ifndef GENMASK
#define GENMASK(high, low) ((0xFFFFFFFFUL >> (32UL - ((high) - (low) + 1UL))) << (low))
#endif /* GENMASK */
#ifndef FIELD_PREP
#define FIELD_PREP(mask, val) (((u64)(val) << mask##_S) & (mask))
#define FIELD_GET(mask, val) (((val) & mask) >> mask##_S)
#endif /* FIELD_PREP */
#define IRDMA_CQPHC_QPCTX_S 0
#define IRDMA_CQPHC_QPCTX_M \
(0xffffffffffffffffULL << IRDMA_CQPHC_QPCTX_S)
/* iWARP QP Doorbell shadow area */
#define IRDMA_CQPHC_QPCTX GENMASK_ULL(63, 0)
#define IRDMA_QP_DBSA_HW_SQ_TAIL_S 0
#define IRDMA_QP_DBSA_HW_SQ_TAIL_M \
(0x7fffULL << IRDMA_QP_DBSA_HW_SQ_TAIL_S)
/* Completion Queue Doorbell shadow area */
#define IRDMA_QP_DBSA_HW_SQ_TAIL GENMASK_ULL(14, 0)
#define IRDMA_CQ_DBSA_CQEIDX_S 0
#define IRDMA_CQ_DBSA_CQEIDX_M (0xfffffULL << IRDMA_CQ_DBSA_CQEIDX_S)
#define IRDMA_CQ_DBSA_CQEIDX GENMASK_ULL(19, 0)
#define IRDMA_CQ_DBSA_SW_CQ_SELECT_S 0
#define IRDMA_CQ_DBSA_SW_CQ_SELECT_M \
(0x3fffULL << IRDMA_CQ_DBSA_SW_CQ_SELECT_S)
#define IRDMA_CQ_DBSA_SW_CQ_SELECT GENMASK_ULL(13, 0)
#define IRDMA_CQ_DBSA_ARM_NEXT_S 14
#define IRDMA_CQ_DBSA_ARM_NEXT_M BIT_ULL(IRDMA_CQ_DBSA_ARM_NEXT_S)
#define IRDMA_CQ_DBSA_ARM_NEXT BIT_ULL(14)
#define IRDMA_CQ_DBSA_ARM_NEXT_SE_S 15
#define IRDMA_CQ_DBSA_ARM_NEXT_SE_M BIT_ULL(IRDMA_CQ_DBSA_ARM_NEXT_SE_S)
#define IRDMA_CQ_DBSA_ARM_NEXT_SE BIT_ULL(15)
#define IRDMA_CQ_DBSA_ARM_SEQ_NUM_S 16
#define IRDMA_CQ_DBSA_ARM_SEQ_NUM_M \
(0x3ULL << IRDMA_CQ_DBSA_ARM_SEQ_NUM_S)
#define IRDMA_CQ_DBSA_ARM_SEQ_NUM GENMASK_ULL(17, 16)
/* CQP and iWARP Completion Queue */
#define IRDMA_CQ_QPCTX_S IRDMA_CQPHC_QPCTX_S
#define IRDMA_CQ_QPCTX_M IRDMA_CQPHC_QPCTX_M
#define IRDMA_CQ_QPCTX IRDMA_CQPHC_QPCTX
#define IRDMA_CQ_MINERR_S 0
#define IRDMA_CQ_MINERR_M (0xffffULL << IRDMA_CQ_MINERR_S)
#define IRDMA_CQ_MINERR GENMASK_ULL(15, 0)
#define IRDMA_CQ_MAJERR_S 16
#define IRDMA_CQ_MAJERR_M (0xffffULL << IRDMA_CQ_MAJERR_S)
#define IRDMA_CQ_MAJERR GENMASK_ULL(31, 16)
#define IRDMA_CQ_WQEIDX_S 32
#define IRDMA_CQ_WQEIDX_M (0x7fffULL << IRDMA_CQ_WQEIDX_S)
#define IRDMA_CQ_WQEIDX GENMASK_ULL(46, 32)
#define IRDMA_CQ_EXTCQE_S 50
#define IRDMA_CQ_EXTCQE_M BIT_ULL(IRDMA_CQ_EXTCQE_S)
#define IRDMA_CQ_EXTCQE BIT_ULL(50)
#define IRDMA_OOO_CMPL_S 54
#define IRDMA_OOO_CMPL_M BIT_ULL(IRDMA_OOO_CMPL_S)
#define IRDMA_OOO_CMPL BIT_ULL(54)
#define IRDMA_CQ_ERROR_S 55
#define IRDMA_CQ_ERROR_M BIT_ULL(IRDMA_CQ_ERROR_S)
#define IRDMA_CQ_ERROR BIT_ULL(55)
#define IRDMA_CQ_SQ_S 62
#define IRDMA_CQ_SQ_M BIT_ULL(IRDMA_CQ_SQ_S)
#define IRDMA_CQ_SQ BIT_ULL(62)
#define IRDMA_CQ_VALID_S 63
#define IRDMA_CQ_VALID_M BIT_ULL(IRDMA_CQ_VALID_S)
#define IRDMA_CQ_IMMVALID_S 62
#define IRDMA_CQ_IMMVALID_M BIT_ULL(IRDMA_CQ_IMMVALID_S)
#define IRDMA_CQ_VALID BIT_ULL(63)
#define IRDMA_CQ_IMMVALID BIT_ULL(62)
#define IRDMA_CQ_UDSMACVALID_S 61
#define IRDMA_CQ_UDSMACVALID_M BIT_ULL(IRDMA_CQ_UDSMACVALID_S)
#define IRDMA_CQ_UDSMACVALID BIT_ULL(61)
#define IRDMA_CQ_UDVLANVALID_S 60
#define IRDMA_CQ_UDVLANVALID_M BIT_ULL(IRDMA_CQ_UDVLANVALID_S)
#define IRDMA_CQ_UDVLANVALID BIT_ULL(60)
#define IRDMA_CQ_UDSMAC_S 0
#define IRDMA_CQ_UDSMAC_M (0xffffffffffffULL << IRDMA_CQ_UDSMAC_S)
#define IRDMA_CQ_UDSMAC GENMASK_ULL(47, 0)
#define IRDMA_CQ_UDVLAN_S 48
#define IRDMA_CQ_UDVLAN_M (0xffffULL << IRDMA_CQ_UDVLAN_S)
#define IRDMA_CQ_UDVLAN GENMASK_ULL(63, 48)
#define IRDMA_CQ_IMMDATA_S 0
#define IRDMA_CQ_IMMDATA_M (0xffffffffffffffffULL << IRDMA_CQ_IMMVALID_S)
#define IRDMA_CQ_IMMVALID_S 62
#define IRDMA_CQ_IMMDATA GENMASK_ULL(125, 62)
#define IRDMA_CQ_IMMDATALOW32_S 0
#define IRDMA_CQ_IMMDATALOW32_M (0xffffffffULL << IRDMA_CQ_IMMDATALOW32_S)
#define IRDMA_CQ_IMMDATALOW32 GENMASK_ULL(31, 0)
#define IRDMA_CQ_IMMDATAUP32_S 32
#define IRDMA_CQ_IMMDATAUP32_M (0xffffffffULL << IRDMA_CQ_IMMDATAUP32_S)
#define IRDMA_CQ_IMMDATAUP32 GENMASK_ULL(63, 32)
#define IRDMACQ_PAYLDLEN_S 0
#define IRDMACQ_PAYLDLEN_M (0xffffffffULL << IRDMACQ_PAYLDLEN_S)
#define IRDMACQ_TCPSEQNUMRTT_S 32
#define IRDMACQ_TCPSEQNUMRTT_M (0xffffffffULL << IRDMACQ_TCPSEQNUMRTT_S)
#define IRDMACQ_PAYLDLEN GENMASK_ULL(31, 0)
#define IRDMACQ_TCPSQN_ROCEPSN_RTT_TS_S 32
#define IRDMACQ_TCPSQN_ROCEPSN_RTT_TS GENMASK_ULL(63, 32)
#define IRDMACQ_INVSTAG_S 0
#define IRDMACQ_INVSTAG_M (0xffffffffULL << IRDMACQ_INVSTAG_S)
#define IRDMACQ_INVSTAG GENMASK_ULL(31, 0)
#define IRDMACQ_QPID_S 32
#define IRDMACQ_QPID_M (0xffffffULL << IRDMACQ_QPID_S)
#define IRDMACQ_QPID GENMASK_ULL(55, 32)
#define IRDMACQ_UDSRCQPN_S 0
#define IRDMACQ_UDSRCQPN_M (0xffffffffULL << IRDMACQ_UDSRCQPN_S)
#define IRDMACQ_UDSRCQPN GENMASK_ULL(31, 0)
#define IRDMACQ_PSHDROP_S 51
#define IRDMACQ_PSHDROP_M BIT_ULL(IRDMACQ_PSHDROP_S)
#define IRDMACQ_PSHDROP BIT_ULL(51)
#define IRDMACQ_STAG_S 53
#define IRDMACQ_STAG_M BIT_ULL(IRDMACQ_STAG_S)
#define IRDMACQ_STAG BIT_ULL(53)
#define IRDMACQ_IPV4_S 53
#define IRDMACQ_IPV4_M BIT_ULL(IRDMACQ_IPV4_S)
#define IRDMACQ_IPV4 BIT_ULL(53)
#define IRDMACQ_SOEVENT_S 54
#define IRDMACQ_SOEVENT_M BIT_ULL(IRDMACQ_SOEVENT_S)
#define IRDMACQ_SOEVENT BIT_ULL(54)
#define IRDMACQ_OP_S 56
#define IRDMACQ_OP_M (0x3fULL << IRDMACQ_OP_S)
#define IRDMACQ_OP GENMASK_ULL(61, 56)
/* Manage Push Page - MPP */
#define IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1 0xffff
#define IRDMA_INVALID_PUSH_PAGE_INDEX 0xffffffff
/* iwarp QP SQ WQE common fields */
#define IRDMAQPSQ_OPCODE_S 32
#define IRDMAQPSQ_OPCODE_M (0x3fULL << IRDMAQPSQ_OPCODE_S)
#define IRDMAQPSQ_OPCODE GENMASK_ULL(37, 32)
#define IRDMAQPSQ_COPY_HOST_PBL_S 43
#define IRDMAQPSQ_COPY_HOST_PBL_M BIT_ULL(IRDMAQPSQ_COPY_HOST_PBL_S)
#define IRDMAQPSQ_COPY_HOST_PBL BIT_ULL(43)
#define IRDMAQPSQ_ADDFRAGCNT_S 38
#define IRDMAQPSQ_ADDFRAGCNT_M (0xfULL << IRDMAQPSQ_ADDFRAGCNT_S)
#define IRDMAQPSQ_ADDFRAGCNT GENMASK_ULL(41, 38)
#define IRDMAQPSQ_PUSHWQE_S 56
#define IRDMAQPSQ_PUSHWQE_M BIT_ULL(IRDMAQPSQ_PUSHWQE_S)
#define IRDMAQPSQ_PUSHWQE BIT_ULL(56)
#define IRDMAQPSQ_STREAMMODE_S 58
#define IRDMAQPSQ_STREAMMODE_M BIT_ULL(IRDMAQPSQ_STREAMMODE_S)
#define IRDMAQPSQ_STREAMMODE BIT_ULL(58)
#define IRDMAQPSQ_WAITFORRCVPDU_S 59
#define IRDMAQPSQ_WAITFORRCVPDU_M BIT_ULL(IRDMAQPSQ_WAITFORRCVPDU_S)
#define IRDMAQPSQ_WAITFORRCVPDU BIT_ULL(59)
#define IRDMAQPSQ_READFENCE_S 60
#define IRDMAQPSQ_READFENCE_M BIT_ULL(IRDMAQPSQ_READFENCE_S)
#define IRDMAQPSQ_READFENCE BIT_ULL(60)
#define IRDMAQPSQ_LOCALFENCE_S 61
#define IRDMAQPSQ_LOCALFENCE_M BIT_ULL(IRDMAQPSQ_LOCALFENCE_S)
#define IRDMAQPSQ_LOCALFENCE BIT_ULL(61)
#define IRDMAQPSQ_UDPHEADER_S 61
#define IRDMAQPSQ_UDPHEADER_M BIT_ULL(IRDMAQPSQ_UDPHEADER_S)
#define IRDMAQPSQ_UDPHEADER BIT_ULL(61)
#define IRDMAQPSQ_L4LEN_S 42
#define IRDMAQPSQ_L4LEN_M ((u64)0xF << IRDMAQPSQ_L4LEN_S)
#define IRDMAQPSQ_L4LEN GENMASK_ULL(45, 42)
#define IRDMAQPSQ_SIGCOMPL_S 62
#define IRDMAQPSQ_SIGCOMPL_M BIT_ULL(IRDMAQPSQ_SIGCOMPL_S)
#define IRDMAQPSQ_SIGCOMPL BIT_ULL(62)
#define IRDMAQPSQ_VALID_S 63
#define IRDMAQPSQ_VALID_M BIT_ULL(IRDMAQPSQ_VALID_S)
#define IRDMAQPSQ_VALID BIT_ULL(63)
#define IRDMAQPSQ_FRAG_TO_S IRDMA_CQPHC_QPCTX_S
#define IRDMAQPSQ_FRAG_TO_M IRDMA_CQPHC_QPCTX_M
#define IRDMAQPSQ_FRAG_TO IRDMA_CQPHC_QPCTX
#define IRDMAQPSQ_FRAG_VALID_S 63
#define IRDMAQPSQ_FRAG_VALID_M BIT_ULL(IRDMAQPSQ_FRAG_VALID_S)
#define IRDMAQPSQ_FRAG_VALID BIT_ULL(63)
#define IRDMAQPSQ_FRAG_LEN_S 32
#define IRDMAQPSQ_FRAG_LEN_M (0x7fffffffULL << IRDMAQPSQ_FRAG_LEN_S)
#define IRDMAQPSQ_FRAG_LEN GENMASK_ULL(62, 32)
#define IRDMAQPSQ_FRAG_STAG_S 0
#define IRDMAQPSQ_FRAG_STAG_M (0xffffffffULL << IRDMAQPSQ_FRAG_STAG_S)
#define IRDMAQPSQ_FRAG_STAG GENMASK_ULL(31, 0)
#define IRDMAQPSQ_GEN1_FRAG_LEN_S 0
#define IRDMAQPSQ_GEN1_FRAG_LEN_M (0xffffffffULL << IRDMAQPSQ_GEN1_FRAG_LEN_S)
#define IRDMAQPSQ_GEN1_FRAG_LEN GENMASK_ULL(31, 0)
#define IRDMAQPSQ_GEN1_FRAG_STAG_S 32
#define IRDMAQPSQ_GEN1_FRAG_STAG_M (0xffffffffULL << IRDMAQPSQ_GEN1_FRAG_STAG_S)
#define IRDMAQPSQ_GEN1_FRAG_STAG GENMASK_ULL(63, 32)
#define IRDMAQPSQ_REMSTAGINV_S 0
#define IRDMAQPSQ_REMSTAGINV_M (0xffffffffULL << IRDMAQPSQ_REMSTAGINV_S)
#define IRDMAQPSQ_REMSTAGINV GENMASK_ULL(31, 0)
#define IRDMAQPSQ_DESTQKEY_S 0
#define IRDMAQPSQ_DESTQKEY_M (0xffffffffULL << IRDMAQPSQ_DESTQKEY_S)
#define IRDMAQPSQ_DESTQKEY GENMASK_ULL(31, 0)
#define IRDMAQPSQ_DESTQPN_S 32
#define IRDMAQPSQ_DESTQPN_M (0x00ffffffULL << IRDMAQPSQ_DESTQPN_S)
#define IRDMAQPSQ_DESTQPN GENMASK_ULL(55, 32)
#define IRDMAQPSQ_AHID_S 0
#define IRDMAQPSQ_AHID_M (0x0001ffffULL << IRDMAQPSQ_AHID_S)
#define IRDMAQPSQ_AHID GENMASK_ULL(16, 0)
#define IRDMAQPSQ_INLINEDATAFLAG_S 57
#define IRDMAQPSQ_INLINEDATAFLAG_M BIT_ULL(IRDMAQPSQ_INLINEDATAFLAG_S)
#define IRDMAQPSQ_INLINEDATAFLAG BIT_ULL(57)
#define IRDMA_INLINE_VALID_S 7
#define IRDMAQPSQ_INLINEDATALEN_S 48
#define IRDMAQPSQ_INLINEDATALEN_M \
(0xffULL << IRDMAQPSQ_INLINEDATALEN_S)
#define IRDMAQPSQ_INLINEDATALEN GENMASK_ULL(55, 48)
#define IRDMAQPSQ_IMMDATAFLAG_S 47
#define IRDMAQPSQ_IMMDATAFLAG_M \
BIT_ULL(IRDMAQPSQ_IMMDATAFLAG_S)
#define IRDMAQPSQ_IMMDATAFLAG BIT_ULL(47)
#define IRDMAQPSQ_REPORTRTT_S 46
#define IRDMAQPSQ_REPORTRTT_M \
BIT_ULL(IRDMAQPSQ_REPORTRTT_S)
#define IRDMAQPSQ_REPORTRTT BIT_ULL(46)
#define IRDMAQPSQ_IMMDATA_S 0
#define IRDMAQPSQ_IMMDATA_M \
(0xffffffffffffffffULL << IRDMAQPSQ_IMMDATA_S)
/* rdma write */
#define IRDMAQPSQ_IMMDATA GENMASK_ULL(63, 0)
#define IRDMAQPSQ_REMSTAG_S 0
#define IRDMAQPSQ_REMSTAG_M (0xffffffffULL << IRDMAQPSQ_REMSTAG_S)
#define IRDMAQPSQ_REMSTAG GENMASK_ULL(31, 0)
#define IRDMAQPSQ_REMTO_S IRDMA_CQPHC_QPCTX_S
#define IRDMAQPSQ_REMTO_M IRDMA_CQPHC_QPCTX_M
#define IRDMAQPSQ_REMTO IRDMA_CQPHC_QPCTX
/* memory window */
#define IRDMAQPSQ_STAGRIGHTS_S 48
#define IRDMAQPSQ_STAGRIGHTS_M (0x1fULL << IRDMAQPSQ_STAGRIGHTS_S)
#define IRDMAQPSQ_STAGRIGHTS GENMASK_ULL(52, 48)
#define IRDMAQPSQ_VABASEDTO_S 53
#define IRDMAQPSQ_VABASEDTO_M BIT_ULL(IRDMAQPSQ_VABASEDTO_S)
#define IRDMAQPSQ_VABASEDTO BIT_ULL(53)
#define IRDMAQPSQ_MEMWINDOWTYPE_S 54
#define IRDMAQPSQ_MEMWINDOWTYPE_M BIT_ULL(IRDMAQPSQ_MEMWINDOWTYPE_S)
#define IRDMAQPSQ_MEMWINDOWTYPE BIT_ULL(54)
#define IRDMAQPSQ_MWLEN_S IRDMA_CQPHC_QPCTX_S
#define IRDMAQPSQ_MWLEN_M IRDMA_CQPHC_QPCTX_M
#define IRDMAQPSQ_MWLEN IRDMA_CQPHC_QPCTX
#define IRDMAQPSQ_PARENTMRSTAG_S 32
#define IRDMAQPSQ_PARENTMRSTAG_M \
(0xffffffffULL << IRDMAQPSQ_PARENTMRSTAG_S)
#define IRDMAQPSQ_PARENTMRSTAG GENMASK_ULL(63, 32)
#define IRDMAQPSQ_MWSTAG_S 0
#define IRDMAQPSQ_MWSTAG_M (0xffffffffULL << IRDMAQPSQ_MWSTAG_S)
#define IRDMAQPSQ_MWSTAG GENMASK_ULL(31, 0)
#define IRDMAQPSQ_BASEVA_TO_FBO_S IRDMA_CQPHC_QPCTX_S
#define IRDMAQPSQ_BASEVA_TO_FBO_M IRDMA_CQPHC_QPCTX_M
#define IRDMAQPSQ_BASEVA_TO_FBO IRDMA_CQPHC_QPCTX
/* Local Invalidate */
#define IRDMAQPSQ_LOCSTAG_S 0
#define IRDMAQPSQ_LOCSTAG_M (0xffffffffULL << IRDMAQPSQ_LOCSTAG_S)
#define IRDMAQPSQ_LOCSTAG GENMASK_ULL(31, 0)
/* iwarp QP RQ WQE common fields */
#define IRDMAQPRQ_ADDFRAGCNT_S IRDMAQPSQ_ADDFRAGCNT_S
#define IRDMAQPRQ_ADDFRAGCNT_M IRDMAQPSQ_ADDFRAGCNT_M
#define IRDMAQPRQ_ADDFRAGCNT IRDMAQPSQ_ADDFRAGCNT
#define IRDMAQPRQ_VALID_S IRDMAQPSQ_VALID_S
#define IRDMAQPRQ_VALID_M IRDMAQPSQ_VALID_M
#define IRDMAQPRQ_VALID IRDMAQPSQ_VALID
#define IRDMAQPRQ_COMPLCTX_S IRDMA_CQPHC_QPCTX_S
#define IRDMAQPRQ_COMPLCTX_M IRDMA_CQPHC_QPCTX_M
#define IRDMAQPRQ_COMPLCTX IRDMA_CQPHC_QPCTX
#define IRDMAQPRQ_FRAG_LEN_S IRDMAQPSQ_FRAG_LEN_S
#define IRDMAQPRQ_FRAG_LEN_M IRDMAQPSQ_FRAG_LEN_M
#define IRDMAQPRQ_FRAG_LEN IRDMAQPSQ_FRAG_LEN
#define IRDMAQPRQ_STAG_S IRDMAQPSQ_FRAG_STAG_S
#define IRDMAQPRQ_STAG_M IRDMAQPSQ_FRAG_STAG_M
#define IRDMAQPRQ_STAG IRDMAQPSQ_FRAG_STAG
#define IRDMAQPRQ_TO_S IRDMAQPSQ_FRAG_TO_S
#define IRDMAQPRQ_TO_M IRDMAQPSQ_FRAG_TO_M
#define IRDMAQPRQ_TO IRDMAQPSQ_FRAG_TO
#define IRDMAPFINT_OICR_HMC_ERR_M BIT(26)
#define IRDMAPFINT_OICR_PE_PUSH_M BIT(27)
@ -500,6 +437,12 @@
IRDMA_RING_MOVE_HEAD(_ring, _retcode); \
}
enum irdma_protocol_used {
IRDMA_ANY_PROTOCOL = 0,
IRDMA_IWARP_PROTOCOL_ONLY = 1,
IRDMA_ROCE_PROTOCOL_ONLY = 2,
};
enum irdma_qp_wqe_size {
IRDMA_WQE_SIZE_32 = 32,
IRDMA_WQE_SIZE_64 = 64,

[File diff suppressed because it is too large]


@ -39,6 +39,7 @@
#include <stdlib.h>
#include "irdma_umain.h"
#include "irdma-abi.h"
#include "irdma_uquery.h"
#include "ice_devids.h"
#include "i40e_devids.h"
@ -48,7 +49,7 @@
/**
* Driver version
*/
char libirdma_version[] = "0.0.51-k";
char libirdma_version[] = "1.1.5-k";
unsigned int irdma_dbg;
@ -118,6 +119,28 @@ static struct ibv_context_ops irdma_ctx_ops = {
.detach_mcast = irdma_udetach_mcast,
};
/**
* libirdma_query_device - fill libirdma_device structure
* @ctx_in - ibv_context identifying device
* @out - libirdma_device structure to fill quered info
*
* ctx_in is not used at the moment
*/
int
libirdma_query_device(struct ibv_context *ctx_in, struct libirdma_device *out)
{
if (!out)
return EIO;
if (sizeof(out->lib_ver) < sizeof(libirdma_version))
return ERANGE;
out->query_ver = 1;
snprintf(out->lib_ver, min(sizeof(libirdma_version), sizeof(out->lib_ver)),
"%s", libirdma_version);
return 0;
}
static int
irdma_init_context(struct verbs_device *vdev,
struct ibv_context *ctx, int cmd_fd)
@ -147,6 +170,7 @@ irdma_init_context(struct verbs_device *vdev,
iwvctx->uk_attrs.max_hw_sq_chunk = resp.max_hw_sq_chunk;
iwvctx->uk_attrs.max_hw_cq_size = resp.max_hw_cq_size;
iwvctx->uk_attrs.min_hw_cq_size = resp.min_hw_cq_size;
iwvctx->uk_attrs.min_hw_wq_size = IRDMA_MIN_WQ_SIZE_GEN2;
iwvctx->abi_ver = IRDMA_ABI_VER;
mmap_key = resp.db_mmap_key;
@ -180,8 +204,6 @@ irdma_cleanup_context(struct verbs_device *device,
{
struct irdma_uvcontext *iwvctx;
printf("%s %s CALL\n", __FILE__, __func__);
iwvctx = container_of(ibctx, struct irdma_uvcontext, ibv_ctx);
irdma_ufree_pd(&iwvctx->iwupd->ibv_pd);
munmap(iwvctx->db, IRDMA_HW_PAGE_SIZE);


@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (C) 2019 - 2020 Intel Corporation
* Copyright (C) 2019 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -46,12 +46,6 @@
#include "i40iw_hw.h"
#include "irdma_user.h"
#ifndef likely
#define likely(x) __builtin_expect((x), 1)
#endif
#ifndef unlikely
#define unlikely(x) __builtin_expect((x), 0)
#endif
#define PFX "libirdma-"
#define IRDMA_BASE_PUSH_PAGE 1
@ -62,13 +56,12 @@
LIST_HEAD(list_head, irdma_cq_buf);
LIST_HEAD(list_head_cmpl, irdma_cmpl_gen);
enum irdma_supported_wc_flags {
IRDMA_CQ_SUPPORTED_WC_FLAGS = IBV_WC_EX_WITH_BYTE_LEN
enum irdma_supported_wc_flags_ex {
IRDMA_STANDARD_WC_FLAGS_EX = IBV_WC_EX_WITH_BYTE_LEN
| IBV_WC_EX_WITH_IMM
| IBV_WC_EX_WITH_QP_NUM
| IBV_WC_EX_WITH_SRC_QP
| IBV_WC_EX_WITH_SL
| IBV_WC_EX_WITH_COMPLETION_TIMESTAMP,
| IBV_WC_EX_WITH_SL,
};
struct irdma_udevice {
@ -103,8 +96,11 @@ struct irdma_cq_buf {
LIST_ENTRY(irdma_cq_buf) list;
struct irdma_cq_uk cq;
struct verbs_mr vmr;
size_t buf_size;
};
extern pthread_mutex_t sigusr1_wait_mutex;
struct verbs_cq {
union {
struct ibv_cq cq;
@ -128,7 +124,6 @@ struct irdma_ucq {
bool arm_sol;
bool skip_sol;
int comp_vector;
uint32_t report_rtt;
struct irdma_uqp *uqp;
struct irdma_cq_uk cq;
struct list_head resize_list;
@ -139,7 +134,6 @@ struct irdma_ucq {
struct irdma_uqp {
struct ibv_qp ibv_qp;
struct ibv_qp_attr attr;
struct irdma_ucq *send_cq;
struct irdma_ucq *recv_cq;
struct verbs_mr vmr;
@ -154,16 +148,10 @@ struct irdma_uqp {
struct ibv_recv_wr *pend_rx_wr;
struct irdma_qp_uk qp;
enum ibv_qp_type qp_type;
enum ibv_qp_attr_mask attr_mask;
struct irdma_sge *recv_sges;
pthread_t flush_thread;
};
struct irdma_umr {
struct verbs_mr vmr;
uint32_t acc_flags;
};
/* irdma_uverbs.c */
int irdma_uquery_device_ex(struct ibv_context *context,
const struct ibv_query_device_ex_input *input,
@ -176,6 +164,10 @@ int irdma_uquery_device(struct ibv_context *, struct ibv_device_attr *);
struct ibv_mr *irdma_ureg_mr(struct ibv_pd *pd, void *addr, size_t length,
int access);
int irdma_udereg_mr(struct ibv_mr *mr);
int irdma_urereg_mr(struct verbs_mr *mr, int flags, struct ibv_pd *pd, void *addr,
size_t length, int access);
struct ibv_mw *irdma_ualloc_mw(struct ibv_pd *pd, enum ibv_mw_type type);
int irdma_ubind_mw(struct ibv_qp *qp, struct ibv_mw *mw,
struct ibv_mw_bind *mw_bind);


@ -0,0 +1,50 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (C) 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#ifndef IRDMA_UQUERY_H
#define IRDMA_UQUERY_H
#include <infiniband/verbs.h>
#include "osdep.h"
struct libirdma_device {
uint32_t query_ver;
char lib_ver[32];
uint8_t rsvd[128];
};
int libirdma_query_device(struct ibv_context *ctx_in, struct libirdma_device *out);
#endif /* IRDMA_UQUERY_H */
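
Since irdma_uquery.h and libirdma_query_device() are new in this update, a
minimal sketch of an application-side caller follows (device selection and
error handling are illustrative only; the other calls are standard libibverbs):

#include <errno.h>
#include <stdio.h>
#include <infiniband/verbs.h>
#include "irdma_uquery.h"

/* Illustrative: print the libirdma version reported for the first device. */
int print_libirdma_version(void)
{
	struct libirdma_device info = {};
	struct ibv_device **dev_list;
	struct ibv_context *ctx;
	int err;

	dev_list = ibv_get_device_list(NULL);
	if (!dev_list || !dev_list[0])
		return ENODEV;

	ctx = ibv_open_device(dev_list[0]);
	if (!ctx) {
		ibv_free_device_list(dev_list);
		return ENODEV;
	}

	err = libirdma_query_device(ctx, &info);
	if (!err)
		printf("libirdma %s (query_ver %u)\n", info.lib_ver, info.query_ver);

	ibv_close_device(ctx);
	ibv_free_device_list(dev_list);
	return err;
}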


@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2015 - 2021 Intel Corporation
* Copyright (c) 2015 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -53,7 +53,7 @@
#define irdma_address_list u64 *
#define irdma_sgl struct irdma_sge *
#define IRDMA_MAX_MR_SIZE 0x200000000000ULL
#define IRDMA_MAX_MR_SIZE 0x200000000000ULL
#define IRDMA_ACCESS_FLAGS_LOCALREAD 0x01
#define IRDMA_ACCESS_FLAGS_LOCALWRITE 0x02
@ -80,7 +80,97 @@
#define IRDMA_OP_TYPE_REC 0x3e
#define IRDMA_OP_TYPE_REC_IMM 0x3f
#define IRDMA_FLUSH_MAJOR_ERR 1
#define IRDMA_FLUSH_MAJOR_ERR 1
#define IRDMA_SRQFLUSH_RSVD_MAJOR_ERR 0xfffe
/* Async Events codes */
#define IRDMA_AE_AMP_UNALLOCATED_STAG 0x0102
#define IRDMA_AE_AMP_INVALID_STAG 0x0103
#define IRDMA_AE_AMP_BAD_QP 0x0104
#define IRDMA_AE_AMP_BAD_PD 0x0105
#define IRDMA_AE_AMP_BAD_STAG_KEY 0x0106
#define IRDMA_AE_AMP_BAD_STAG_INDEX 0x0107
#define IRDMA_AE_AMP_BOUNDS_VIOLATION 0x0108
#define IRDMA_AE_AMP_RIGHTS_VIOLATION 0x0109
#define IRDMA_AE_AMP_TO_WRAP 0x010a
#define IRDMA_AE_AMP_FASTREG_VALID_STAG 0x010c
#define IRDMA_AE_AMP_FASTREG_MW_STAG 0x010d
#define IRDMA_AE_AMP_FASTREG_INVALID_RIGHTS 0x010e
#define IRDMA_AE_AMP_FASTREG_INVALID_LENGTH 0x0110
#define IRDMA_AE_AMP_INVALIDATE_SHARED 0x0111
#define IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS 0x0112
#define IRDMA_AE_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS 0x0113
#define IRDMA_AE_AMP_MWBIND_VALID_STAG 0x0114
#define IRDMA_AE_AMP_MWBIND_OF_MR_STAG 0x0115
#define IRDMA_AE_AMP_MWBIND_TO_ZERO_BASED_STAG 0x0116
#define IRDMA_AE_AMP_MWBIND_TO_MW_STAG 0x0117
#define IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS 0x0118
#define IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS 0x0119
#define IRDMA_AE_AMP_MWBIND_TO_INVALID_PARENT 0x011a
#define IRDMA_AE_AMP_MWBIND_BIND_DISABLED 0x011b
#define IRDMA_AE_PRIV_OPERATION_DENIED 0x011c
#define IRDMA_AE_AMP_INVALIDATE_TYPE1_MW 0x011d
#define IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW 0x011e
#define IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG 0x011f
#define IRDMA_AE_AMP_MWBIND_WRONG_TYPE 0x0120
#define IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH 0x0121
#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG 0x0132
#define IRDMA_AE_UDA_XMIT_BAD_PD 0x0133
#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT 0x0134
#define IRDMA_AE_UDA_L4LEN_INVALID 0x0135
#define IRDMA_AE_BAD_CLOSE 0x0201
#define IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE 0x0202
#define IRDMA_AE_CQ_OPERATION_ERROR 0x0203
#define IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO 0x0205
#define IRDMA_AE_STAG_ZERO_INVALID 0x0206
#define IRDMA_AE_IB_RREQ_AND_Q1_FULL 0x0207
#define IRDMA_AE_IB_INVALID_REQUEST 0x0208
#define IRDMA_AE_WQE_UNEXPECTED_OPCODE 0x020a
#define IRDMA_AE_WQE_INVALID_PARAMETER 0x020b
#define IRDMA_AE_WQE_INVALID_FRAG_DATA 0x020c
#define IRDMA_AE_IB_REMOTE_ACCESS_ERROR 0x020d
#define IRDMA_AE_IB_REMOTE_OP_ERROR 0x020e
#define IRDMA_AE_WQE_LSMM_TOO_LONG 0x0220
#define IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301
#define IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303
#define IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304
#define IRDMA_AE_DDP_UBE_INVALID_MO 0x0305
#define IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE 0x0306
#define IRDMA_AE_DDP_UBE_INVALID_QN 0x0307
#define IRDMA_AE_DDP_NO_L_BIT 0x0308
#define IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION 0x0311
#define IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE 0x0312
#define IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST 0x0313
#define IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP 0x0314
#define IRDMA_AE_ROCE_RSP_LENGTH_ERROR 0x0316
#define IRDMA_AE_ROCE_EMPTY_MCG 0x0380
#define IRDMA_AE_ROCE_BAD_MC_IP_ADDR 0x0381
#define IRDMA_AE_ROCE_BAD_MC_QPID 0x0382
#define IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH 0x0383
#define IRDMA_AE_INVALID_ARP_ENTRY 0x0401
#define IRDMA_AE_INVALID_TCP_OPTION_RCVD 0x0402
#define IRDMA_AE_STALE_ARP_ENTRY 0x0403
#define IRDMA_AE_INVALID_AH_ENTRY 0x0406
#define IRDMA_AE_LLP_CLOSE_COMPLETE 0x0501
#define IRDMA_AE_LLP_CONNECTION_RESET 0x0502
#define IRDMA_AE_LLP_FIN_RECEIVED 0x0503
#define IRDMA_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH 0x0504
#define IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR 0x0505
#define IRDMA_AE_LLP_SEGMENT_TOO_SMALL 0x0507
#define IRDMA_AE_LLP_SYN_RECEIVED 0x0508
#define IRDMA_AE_LLP_TERMINATE_RECEIVED 0x0509
#define IRDMA_AE_LLP_TOO_MANY_RETRIES 0x050a
#define IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b
#define IRDMA_AE_LLP_DOUBT_REACHABILITY 0x050c
#define IRDMA_AE_LLP_CONNECTION_ESTABLISHED 0x050e
#define IRDMA_AE_RESOURCE_EXHAUSTION 0x0520
#define IRDMA_AE_RESET_SENT 0x0601
#define IRDMA_AE_TERMINATE_SENT 0x0602
#define IRDMA_AE_RESET_NOT_SENT 0x0603
#define IRDMA_AE_LCE_QP_CATASTROPHIC 0x0700
#define IRDMA_AE_LCE_FUNCTION_CATASTROPHIC 0x0701
#define IRDMA_AE_LCE_CQ_CATASTROPHIC 0x0702
#define IRDMA_AE_QP_SUSPEND_COMPLETE 0x0900
enum irdma_device_caps_const {
IRDMA_WQE_SIZE = 4,
@ -121,6 +211,7 @@ enum irdma_device_caps_const {
IRDMA_Q2_BUF_SIZE = 256,
IRDMA_QP_CTX_SIZE = 256,
IRDMA_MAX_PDS = 262144,
IRDMA_MIN_WQ_SIZE_GEN2 = 8,
};
enum irdma_addressing_type {
@ -137,9 +228,15 @@ enum irdma_flush_opcode {
FLUSH_REM_OP_ERR,
FLUSH_LOC_LEN_ERR,
FLUSH_FATAL_ERR,
FLUSH_RETRY_EXC_ERR,
FLUSH_MW_BIND_ERR,
FLUSH_REM_INV_REQ_ERR,
FLUSH_RETRY_EXC_ERR,
};
enum irdma_qp_event_type {
IRDMA_QP_EVENT_CATASTROPHIC,
IRDMA_QP_EVENT_ACCESS_ERR,
IRDMA_QP_EVENT_REQ_ERR,
};
enum irdma_cmpl_status {
@ -195,7 +292,7 @@ struct irdma_sge {
struct irdma_ring {
volatile u32 head;
volatile u32 tail;
volatile u32 tail; /* effective tail */
u32 size;
};
@ -215,14 +312,6 @@ struct irdma_post_send {
u32 ah_id;
};
struct irdma_post_inline_send {
void *data;
u32 len;
u32 qkey;
u32 dest_qp;
u32 ah_id;
};
struct irdma_post_rq_info {
u64 wr_id;
irdma_sgl sg_list;
@ -235,12 +324,6 @@ struct irdma_rdma_write {
struct irdma_sge rem_addr;
};
struct irdma_inline_rdma_write {
void *data;
u32 len;
struct irdma_sge rem_addr;
};
struct irdma_rdma_read {
irdma_sgl lo_sg_list;
u32 num_lo_sges;
@ -283,8 +366,6 @@ struct irdma_post_sq_info {
struct irdma_rdma_read rdma_read;
struct irdma_bind_window bind_window;
struct irdma_inv_local_stag inv_local_stag;
struct irdma_inline_rdma_write inline_rdma_write;
struct irdma_post_inline_send inline_send;
} op;
};
@ -292,7 +373,6 @@ struct irdma_cq_poll_info {
u64 wr_id;
irdma_qp_handle qp_handle;
u32 bytes_xfered;
u32 tcp_seq_num_rtt;
u32 qp_id;
u32 ud_src_qpn;
u32 imm_data;
@ -303,6 +383,7 @@ struct irdma_cq_poll_info {
u16 ud_vlan;
u8 ud_smac[6];
u8 op_type;
u8 q_type;
bool stag_invalid_set:1; /* or L_R_Key set */
bool push_dropped:1;
bool error:1;
@ -312,6 +393,17 @@ struct irdma_cq_poll_info {
bool ud_smac_valid:1;
bool imm_valid:1;
bool signaled:1;
union {
u32 tcp_sqn;
u32 roce_psn;
u32 rtt;
u32 raw;
} stat;
};
struct qp_err_code {
enum irdma_flush_opcode flush_code;
enum irdma_qp_event_type event_type;
};
int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
@ -336,7 +428,7 @@ int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
bool post_sq);
struct irdma_wqe_uk_ops {
void (*iw_copy_inline_data)(u8 *dest, u8 *src, u32 len, u8 polarity);
void (*iw_copy_inline_data)(u8 *dest, struct irdma_sge *sge_list, u32 num_sges, u8 polarity);
u16 (*iw_inline_data_size_to_quanta)(u32 data_size);
void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct irdma_sge *sge,
u8 valid);
@ -354,6 +446,12 @@ int irdma_uk_cq_init(struct irdma_cq_uk *cq,
struct irdma_cq_uk_init_info *info);
int irdma_uk_qp_init(struct irdma_qp_uk *qp,
struct irdma_qp_uk_init_info *info);
void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
u8 *rq_shift);
int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
u32 *sq_depth, u8 *sq_shift);
int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
u32 *rq_depth, u8 *rq_shift);
struct irdma_sq_uk_wr_trk_info {
u64 wrid;
u32 wr_len;
@ -407,7 +505,6 @@ struct irdma_qp_uk {
bool destroy_pending:1; /* Indicates the QP is being destroyed */
void *back_qp;
pthread_spinlock_t *lock;
bool force_fence;
u8 dbg_rq_flushed;
u16 ord_cnt;
u8 sq_flush_seen;
@ -444,8 +541,12 @@ struct irdma_qp_uk_init_info {
u32 max_sq_frag_cnt;
u32 max_rq_frag_cnt;
u32 max_inline_data;
u32 sq_depth;
u32 rq_depth;
u8 first_sq_wq;
u8 type;
u8 sq_shift;
u8 rq_shift;
u8 rd_fence_rate;
int abi_ver;
bool legacy_mode;
@ -462,7 +563,7 @@ struct irdma_cq_uk_init_info {
};
__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
u16 quanta, u32 total_size,
u16 *quanta, u32 total_size,
struct irdma_post_sq_info *info);
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx);
int irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq);
@ -471,9 +572,81 @@ int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta);
int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size);
void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
u32 inline_data, u8 *shift);
int irdma_get_sqdepth(u32 max_hw_wq_quanta, u32 sq_size, u8 shift, u32 *wqdepth);
int irdma_get_rqdepth(u32 max_hw_rq_quanta, u32 rq_size, u8 shift, u32 *wqdepth);
int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift, u32 *sqdepth);
int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift, u32 *rqdepth);
int irdma_get_srqdepth(struct irdma_uk_attrs *uk_attrs, u32 srq_size, u8 shift, u32 *srqdepth);
void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
u32 wqe_idx, bool post_sq);
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx);
static inline struct qp_err_code irdma_ae_to_qp_err_code(u16 ae_id)
{
struct qp_err_code qp_err = { 0 };
switch (ae_id) {
case IRDMA_AE_AMP_BOUNDS_VIOLATION:
case IRDMA_AE_AMP_INVALID_STAG:
case IRDMA_AE_AMP_RIGHTS_VIOLATION:
case IRDMA_AE_AMP_UNALLOCATED_STAG:
case IRDMA_AE_AMP_BAD_PD:
case IRDMA_AE_AMP_BAD_QP:
case IRDMA_AE_AMP_BAD_STAG_KEY:
case IRDMA_AE_AMP_BAD_STAG_INDEX:
case IRDMA_AE_AMP_TO_WRAP:
case IRDMA_AE_PRIV_OPERATION_DENIED:
qp_err.flush_code = FLUSH_PROT_ERR;
qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
break;
case IRDMA_AE_UDA_XMIT_BAD_PD:
case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
qp_err.flush_code = FLUSH_LOC_QP_OP_ERR;
qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
break;
case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
case IRDMA_AE_UDA_L4LEN_INVALID:
case IRDMA_AE_DDP_UBE_INVALID_MO:
case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
qp_err.flush_code = FLUSH_LOC_LEN_ERR;
qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
break;
case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
qp_err.flush_code = FLUSH_REM_ACCESS_ERR;
qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
break;
case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
case IRDMA_AE_AMP_MWBIND_VALID_STAG:
qp_err.flush_code = FLUSH_MW_BIND_ERR;
qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
break;
case IRDMA_AE_LLP_TOO_MANY_RETRIES:
qp_err.flush_code = FLUSH_RETRY_EXC_ERR;
qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
break;
case IRDMA_AE_IB_INVALID_REQUEST:
qp_err.flush_code = FLUSH_REM_INV_REQ_ERR;
qp_err.event_type = IRDMA_QP_EVENT_REQ_ERR;
break;
case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
case IRDMA_AE_IB_REMOTE_OP_ERROR:
qp_err.flush_code = FLUSH_REM_OP_ERR;
qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
break;
case IRDMA_AE_LCE_QP_CATASTROPHIC:
qp_err.flush_code = FLUSH_FATAL_ERR;
qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
break;
default:
qp_err.flush_code = FLUSH_GENERAL_ERR;
qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
break;
}
return qp_err;
}
#endif /* IRDMA_USER_H */


@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (C) 2019 - 2021 Intel Corporation
* Copyright (C) 2019 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -46,6 +46,7 @@
#include <sys/stat.h>
#include <fcntl.h>
#include <stdbool.h>
#include <infiniband/opcode.h>
#include "irdma_umain.h"
#include "abi.h"
@ -137,7 +138,7 @@ irdma_ualloc_pd(struct ibv_context *context)
struct irdma_upd *iwupd;
int err;
iwupd = malloc(sizeof(*iwupd));
iwupd = calloc(1, sizeof(*iwupd));
if (!iwupd)
return NULL;
@ -163,6 +164,7 @@ irdma_ualloc_pd(struct ibv_context *context)
int
irdma_ufree_pd(struct ibv_pd *pd)
{
struct irdma_uvcontext *iwvctx = container_of(pd->context, struct irdma_uvcontext, ibv_ctx);
struct irdma_upd *iwupd;
int ret;
@ -188,27 +190,44 @@ struct ibv_mr *
irdma_ureg_mr(struct ibv_pd *pd, void *addr, size_t length,
int access)
{
struct irdma_umr *umr;
struct irdma_ureg_mr cmd;
struct verbs_mr *vmr;
struct irdma_ureg_mr cmd = {};
struct ibv_reg_mr_resp resp;
int err;
umr = malloc(sizeof(*umr));
if (!umr)
vmr = malloc(sizeof(*vmr));
if (!vmr)
return NULL;
cmd.reg_type = IRDMA_MEMREG_TYPE_MEM;
err = ibv_cmd_reg_mr(pd, addr, length,
(uintptr_t)addr, access, &umr->vmr.ibv_mr, &cmd.ibv_cmd,
(uintptr_t)addr, access, &vmr->ibv_mr, &cmd.ibv_cmd,
sizeof(cmd), &resp, sizeof(resp));
if (err) {
free(umr);
free(vmr);
errno = err;
return NULL;
}
umr->acc_flags = access;
return &umr->vmr.ibv_mr;
return &vmr->ibv_mr;
}
/*
* irdma_urereg_mr - re-register memory region @vmr: mr that was allocated @flags: bit mask to indicate which of the
* attr's of MR modified @pd: pd of the mr @addr: user address of the memory region @length: length of the memory
* @access: access allowed on this mr
*/
int
irdma_urereg_mr(struct verbs_mr *vmr, int flags, struct ibv_pd *pd,
void *addr, size_t length, int access)
{
struct irdma_urereg_mr cmd = {};
struct ibv_rereg_mr_resp resp;
cmd.reg_type = IRDMA_MEMREG_TYPE_MEM;
return ibv_cmd_rereg_mr(&vmr->ibv_mr, flags, addr, length, (uintptr_t)addr,
access, pd, &cmd.ibv_cmd, sizeof(cmd), &resp,
sizeof(resp));
}
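
The new irdma_urereg_mr() hook above services the standard libibverbs
re-registration path. A minimal sketch of how an application might exercise it,
assuming the stock ibv_rereg_mr()/IBV_REREG_MR_* definitions from libibverbs
(the PD, MR, and buffer are placeholders):

#include <errno.h>
#include <stdlib.h>
#include <infiniband/verbs.h>

/*
 * Illustrative only: point an existing MR at a new buffer.  The old
 * buffer stays owned by the caller.
 */
static int remap_mr(struct ibv_pd *pd, struct ibv_mr *mr, size_t new_len)
{
	void *new_buf = malloc(new_len);

	if (!new_buf)
		return ENOMEM;

	/*
	 * Re-register with a new translation (addr/len).  pd and access are
	 * consulted only when IBV_REREG_MR_CHANGE_PD / _CHANGE_ACCESS are
	 * also set in the flags.
	 */
	return ibv_rereg_mr(mr, IBV_REREG_MR_CHANGE_TRANSLATION,
	    pd, new_buf, new_len, IBV_ACCESS_LOCAL_WRITE);
}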
/**
@ -218,19 +237,15 @@ irdma_ureg_mr(struct ibv_pd *pd, void *addr, size_t length,
int
irdma_udereg_mr(struct ibv_mr *mr)
{
struct irdma_umr *umr;
struct verbs_mr *vmr;
int ret;
vmr = container_of(mr, struct verbs_mr, ibv_mr);
umr = container_of(vmr, struct irdma_umr, vmr);
ret = ibv_cmd_dereg_mr(mr);
if (ret)
return ret;
free(umr);
return 0;
}
@ -245,6 +260,7 @@ irdma_ualloc_mw(struct ibv_pd *pd, enum ibv_mw_type type)
struct ibv_mw *mw;
struct ibv_alloc_mw cmd;
struct ibv_alloc_mw_resp resp;
int err;
mw = calloc(1, sizeof(*mw));
if (!mw)
@ -273,7 +289,6 @@ irdma_ubind_mw(struct ibv_qp *qp, struct ibv_mw *mw,
{
struct ibv_mw_bind_info *bind_info = &mw_bind->bind_info;
struct verbs_mr *vmr;
struct irdma_umr *umr;
struct ibv_send_wr wr = {};
struct ibv_send_wr *bad_wr;
@ -284,11 +299,10 @@ irdma_ubind_mw(struct ibv_qp *qp, struct ibv_mw *mw,
if (bind_info->mr) {
vmr = verbs_get_mr(bind_info->mr);
umr = container_of(vmr, struct irdma_umr, vmr);
if (vmr->mr_type != IBV_MR_TYPE_MR)
return ENOTSUP;
if (umr->acc_flags & IBV_ACCESS_ZERO_BASED)
if (vmr->access & IBV_ACCESS_ZERO_BASED)
return EINVAL;
if (mw->pd != bind_info->mr->pd)
@ -356,14 +370,15 @@ irdma_free_hw_buf(void *buf, size_t size)
* get_cq_size - returns actual cqe needed by HW
* @ncqe: minimum cqes requested by application
* @hw_rev: HW generation
* @cqe_64byte_ena: enable 64byte cqe
*/
static inline int
get_cq_size(int ncqe, u8 hw_rev)
get_cq_size(int ncqe, u8 hw_rev, bool cqe_64byte_ena)
{
ncqe++;
/* Completions with immediate require 1 extra entry */
if (hw_rev > IRDMA_GEN_1)
if (!cqe_64byte_ena && hw_rev > IRDMA_GEN_1)
ncqe *= 2;
if (ncqe < IRDMA_U_MINCQ_SIZE)
@ -372,8 +387,11 @@ get_cq_size(int ncqe, u8 hw_rev)
return ncqe;
}
static inline size_t get_cq_total_bytes(u32 cq_size) {
return roundup(cq_size * sizeof(struct irdma_cqe), IRDMA_HW_PAGE_SIZE);
static inline size_t get_cq_total_bytes(u32 cq_size, bool cqe_64byte_ena){
if (cqe_64byte_ena)
return roundup(cq_size * sizeof(struct irdma_extended_cqe), IRDMA_HW_PAGE_SIZE);
else
return roundup(cq_size * sizeof(struct irdma_cqe), IRDMA_HW_PAGE_SIZE);
}
/**
@ -401,17 +419,22 @@ ucreate_cq(struct ibv_context *context,
u32 cq_pages;
int ret, ncqe;
u8 hw_rev;
bool cqe_64byte_ena;
iwvctx = container_of(context, struct irdma_uvcontext, ibv_ctx);
uk_attrs = &iwvctx->uk_attrs;
hw_rev = uk_attrs->hw_rev;
if (ext_cq && hw_rev == IRDMA_GEN_1) {
errno = EOPNOTSUPP;
return NULL;
if (ext_cq) {
u32 supported_flags = IRDMA_STANDARD_WC_FLAGS_EX;
if (hw_rev == IRDMA_GEN_1 || attr_ex->wc_flags & ~supported_flags) {
errno = EOPNOTSUPP;
return NULL;
}
}
if (attr_ex->cqe < IRDMA_MIN_CQ_SIZE || attr_ex->cqe > uk_attrs->max_hw_cq_size) {
if (attr_ex->cqe < uk_attrs->min_hw_cq_size || attr_ex->cqe > uk_attrs->max_hw_cq_size - 1) {
errno = EINVAL;
return NULL;
}
@ -428,11 +451,12 @@ ucreate_cq(struct ibv_context *context,
return NULL;
}
info.cq_size = get_cq_size(attr_ex->cqe, hw_rev);
cqe_64byte_ena = uk_attrs->feature_flags & IRDMA_FEATURE_64_BYTE_CQE ? true : false;
info.cq_size = get_cq_size(attr_ex->cqe, hw_rev, cqe_64byte_ena);
iwucq->comp_vector = attr_ex->comp_vector;
LIST_INIT(&iwucq->resize_list);
LIST_INIT(&iwucq->cmpl_generated);
total_size = get_cq_total_bytes(info.cq_size);
total_size = get_cq_total_bytes(info.cq_size, cqe_64byte_ena);
cq_pages = total_size >> IRDMA_HW_PAGE_SHIFT;
if (!(uk_attrs->feature_flags & IRDMA_FEATURE_CQ_RESIZE))
@ -462,7 +486,7 @@ ucreate_cq(struct ibv_context *context,
if (uk_attrs->feature_flags & IRDMA_FEATURE_CQ_RESIZE) {
info.shadow_area = irdma_alloc_hw_buf(IRDMA_DB_SHADOW_AREA_SIZE);
if (!info.shadow_area)
goto err_dereg_mr;
goto err_alloc_shadow;
memset(info.shadow_area, 0, IRDMA_DB_SHADOW_AREA_SIZE);
reg_mr_shadow_cmd.reg_type = IRDMA_MEMREG_TYPE_CQ;
@ -474,8 +498,9 @@ ucreate_cq(struct ibv_context *context,
&reg_mr_shadow_cmd.ibv_cmd, sizeof(reg_mr_shadow_cmd),
&reg_mr_shadow_resp, sizeof(reg_mr_shadow_resp));
if (ret) {
irdma_free_hw_buf(info.shadow_area, IRDMA_DB_SHADOW_AREA_SIZE);
errno = ret;
goto err_dereg_shadow;
goto err_alloc_shadow;
}
iwucq->vmr_shadow_area.ibv_mr.pd = &iwvctx->iwupd->ibv_pd;
@ -491,28 +516,30 @@ ucreate_cq(struct ibv_context *context,
ret = ibv_cmd_create_cq_ex(context, attr_ex, &iwucq->verbs_cq.cq_ex,
&cmd.ibv_cmd, sizeof(cmd.ibv_cmd), sizeof(cmd), &resp.ibv_resp,
sizeof(resp.ibv_resp), sizeof(resp));
attr_ex->cqe = ncqe;
if (ret) {
errno = ret;
goto err_dereg_shadow;
goto err_create_cq;
}
if (ext_cq)
irdma_ibvcq_ex_fill_priv_funcs(iwucq, attr_ex);
info.cq_id = resp.cq_id;
/* Do not report the cqe's burned by HW */
/* Do not report the CQE's reserved for immediate and burned by HW */
iwucq->verbs_cq.cq.cqe = ncqe;
if (cqe_64byte_ena)
info.avoid_mem_cflct = true;
info.cqe_alloc_db = (u32 *)((u8 *)iwvctx->db + IRDMA_DB_CQ_OFFSET);
irdma_uk_cq_init(&iwucq->cq, &info);
return &iwucq->verbs_cq.cq_ex;
err_dereg_shadow:
ibv_cmd_dereg_mr(&iwucq->vmr.ibv_mr);
err_create_cq:
if (iwucq->vmr_shadow_area.ibv_mr.handle) {
ibv_cmd_dereg_mr(&iwucq->vmr_shadow_area.ibv_mr);
irdma_free_hw_buf(info.shadow_area, IRDMA_HW_PAGE_SIZE);
irdma_free_hw_buf(info.shadow_area, IRDMA_DB_SHADOW_AREA_SIZE);
}
err_alloc_shadow:
ibv_cmd_dereg_mr(&iwucq->vmr.ibv_mr);
err_dereg_mr:
irdma_free_hw_buf(info.cq_base, total_size);
err_cq_base:
@ -545,11 +572,6 @@ struct ibv_cq_ex *
irdma_ucreate_cq_ex(struct ibv_context *context,
struct ibv_cq_init_attr_ex *attr_ex)
{
if (attr_ex->wc_flags & ~IRDMA_CQ_SUPPORTED_WC_FLAGS) {
errno = EOPNOTSUPP;
return NULL;
}
return ucreate_cq(context, attr_ex, true);
}
@ -561,7 +583,7 @@ static void
irdma_free_cq_buf(struct irdma_cq_buf *cq_buf)
{
ibv_cmd_dereg_mr(&cq_buf->vmr.ibv_mr);
irdma_free_hw_buf(cq_buf->cq.cq_base, get_cq_total_bytes(cq_buf->cq.cq_size));
irdma_free_hw_buf(cq_buf->cq.cq_base, cq_buf->buf_size);
free(cq_buf);
}
@ -645,7 +667,7 @@ irdma_cq_empty(struct irdma_ucq *iwucq)
ukcq = &iwucq->cq;
cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
get_64bit_val(cqe, 24, &qword3);
polarity = (__u8) RS_64(qword3, IRDMA_CQ_VALID);
polarity = (__u8) FIELD_GET(IRDMA_CQ_VALID, qword3);
return polarity != ukcq->polarity;
}
@ -680,7 +702,7 @@ irdma_generate_flush_completions(struct irdma_uqp *iwuqp)
cmpl->cpi.wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
sw_wqe = qp->sq_base[wqe_idx].elem;
get_64bit_val(sw_wqe, 24, &wqe_qword);
cmpl->cpi.op_type = (__u8) RS_64(wqe_qword, IRDMAQPSQ_OPCODE);
cmpl->cpi.op_type = (__u8) FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword);
/* remove the SQ WR by moving SQ tail */
IRDMA_RING_SET_TAIL(*sq_ring, sq_ring->tail + qp->sq_wrtrk_array[sq_ring->tail].quanta);
LIST_INSERT_HEAD(&iwuqp->send_cq->cmpl_generated, cmpl, list);
@ -794,6 +816,55 @@ irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode opcode)
}
}
static inline void
set_ib_wc_op_sq(struct irdma_cq_poll_info *cur_cqe, struct ibv_wc *entry)
{
switch (cur_cqe->op_type) {
case IRDMA_OP_TYPE_RDMA_WRITE:
case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
entry->opcode = IBV_WC_RDMA_WRITE;
break;
case IRDMA_OP_TYPE_RDMA_READ:
entry->opcode = IBV_WC_RDMA_READ;
break;
case IRDMA_OP_TYPE_SEND_SOL:
case IRDMA_OP_TYPE_SEND_SOL_INV:
case IRDMA_OP_TYPE_SEND_INV:
case IRDMA_OP_TYPE_SEND:
entry->opcode = IBV_WC_SEND;
break;
case IRDMA_OP_TYPE_BIND_MW:
entry->opcode = IBV_WC_BIND_MW;
break;
case IRDMA_OP_TYPE_INV_STAG:
entry->opcode = IBV_WC_LOCAL_INV;
break;
default:
entry->status = IBV_WC_GENERAL_ERR;
printf("%s: Invalid opcode = %d in CQE\n",
__func__, cur_cqe->op_type);
}
}
static inline void
set_ib_wc_op_rq(struct irdma_cq_poll_info *cur_cqe,
struct ibv_wc *entry, bool send_imm_support)
{
if (!send_imm_support) {
entry->opcode = cur_cqe->imm_valid ? IBV_WC_RECV_RDMA_WITH_IMM :
IBV_WC_RECV;
return;
}
switch (cur_cqe->op_type) {
case IBV_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
case IBV_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
entry->opcode = IBV_WC_RECV_RDMA_WITH_IMM;
break;
default:
entry->opcode = IBV_WC_RECV;
}
}
/**
* irdma_process_cqe_ext - process current cqe for extended CQ
* @cur_cqe - current cqe info
@ -830,9 +901,8 @@ irdma_process_cqe(struct ibv_wc *entry, struct irdma_cq_poll_info *cur_cqe)
ib_qp = qp->back_qp;
if (cur_cqe->error) {
if (cur_cqe->comp_status == IRDMA_COMPL_STATUS_FLUSHED)
entry->status = (cur_cqe->comp_status == IRDMA_COMPL_STATUS_FLUSHED) ?
irdma_flush_err_to_ib_wc_status(cur_cqe->minor_err) : IBV_WC_GENERAL_ERR;
entry->status = (cur_cqe->comp_status == IRDMA_COMPL_STATUS_FLUSHED) ?
irdma_flush_err_to_ib_wc_status(cur_cqe->minor_err) : IBV_WC_GENERAL_ERR;
entry->vendor_err = cur_cqe->major_err << 16 |
cur_cqe->minor_err;
} else {
@ -844,47 +914,17 @@ irdma_process_cqe(struct ibv_wc *entry, struct irdma_cq_poll_info *cur_cqe)
entry->wc_flags |= IBV_WC_WITH_IMM;
}
switch (cur_cqe->op_type) {
case IRDMA_OP_TYPE_RDMA_WRITE:
case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
entry->opcode = IBV_WC_RDMA_WRITE;
break;
case IRDMA_OP_TYPE_RDMA_READ:
entry->opcode = IBV_WC_RDMA_READ;
break;
case IRDMA_OP_TYPE_SEND_SOL:
case IRDMA_OP_TYPE_SEND_SOL_INV:
case IRDMA_OP_TYPE_SEND_INV:
case IRDMA_OP_TYPE_SEND:
entry->opcode = IBV_WC_SEND;
break;
case IRDMA_OP_TYPE_BIND_MW:
entry->opcode = IBV_WC_BIND_MW;
break;
case IRDMA_OP_TYPE_REC:
entry->opcode = IBV_WC_RECV;
if (cur_cqe->q_type == IRDMA_CQE_QTYPE_SQ) {
set_ib_wc_op_sq(cur_cqe, entry);
} else {
set_ib_wc_op_rq(cur_cqe, entry,
qp->qp_caps & IRDMA_SEND_WITH_IMM ?
true : false);
if (ib_qp->qp_type != IBV_QPT_UD &&
cur_cqe->stag_invalid_set) {
entry->invalidated_rkey = cur_cqe->inv_stag;
entry->wc_flags |= IBV_WC_WITH_INV;
}
break;
case IRDMA_OP_TYPE_REC_IMM:
entry->opcode = IBV_WC_RECV_RDMA_WITH_IMM;
if (ib_qp->qp_type != IBV_QPT_UD &&
cur_cqe->stag_invalid_set) {
entry->invalidated_rkey = cur_cqe->inv_stag;
entry->wc_flags |= IBV_WC_WITH_INV;
}
break;
case IRDMA_OP_TYPE_INV_STAG:
entry->opcode = IBV_WC_LOCAL_INV;
break;
default:
entry->status = IBV_WC_GENERAL_ERR;
printf("%s: Invalid opcode = %d in CQE\n",
__func__, cur_cqe->op_type);
return;
}
if (ib_qp->qp_type == IBV_QPT_UD) {
@ -1111,20 +1151,6 @@ irdma_end_poll(struct ibv_cq_ex *ibvcq_ex)
pthread_spin_unlock(&iwucq->lock);
}
/**
* irdma_wc_read_completion_ts - Get completion timestamp
* @ibvcq_ex: ibv extended CQ
*
* Get completion timestamp in HCA clock units
*/
static uint64_t irdma_wc_read_completion_ts(struct ibv_cq_ex *ibvcq_ex){
struct irdma_ucq *iwucq = container_of(ibvcq_ex, struct irdma_ucq,
verbs_cq.cq_ex);
#define HCA_CORE_CLOCK_800_MHZ 800
return iwucq->cur_cqe.tcp_seq_num_rtt / HCA_CORE_CLOCK_800_MHZ;
}
static enum ibv_wc_opcode
irdma_wc_read_opcode(struct ibv_cq_ex *ibvcq_ex)
{
@ -1255,11 +1281,6 @@ irdma_ibvcq_ex_fill_priv_funcs(struct irdma_ucq *iwucq,
ibvcq_ex->end_poll = irdma_end_poll;
ibvcq_ex->next_poll = irdma_next_poll;
if (attr_ex->wc_flags & IBV_WC_EX_WITH_COMPLETION_TIMESTAMP) {
ibvcq_ex->read_completion_ts = irdma_wc_read_completion_ts;
iwucq->report_rtt = true;
}
ibvcq_ex->read_opcode = irdma_wc_read_opcode;
ibvcq_ex->read_vendor_err = irdma_wc_read_vendor_err;
ibvcq_ex->read_wc_flags = irdma_wc_read_wc_flags;
@ -1403,15 +1424,13 @@ irdma_destroy_vmapped_qp(struct irdma_uqp *iwuqp)
* @pd: pd for the qp
* @attr: attributes of qp passed
* @resp: response back from create qp
* @sqdepth: depth of sq
* @rqdepth: depth of rq
* @info: info for initializing user level qp
* @info: uk info for initializing user level qp
* @abi_ver: abi version of the create qp command
*/
static int
irdma_vmapped_qp(struct irdma_uqp *iwuqp, struct ibv_pd *pd,
struct ibv_qp_init_attr *attr, int sqdepth,
int rqdepth, struct irdma_qp_uk_init_info *info,
struct ibv_qp_init_attr *attr,
struct irdma_qp_uk_init_info *info,
bool legacy_mode)
{
struct irdma_ucreate_qp cmd = {};
@ -1421,8 +1440,8 @@ irdma_vmapped_qp(struct irdma_uqp *iwuqp, struct ibv_pd *pd,
struct ibv_reg_mr_resp reg_mr_resp = {};
int ret;
sqsize = roundup(sqdepth * IRDMA_QP_WQE_MIN_SIZE, IRDMA_HW_PAGE_SIZE);
rqsize = roundup(rqdepth * IRDMA_QP_WQE_MIN_SIZE, IRDMA_HW_PAGE_SIZE);
sqsize = roundup(info->sq_depth * IRDMA_QP_WQE_MIN_SIZE, IRDMA_HW_PAGE_SIZE);
rqsize = roundup(info->rq_depth * IRDMA_QP_WQE_MIN_SIZE, IRDMA_HW_PAGE_SIZE);
totalqpsize = rqsize + sqsize + IRDMA_DB_SHADOW_AREA_SIZE;
info->sq = irdma_alloc_hw_buf(totalqpsize);
iwuqp->buf_size = totalqpsize;
@ -1491,8 +1510,6 @@ irdma_ucreate_qp(struct ibv_pd *pd,
struct irdma_uk_attrs *uk_attrs;
struct irdma_uvcontext *iwvctx;
struct irdma_uqp *iwuqp;
u32 sqdepth, rqdepth;
u8 sqshift, rqshift;
int status;
if (attr->qp_type != IBV_QPT_RC && attr->qp_type != IBV_QPT_UD) {
@ -1512,12 +1529,15 @@ irdma_ucreate_qp(struct ibv_pd *pd,
return NULL;
}
irdma_get_wqe_shift(uk_attrs,
uk_attrs->hw_rev > IRDMA_GEN_1 ? attr->cap.max_send_sge + 1 :
attr->cap.max_send_sge,
attr->cap.max_inline_data, &sqshift);
status = irdma_get_sqdepth(uk_attrs->max_hw_wq_quanta,
attr->cap.max_send_wr, sqshift, &sqdepth);
info.uk_attrs = uk_attrs;
info.sq_size = attr->cap.max_send_wr;
info.rq_size = attr->cap.max_recv_wr;
info.max_sq_frag_cnt = attr->cap.max_send_sge;
info.max_rq_frag_cnt = attr->cap.max_recv_sge;
info.max_inline_data = attr->cap.max_inline_data;
info.abi_ver = iwvctx->abi_ver;
status = irdma_uk_calc_depth_shift_sq(&info, &info.sq_depth, &info.sq_shift);
if (status) {
printf("%s: invalid SQ attributes, max_send_wr=%d max_send_sge=%d max_inline=%d\n",
__func__, attr->cap.max_send_wr, attr->cap.max_send_sge,
@ -1526,14 +1546,7 @@ irdma_ucreate_qp(struct ibv_pd *pd,
return NULL;
}
if (uk_attrs->hw_rev == IRDMA_GEN_1 && iwvctx->abi_ver > 4)
rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
else
irdma_get_wqe_shift(uk_attrs, attr->cap.max_recv_sge, 0,
&rqshift);
status = irdma_get_rqdepth(uk_attrs->max_hw_rq_quanta,
attr->cap.max_recv_wr, rqshift, &rqdepth);
status = irdma_uk_calc_depth_shift_rq(&info, &info.rq_depth, &info.rq_shift);
if (status) {
printf("%s: invalid RQ attributes, recv_wr=%d recv_sge=%d\n",
__func__, attr->cap.max_recv_wr, attr->cap.max_recv_sge);
@ -1550,31 +1563,35 @@ irdma_ucreate_qp(struct ibv_pd *pd,
if (pthread_spin_init(&iwuqp->lock, PTHREAD_PROCESS_PRIVATE))
goto err_free_qp;
info.sq_size = sqdepth >> sqshift;
info.rq_size = rqdepth >> rqshift;
attr->cap.max_send_wr = info.sq_size;
attr->cap.max_recv_wr = info.rq_size;
info.sq_size = info.sq_depth >> info.sq_shift;
info.rq_size = info.rq_depth >> info.rq_shift;
/**
* For older ABI version (less than 6) passes raw sq and rq
* quanta in cap.max_send_wr and cap.max_recv_wr.
* But then kernel had no way of calculating the actual qp size.
*/
if (iwvctx->abi_ver <= 5) {
attr->cap.max_send_wr = info.sq_size;
attr->cap.max_recv_wr = info.rq_size;
}
info.uk_attrs = uk_attrs;
info.max_sq_frag_cnt = attr->cap.max_send_sge;
info.max_rq_frag_cnt = attr->cap.max_recv_sge;
iwuqp->recv_sges = calloc(attr->cap.max_recv_sge, sizeof(*iwuqp->recv_sges));
if (!iwuqp->recv_sges)
goto err_destroy_lock;
info.wqe_alloc_db = (u32 *)iwvctx->db;
info.legacy_mode = iwvctx->legacy_mode;
info.sq_wrtrk_array = calloc(sqdepth, sizeof(*info.sq_wrtrk_array));
info.sq_wrtrk_array = calloc(info.sq_depth, sizeof(*info.sq_wrtrk_array));
if (!info.sq_wrtrk_array)
goto err_free_rsges;
info.rq_wrid_array = calloc(rqdepth, sizeof(*info.rq_wrid_array));
info.rq_wrid_array = calloc(info.rq_depth, sizeof(*info.rq_wrid_array));
if (!info.rq_wrid_array)
goto err_free_sq_wrtrk;
iwuqp->sq_sig_all = attr->sq_sig_all;
iwuqp->qp_type = attr->qp_type;
status = irdma_vmapped_qp(iwuqp, pd, attr, sqdepth, rqdepth, &info, iwvctx->legacy_mode);
status = irdma_vmapped_qp(iwuqp, pd, attr, &info, iwvctx->legacy_mode);
if (status) {
errno = status;
goto err_free_rq_wrid;
@ -1583,18 +1600,15 @@ irdma_ucreate_qp(struct ibv_pd *pd,
iwuqp->qp.back_qp = iwuqp;
iwuqp->qp.lock = &iwuqp->lock;
info.max_sq_frag_cnt = attr->cap.max_send_sge;
info.max_rq_frag_cnt = attr->cap.max_recv_sge;
info.max_inline_data = attr->cap.max_inline_data;
iwuqp->qp.force_fence = true;
status = irdma_uk_qp_init(&iwuqp->qp, &info);
if (status) {
errno = status;
goto err_free_vmap_qp;
}
attr->cap.max_send_wr = (sqdepth - IRDMA_SQ_RSVD) >> sqshift;
attr->cap.max_recv_wr = (rqdepth - IRDMA_RQ_RSVD) >> rqshift;
attr->cap.max_send_wr = (info.sq_depth - IRDMA_SQ_RSVD) >> info.sq_shift;
attr->cap.max_recv_wr = (info.rq_depth - IRDMA_RQ_RSVD) >> info.rq_shift;
return &iwuqp->ibv_qp;
err_free_vmap_qp:
@ -1649,8 +1663,6 @@ irdma_umodify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, int attr_mask)
iwuqp = container_of(qp, struct irdma_uqp, ibv_qp);
iwvctx = container_of(qp->context, struct irdma_uvcontext, ibv_ctx);
iwuqp->attr_mask = attr_mask;
memcpy(&iwuqp->attr, attr, sizeof(iwuqp->attr));
if (iwuqp->qp.qp_caps & IRDMA_PUSH_MODE && attr_mask & IBV_QP_STATE &&
iwvctx->uk_attrs.hw_rev > IRDMA_GEN_1) {
@ -1707,13 +1719,13 @@ irdma_issue_flush(struct ibv_qp *qp, bool sq_flush, bool rq_flush)
{
struct irdma_umodify_qp_resp resp = {};
struct irdma_modify_qp_cmd cmd_ex = {};
struct irdma_uqp *iwuqp;
struct ibv_qp_attr attr = {};
attr.qp_state = IBV_QPS_ERR;
cmd_ex.sq_flush = sq_flush;
cmd_ex.rq_flush = rq_flush;
iwuqp = container_of(qp, struct irdma_uqp, ibv_qp);
ibv_cmd_modify_qp_ex(qp, &iwuqp->attr, iwuqp->attr_mask,
ibv_cmd_modify_qp_ex(qp, &attr, IBV_QP_STATE,
&cmd_ex.ibv_cmd,
sizeof(cmd_ex.ibv_cmd),
sizeof(cmd_ex), &resp.ibv_resp,
@ -1857,8 +1869,6 @@ irdma_upost_send(struct ibv_qp *ib_qp, struct ibv_send_wr *ib_wr,
info.signaled = true;
if (ib_wr->send_flags & IBV_SEND_FENCE)
info.read_fence = true;
if (iwuqp->send_cq->report_rtt)
info.report_rtt = true;
switch (ib_wr->opcode) {
case IBV_WR_SEND_WITH_IMM:
@ -1885,31 +1895,21 @@ irdma_upost_send(struct ibv_qp *ib_qp, struct ibv_send_wr *ib_wr,
info.op_type = IRDMA_OP_TYPE_SEND_INV;
info.stag_to_inv = ib_wr->imm_data;
}
if (ib_wr->send_flags & IBV_SEND_INLINE) {
info.op.inline_send.data = (void *)(uintptr_t)ib_wr->sg_list[0].addr;
info.op.inline_send.len = ib_wr->sg_list[0].length;
if (ib_qp->qp_type == IBV_QPT_UD) {
struct irdma_uah *ah = container_of(ib_wr->wr.ud.ah,
struct irdma_uah, ibv_ah);
info.op.send.num_sges = ib_wr->num_sge;
info.op.send.sg_list = (struct irdma_sge *)ib_wr->sg_list;
if (ib_qp->qp_type == IBV_QPT_UD) {
struct irdma_uah *ah = container_of(ib_wr->wr.ud.ah,
struct irdma_uah, ibv_ah);
info.op.inline_send.ah_id = ah->ah_id;
info.op.inline_send.qkey = ib_wr->wr.ud.remote_qkey;
info.op.inline_send.dest_qp = ib_wr->wr.ud.remote_qpn;
}
err = irdma_uk_inline_send(&iwuqp->qp, &info, false);
} else {
info.op.send.num_sges = ib_wr->num_sge;
info.op.send.sg_list = (struct irdma_sge *)ib_wr->sg_list;
if (ib_qp->qp_type == IBV_QPT_UD) {
struct irdma_uah *ah = container_of(ib_wr->wr.ud.ah,
struct irdma_uah, ibv_ah);
info.op.inline_send.ah_id = ah->ah_id;
info.op.inline_send.qkey = ib_wr->wr.ud.remote_qkey;
info.op.inline_send.dest_qp = ib_wr->wr.ud.remote_qpn;
}
err = irdma_uk_send(&iwuqp->qp, &info, false);
info.op.send.ah_id = ah->ah_id;
info.op.send.qkey = ib_wr->wr.ud.remote_qkey;
info.op.send.dest_qp = ib_wr->wr.ud.remote_qpn;
}
if (ib_wr->send_flags & IBV_SEND_INLINE)
err = irdma_uk_inline_send(&iwuqp->qp, &info, false);
else
err = irdma_uk_send(&iwuqp->qp, &info, false);
break;
case IBV_WR_RDMA_WRITE_WITH_IMM:
if (iwuqp->qp.qp_caps & IRDMA_WRITE_WITH_IMM) {
@ -1926,19 +1926,14 @@ irdma_upost_send(struct ibv_qp *ib_qp, struct ibv_send_wr *ib_wr,
else
info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
if (ib_wr->send_flags & IBV_SEND_INLINE) {
info.op.inline_rdma_write.data = (void *)(uintptr_t)ib_wr->sg_list[0].addr;
info.op.inline_rdma_write.len = ib_wr->sg_list[0].length;
info.op.inline_rdma_write.rem_addr.tag_off = ib_wr->wr.rdma.remote_addr;
info.op.inline_rdma_write.rem_addr.stag = ib_wr->wr.rdma.rkey;
info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
info.op.rdma_write.rem_addr.tag_off = ib_wr->wr.rdma.remote_addr;
info.op.rdma_write.rem_addr.stag = ib_wr->wr.rdma.rkey;
if (ib_wr->send_flags & IBV_SEND_INLINE)
err = irdma_uk_inline_rdma_write(&iwuqp->qp, &info, false);
} else {
info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
info.op.rdma_write.rem_addr.tag_off = ib_wr->wr.rdma.remote_addr;
info.op.rdma_write.rem_addr.stag = ib_wr->wr.rdma.rkey;
else
err = irdma_uk_rdma_write(&iwuqp->qp, &info, false);
}
break;
case IBV_WR_RDMA_READ:
if (ib_wr->num_sge > uk_attrs->max_hw_read_sges) {
@ -1965,9 +1960,8 @@ irdma_upost_send(struct ibv_qp *ib_qp, struct ibv_send_wr *ib_wr,
info.op.bind_window.mw_stag = ib_wr->bind_mw.rkey;
} else {
struct verbs_mr *vmr = verbs_get_mr(ib_wr->bind_mw.bind_info.mr);
struct irdma_umr *umr = container_of(vmr, struct irdma_umr, vmr);
if (umr->acc_flags & IBV_ACCESS_ZERO_BASED) {
if (vmr->access & IBV_ACCESS_ZERO_BASED) {
err = EINVAL;
break;
}
@ -2085,7 +2079,7 @@ irdma_ucreate_ah(struct ibv_pd *ibpd, struct ibv_ah_attr *attr)
{
struct irdma_uah *ah;
union ibv_gid sgid;
struct irdma_ucreate_ah_resp resp;
struct irdma_ucreate_ah_resp resp = {};
int err;
err = ibv_query_gid(ibpd->context, attr->port_num, attr->grh.sgid_index,
@ -2182,6 +2176,7 @@ irdma_uresize_cq(struct ibv_cq *cq, int cqe)
u32 cq_pages;
int cqe_needed;
int ret = 0;
bool cqe_64byte_ena;
iwucq = container_of(cq, struct irdma_ucq, verbs_cq.cq);
iwvctx = container_of(cq->context, struct irdma_uvcontext, ibv_ctx);
@ -2190,20 +2185,17 @@ irdma_uresize_cq(struct ibv_cq *cq, int cqe)
if (!(uk_attrs->feature_flags & IRDMA_FEATURE_CQ_RESIZE))
return EOPNOTSUPP;
if (cqe > IRDMA_MAX_CQ_SIZE)
if (cqe < uk_attrs->min_hw_cq_size || cqe > uk_attrs->max_hw_cq_size - 1)
return EINVAL;
cqe_needed = cqe + 1;
if (uk_attrs->hw_rev > IRDMA_GEN_1)
cqe_needed *= 2;
cqe_64byte_ena = uk_attrs->feature_flags & IRDMA_FEATURE_64_BYTE_CQE ? true : false;
if (cqe_needed < IRDMA_U_MINCQ_SIZE)
cqe_needed = IRDMA_U_MINCQ_SIZE;
cqe_needed = get_cq_size(cqe, uk_attrs->hw_rev, cqe_64byte_ena);
if (cqe_needed == iwucq->cq.cq_size)
return 0;
cq_size = get_cq_total_bytes(cqe_needed);
cq_size = get_cq_total_bytes(cqe_needed, cqe_64byte_ena);
cq_pages = cq_size >> IRDMA_HW_PAGE_SHIFT;
cq_base = irdma_alloc_hw_buf(cq_size);
if (!cq_base)
@ -2239,6 +2231,7 @@ irdma_uresize_cq(struct ibv_cq *cq, int cqe)
goto err_resize;
memcpy(&cq_buf->cq, &iwucq->cq, sizeof(cq_buf->cq));
cq_buf->buf_size = cq_size;
cq_buf->vmr = iwucq->vmr;
iwucq->vmr = new_mr;
irdma_uk_cq_resize(&iwucq->cq, cq_base, cqe_needed);
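For context, the open-coded sizing that the removed lines above performed (add one CQE, double on newer hardware, clamp to IRDMA_U_MINCQ_SIZE) is now concentrated in the get_cq_size()/get_cq_total_bytes() helpers. A rough sketch of those helpers, reconstructed from the removed lines; the 64-byte-CQE handling, the struct name and the exact rounding are assumptions, not the verbatim library code:

/*
 * Reconstruction for illustration only -- not the verbatim libirdma code.
 */
static int get_cq_size(int ncqe, u8 hw_rev, bool cqe_64byte_ena)
{
	int cqe_needed = ncqe + 1;		/* one extra entry for HW use */

	if (hw_rev > IRDMA_GEN_1 && !cqe_64byte_ena)
		cqe_needed *= 2;		/* GEN_2 shadow entries */

	if (cqe_needed < IRDMA_U_MINCQ_SIZE)
		cqe_needed = IRDMA_U_MINCQ_SIZE;

	return cqe_needed;
}

static size_t get_cq_total_bytes(u32 cq_size, bool cqe_64byte_ena)
{
	size_t cqe_size = cqe_64byte_ena ? 64 : sizeof(struct irdma_cqe);

	/* round the ring up to a full hardware page */
	return roundup(cq_size * cqe_size, 1UL << IRDMA_HW_PAGE_SHIFT);
}

The resize path above then only compares the helper's result with the current ring size and allocates a new buffer when they differ.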

View File

@ -1,10 +1,8 @@
/* Export symbols should be added below according to
Documentation/versioning.md document. */
IRDMA_1.0 {
global: *;
local: *;
global:
libirdma_query_device;
local: *;
};
IRDMA_1.1 {
global: *;
} IRDMA_1.0;

View File

@ -91,6 +91,13 @@
#define SPEED_100000 100000
#define BIT_ULL(a) (1ULL << (a))
#define min(a, b) ((a) > (b) ? (b) : (a))
#ifndef likely
#define likely(x) __builtin_expect((x), 1)
#endif
#ifndef unlikely
#define unlikely(x) __builtin_expect((x), 0)
#endif
#define __aligned_u64 uint64_t __aligned(8)
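The likely()/unlikely() fallbacks added here are the usual wrappers around GCC's __builtin_expect(); a minimal usage sketch (the function and variable names are illustrative only, not driver code):

/* Illustrative only: steer the compiler's static branch prediction. */
static inline int irdma_ring_post(int free_slots, int err)
{
	if (unlikely(err))		/* cold error path */
		return err;
	if (likely(free_slots > 0))	/* common case: room in the ring */
		return 0;
	return ENOSPC;
}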
@ -131,7 +138,7 @@ do { \
#define irdma_dev_warn(a, b, ...) printf(b, ##__VA_ARGS__) /*dev_warn(a, b)*/
#define irdma_dev_info(a, b, ...) printf(b, ##__VA_ARGS__)
#define irdma_pr_warn printf
#define ibdev_err(ibdev, fmt, ...) dev_err(&((ibdev)->dev), fmt, ##__VA_ARGS__)
#define ibdev_err(ibdev, fmt, ...) printf("%s:"fmt, (ibdev)->name, ##__VA_ARGS__)
#define dump_struct(s, sz, name) \
do { \
@ -166,8 +173,13 @@ struct irdma_sc_vsi;
#define irdma_usec_delay(x) DELAY(x)
#define mdelay(x) DELAY((x) * 1000)
#define rt_tos2priority(tos) (((tos >> 1) & 0x8 >> 1) | ((tos >> 2) ^ ((tos >> 3) << 1)))
#define rt_tos2priority(tos) (tos >> 5)
#define ah_attr_to_dmac(attr) ((attr).dmac)
#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \
ib_modify_qp_is_ok(cur_state, next_state, type, mask)
#define kc_typeq_ib_wr const
#define kc_ifp_find ip_ifp_find
#define kc_ifp6_find ip6_ifp_find
#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \
ib_gid_to_network_type(gid_type, gid)
#define irdma_del_timer_compat(tt) del_timer((tt))
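The corrected rt_tos2priority() above reduces to taking the top three bits of the IPv4 TOS byte (the legacy IP precedence field) as the 802.1p priority, instead of the previous scrambled bit mixing. A small worked example (values chosen only for illustration):

/* Illustrative check: TOS 0xb8 is DSCP 46 (EF); 0xb8 >> 5 = 5, so such
 * traffic would map to 802.1p priority 5. */
static inline unsigned int tos_to_priority(unsigned int tos)
{
	return tos >> 5;	/* keep only the upper three bits */
}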
@ -207,6 +219,7 @@ enum ibv_mr_type {
struct verbs_mr {
struct ibv_mr ibv_mr;
enum ibv_mr_type mr_type;
int access;
};
#define verbs_get_mr(mr) container_of((mr), struct verbs_mr, ibv_mr)
#endif

View File

@ -39,8 +39,8 @@
.Nd RDMA FreeBSD driver for Intel(R) Ethernet Controller E810
.Sh SYNOPSIS
This module relies on
.Xr if_ice 4
.Bl -tag -nested-width indent
.Xr ice 4
.Bl -tag -width indent
.It The following kernel options should be included in the configuration:
.Cd options OFED
.Cd options OFED_DEBUG_INIT
@ -52,8 +52,9 @@ This module relies on
.Ss Features
The
.Nm
driver provides RDMA protocol support on RDMA-capable Intel Ethernet 800 Series NICs which are supported by
.Xr if_ice 4
driver provides RDMA protocol support on RDMA-capable Intel Ethernet 800 Series
NICs which are supported by
.Xr ice 4
.
.Pp
The driver supports both iWARP and RoCEv2 protocols.
@ -66,48 +67,65 @@ prompt before booting the kernel or stored in
.Bl -tag -width indent
.It Va dev.irdma<interface_number>.roce_enable
enables RoCEv2 protocol usage on the <interface_number> interface.
.Pp By default RoCEv2 protocol is used.
.It Va dev.irdma<interface_number>.dcqcn_cc_cfg_valid
indicates that all DCQCN parameters are valid and should be updated in registers or QP context.
.Pp
Setting this parameter to 1 means that settings in
.Em dcqcn_min_dec_factor, dcqcn_min_rate_MBps, dcqcn_F, dcqcn_T,
.Em dcqcn_B, dcqcn_rai_factor, dcqcn_hai_factor, dcqcn_rreduce_mperiod
are taken into account. Otherwise default values are used.
By default RoCEv2 protocol is used.
.It Va dev.irdma<interface_number>.dcqcn_cc_cfg_valid
indicates that all DCQCN parameters are valid and should be updated in
registers or QP context.
.Pp
Setting this parameter to 1 means that settings in
.Em dcqcn_min_dec_factor , dcqcn_min_rate_MBps , dcqcn_F , dcqcn_T ,
.Em dcqcn_B , dcqcn_rai_factor , dcqcn_hai_factor , dcqcn_rreduce_mperiod
are taken into account.
Otherwise default values are used.
.Pp
Note: "roce_enable" must also be set for this tunable to take effect.
.It Va dev.irdma<interface_number>.dcqcn_min_dec_factor
The minimum factor by which the current transmit rate can be changed when processing a CNP. Value is given as a percentage (1-100).
The minimum factor by which the current transmit rate can be changed when
processing a CNP.
Value is given as a percentage (1-100).
.Pp
Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable to take effect.
Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable
to take effect.
.It Va dev.irdma<interface_number>.dcqcn_min_rate_MBps
The minimum rate, in Mbits per second, to which the transmit rate can be limited.
.Pp
Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable to take effect.
Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable
to take effect.
.It Va dev.irdma<interface_number>.dcqcn_F
The number of times to stay in each stage of bandwidth recovery.
.Pp
Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable to take effect.
Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable
to take effect.
.It Va dev.irdma<interface_number>.dcqcn_T
The number of microseconds that should elapse before increasing the CWND in DCQCN mode.
The number of microseconds that should elapse before increasing the CWND
in DCQCN mode.
.Pp
Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable to take effect.
Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable
to take effect.
.It Va dev.irdma<interface_number>.dcqcn_B
The number of bytes to transmit before updating CWND in DCQCN mode.
.Pp
Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable to take effect.
Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable
to take effect.
.It Va dev.irdma<interface_number>.dcqcn_rai_factor
The number of MSS to add to the congestion window in additive increase mode.
.Pp
Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable to take effect.
Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable
to take effect.
.It Va dev.irdma<interface_number>.dcqcn_hai_factor
The number of MSS to add to the congestion window in hyperactive increase mode.
.Pp
Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable to take effect.
Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable
to take effect.
.It Va dev.irdma<interface_number>.dcqcn_rreduce_mperiod
The minimum time between 2 consecutive rate reductions for a single flow. Rate reduction will occur only if a CNP is received during the relevant time interval.
The minimum time between 2 consecutive rate reductions for a single flow.
Rate reduction will occur only if a CNP is received during the relevant time
interval.
.Pp
Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable to take effect.
Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable
to take effect.
.El
.Ss SYSCTL PROCEDURES
Sysctl controls are available for runtime adjustments.
.Bl -tag -width indent
@ -120,64 +138,60 @@ enables the DCQCN algorithm for RoCEv2.
.Pp
Note: "roce_enable" must also be set for this sysctl to take effect.
.Pp
Note: The change may be set at any time, but it will be applied only to newly created QPs.
Note: The change may be set at any time, but it will be applied only to newly
created QPs.
.El
.Ss TESTING
.Bl -enum
.It
To load the irdma driver, run:
.Bl -tag -width indent
.It
.Bd -literal -offset indent
kldload irdma
.El
If if_ice is not already loaded, the system will load it on its own. Please check whether the value of sysctl
.Ed
If if_ice is not already loaded, the system will load it on its own.
Please check whether the value of sysctl
.Va hw.ice.irdma
is 1, if the irdma driver is not loading. To change the value put:
.Bl -tag -width indent
.It
is set to 1 if the irdma driver does not load.
To change the value, put:
.Bd -literal -offset indent
hw.ice.irdma=1
.El
to
.Ed
in
.Pa /boot/loader.conf
and reboot.
.It
To check that the driver was loaded, run:
.Bl -tag -width indent
.It
.Bd -literal -offset indent
sysctl -a | grep infiniband
.El
.Ed
Typically, if everything goes well, around 190 entries per PF will appear.
.It
Each interface of the card may work in either iWARP or RoCEv2 mode. To enable RoCEv2 compatibility, add:
.Bl -tag -width indent
.It
Each interface of the card may work in either iWARP or RoCEv2 mode.
To enable RoCEv2 compatibility, add:
.Bd -literal -offset indent
dev.irdma<interface_number>.roce_enable=1
.El
.Ed
where <interface_number> is the desired ice interface number on which
RoCEv2 protocol needs to be enabled, to:
.Bl -tag -width indent
.It
RoCEv2 protocol needs to be enabled, into:
.Pa /boot/loader.conf
.El
for instance:
, for instance:
.Bl -tag -width indent
.It
dev.irdma0.roce_enable=0
.It
dev.irdma1.roce_enable=1
.It dev.irdma0.roce_enable=0
.It dev.irdma1.roce_enable=1
.El
will keep iWARP mode on ice0 and enable RoCEv2 mode on interface ice1. The RoCEv2 mode is the default.
.Dl
will keep iWARP mode on ice0 and enable RoCEv2 mode on interface ice1.
The RoCEv2 mode is the default.
.Pp
To check irdma roce_enable status, run:
.Bl -tag -width indent
.It
.Bd -literal -offset indent
sysctl dev.irdma<interface_number>.roce_enable
.El
.Ed
for instance:
.Bl -tag -width indent
.It
.Bd -literal -offset indent
sysctl dev.irdma2.roce_enable
.El
with returned value of '0' indicate the iWARP mode, and the value of '1' indicate the RoCEv2 mode.
.Ed
where a returned value of '0' indicates iWARP mode, and a value of '1'
indicates RoCEv2 mode.
.Pp
Note: An interface configured in one mode will not be able to connect
to a node configured in another mode.
@ -187,44 +201,42 @@ DCB and Priority Flow Controller (PFC) are not currently supported which
may lead to significant performance loss or connectivity issues.
.It
Enable flow control in the ice driver:
.Bl -tag -width indent
.It
.Bd -literal -offset indent
sysctl dev.ice.<interface_number>.fc=3
.El
Enable flow control on the switch your system is connected to. See your
switch documentation for details.
.Ed
Enable flow control on the switch your system is connected to.
See your switch documentation for details.
.It
The source code for krping software is provided with the kernel in
/usr/src/sys/contrib/rdma/krping/. To compile the software, change
directory to /usr/src/sys/modules/rdma/krping/ and invoke the following:
/usr/src/sys/contrib/rdma/krping/.
To compile the software, change directory to
/usr/src/sys/modules/rdma/krping/ and invoke the following:
.Bl -tag -width indent
.It
make clean
.It
make
.It
make install
.It make clean
.It make
.It make install
.It kldload krping
.El
.It
Start a krping server on one machine:
.Bl -tag -width indent
.It
echo size=64,count=1,port=6601,addr=100.0.0.189,server > /dev/krping
.El
.Bd -literal -offset indent
echo size=64,count=1,port=6601,addr=100.0.0.189,server > /dev/krping
.Ed
.It
Connect a client from another machine:
.Bl -tag -width indent
.It
echo size=64,count=1,port=6601,addr=100.0.0.189,client > /dev/krping
.Bd -literal -offset indent
echo size=64,count=1,port=6601,addr=100.0.0.189,client > /dev/krping
.Ed
.El
.Sh SUPPORT
For general information and support, go to the Intel support website at:
.Lk http://support.intel.com/ .
.Pp
If an issue is identified with this driver with a supported adapter, email all the specific information related to the issue to
If an issue is identified with this driver with a supported adapter, email all
the specific information related to the issue to
.Mt freebsd@intel.com .
.Sh SEE ALSO
.Xr if_ice 4
.Xr ice 4
.Sh AUTHORS
.An -nosplit
The

View File

@ -41,6 +41,7 @@
#include <netinet/in_fib.h>
#include <netinet6/in6_fib.h>
#include <net/route/nhop.h>
#include <net/if_llatbl.h>
/* additional QP debugging option. Keep false unless needed */
bool irdma_upload_context = false;
@ -389,7 +390,8 @@ irdma_get_dst_mac(struct irdma_cm_node *cm_node, struct sockaddr *dst_sin, u8 *d
if (dst_sin->sa_family == AF_INET) {
err = arpresolve(ifp, gateway, NULL, nexthop, dst_mac, NULL, &lle);
} else if (dst_sin->sa_family == AF_INET6) {
err = nd6_resolve(ifp, gateway, NULL, nexthop, dst_mac, NULL, &lle);
err = nd6_resolve(ifp, LLE_SF(AF_INET6, gateway), NULL, nexthop,
dst_mac, NULL, &lle);
} else {
err = -EPROTONOSUPPORT;
}
@ -467,15 +469,20 @@ int
irdma_resolve_neigh_lpb_chk(struct irdma_device *iwdev, struct irdma_cm_node *cm_node,
struct irdma_cm_info *cm_info)
{
#ifdef VIMAGE
struct rdma_cm_id *rdma_id = (struct rdma_cm_id *)cm_node->cm_id->context;
struct vnet *vnet = rdma_id->route.addr.dev_addr.net;
#endif
int arpindex;
int oldarpindex;
bool is_lpb = false;
if ((cm_node->ipv4 &&
irdma_ipv4_is_lpb(vnet, cm_node->loc_addr[0], cm_node->rem_addr[0])) ||
(!cm_node->ipv4 &&
irdma_ipv6_is_lpb(cm_node->loc_addr, cm_node->rem_addr))) {
CURVNET_SET_QUIET(vnet);
is_lpb = cm_node->ipv4 ?
irdma_ipv4_is_lpb(cm_node->loc_addr[0], cm_node->rem_addr[0]) :
irdma_ipv6_is_lpb(cm_node->loc_addr, cm_node->rem_addr);
CURVNET_RESTORE();
if (is_lpb) {
cm_node->do_lpb = true;
arpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr,
NULL,

View File

@ -55,6 +55,7 @@
BUILD_BUG_ON_ZERO( \
!__same_type(((struct drv_struct *)NULL)->member, \
struct ib_struct)))
#define set_ibdev_dma_device(ibdev, dev) \
ibdev.dma_device = (dev)
#define set_max_sge(props, rf) \
@ -72,7 +73,6 @@
#define IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION IB_CQ_FLAGS_TIMESTAMP_COMPLETION
#define kc_irdma_destroy_qp(ibqp, udata) irdma_destroy_qp(ibqp, udata)
#ifndef IB_QP_ATTR_STANDARD_BITS
#define IB_QP_ATTR_STANDARD_BITS GENMASK(20, 0)
#endif
@ -80,12 +80,15 @@
#define IRDMA_QOS_MODE_VLAN 0x0
#define IRDMA_QOS_MODE_DSCP 0x1
#define IRDMA_VER_LEN 24
void kc_set_roce_uverbs_cmd_mask(struct irdma_device *iwdev);
void kc_set_rdma_uverbs_cmd_mask(struct irdma_device *iwdev);
struct irdma_tunable_info {
struct sysctl_ctx_list irdma_sysctl_ctx;
struct sysctl_oid *irdma_sysctl_tree;
char drv_ver[IRDMA_VER_LEN];
u8 roce_ena;
};
@ -167,6 +170,7 @@ struct irdma_device *kc_irdma_get_device(struct ifnet *netdev);
void kc_irdma_put_device(struct irdma_device *iwdev);
void kc_set_loc_seq_num_mss(struct irdma_cm_node *cm_node);
u16 kc_rdma_get_udp_sport(u32 fl, u32 lqpn, u32 rqpn);
void irdma_get_dev_fw_str(struct ib_device *dev, char *str, size_t str_len);
@ -184,11 +188,19 @@ void irdma_dcqcn_tunables_init(struct irdma_pci_f *rf);
u32 irdma_create_stag(struct irdma_device *iwdev);
void irdma_free_stag(struct irdma_device *iwdev, u32 stag);
int irdma_hwdereg_mr(struct ib_mr *ib_mr);
int irdma_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, u64 len,
u64 virt, int new_access, struct ib_pd *new_pd,
struct ib_udata *udata);
struct irdma_mr;
struct irdma_cq;
struct irdma_cq_buf;
struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg, struct ib_udata *udata);
int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
u16 access);
struct ib_mr *irdma_rereg_mr_trans(struct irdma_mr *iwmr, u64 start, u64 len,
u64 virt, struct ib_udata *udata);
int irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr);
struct ib_mw *irdma_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
struct ib_udata *udata);
@ -203,6 +215,11 @@ int irdma_setup_kmode_qp(struct irdma_device *iwdev,
struct irdma_qp *iwqp,
struct irdma_qp_init_info *info,
struct ib_qp_init_attr *init_attr);
int irdma_setup_umode_qp(struct ib_udata *udata,
struct irdma_device *iwdev,
struct irdma_qp *iwqp,
struct irdma_qp_init_info *info,
struct ib_qp_init_attr *init_attr);
void irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
struct irdma_qp_host_ctx_info *ctx_info);
void irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp,

View File

@ -53,7 +53,7 @@
/**
* Driver version
*/
char irdma_driver_version[] = "0.0.51-k";
char irdma_driver_version[] = "1.1.5-k";
#define pf_if_d(peer) peer->ifp->if_dunit
@ -103,6 +103,11 @@ irdma_init_tunable(struct irdma_pci_f *rf, uint8_t pf_id)
(rf->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) ? "iWARP" : "RoCEv2",
rf->tun_info.roce_ena);
snprintf(rf->tun_info.drv_ver, IRDMA_VER_LEN, "%s", irdma_driver_version);
SYSCTL_ADD_STRING(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
OID_AUTO, "drv_ver", CTLFLAG_RDTUN, rf->tun_info.drv_ver,
IRDMA_VER_LEN, "driver version");
irdma_dcqcn_tunables_init(rf);
}

View File

@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2017 - 2021 Intel Corporation
* Copyright (c) 2017 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -71,21 +71,23 @@ static u32 icrdma_regs[IRDMA_MAX_REGS] = {
};
static u64 icrdma_masks[IRDMA_MAX_MASKS] = {
ICRDMA_CCQPSTATUS_CCQP_DONE_M,
ICRDMA_CCQPSTATUS_CCQP_ERR_M,
ICRDMA_CQPSQ_STAG_PDID_M,
ICRDMA_CQPSQ_CQ_CEQID_M,
ICRDMA_CQPSQ_CQ_CQID_M,
ICRDMA_COMMIT_FPM_CQCNT_M,
ICRDMA_CCQPSTATUS_CCQP_DONE,
ICRDMA_CCQPSTATUS_CCQP_ERR,
ICRDMA_CQPSQ_STAG_PDID,
ICRDMA_CQPSQ_CQ_CEQID,
ICRDMA_CQPSQ_CQ_CQID,
ICRDMA_COMMIT_FPM_CQCNT,
ICRDMA_CQPSQ_UPESD_HMCFNID
};
static u64 icrdma_shifts[IRDMA_MAX_SHIFTS] = {
static u8 icrdma_shifts[IRDMA_MAX_SHIFTS] = {
ICRDMA_CCQPSTATUS_CCQP_DONE_S,
ICRDMA_CCQPSTATUS_CCQP_ERR_S,
ICRDMA_CQPSQ_STAG_PDID_S,
ICRDMA_CQPSQ_CQ_CEQID_S,
ICRDMA_CQPSQ_CQ_CQID_S,
ICRDMA_COMMIT_FPM_CQCNT_S,
ICRDMA_CQPSQ_UPESD_HMCFNID_S
};
/**
@ -101,9 +103,10 @@ icrdma_ena_irq(struct irdma_sc_dev *dev, u32 idx)
if (dev->ceq_itr && dev->aeq->msix_idx != idx)
interval = dev->ceq_itr >> 1; /* 2 usec units */
val = LS_64(0, IRDMA_GLINT_DYN_CTL_ITR_INDX) |
LS_64(interval, IRDMA_GLINT_DYN_CTL_INTERVAL) |
IRDMA_GLINT_DYN_CTL_INTENA_M | IRDMA_GLINT_DYN_CTL_CLEARPBA_M;
val = FIELD_PREP(IRDMA_GLINT_DYN_CTL_ITR_INDX, IRDMA_IDX_ITR0) |
FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTERVAL, interval) |
FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTENA, true) |
FIELD_PREP(IRDMA_GLINT_DYN_CTL_CLEARPBA, true);
writel(val, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + idx);
}
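These interrupt-control writes switch from the old LS_64()/*_M shift-and-mask macros to GENMASK()/FIELD_PREP(), which derive the shift from the mask itself. A self-contained sketch of the equivalence, using simplified 32-bit stand-ins for the kernel helpers (illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Simplified 32-bit versions of the kernel helpers, for illustration only. */
#define GENMASK(h, l)        (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, v)  (((uint32_t)(v) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, r)   (((uint32_t)(r) & (mask)) >> __builtin_ctz(mask))

int main(void)
{
	uint32_t interval = 7;
	/* IRDMA_GLINT_DYN_CTL_INTERVAL is GENMASK(16, 5): the old code wrote
	 * (interval << 5) & mask by hand; FIELD_PREP does the same thing. */
	uint32_t val = FIELD_PREP(GENMASK(16, 5), interval);

	/* prints "0xe0 0x7" */
	printf("0x%x 0x%x\n", val, FIELD_GET(GENMASK(16, 5), val));
	return 0;
}

FIELD_GET() is the inverse operation and is what, for example, irdma_tos2dscp() uses later in this change.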
@ -131,9 +134,9 @@ icrdma_cfg_ceq(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
{
u32 reg_val;
reg_val = enable ? IRDMA_GLINT_CEQCTL_CAUSE_ENA_M : 0;
reg_val = enable ? IRDMA_GLINT_CEQCTL_CAUSE_ENA : 0;
reg_val |= (idx << IRDMA_GLINT_CEQCTL_MSIX_INDX_S) |
IRDMA_GLINT_CEQCTL_ITR_INDX_M;
IRDMA_GLINT_CEQCTL_ITR_INDX;
writel(reg_val, dev->hw_regs[IRDMA_GLINT_CEQCTL] + ceq_id);
}
@ -224,7 +227,7 @@ icrdma_init_hw(struct irdma_sc_dev *dev)
dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];
dev->irq_ops = &icrdma_irq_ops;
dev->hw_stats_map = icrdma_hw_stat_map;
dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M | SZ_1G;
dev->hw_attrs.max_hw_ird = ICRDMA_MAX_IRD_SIZE;
dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE;
dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;
@ -232,8 +235,7 @@ icrdma_init_hw(struct irdma_sc_dev *dev)
dev->hw_attrs.uk_attrs.max_hw_wq_frags = ICRDMA_MAX_WQ_FRAGMENT_COUNT;
dev->hw_attrs.uk_attrs.max_hw_read_sges = ICRDMA_MAX_SGE_RD;
dev->hw_attrs.uk_attrs.max_hw_wq_size = IRDMA_QP_WQE_MAX_SIZE;
dev->hw_attrs.uk_attrs.min_sw_wq_size = IRDMA_QP_SW_MIN_WQSIZE;
dev->hw_attrs.uk_attrs.min_hw_wq_size = ICRDMA_MIN_WQ_SIZE;
dev->hw_attrs.uk_attrs.max_hw_sq_chunk = IRDMA_MAX_QUANTA_PER_WR;
disable_tx_spad(dev->hw);
disable_prefetch(dev->hw);
@ -320,6 +322,9 @@ irdma_is_config_ok(struct irdma_config_check *cc, struct irdma_sc_vsi *vsi)
#define IRDMA_CWND_NO_FC 0x1
#define IRDMA_CWND_FC 0x18
#define IRDMA_RTOMIN_NO_FC 0x5
#define IRDMA_RTOMIN_FC 0x32
#define IRDMA_ACKCREDS_NO_FC 0x02
#define IRDMA_ACKCREDS_FC 0x06
@ -405,7 +410,7 @@ disable_tx_spad(struct irdma_hw *hw)
wr32(hw, GLPE_WQMTXIDXDATA, wqm_data);
}
#define GL_RDPU_CNTRL 0x52054
#define GL_RDPU_CNTRL 0x52054
void
rdpu_ackreqpmthresh(struct irdma_hw *hw)
{

View File

@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2019 - 2020 Intel Corporation
* Copyright (c) 2017 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -101,28 +101,29 @@
#define ICRDMA_VF_DB_ADDR_OFFSET (64 * 1024)
/* CCQSTATUS */
#define ICRDMA_CCQPSTATUS_CCQP_DONE_S 0
#define ICRDMA_CCQPSTATUS_CCQP_DONE_M (0x1ULL << ICRDMA_CCQPSTATUS_CCQP_DONE_S)
#define ICRDMA_CCQPSTATUS_CCQP_ERR_S 31
#define ICRDMA_CCQPSTATUS_CCQP_ERR_M (0x1ULL << ICRDMA_CCQPSTATUS_CCQP_ERR_S)
#define ICRDMA_CQPSQ_STAG_PDID_S 46
#define ICRDMA_CQPSQ_STAG_PDID_M (0x3ffffULL << ICRDMA_CQPSQ_STAG_PDID_S)
#define ICRDMA_CQPSQ_CQ_CEQID_S 22
#define ICRDMA_CQPSQ_CQ_CEQID_M (0x3ffULL << ICRDMA_CQPSQ_CQ_CEQID_S)
#define ICRDMA_CQPSQ_CQ_CQID_S 0
#define ICRDMA_CQPSQ_CQ_CQID_M (0x7ffffULL << ICRDMA_CQPSQ_CQ_CQID_S)
#define ICRDMA_COMMIT_FPM_CQCNT_S 0
#define ICRDMA_COMMIT_FPM_CQCNT_M (0xfffffULL << ICRDMA_COMMIT_FPM_CQCNT_S)
#define ICRDMA_CCQPSTATUS_CCQP_DONE_S 0
#define ICRDMA_CCQPSTATUS_CCQP_DONE BIT_ULL(0)
#define ICRDMA_CCQPSTATUS_CCQP_ERR_S 31
#define ICRDMA_CCQPSTATUS_CCQP_ERR BIT_ULL(31)
#define ICRDMA_CQPSQ_STAG_PDID_S 46
#define ICRDMA_CQPSQ_STAG_PDID GENMASK_ULL(63, 46)
#define ICRDMA_CQPSQ_CQ_CEQID_S 22
#define ICRDMA_CQPSQ_CQ_CEQID GENMASK_ULL(31, 22)
#define ICRDMA_CQPSQ_CQ_CQID_S 0
#define ICRDMA_CQPSQ_CQ_CQID GENMASK_ULL(18, 0)
#define ICRDMA_COMMIT_FPM_CQCNT_S 0
#define ICRDMA_COMMIT_FPM_CQCNT GENMASK_ULL(19, 0)
#define ICRDMA_CQPSQ_UPESD_HMCFNID_S 0
#define ICRDMA_CQPSQ_UPESD_HMCFNID GENMASK_ULL(5, 0)
enum icrdma_device_caps_const {
ICRDMA_MAX_WQ_FRAGMENT_COUNT = 13,
ICRDMA_MAX_SGE_RD = 13,
ICRDMA_MAX_STATS_COUNT = 128,
ICRDMA_MAX_IRD_SIZE = 127,
ICRDMA_MAX_ORD_SIZE = 255,
ICRDMA_MAX_IRD_SIZE = 32,
ICRDMA_MAX_ORD_SIZE = 64,
ICRDMA_MIN_WQ_SIZE = 8 /* WQEs */,
};

View File

@ -2,7 +2,7 @@
* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB)
*
*
* Copyright (c) 2006 - 2021 Intel Corporation. All rights reserved.
* Copyright (c) 2006 - 2022 Intel Corporation. All rights reserved.
* Copyright (c) 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.

View File

@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2017 - 2021 Intel Corporation
* Copyright (c) 2017 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -39,81 +39,74 @@
#define RDMA_BIT2(type, a) ((u##type) 1UL << a)
#define RDMA_MASK3(type, mask, shift) ((u##type) mask << shift)
#define MAKEMASK(m, s) ((m) << (s))
#define IRDMA_WQEALLOC_WQE_DESC_INDEX_S 20
#define IRDMA_WQEALLOC_WQE_DESC_INDEX_M (0xfff << IRDMA_WQEALLOC_WQE_DESC_INDEX_S)
#define IRDMA_CQPTAIL_WQTAIL_S 0
#define IRDMA_CQPTAIL_WQTAIL_M (0x7ff << IRDMA_CQPTAIL_WQTAIL_S)
#define IRDMA_WQEALLOC_WQE_DESC_INDEX_S 20
#define IRDMA_WQEALLOC_WQE_DESC_INDEX GENMASK(31, 20)
#define IRDMA_CQPTAIL_CQP_OP_ERR_S 31
#define IRDMA_CQPTAIL_CQP_OP_ERR_M (0x1 << IRDMA_CQPTAIL_CQP_OP_ERR_S)
#define IRDMA_CQPTAIL_WQTAIL_S 0
#define IRDMA_CQPTAIL_WQTAIL GENMASK(10, 0)
#define IRDMA_CQPTAIL_CQP_OP_ERR_S 31
#define IRDMA_CQPTAIL_CQP_OP_ERR BIT(31)
#define IRDMA_CQPERRCODES_CQP_MINOR_CODE_S 0
#define IRDMA_CQPERRCODES_CQP_MINOR_CODE_M (0xffff << IRDMA_CQPERRCODES_CQP_MINOR_CODE_S)
#define IRDMA_CQPERRCODES_CQP_MAJOR_CODE_S 16
#define IRDMA_CQPERRCODES_CQP_MAJOR_CODE_M (0xffff << IRDMA_CQPERRCODES_CQP_MAJOR_CODE_S)
#define IRDMA_CQPERRCODES_CQP_MINOR_CODE_S 0
#define IRDMA_CQPERRCODES_CQP_MINOR_CODE GENMASK(15, 0)
#define IRDMA_CQPERRCODES_CQP_MAJOR_CODE_S 16
#define IRDMA_CQPERRCODES_CQP_MAJOR_CODE GENMASK(31, 16)
#define IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE_S 4
#define IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE GENMASK(5, 4)
#define IRDMA_GLINT_RATE_INTERVAL_S 0
#define IRDMA_GLINT_RATE_INTERVAL GENMASK(4, 0)
#define IRDMA_GLINT_RATE_INTRL_ENA_S 6
#define IRDMA_GLINT_RATE_INTRL_ENA_M BIT(6)
#define IRDMA_GLINT_RATE_INTRL_ENA BIT(6)
#define IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE_S 4
#define IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE_M (0x3 << IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE_S)
#define IRDMA_GLINT_DYN_CTL_INTENA_S 0
#define IRDMA_GLINT_DYN_CTL_INTENA BIT(0)
#define IRDMA_GLINT_DYN_CTL_CLEARPBA_S 1
#define IRDMA_GLINT_DYN_CTL_CLEARPBA BIT(1)
#define IRDMA_GLINT_DYN_CTL_ITR_INDX_S 3
#define IRDMA_GLINT_DYN_CTL_ITR_INDX GENMASK(4, 3)
#define IRDMA_GLINT_DYN_CTL_INTERVAL_S 5
#define IRDMA_GLINT_DYN_CTL_INTERVAL GENMASK(16, 5)
#define IRDMA_GLINT_CEQCTL_ITR_INDX_S 11
#define IRDMA_GLINT_CEQCTL_ITR_INDX GENMASK(12, 11)
#define IRDMA_GLINT_CEQCTL_CAUSE_ENA_S 30
#define IRDMA_GLINT_CEQCTL_CAUSE_ENA BIT(30)
#define IRDMA_GLINT_CEQCTL_MSIX_INDX_S 0
#define IRDMA_GLINT_CEQCTL_MSIX_INDX GENMASK(10, 0)
#define IRDMA_PFINT_AEQCTL_MSIX_INDX_S 0
#define IRDMA_PFINT_AEQCTL_MSIX_INDX GENMASK(10, 0)
#define IRDMA_PFINT_AEQCTL_ITR_INDX_S 11
#define IRDMA_PFINT_AEQCTL_ITR_INDX GENMASK(12, 11)
#define IRDMA_PFINT_AEQCTL_CAUSE_ENA_S 30
#define IRDMA_PFINT_AEQCTL_CAUSE_ENA BIT(30)
#define IRDMA_PFHMC_PDINV_PMSDIDX_S 0
#define IRDMA_PFHMC_PDINV_PMSDIDX GENMASK(11, 0)
#define IRDMA_PFHMC_PDINV_PMSDPARTSEL_S 15
#define IRDMA_PFHMC_PDINV_PMSDPARTSEL BIT(15)
#define IRDMA_PFHMC_PDINV_PMPDIDX_S 16
#define IRDMA_PFHMC_PDINV_PMPDIDX GENMASK(24, 16)
#define IRDMA_PFHMC_SDDATALOW_PMSDVALID_S 0
#define IRDMA_PFHMC_SDDATALOW_PMSDVALID BIT(0)
#define IRDMA_PFHMC_SDDATALOW_PMSDTYPE_S 1
#define IRDMA_PFHMC_SDDATALOW_PMSDTYPE BIT(1)
#define IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT_S 2
#define IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT GENMASK(11, 2)
#define IRDMA_PFHMC_SDDATALOW_PMSDDATALOW_S 12
#define IRDMA_PFHMC_SDDATALOW_PMSDDATALOW GENMASK(31, 12)
#define IRDMA_PFHMC_SDCMD_PMSDWR_S 31
#define IRDMA_PFHMC_SDCMD_PMSDWR BIT(31)
#define IRDMA_PFHMC_SDCMD_PMSDPARTSEL_S 15
#define IRDMA_PFHMC_SDCMD_PMSDPARTSEL BIT(15)
#define IRDMA_GLINT_RATE_INTERVAL_S 0
#define IRDMA_GLINT_RATE_INTERVAL_M (0x3c << IRDMA_GLINT_RATE_INTERVAL_S)
#define IRDMA_INVALID_CQ_IDX 0xffffffff
#define IRDMA_GLINT_RATE_INTRL_ENA_S 6
#define IRDMA_GLINT_RATE_INTRL_ENA_M BIT(6)
#define IRDMA_GLINT_DYN_CTL_INTENA_S 0
#define IRDMA_GLINT_DYN_CTL_INTENA_M (0x1 << IRDMA_GLINT_DYN_CTL_INTENA_S)
#define IRDMA_GLINT_DYN_CTL_CLEARPBA_S 1
#define IRDMA_GLINT_DYN_CTL_CLEARPBA_M (0x1 << IRDMA_GLINT_DYN_CTL_CLEARPBA_S)
#define IRDMA_GLINT_DYN_CTL_ITR_INDX_S 3
#define IRDMA_GLINT_DYN_CTL_ITR_INDX_M (0x3 << IRDMA_GLINT_DYN_CTL_ITR_INDX_S)
#define IRDMA_GLINT_DYN_CTL_INTERVAL_S 5
#define IRDMA_GLINT_DYN_CTL_INTERVAL_M (0xfff << IRDMA_GLINT_DYN_CTL_INTERVAL_S)
#define IRDMA_GLINT_CEQCTL_ITR_INDX_S 11
#define IRDMA_GLINT_CEQCTL_ITR_INDX_M (0x3 << IRDMA_GLINT_CEQCTL_ITR_INDX_S)
#define IRDMA_GLINT_CEQCTL_CAUSE_ENA_S 30
#define IRDMA_GLINT_CEQCTL_CAUSE_ENA_M (0x1 << IRDMA_GLINT_CEQCTL_CAUSE_ENA_S)
#define IRDMA_GLINT_CEQCTL_MSIX_INDX_S 0
#define IRDMA_GLINT_CEQCTL_MSIX_INDX_M (0x7ff << IRDMA_GLINT_CEQCTL_MSIX_INDX_S)
#define IRDMA_PFINT_AEQCTL_MSIX_INDX_S 0
#define IRDMA_PFINT_AEQCTL_MSIX_INDX_M (0x7ff << IRDMA_PFINT_AEQCTL_MSIX_INDX_S)
#define IRDMA_PFINT_AEQCTL_ITR_INDX_S 11
#define IRDMA_PFINT_AEQCTL_ITR_INDX_M (0x3 << IRDMA_PFINT_AEQCTL_ITR_INDX_S)
#define IRDMA_PFINT_AEQCTL_CAUSE_ENA_S 30
#define IRDMA_PFINT_AEQCTL_CAUSE_ENA_M (0x1 << IRDMA_PFINT_AEQCTL_CAUSE_ENA_S)
#define IRDMA_PFHMC_PDINV_PMSDIDX_S 0
#define IRDMA_PFHMC_PDINV_PMSDIDX_M (0xfff << IRDMA_PFHMC_PDINV_PMSDIDX_S)
#define IRDMA_PFHMC_PDINV_PMSDPARTSEL_S 15
#define IRDMA_PFHMC_PDINV_PMSDPARTSEL_M (0x1 << IRDMA_PFHMC_PDINV_PMSDPARTSEL_S)
#define IRDMA_PFHMC_PDINV_PMPDIDX_S 16
#define IRDMA_PFHMC_PDINV_PMPDIDX_M (0x1ff << IRDMA_PFHMC_PDINV_PMPDIDX_S)
#define IRDMA_PFHMC_SDDATALOW_PMSDVALID_S 0
#define IRDMA_PFHMC_SDDATALOW_PMSDVALID_M (0x1 << IRDMA_PFHMC_SDDATALOW_PMSDVALID_S)
#define IRDMA_PFHMC_SDDATALOW_PMSDTYPE_S 1
#define IRDMA_PFHMC_SDDATALOW_PMSDTYPE_M (0x1 << IRDMA_PFHMC_SDDATALOW_PMSDTYPE_S)
#define IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT_S 2
#define IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT_M (0x3ff << IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT_S)
#define IRDMA_PFHMC_SDDATALOW_PMSDDATALOW_S 12
#define IRDMA_PFHMC_SDDATALOW_PMSDDATALOW_M (0xfffff << IRDMA_PFHMC_SDDATALOW_PMSDDATALOW_S)
#define IRDMA_PFHMC_SDCMD_PMSDWR_S 31
#define IRDMA_PFHMC_SDCMD_PMSDWR_M (0x1 << IRDMA_PFHMC_SDCMD_PMSDWR_S)
#define IRDMA_INVALID_CQ_IDX 0xffffffff
enum irdma_dyn_idx_t {
IRDMA_IDX_ITR0 = 0,
IRDMA_IDX_ITR1 = 1,
IRDMA_IDX_ITR2 = 2,
IRDMA_IDX_NOITR = 3,
};
enum irdma_registers {
IRDMA_CQPTAIL,
@ -149,6 +142,7 @@ enum irdma_shifts {
IRDMA_CQPSQ_CQ_CEQID_S,
IRDMA_CQPSQ_CQ_CQID_S,
IRDMA_COMMIT_FPM_CQCNT_S,
IRDMA_CQPSQ_UPESD_HMCFNID_S,
IRDMA_MAX_SHIFTS,
};
@ -159,6 +153,7 @@ enum irdma_masks {
IRDMA_CQPSQ_CQ_CEQID_M,
IRDMA_CQPSQ_CQ_CQID_M,
IRDMA_COMMIT_FPM_CQCNT_M,
IRDMA_CQPSQ_UPESD_HMCFNID_M,
IRDMA_MAX_MASKS, /* Must be last entry */
};
@ -174,7 +169,7 @@ struct irdma_mcast_grp_ctx_entry_info {
struct irdma_mcast_grp_info {
u8 dest_mac_addr[ETH_ALEN];
u16 vlan_id;
u8 hmc_fcn_id;
u16 hmc_fcn_id;
bool ipv4_valid:1;
bool vlan_valid:1;
u16 mg_id;
@ -186,9 +181,10 @@ struct irdma_mcast_grp_info {
};
enum irdma_vers {
IRDMA_GEN_RSVD,
IRDMA_GEN_1,
IRDMA_GEN_2,
IRDMA_GEN_RSVD = 0,
IRDMA_GEN_1 = 1,
IRDMA_GEN_2 = 2,
IRDMA_GEN_MAX = 2,
};
struct irdma_uk_attrs {
@ -201,8 +197,7 @@ struct irdma_uk_attrs {
u32 min_hw_cq_size;
u32 max_hw_cq_size;
u16 max_hw_sq_chunk;
u16 max_hw_wq_size;
u16 min_sw_wq_size;
u16 min_hw_wq_size;
u8 hw_rev;
};
@ -211,6 +206,7 @@ struct irdma_hw_attrs {
u64 max_hw_outbound_msg_size;
u64 max_hw_inbound_msg_size;
u64 max_mr_size;
u64 page_size_cap;
u32 min_hw_qp_id;
u32 min_hw_aeq_size;
u32 max_hw_aeq_size;

View File

@ -1628,6 +1628,7 @@ irdma_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac)
{
struct ifnet *ip_dev = NULL;
struct in6_addr laddr6;
u16 scope_id = 0;
irdma_copy_ip_htonl(laddr6.__u6_addr.__u6_addr32, addr);
if (vlan_id)
@ -1635,7 +1636,11 @@ irdma_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac)
if (mac)
eth_zero_addr(mac);
ip_dev = ip6_ifp_find(&init_net, laddr6, 0);
if (IN6_IS_SCOPE_LINKLOCAL(&laddr6) ||
IN6_IS_ADDR_MC_INTFACELOCAL(&laddr6))
scope_id = ntohs(laddr6.__u6_addr.__u6_addr16[1]);
ip_dev = ip6_ifp_find(&init_net, laddr6, scope_id);
if (ip_dev) {
if (vlan_id)
*vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
@ -2055,15 +2060,9 @@ irdma_add_hte_node(struct irdma_cm_core *cm_core,
* @rem_addr: remote address
*/
bool
irdma_ipv4_is_lpb(struct vnet *vnet, u32 loc_addr, u32 rem_addr)
irdma_ipv4_is_lpb(u32 loc_addr, u32 rem_addr)
{
bool ret;
CURVNET_SET_QUIET(vnet);
ret = ipv4_is_loopback(htonl(rem_addr)) || (loc_addr == rem_addr);
CURVNET_RESTORE();
return (ret);
return ipv4_is_loopback(htonl(rem_addr)) || (loc_addr == rem_addr);
}
/**
@ -2089,10 +2088,12 @@ irdma_ipv6_is_lpb(u32 *loc_addr, u32 *rem_addr)
static int
irdma_cm_create_ah(struct irdma_cm_node *cm_node, bool wait)
{
struct rdma_cm_id *rdma_id = (struct rdma_cm_id *)cm_node->cm_id->context;
struct vnet *vnet = rdma_id->route.addr.dev_addr.net;
struct irdma_ah_info ah_info = {0};
struct irdma_device *iwdev = cm_node->iwdev;
#ifdef VIMAGE
struct rdma_cm_id *rdma_id = (struct rdma_cm_id *)cm_node->cm_id->context;
struct vnet *vnet = rdma_id->route.addr.dev_addr.net;
#endif
ether_addr_copy(ah_info.mac_addr, IF_LLADDR(iwdev->netdev));
@ -2104,9 +2105,12 @@ irdma_cm_create_ah(struct irdma_cm_node *cm_node, bool wait)
ah_info.ipv4_valid = true;
ah_info.dest_ip_addr[0] = cm_node->rem_addr[0];
ah_info.src_ip_addr[0] = cm_node->loc_addr[0];
ah_info.do_lpbk = irdma_ipv4_is_lpb(vnet,
ah_info.src_ip_addr[0],
#ifdef VIMAGE
CURVNET_SET_QUIET(vnet);
ah_info.do_lpbk = irdma_ipv4_is_lpb(ah_info.src_ip_addr[0],
ah_info.dest_ip_addr[0]);
CURVNET_RESTORE();
#endif
} else {
memcpy(ah_info.dest_ip_addr, cm_node->rem_addr,
sizeof(ah_info.dest_ip_addr));
@ -2235,10 +2239,8 @@ irdma_make_cm_node(struct irdma_cm_core *cm_core, struct irdma_device *iwdev,
}
static void
irdma_cm_node_free_cb(struct rcu_head *rcu_head)
irdma_destroy_connection(struct irdma_cm_node *cm_node)
{
struct irdma_cm_node *cm_node =
container_of(rcu_head, struct irdma_cm_node, rcu_head);
struct irdma_cm_core *cm_core = cm_node->cm_core;
struct irdma_qp *iwqp;
struct irdma_cm_info nfo;
@ -2286,7 +2288,6 @@ irdma_cm_node_free_cb(struct rcu_head *rcu_head)
}
cm_core->cm_free_ah(cm_node);
kfree(cm_node);
}
/**
@ -2314,8 +2315,9 @@ irdma_rem_ref_cm_node(struct irdma_cm_node *cm_node)
spin_unlock_irqrestore(&cm_core->ht_lock, flags);
/* wait for all list walkers to exit their grace period */
call_rcu(&cm_node->rcu_head, irdma_cm_node_free_cb);
irdma_destroy_connection(cm_node);
kfree_rcu(cm_node, rcu_head);
}
/**
@ -3410,12 +3412,6 @@ irdma_cm_disconn_true(struct irdma_qp *iwqp)
}
cm_id = iwqp->cm_id;
/* make sure we havent already closed this connection */
if (!cm_id) {
spin_unlock_irqrestore(&iwqp->lock, flags);
return;
}
original_hw_tcp_state = iwqp->hw_tcp_state;
original_ibqp_state = iwqp->ibqp_state;
last_ae = iwqp->last_aeq;
@ -3437,11 +3433,11 @@ irdma_cm_disconn_true(struct irdma_qp *iwqp)
disconn_status = -ECONNRESET;
}
if ((original_hw_tcp_state == IRDMA_TCP_STATE_CLOSED ||
original_hw_tcp_state == IRDMA_TCP_STATE_TIME_WAIT ||
last_ae == IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE ||
last_ae == IRDMA_AE_BAD_CLOSE ||
last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->rf->reset)) {
if (original_hw_tcp_state == IRDMA_TCP_STATE_CLOSED ||
original_hw_tcp_state == IRDMA_TCP_STATE_TIME_WAIT ||
last_ae == IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE ||
last_ae == IRDMA_AE_BAD_CLOSE ||
last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->rf->reset || !cm_id) {
issue_close = 1;
iwqp->cm_id = NULL;
qp->term_flags = 0;
@ -3453,10 +3449,6 @@ irdma_cm_disconn_true(struct irdma_qp *iwqp)
spin_unlock_irqrestore(&iwqp->lock, flags);
if (issue_flush && !iwqp->sc_qp.qp_uk.destroy_pending) {
if (!iwqp->user_mode)
queue_delayed_work(iwqp->iwdev->cleanup_wq,
&iwqp->dwork_flush,
msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ | IRDMA_FLUSH_RQ |
IRDMA_FLUSH_WAIT);
@ -4193,10 +4185,6 @@ irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
struct irdma_cm_node *cm_node;
struct list_head teardown_list;
struct ib_qp_attr attr;
struct irdma_sc_vsi *vsi = &iwdev->vsi;
struct irdma_sc_qp *sc_qp;
struct irdma_qp *qp;
int i;
INIT_LIST_HEAD(&teardown_list);
@ -4213,50 +4201,4 @@ irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
irdma_cm_disconn(cm_node->iwqp);
irdma_rem_ref_cm_node(cm_node);
}
if (!iwdev->roce_mode)
return;
INIT_LIST_HEAD(&teardown_list);
for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
mutex_lock(&vsi->qos[i].qos_mutex);
list_for_each_safe(list_node, list_core_temp,
&vsi->qos[i].qplist) {
u32 qp_ip[4];
sc_qp = container_of(list_node, struct irdma_sc_qp,
list);
if (sc_qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_RC)
continue;
qp = sc_qp->qp_uk.back_qp;
if (!disconnect_all) {
if (nfo->ipv4)
qp_ip[0] = qp->udp_info.local_ipaddr[3];
else
memcpy(qp_ip,
&qp->udp_info.local_ipaddr[0],
sizeof(qp_ip));
}
if (disconnect_all ||
(nfo->vlan_id == (qp->udp_info.vlan_tag & EVL_VLID_MASK) &&
!memcmp(qp_ip, ipaddr, nfo->ipv4 ? 4 : 16))) {
spin_lock(&iwdev->rf->qptable_lock);
if (iwdev->rf->qp_table[sc_qp->qp_uk.qp_id]) {
irdma_qp_add_ref(&qp->ibqp);
list_add(&qp->teardown_entry,
&teardown_list);
}
spin_unlock(&iwdev->rf->qptable_lock);
}
}
mutex_unlock(&vsi->qos[i].qos_mutex);
}
list_for_each_safe(list_node, list_core_temp, &teardown_list) {
qp = container_of(list_node, struct irdma_qp, teardown_entry);
attr.qp_state = IB_QPS_ERR;
irdma_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL);
irdma_qp_rem_ref(&qp->ibqp);
}
}

View File

@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2015 - 2021 Intel Corporation
* Copyright (c) 2015 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -74,7 +74,7 @@
#define TCP_OPTIONS_PADDING 3
#define IRDMA_DEFAULT_RETRYS 64
#define IRDMA_DEFAULT_RETRANS 8
#define IRDMA_DEFAULT_RETRANS 32
#define IRDMA_DEFAULT_TTL 0x40
#define IRDMA_DEFAULT_RTT_VAR 6
#define IRDMA_DEFAULT_SS_THRESH 0x3fffffff
@ -192,14 +192,6 @@ enum irdma_cm_event_type {
IRDMA_CM_EVENT_ABORTED,
};
struct irdma_bth { /* Base Trasnport Header */
u8 opcode;
u8 flags;
__be16 pkey;
__be32 qpn;
__be32 apsn;
};
struct ietf_mpa_v1 {
u8 key[IETF_MPA_KEY_SIZE];
u8 flags;
@ -426,8 +418,8 @@ int irdma_schedule_cm_timer(struct irdma_cm_node *cm_node,
static inline u8 irdma_tos2dscp(u8 tos)
{
#define IRDMA_DSCP_S 2
#define IRDMA_DSCP_M (0x3f << IRDMA_DSCP_S)
return RS_32(tos, IRDMA_DSCP);
#define IRDMA_DSCP GENMASK(7, 2)
return FIELD_GET(IRDMA_DSCP, tos);
}
int irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
@ -435,16 +427,16 @@ int irdma_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
int irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int irdma_create_listen(struct iw_cm_id *cm_id, int backlog);
int irdma_destroy_listen(struct iw_cm_id *cm_id);
int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, u8 *mac);
int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, const u8 *mac);
void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
struct irdma_cm_info *nfo,
bool disconnect_all);
int irdma_cm_start(struct irdma_device *dev);
int irdma_cm_stop(struct irdma_device *dev);
bool irdma_ipv4_is_lpb(struct vnet *, u32 loc_addr, u32 rem_addr);
bool irdma_ipv4_is_lpb(u32 loc_addr, u32 rem_addr);
bool irdma_ipv6_is_lpb(u32 *loc_addr, u32 *rem_addr);
int irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr,
u8 *mac_addr, u32 action);
const u8 *mac_addr, u32 action);
bool irdma_port_in_use(struct irdma_cm_core *cm_core, u16 port);
void irdma_send_ack(struct irdma_cm_node *cm_node);
void irdma_lpb_nop(struct irdma_sc_qp *qp);

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2015 - 2021 Intel Corporation
* Copyright (c) 2015 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -106,10 +106,14 @@ static void
irdma_set_sd_entry(u64 pa, u32 idx, enum irdma_sd_entry_type type,
struct irdma_update_sd_entry *entry)
{
entry->data = pa | (IRDMA_HMC_MAX_BP_COUNT << IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT_S) |
(((type == IRDMA_SD_TYPE_PAGED) ? 0 : 1) << IRDMA_PFHMC_SDDATALOW_PMSDTYPE_S) |
(1 << IRDMA_PFHMC_SDDATALOW_PMSDVALID_S);
entry->cmd = (idx | (1 << IRDMA_PFHMC_SDCMD_PMSDWR_S) | (1 << 15));
entry->data = pa |
FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT, IRDMA_HMC_MAX_BP_COUNT) |
FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDTYPE,
type == IRDMA_SD_TYPE_PAGED ? 0 : 1) |
FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDVALID, 1);
entry->cmd = idx | FIELD_PREP(IRDMA_PFHMC_SDCMD_PMSDWR, 1) |
IRDMA_PFHMC_SDCMD_PMSDPARTSEL;
}
/**
@ -122,9 +126,12 @@ static void
irdma_clr_sd_entry(u32 idx, enum irdma_sd_entry_type type,
struct irdma_update_sd_entry *entry)
{
entry->data = (IRDMA_HMC_MAX_BP_COUNT << IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT_S) |
(((type == IRDMA_SD_TYPE_PAGED) ? 0 : 1) << IRDMA_PFHMC_SDDATALOW_PMSDTYPE_S);
entry->cmd = (idx | (1 << IRDMA_PFHMC_SDCMD_PMSDWR_S) | (1 << 15));
entry->data = FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT, IRDMA_HMC_MAX_BP_COUNT) |
FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDTYPE,
type == IRDMA_SD_TYPE_PAGED ? 0 : 1);
entry->cmd = idx | FIELD_PREP(IRDMA_PFHMC_SDCMD_PMSDWR, 1) |
IRDMA_PFHMC_SDCMD_PMSDPARTSEL;
}
/**
@ -137,9 +144,9 @@ static inline void
irdma_invalidate_pf_hmc_pd(struct irdma_sc_dev *dev, u32 sd_idx,
u32 pd_idx)
{
u32 val = LS_32(sd_idx, IRDMA_PFHMC_PDINV_PMSDIDX) |
LS_32(1, IRDMA_PFHMC_PDINV_PMSDPARTSEL) |
LS_32(pd_idx, IRDMA_PFHMC_PDINV_PMPDIDX);
u32 val = FIELD_PREP(IRDMA_PFHMC_PDINV_PMSDIDX, sd_idx) |
FIELD_PREP(IRDMA_PFHMC_PDINV_PMSDPARTSEL, 1) |
FIELD_PREP(IRDMA_PFHMC_PDINV_PMPDIDX, pd_idx);
writel(val, dev->hw_regs[IRDMA_PFHMC_PDINV]);
}
@ -154,7 +161,7 @@ irdma_invalidate_pf_hmc_pd(struct irdma_sc_dev *dev, u32 sd_idx,
* @setsd: flag to set or clear sd
*/
int
irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id, u64 pa, u32 sd_idx,
irdma_hmc_sd_one(struct irdma_sc_dev *dev, u16 hmc_fn_id, u64 pa, u32 sd_idx,
enum irdma_sd_entry_type type, bool setsd)
{
struct irdma_update_sds_info sdinfo;
@ -534,7 +541,7 @@ irdma_add_sd_table_entry(struct irdma_hw *hw,
&sd_entry->u.pd_table.pd_entry_virt_mem;
vmem->size = sizeof(struct irdma_hmc_pd_entry) * 512;
vmem->va = kzalloc(vmem->size, GFP_ATOMIC);
vmem->va = kzalloc(vmem->size, GFP_KERNEL);
if (!vmem->va) {
irdma_free_dma_mem(hw, &dma_mem);
return -ENOMEM;

View File

@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2015 - 2021 Intel Corporation
* Copyright (c) 2015 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -128,7 +128,7 @@ struct irdma_hmc_sd_table {
struct irdma_hmc_info {
u32 signature;
u8 hmc_fn_id;
u16 hmc_fn_id;
u16 first_sd_index;
struct irdma_hmc_obj_info *hmc_obj;
struct irdma_virt_mem hmc_obj_virt_mem;
@ -143,7 +143,7 @@ struct irdma_update_sd_entry {
struct irdma_update_sds_info {
u32 cnt;
u8 hmc_fn_id;
u16 hmc_fn_id;
struct irdma_update_sd_entry entry[IRDMA_MAX_SD_ENTRIES];
};
@ -180,15 +180,15 @@ int irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
struct irdma_hmc_create_obj_info *info);
int irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
struct irdma_hmc_del_obj_info *info, bool reset);
int irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id, u64 pa, u32 sd_idx,
int irdma_hmc_sd_one(struct irdma_sc_dev *dev, u16 hmc_fn_id, u64 pa, u32 sd_idx,
enum irdma_sd_entry_type type,
bool setsd);
int irdma_update_sds_noccq(struct irdma_sc_dev *dev,
struct irdma_update_sds_info *info);
struct irdma_vfdev *irdma_vfdev_from_fpm(struct irdma_sc_dev *dev,
u8 hmc_fn_id);
u16 hmc_fn_id);
struct irdma_hmc_info *irdma_vf_hmcinfo_from_fpm(struct irdma_sc_dev *dev,
u8 hmc_fn_id);
u16 hmc_fn_id);
int irdma_add_sd_table_entry(struct irdma_hw *hw,
struct irdma_hmc_info *hmc_info, u32 sd_index,
enum irdma_sd_entry_type type, u64 direct_mode_sz);

View File

@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2015 - 2021 Intel Corporation
* Copyright (c) 2015 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -74,6 +74,7 @@ static enum irdma_hmc_rsrc_type iw_hmc_obj_types[] = {
IRDMA_HMC_IW_XFFL,
IRDMA_HMC_IW_Q1,
IRDMA_HMC_IW_Q1FL,
IRDMA_HMC_IW_PBLE,
IRDMA_HMC_IW_TIMER,
IRDMA_HMC_IW_FSIMC,
IRDMA_HMC_IW_FSIAV,
@ -95,7 +96,7 @@ irdma_iwarp_ce_handler(struct irdma_sc_cq *iwcq)
struct irdma_cq *cq = iwcq->back_cq;
if (!cq->user_mode)
cq->armed = false;
atomic_set(&cq->armed, 0);
if (cq->ibcq.comp_handler)
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
@ -171,68 +172,36 @@ static void
irdma_set_flush_fields(struct irdma_sc_qp *qp,
struct irdma_aeqe_info *info)
{
struct qp_err_code qp_err;
qp->sq_flush_code = info->sq;
qp->rq_flush_code = info->rq;
qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
switch (info->ae_id) {
case IRDMA_AE_AMP_BOUNDS_VIOLATION:
case IRDMA_AE_AMP_INVALID_STAG:
qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
/* fallthrough */
case IRDMA_AE_UDA_XMIT_BAD_PD:
qp->flush_code = FLUSH_PROT_ERR;
break;
case IRDMA_AE_AMP_UNALLOCATED_STAG:
case IRDMA_AE_AMP_BAD_PD:
qp->flush_code = FLUSH_PROT_ERR;
break;
case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
case IRDMA_AE_AMP_BAD_QP:
case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
qp->flush_code = FLUSH_LOC_QP_OP_ERR;
break;
case IRDMA_AE_AMP_BAD_STAG_KEY:
case IRDMA_AE_AMP_BAD_STAG_INDEX:
case IRDMA_AE_AMP_TO_WRAP:
case IRDMA_AE_AMP_RIGHTS_VIOLATION:
case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
case IRDMA_AE_PRIV_OPERATION_DENIED:
case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
case IRDMA_AE_IB_REMOTE_OP_ERROR:
qp->flush_code = FLUSH_REM_ACCESS_ERR;
qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
break;
case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
case IRDMA_AE_UDA_L4LEN_INVALID:
case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
qp->flush_code = FLUSH_LOC_LEN_ERR;
break;
case IRDMA_AE_LCE_QP_CATASTROPHIC:
qp->flush_code = FLUSH_FATAL_ERR;
break;
case IRDMA_AE_DDP_UBE_INVALID_MO:
case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
qp->flush_code = FLUSH_GENERAL_ERR;
break;
case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
qp->flush_code = FLUSH_MW_BIND_ERR;
break;
case IRDMA_AE_LLP_TOO_MANY_RETRIES:
qp->flush_code = FLUSH_RETRY_EXC_ERR;
break;
case IRDMA_AE_IB_INVALID_REQUEST:
qp->flush_code = FLUSH_REM_INV_REQ_ERR;
break;
default:
qp->flush_code = FLUSH_FATAL_ERR;
break;
qp_err = irdma_ae_to_qp_err_code(info->ae_id);
qp->flush_code = qp_err.flush_code;
qp->event_type = qp_err.event_type;
}
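The per-AE switch removed above is folded into irdma_ae_to_qp_err_code(), which hands back the flush code and the QP event together in a struct qp_err_code. A hypothetical sketch of that helper's shape, reconstructed from a few of the removed cases (the field types and anything beyond the constants visible in the diff are assumptions):

/* Sketch only -- the real table lives in the shared irdma headers and
 * covers many more asynchronous event codes. */
struct qp_err_code {
	int flush_code;		/* FLUSH_* value consumed by the flush logic */
	int event_type;		/* IRDMA_QP_EVENT_* reported to the ULP */
};

static inline struct qp_err_code irdma_ae_to_qp_err_code(u16 ae_id)
{
	struct qp_err_code qp_err = {
		.flush_code = FLUSH_FATAL_ERR,
		.event_type = IRDMA_QP_EVENT_CATASTROPHIC,
	};

	switch (ae_id) {
	case IRDMA_AE_AMP_BOUNDS_VIOLATION:
	case IRDMA_AE_AMP_INVALID_STAG:
		qp_err.flush_code = FLUSH_PROT_ERR;
		qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
		break;
	case IRDMA_AE_LLP_TOO_MANY_RETRIES:
		qp_err.flush_code = FLUSH_RETRY_EXC_ERR;
		break;
	/* ... remaining AE codes map the same way ... */
	}
	return qp_err;
}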
/**
* irdma_complete_cqp_request - perform post-completion cleanup
* @cqp: device CQP
* @cqp_request: CQP request
*
* Mark CQP request as done, wake up waiting thread or invoke
* callback function and release/free CQP request.
*/
static void
irdma_complete_cqp_request(struct irdma_cqp *cqp,
struct irdma_cqp_request *cqp_request)
{
if (cqp_request->waiting) {
cqp_request->request_done = true;
wake_up(&cqp_request->waitq);
} else if (cqp_request->callback_fcn) {
cqp_request->callback_fcn(cqp_request);
}
irdma_put_cqp_request(cqp, cqp_request);
}
/**
@ -315,13 +284,11 @@ irdma_process_aeq(struct irdma_pci_f *rf)
wake_up_interruptible(&iwqp->waitq);
break;
case IRDMA_AE_LLP_FIN_RECEIVED:
case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE:
if (qp->term_flags)
break;
if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSE_WAIT;
if (iwqp->hw_tcp_state == IRDMA_TCP_STATE_CLOSE_WAIT &&
iwqp->ibqp_state == IB_QPS_RTS) {
if (iwqp->ibqp_state == IB_QPS_RTS) {
irdma_next_iw_state(iwqp,
IRDMA_QP_STATE_CLOSING,
0, 0, 0);
@ -394,6 +361,7 @@ irdma_process_aeq(struct irdma_pci_f *rf)
case IRDMA_AE_RESOURCE_EXHAUSTION:
break;
case IRDMA_AE_PRIV_OPERATION_DENIED:
case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE:
case IRDMA_AE_STAG_ZERO_INVALID:
case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
@ -442,10 +410,7 @@ irdma_process_aeq(struct irdma_pci_f *rf)
irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0, 0);
irdma_cm_disconn(iwqp);
} else {
iwqp->sc_qp.term_flags = 1;
irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0,
0);
irdma_cm_disconn(iwqp);
irdma_terminate_connection(qp, info);
}
break;
}
@ -513,8 +478,10 @@ irdma_save_msix_info(struct irdma_pci_f *rf)
u32 i;
u32 size;
if (!rf->msix_count)
if (!rf->msix_count) {
irdma_dev_err(&rf->sc_dev, "No MSI-X vectors reserved for RDMA.\n");
return -EINVAL;
}
size = sizeof(struct irdma_msix_vector) * rf->msix_count;
size += sizeof(struct irdma_qvlist_info);
@ -546,7 +513,7 @@ irdma_save_msix_info(struct irdma_pci_f *rf)
iw_qvinfo->aeq_idx = IRDMA_Q_INVALID_IDX;
iw_qvinfo->ceq_idx = ceq_idx++;
}
iw_qvinfo->itr_idx = 3;
iw_qvinfo->itr_idx = IRDMA_IDX_NOITR;
iw_qvinfo->v_idx = rf->iw_msixtbl[i].idx;
}
@ -636,8 +603,7 @@ irdma_destroy_cqp(struct irdma_pci_f *rf, bool free_hwcqp)
if (rf->cqp_cmpl_wq)
destroy_workqueue(rf->cqp_cmpl_wq);
if (free_hwcqp)
status = irdma_sc_cqp_destroy(dev->cqp);
status = irdma_sc_cqp_destroy(dev->cqp, free_hwcqp);
if (status)
irdma_debug(dev, IRDMA_DEBUG_ERR, "Destroy CQP failed %d\n",
status);
@ -898,6 +864,8 @@ irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged,
info.entry_type = rf->sd_type;
for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
if (iw_hmc_obj_types[i] == IRDMA_HMC_IW_PBLE)
continue;
if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) {
info.rsrc_type = iw_hmc_obj_types[i];
info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
@ -992,8 +960,8 @@ irdma_create_cqp(struct irdma_pci_f *rf)
cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
memset(cqp->scratch_array, 0, sqsize * sizeof(*cqp->scratch_array));
if (!cqp->scratch_array) {
kfree(cqp->cqp_requests);
return -ENOMEM;
status = -ENOMEM;
goto err_scratch;
}
dev->cqp = &cqp->sc_cqp;
@ -1002,15 +970,14 @@ irdma_create_cqp(struct irdma_pci_f *rf)
cqp->sq.va = irdma_allocate_dma_mem(dev->hw, &cqp->sq, cqp->sq.size,
IRDMA_CQP_ALIGNMENT);
if (!cqp->sq.va) {
kfree(cqp->scratch_array);
kfree(cqp->cqp_requests);
return -ENOMEM;
status = -ENOMEM;
goto err_sq;
}
status = irdma_obj_aligned_mem(rf, &mem, sizeof(struct irdma_cqp_ctx),
IRDMA_HOST_CTX_ALIGNMENT_M);
if (status)
goto exit;
goto err_ctx;
dev->cqp->host_ctx_pa = mem.pa;
dev->cqp->host_ctx = mem.va;
@ -1040,7 +1007,7 @@ irdma_create_cqp(struct irdma_pci_f *rf)
if (status) {
irdma_debug(dev, IRDMA_DEBUG_ERR, "cqp init status %d\n",
status);
goto exit;
goto err_ctx;
}
spin_lock_init(&cqp->req_lock);
@ -1051,7 +1018,7 @@ irdma_create_cqp(struct irdma_pci_f *rf)
irdma_debug(dev, IRDMA_DEBUG_ERR,
"cqp create failed - status %d maj_err %d min_err %d\n",
status, maj_err, min_err);
goto exit;
goto err_create;
}
INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
@ -1065,8 +1032,15 @@ irdma_create_cqp(struct irdma_pci_f *rf)
init_waitqueue_head(&cqp->remove_wq);
return 0;
exit:
irdma_destroy_cqp(rf, false);
err_create:
err_ctx:
irdma_free_dma_mem(dev->hw, &cqp->sq);
err_sq:
kfree(cqp->scratch_array);
cqp->scratch_array = NULL;
err_scratch:
kfree(cqp->cqp_requests);
cqp->cqp_requests = NULL;
return status;
}
@ -1224,12 +1198,6 @@ irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
return status;
bus_describe_intr(rf->dev_ctx.dev, msix_vec->res, msix_vec->tag, "CEQ");
}
status = bus_bind_intr(rf->dev_ctx.dev, msix_vec->res, msix_vec->cpu_affinity);
if (status) {
irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR,
"ceq irq config fail\n");
return status;
}
msix_vec->ceq_id = ceq_id;
rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, ceq_id, msix_vec->idx, true);
@ -1655,10 +1623,7 @@ irdma_hmc_setup(struct irdma_pci_f *rf)
struct irdma_sc_dev *dev = &rf->sc_dev;
u32 qpcnt;
if (rf->rdma_ver == IRDMA_GEN_1)
qpcnt = rsrc_limits_table[rf->limits_sel].qplimit * 2;
else
qpcnt = rsrc_limits_table[rf->limits_sel].qplimit;
qpcnt = rsrc_limits_table[rf->limits_sel].qplimit;
rf->sd_type = IRDMA_SD_TYPE_DIRECT;
status = irdma_cfg_fpm_val(dev, qpcnt);
@ -1687,8 +1652,8 @@ irdma_del_init_mem(struct irdma_pci_f *rf)
if (rf->rdma_ver != IRDMA_GEN_1) {
kfree(rf->allocated_ws_nodes);
rf->allocated_ws_nodes = NULL;
mutex_destroy(&dev->ws_mutex);
}
mutex_destroy(&dev->ws_mutex);
kfree(rf->ceqlist);
rf->ceqlist = NULL;
kfree(rf->iw_msixtbl);
@ -1696,7 +1661,6 @@ irdma_del_init_mem(struct irdma_pci_f *rf)
kfree(rf->hmc_info_mem);
rf->hmc_info_mem = NULL;
}
/**
* irdma_initialize_dev - initialize device
* @rf: RDMA PCI function
@ -1746,7 +1710,7 @@ irdma_initialize_dev(struct irdma_pci_f *rf)
info.bar0 = rf->hw.hw_addr;
info.hmc_fn_id = rf->peer_info->pf_id;
info.hw = &rf->hw;
status = irdma_sc_dev_init(rf->rdma_ver, &rf->sc_dev, &info);
status = irdma_sc_dev_init(&rf->sc_dev, &info);
if (status)
goto error;
@ -1996,10 +1960,6 @@ irdma_rt_init_hw(struct irdma_device *iwdev,
rf->rsrc_created = true;
}
iwdev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
IB_DEVICE_MEM_WINDOW |
IB_DEVICE_MEM_MGT_EXTENSIONS;
if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
irdma_alloc_set_mac(iwdev);
irdma_add_ip(iwdev);
@ -2233,28 +2193,20 @@ irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
break;
cqp_request = (struct irdma_cqp_request *)
(unsigned long)info.scratch;
(uintptr_t)info.scratch;
if (info.error && irdma_cqp_crit_err(dev, cqp_request->info.cqp_cmd,
info.maj_err_code,
info.min_err_code))
irdma_dev_err(dev, "cqp opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
info.op_code, info.maj_err_code,
info.min_err_code);
if (cqp_request) {
cqp_request->compl_info.maj_err_code = info.maj_err_code;
cqp_request->compl_info.min_err_code = info.min_err_code;
cqp_request->compl_info.op_ret_val = info.op_ret_val;
cqp_request->compl_info.error = info.error;
if (cqp_request->waiting) {
cqp_request->request_done = true;
wake_up(&cqp_request->waitq);
irdma_put_cqp_request(&rf->cqp, cqp_request);
} else {
if (cqp_request->callback_fcn)
cqp_request->callback_fcn(cqp_request);
irdma_put_cqp_request(&rf->cqp, cqp_request);
}
irdma_complete_cqp_request(&rf->cqp, cqp_request);
}
cqe_count++;
@ -2545,7 +2497,7 @@ irdma_del_apbvt(struct irdma_device *iwdev,
* @action: add, delete or modify
*/
void
irdma_manage_arp_cache(struct irdma_pci_f *rf, unsigned char *mac_addr,
irdma_manage_arp_cache(struct irdma_pci_f *rf, const unsigned char *mac_addr,
u32 *ip_addr, u32 action)
{
struct irdma_add_arp_cache_entry_info *info;
@ -2798,29 +2750,30 @@ irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask)
info.sq = flush_mask & IRDMA_FLUSH_SQ;
info.rq = flush_mask & IRDMA_FLUSH_RQ;
if (flush_mask & IRDMA_REFLUSH) {
if (info.sq)
iwqp->sc_qp.flush_sq = false;
if (info.rq)
iwqp->sc_qp.flush_rq = false;
}
/* Generate userflush errors in CQE */
info.sq_major_code = IRDMA_FLUSH_MAJOR_ERR;
info.sq_minor_code = FLUSH_GENERAL_ERR;
info.rq_major_code = IRDMA_FLUSH_MAJOR_ERR;
info.rq_minor_code = FLUSH_GENERAL_ERR;
info.userflushcode = true;
if (flush_code) {
if (info.sq && iwqp->sc_qp.sq_flush_code)
info.sq_minor_code = flush_code;
if (info.rq && iwqp->sc_qp.rq_flush_code)
info.rq_minor_code = flush_code;
}
if (irdma_upload_context && !(flush_mask & IRDMA_REFLUSH) &&
irdma_upload_qp_context(iwqp, 0, 1))
irdma_print("failed to upload QP context\n");
if (flush_mask & IRDMA_REFLUSH) {
if (info.sq)
iwqp->sc_qp.flush_sq = false;
if (info.rq)
iwqp->sc_qp.flush_rq = false;
} else {
if (flush_code) {
if (info.sq && iwqp->sc_qp.sq_flush_code)
info.sq_minor_code = flush_code;
if (info.rq && iwqp->sc_qp.rq_flush_code)
info.rq_minor_code = flush_code;
}
if (irdma_upload_context && irdma_upload_qp_context(iwqp, 0, 1))
irdma_print("failed to upload QP context\n");
if (!iwqp->user_mode)
irdma_sched_qp_flush_work(iwqp);
}
/* Issue flush */
(void)irdma_hw_flush_wqes(rf, &iwqp->sc_qp, &info,

View File

@ -35,6 +35,36 @@
#include "irdma_main.h"
#define IRDMA_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)
static u16 kc_rdma_flow_label_to_udp_sport(u32 fl) {
u32 fl_low = fl & 0x03FFF;
u32 fl_high = fl & 0xFC000;
fl_low ^= fl_high >> 14;
return (u16)(fl_low | IRDMA_ROCE_UDP_ENCAP_VALID_PORT_MIN);
}
#define IRDMA_GRH_FLOWLABEL_MASK (0x000FFFFF)
static u32 kc_rdma_calc_flow_label(u32 lqpn, u32 rqpn) {
u64 fl = (u64)lqpn * rqpn;
fl ^= fl >> 20;
fl ^= fl >> 40;
return (u32)(fl & IRDMA_GRH_FLOWLABEL_MASK);
}
u16
kc_rdma_get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
{
if (!fl)
fl = kc_rdma_calc_flow_label(lqpn, rqpn);
return kc_rdma_flow_label_to_udp_sport(fl);
}
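The two helpers above implement the usual RoCEv2 trick of deriving a pseudo-random UDP source port from the QP pair: fold lqpn*rqpn into a 20-bit flow label, then fold the flow label into 14 bits and OR in 0xC000 so the result always lands in the dynamic port range. A standalone worked example (QPN values chosen arbitrarily):

#include <stdint.h>
#include <stdio.h>

/* Standalone copy of the derivation above, for illustration only. */
static uint32_t calc_flow_label(uint32_t lqpn, uint32_t rqpn)
{
	uint64_t fl = (uint64_t)lqpn * rqpn;

	fl ^= fl >> 20;
	fl ^= fl >> 40;
	return (uint32_t)(fl & 0x000FFFFF);	/* 20-bit GRH flow label */
}

static uint16_t flow_label_to_sport(uint32_t fl)
{
	uint32_t lo = fl & 0x03FFF;
	uint32_t hi = fl & 0xFC000;

	lo ^= hi >> 14;
	return (uint16_t)(lo | 0xC000);		/* always >= 49152 */
}

int main(void)
{
	uint32_t fl = calc_flow_label(0x1234, 0x5678);

	/* prints: flow label 0x60002 -> udp sport 0xc01a */
	printf("flow label 0x%x -> udp sport 0x%x\n", fl, flow_label_to_sport(fl));
	return 0;
}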
void
irdma_get_dev_fw_str(struct ib_device *dev,
char *str,
@ -106,8 +136,10 @@ irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
iwmr->type = IRDMA_MEMREG_TYPE_MEM;
palloc = &iwpbl->pble_alloc;
iwmr->page_cnt = max_num_sg;
/* Assume system PAGE_SIZE as the sg page sizes are unknown. */
iwmr->len = max_num_sg * PAGE_SIZE;
status = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,
true);
false);
if (status)
goto err_get_pble;
@ -128,6 +160,8 @@ irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
return ERR_PTR(err_code);
}
#define IRDMA_ALLOC_UCTX_MIN_REQ_LEN offsetofend(struct irdma_alloc_ucontext_req, rsvd8)
#define IRDMA_ALLOC_UCTX_MIN_RESP_LEN offsetofend(struct irdma_alloc_ucontext_resp, rsvd)
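/*
 * Note added for clarity: offsetofend() evaluates to the offset just past
 * the named member, so these minimums are the smallest request/response
 * buffers the driver accepts; shorter udata buffers are rejected with
 * -EINVAL in irdma_alloc_ucontext() below.
 */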
/**
* irdma_alloc_ucontext - Allocate the user context data structure
* @uctx: context
@ -141,11 +175,15 @@ irdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
struct ib_device *ibdev = uctx->device;
struct irdma_device *iwdev = to_iwdev(ibdev);
struct irdma_alloc_ucontext_req req;
struct irdma_alloc_ucontext_req req = {0};
struct irdma_alloc_ucontext_resp uresp = {0};
struct irdma_ucontext *ucontext = to_ucontext(uctx);
struct irdma_uk_attrs *uk_attrs;
if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN ||
udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
return -EINVAL;
if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
return -EINVAL;
@ -157,7 +195,7 @@ irdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
/* GEN_1 support for libi40iw */
if (udata->outlen < sizeof(uresp)) {
if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) {
if (uk_attrs->hw_rev != IRDMA_GEN_1)
return -EOPNOTSUPP;
@ -169,15 +207,8 @@ irdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
if (ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen)))
return -EFAULT;
} else {
u64 bar_off =
(uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
ucontext->db_mmap_entry =
irdma_user_mmap_entry_insert(ucontext, bar_off,
IRDMA_MMAP_IO_NC,
&uresp.db_mmap_key);
if (!ucontext->db_mmap_entry) {
return -ENOMEM;
}
u64 bar_off;
uresp.kernel_ver = IRDMA_ABI_VER;
uresp.feature_flags = uk_attrs->feature_flags;
uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags;
@ -189,6 +220,17 @@ irdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size;
uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
uresp.hw_rev = uk_attrs->hw_rev;
bar_off =
(uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
ucontext->db_mmap_entry =
irdma_user_mmap_entry_insert(ucontext, bar_off,
IRDMA_MMAP_IO_NC,
&uresp.db_mmap_key);
if (!ucontext->db_mmap_entry) {
return -ENOMEM;
}
if (ib_copy_to_udata(udata, &uresp,
min(sizeof(uresp), udata->outlen))) {
rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
@ -226,6 +268,7 @@ irdma_dealloc_ucontext(struct ib_ucontext *context)
return;
}
#define IRDMA_ALLOC_PD_MIN_RESP_LEN offsetofend(struct irdma_alloc_pd_resp, rsvd)
/**
* irdma_alloc_pd - allocate protection domain
* @pd: protection domain
@ -243,6 +286,9 @@ irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
u32 pd_id = 0;
int err;
if (udata && udata->outlen < IRDMA_ALLOC_PD_MIN_RESP_LEN)
return -EINVAL;
err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id,
&rf->next_pd);
if (err)
@ -284,8 +330,7 @@ irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
}
static void
irdma_fill_ah_info(struct vnet *vnet,
struct irdma_ah_info *ah_info,
irdma_fill_ah_info(struct vnet *vnet, struct irdma_ah_info *ah_info,
const struct ib_gid_attr *sgid_attr,
struct sockaddr *sgid_addr, struct sockaddr *dgid_addr,
u8 *dmac, u8 net_type)
@ -296,9 +341,12 @@ irdma_fill_ah_info(struct vnet *vnet,
ntohl(((struct sockaddr_in *)dgid_addr)->sin_addr.s_addr);
ah_info->src_ip_addr[0] =
ntohl(((struct sockaddr_in *)sgid_addr)->sin_addr.s_addr);
ah_info->do_lpbk = irdma_ipv4_is_lpb(vnet,
ah_info->src_ip_addr[0],
#ifdef VIMAGE
CURVNET_SET_QUIET(vnet);
ah_info->do_lpbk = irdma_ipv4_is_lpb(ah_info->src_ip_addr[0],
ah_info->dest_ip_addr[0]);
CURVNET_RESTORE();
#endif
if (ipv4_is_multicast(((struct sockaddr_in *)dgid_addr)->sin_addr.s_addr)) {
irdma_mcast_mac_v4(ah_info->dest_ip_addr, dmac);
}
@ -337,8 +385,13 @@ irdma_create_ah_vlan_tag(struct irdma_device *iwdev,
if (ah_info->vlan_tag < VLAN_N_VID) {
ah_info->insert_vlan_tag = true;
ah_info->vlan_tag |=
rt_tos2priority(ah_info->tc_tos) << VLAN_PRIO_SHIFT;
(u16)rt_tos2priority(ah_info->tc_tos) << VLAN_PRIO_SHIFT;
}
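/*
 * Comment added for clarity: with DCQCN congestion control enabled, the
 * ECN codepoint bits of the traffic class are overwritten so outgoing
 * RoCE packets advertise ECN-capable transport.
 */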
if (iwdev->roce_dcqcn_en) {
ah_info->tc_tos &= ~ECN_CODE_PT_MASK;
ah_info->tc_tos |= ECN_CODE_PT_VAL;
}
return 0;
}
@ -347,7 +400,8 @@ irdma_create_ah_wait(struct irdma_pci_f *rf,
struct irdma_sc_ah *sc_ah, bool sleep)
{
if (!sleep) {
int cnt = CQP_COMPL_WAIT_TIME_MS * CQP_TIMEOUT_THRESHOLD;
int cnt = rf->sc_dev.hw_attrs.max_cqp_compl_wait_time_ms *
CQP_TIMEOUT_THRESHOLD;
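/*
 * Comment added for clarity: when the caller cannot sleep, poll the
 * control CQ directly; the iteration budget scales with the
 * device-reported CQP completion wait time.
 */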
do {
irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
@ -360,6 +414,8 @@ irdma_create_ah_wait(struct irdma_pci_f *rf,
return 0;
}
#define IRDMA_CREATE_AH_MIN_RESP_LEN offsetofend(struct irdma_create_ah_resp, rsvd)
/**
* irdma_create_ah - create address handle
* @ib_ah: ptr to AH
@ -391,7 +447,10 @@ irdma_create_ah(struct ib_ah *ib_ah,
} sgid_addr, dgid_addr;
int err;
u8 dmac[ETH_ALEN];
bool sleep;
bool sleep = (flags & RDMA_CREATE_AH_SLEEPABLE) != 0;
if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN)
return -EINVAL;
err = irdma_alloc_rsrc(rf, rf->allocated_ahs,
rf->max_ah, &ah_id, &rf->next_ah);
@ -415,7 +474,7 @@ irdma_create_ah(struct ib_ah *ib_ah,
"GID lookup at idx=%d with port=%d failed\n",
attr->grh.sgid_index, attr->port_num);
err = -EINVAL;
goto error;
goto err_gid_l2;
}
rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid);
rdma_gid2ip((struct sockaddr *)&dgid_addr, &attr->grh.dgid);
@ -442,38 +501,42 @@ irdma_create_ah(struct ib_ah *ib_ah,
ether_addr_copy(dmac, attr->dmac);
irdma_fill_ah_info(iwdev->netdev->if_vnet,
ah_info, &sgid_attr, &sgid_addr.saddr, &dgid_addr.saddr,
irdma_fill_ah_info(iwdev->netdev->if_vnet, ah_info, &sgid_attr, &sgid_addr.saddr, &dgid_addr.saddr,
dmac, ah->av.net_type);
err = irdma_create_ah_vlan_tag(iwdev, ah_info, &sgid_attr, dmac);
if (err)
goto error;
sleep = flags & RDMA_CREATE_AH_SLEEPABLE;
goto err_gid_l2;
err = irdma_ah_cqp_op(iwdev->rf, sc_ah, IRDMA_OP_AH_CREATE,
sleep, irdma_gsi_ud_qp_ah_cb, sc_ah);
if (err) {
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
"CQP-OP Create AH fail");
goto error;
goto err_gid_l2;
}
err = irdma_create_ah_wait(rf, sc_ah, sleep);
if (err) {
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
"CQP create AH timed out");
goto error;
goto err_gid_l2;
}
if (udata) {
uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
err = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (err) {
irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah,
IRDMA_OP_AH_DESTROY, false, NULL, ah);
goto err_gid_l2;
}
}
return 0;
error:
err_gid_l2:
irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah_id);
return err;
}
@ -539,35 +602,34 @@ irdma_create_qp(struct ib_pd *ibpd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata)
{
#define IRDMA_CREATE_QP_MIN_REQ_LEN offsetofend(struct irdma_create_qp_req, user_compl_ctx)
#define IRDMA_CREATE_QP_MIN_RESP_LEN offsetofend(struct irdma_create_qp_resp, rsvd)
struct irdma_pd *iwpd = to_iwpd(ibpd);
struct irdma_device *iwdev = to_iwdev(ibpd->device);
struct irdma_pci_f *rf = iwdev->rf;
struct irdma_qp *iwqp;
struct irdma_create_qp_req req;
struct irdma_create_qp_resp uresp = {0};
u32 qp_num = 0;
int ret;
int err_code;
int sq_size;
int rq_size;
struct irdma_sc_qp *qp;
struct irdma_sc_dev *dev = &rf->sc_dev;
struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
struct irdma_qp_init_info init_info = {{0}};
struct irdma_qp_host_ctx_info *ctx_info;
unsigned long flags;
err_code = irdma_validate_qp_attrs(init_attr, iwdev);
if (err_code)
return ERR_PTR(err_code);
sq_size = init_attr->cap.max_send_wr;
rq_size = init_attr->cap.max_recv_wr;
if (udata && (udata->inlen < IRDMA_CREATE_QP_MIN_REQ_LEN ||
udata->outlen < IRDMA_CREATE_QP_MIN_RESP_LEN))
return ERR_PTR(-EINVAL);
init_info.vsi = &iwdev->vsi;
init_info.qp_uk_init_info.uk_attrs = uk_attrs;
init_info.qp_uk_init_info.sq_size = sq_size;
init_info.qp_uk_init_info.rq_size = rq_size;
init_info.qp_uk_init_info.sq_size = init_attr->cap.max_send_wr;
init_info.qp_uk_init_info.rq_size = init_attr->cap.max_recv_wr;
init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
@ -630,35 +692,8 @@ irdma_create_qp(struct ib_pd *ibpd,
init_waitqueue_head(&iwqp->mod_qp_waitq);
if (udata) {
err_code = ib_copy_from_udata(&req, udata,
min(sizeof(req), udata->inlen));
if (err_code) {
irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
"ib_copy_from_data fail\n");
goto error;
}
iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
iwqp->user_mode = 1;
if (req.user_wqe_bufs) {
struct irdma_ucontext *ucontext = to_ucontext(ibpd->uobject->context);
init_info.qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
&ucontext->qp_reg_mem_list);
spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
if (!iwqp->iwpbl) {
err_code = -ENODATA;
irdma_debug(iwdev_to_idev(iwdev),
IRDMA_DEBUG_VERBS,
"no pbl info\n");
goto error;
}
}
init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
irdma_setup_virt_qp(iwdev, iwqp, &init_info);
err_code = irdma_setup_umode_qp(udata, iwdev, iwqp, &init_info, init_attr);
} else {
INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_flush_worker);
init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
@ -713,8 +748,6 @@ irdma_create_qp(struct ib_pd *ibpd,
spin_lock_init(&iwqp->sc_qp.pfpdu.lock);
iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
rf->qp_table[qp_num] = iwqp;
iwqp->max_send_wr = sq_size;
iwqp->max_recv_wr = rq_size;
if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
if (dev->ws_add(&iwdev->vsi, 0)) {
@ -735,8 +768,8 @@ irdma_create_qp(struct ib_pd *ibpd,
if (rdma_protocol_iwarp(&iwdev->ibdev, 1))
uresp.lsmm = 1;
}
uresp.actual_sq_size = sq_size;
uresp.actual_rq_size = rq_size;
uresp.actual_sq_size = init_info.qp_uk_init_info.sq_size;
uresp.actual_rq_size = init_info.qp_uk_init_info.rq_size;
uresp.qp_id = qp_num;
uresp.qp_caps = qp->qp_uk.qp_caps;
@ -776,9 +809,6 @@ irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS)
irdma_modify_qp_to_err(&iwqp->sc_qp);
if (!iwqp->user_mode)
cancel_delayed_work_sync(&iwqp->dwork_flush);
irdma_qp_rem_ref(&iwqp->ibqp);
wait_for_completion(&iwqp->free_qp);
irdma_free_lsmm_rsrc(iwqp);
@ -810,6 +840,8 @@ irdma_create_cq(struct ib_cq *ibcq,
const struct ib_cq_init_attr *attr,
struct ib_udata *udata)
{
#define IRDMA_CREATE_CQ_MIN_REQ_LEN offsetofend(struct irdma_create_cq_req, user_cq_buf)
#define IRDMA_CREATE_CQ_MIN_RESP_LEN offsetofend(struct irdma_create_cq_resp, cq_size)
struct ib_device *ibdev = ibcq->device;
struct irdma_device *iwdev = to_iwdev(ibdev);
struct irdma_pci_f *rf = iwdev->rf;
@ -825,10 +857,15 @@ irdma_create_cq(struct ib_cq *ibcq,
unsigned long flags;
int err_code;
int entries = attr->cqe;
bool cqe_64byte_ena;
err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
if (err_code)
return err_code;
if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN ||
udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN))
return -EINVAL;
err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num,
&rf->next_cq);
if (err_code)
@ -842,6 +879,8 @@ irdma_create_cq(struct ib_cq *ibcq,
info.dev = dev;
ukinfo->cq_size = max(entries, 4);
ukinfo->cq_id = cq_num;
cqe_64byte_ena = (dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_64_BYTE_CQE) ? true : false;
ukinfo->avoid_mem_cflct = cqe_64byte_ena;
iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
if (attr->comp_vector < rf->ceqs_count)
info.ceq_id = attr->comp_vector;
@ -860,6 +899,7 @@ irdma_create_cq(struct ib_cq *ibcq,
iwcq->user_mode = true;
ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
if (ib_copy_from_udata(&req, udata,
min(sizeof(req), udata->inlen))) {
err_code = -EFAULT;
@ -913,14 +953,17 @@ irdma_create_cq(struct ib_cq *ibcq,
}
entries++;
if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
if (!cqe_64byte_ena && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
entries *= 2;
ukinfo->cq_size = entries;
rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
iwcq->kmem.size = round_up(rsize, 256);
if (cqe_64byte_ena)
rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_extended_cqe);
else
rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
iwcq->kmem.size = round_up(rsize, IRDMA_HW_PAGE_SIZE);
iwcq->kmem.va = irdma_allocate_dma_mem(dev->hw, &iwcq->kmem,
iwcq->kmem.size, 256);
iwcq->kmem.size, IRDMA_HW_PAGE_SIZE);
if (!iwcq->kmem.va) {
err_code = -ENOMEM;
goto cq_free_rsrc;
@ -1058,61 +1101,97 @@ irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags)
int
irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
{
struct ib_pd *ibpd = ib_mr->pd;
struct irdma_pd *iwpd = to_iwpd(ibpd);
struct irdma_mr *iwmr = to_iwmr(ib_mr);
struct irdma_device *iwdev = to_iwdev(ib_mr->device);
struct irdma_dealloc_stag_info *info;
struct irdma_pbl *iwpbl = &iwmr->iwpbl;
struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
int status;
int ret;
if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) {
if (iwmr->region) {
struct irdma_ucontext *ucontext;
ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
ucontext = rdma_udata_to_drv_context(udata,
struct irdma_ucontext,
ibucontext);
irdma_del_memlist(iwmr, ucontext);
}
goto done;
}
cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
if (!cqp_request)
return -ENOMEM;
cqp_info = &cqp_request->info;
info = &cqp_info->in.u.dealloc_stag.info;
memset(info, 0, sizeof(*info));
info->pd_id = iwpd->sc_pd.pd_id & 0x00007fff;
info->stag_idx = RS_64_1(ib_mr->rkey, IRDMA_CQPSQ_STAG_IDX_S);
info->mr = true;
if (iwpbl->pbl_allocated)
info->dealloc_pbl = true;
cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
cqp_info->post_sq = 1;
cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
if (status)
return status;
ret = irdma_hwdereg_mr(ib_mr);
if (ret)
return ret;
irdma_free_stag(iwdev, iwmr->stag);
done:
if (iwpbl->pbl_allocated)
irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
if (iwmr->region)
ib_umem_release(iwmr->region);
kfree(iwmr);
return 0;
}
/*
 * irdma_rereg_user_mr - Re-register a user memory region
 * @ib_mr: ib mem to access iwarp mr pointer
 * @flags: bit mask to indicate which of the attributes of the MR are modified
 * @start: virtual start address
 * @len: length of mr
 * @virt: virtual address
 * @new_access: bit mask of new access flags
 * @new_pd: ptr of pd
 * @udata: user data
 */
int
irdma_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, u64 len,
u64 virt, int new_access, struct ib_pd *new_pd,
struct ib_udata *udata)
{
struct irdma_device *iwdev = to_iwdev(ib_mr->device);
struct irdma_mr *iwmr = to_iwmr(ib_mr);
struct irdma_pbl *iwpbl = &iwmr->iwpbl;
int ret;
if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
return -EINVAL;
if (flags & ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS))
return -EOPNOTSUPP;
ret = irdma_hwdereg_mr(ib_mr);
if (ret)
return ret;
if (flags & IB_MR_REREG_ACCESS)
iwmr->access = new_access;
if (flags & IB_MR_REREG_PD) {
iwmr->ibmr.pd = new_pd;
iwmr->ibmr.device = new_pd->device;
}
if (flags & IB_MR_REREG_TRANS) {
if (iwpbl->pbl_allocated) {
irdma_free_pble(iwdev->rf->pble_rsrc,
&iwpbl->pble_alloc);
iwpbl->pbl_allocated = false;
}
if (iwmr->region) {
ib_umem_release(iwmr->region);
iwmr->region = NULL;
}
ib_mr = irdma_rereg_mr_trans(iwmr, start, len, virt, udata);
if (IS_ERR(ib_mr))
return PTR_ERR(ib_mr);
} else {
ret = irdma_hwreg_mr(iwdev, iwmr, iwmr->access);
if (ret)
return ret;
}
return 0;
}
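/*
 * Minimal user-space sketch (illustrative only; pd, new_pd, buf, new_buf
 * and the lengths are assumed to exist in the caller) of how this verb is
 * reached through libibverbs once libirdma exposes rereg_mr:
 *
 *	struct ibv_mr *mr = ibv_reg_mr(pd, buf, len, IBV_ACCESS_LOCAL_WRITE);
 *	...
 *	ibv_rereg_mr(mr, IBV_REREG_MR_CHANGE_PD | IBV_REREG_MR_CHANGE_TRANSLATION,
 *		     new_pd, new_buf, new_len, IBV_ACCESS_LOCAL_WRITE);
 */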
int
kc_irdma_set_roce_cm_info(struct irdma_qp *iwqp, struct ib_qp_attr *attr,
u16 *vlan_id)
@ -1134,8 +1213,8 @@ kc_irdma_set_roce_cm_info(struct irdma_qp *iwqp, struct ib_qp_attr *attr,
}
rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid);
dev_put(sgid_attr.ndev);
iwqp->sc_qp.user_pri = iwqp->ctx_info.user_pri;
return 0;
}
@ -1167,11 +1246,11 @@ irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
wait_for_completion(&iwcq->free_cq);
irdma_cq_wq_destroy(iwdev->rf, cq);
irdma_cq_free_rsrc(iwdev->rf, iwcq);
spin_lock_irqsave(&iwceq->ce_lock, flags);
irdma_sc_cleanup_ceqes(cq, ceq);
spin_unlock_irqrestore(&iwceq->ce_lock, flags);
irdma_cq_free_rsrc(iwdev->rf, iwcq);
}
/**
@ -1441,7 +1520,59 @@ irdma_query_port(struct ib_device *ibdev, u8 port,
return 0;
}
extern const char *const irdma_hw_stat_names[];
static const char *const irdma_hw_stat_names[] = {
/* gen1 - 32-bit */
[IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards",
[IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts",
[IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes",
[IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards",
[IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts",
[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes",
[IRDMA_HW_STAT_INDEX_RXVLANERR] = "rxVlanErrors",
/* gen1 - 64-bit */
[IRDMA_HW_STAT_INDEX_IP4RXOCTS] = "ip4InOctets",
[IRDMA_HW_STAT_INDEX_IP4RXPKTS] = "ip4InPkts",
[IRDMA_HW_STAT_INDEX_IP4RXFRAGS] = "ip4InReasmRqd",
[IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] = "ip4InMcastPkts",
[IRDMA_HW_STAT_INDEX_IP4TXOCTS] = "ip4OutOctets",
[IRDMA_HW_STAT_INDEX_IP4TXPKTS] = "ip4OutPkts",
[IRDMA_HW_STAT_INDEX_IP4TXFRAGS] = "ip4OutSegRqd",
[IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] = "ip4OutMcastPkts",
[IRDMA_HW_STAT_INDEX_IP6RXOCTS] = "ip6InOctets",
[IRDMA_HW_STAT_INDEX_IP6RXPKTS] = "ip6InPkts",
[IRDMA_HW_STAT_INDEX_IP6RXFRAGS] = "ip6InReasmRqd",
[IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] = "ip6InMcastPkts",
[IRDMA_HW_STAT_INDEX_IP6TXOCTS] = "ip6OutOctets",
[IRDMA_HW_STAT_INDEX_IP6TXPKTS] = "ip6OutPkts",
[IRDMA_HW_STAT_INDEX_IP6TXFRAGS] = "ip6OutSegRqd",
[IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] = "ip6OutMcastPkts",
[IRDMA_HW_STAT_INDEX_RDMARXRDS] = "InRdmaReads",
[IRDMA_HW_STAT_INDEX_RDMARXSNDS] = "InRdmaSends",
[IRDMA_HW_STAT_INDEX_RDMARXWRS] = "InRdmaWrites",
[IRDMA_HW_STAT_INDEX_RDMATXRDS] = "OutRdmaReads",
[IRDMA_HW_STAT_INDEX_RDMATXSNDS] = "OutRdmaSends",
[IRDMA_HW_STAT_INDEX_RDMATXWRS] = "OutRdmaWrites",
[IRDMA_HW_STAT_INDEX_RDMAVBND] = "RdmaBnd",
[IRDMA_HW_STAT_INDEX_RDMAVINV] = "RdmaInv",
/* gen2 - 32-bit */
[IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] = "cnpHandled",
[IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] = "cnpIgnored",
[IRDMA_HW_STAT_INDEX_TXNPCNPSENT] = "cnpSent",
/* gen2 - 64-bit */
[IRDMA_HW_STAT_INDEX_IP4RXMCOCTS] = "ip4InMcastOctets",
[IRDMA_HW_STAT_INDEX_IP4TXMCOCTS] = "ip4OutMcastOctets",
[IRDMA_HW_STAT_INDEX_IP6RXMCOCTS] = "ip6InMcastOctets",
[IRDMA_HW_STAT_INDEX_IP6TXMCOCTS] = "ip6OutMcastOctets",
[IRDMA_HW_STAT_INDEX_UDPRXPKTS] = "RxUDP",
[IRDMA_HW_STAT_INDEX_UDPTXPKTS] = "TxUDP",
[IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS] = "RxECNMrkd",
[IRDMA_HW_STAT_INDEX_TCPRTXSEG] = "RetransSegs",
[IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = "InOptErrors",
[IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = "InProtoErrors",
[IRDMA_HW_STAT_INDEX_TCPRXSEGS] = "InSegs",
[IRDMA_HW_STAT_INDEX_TCPTXSEG] = "OutSegs",
};
/**
* irdma_alloc_hw_stats - Allocate a hw stats structure
@ -1546,6 +1677,7 @@ kc_set_rdma_uverbs_cmd_mask(struct irdma_device *iwdev)
BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD) |
BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD) |
BIT_ULL(IB_USER_VERBS_CMD_REG_MR) |
BIT_ULL(IB_USER_VERBS_CMD_REREG_MR) |
BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR) |
BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ) |

View File

@ -44,6 +44,7 @@
#include <netinet/if_ether.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>
@ -52,7 +53,6 @@
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_cache.h>
#include <rdma/uverbs_ioctl.h>
#include "osdep.h"
#include "irdma_defs.h"
#include "irdma_hmc.h"
@ -101,7 +101,7 @@ extern bool irdma_upload_context;
#define IRDMA_NO_QSET 0xffff
#define IW_CFG_FPM_QP_COUNT 32768
#define IRDMA_MAX_PAGES_PER_FMR 512
#define IRDMA_MAX_PAGES_PER_FMR 262144
#define IRDMA_MIN_PAGES_PER_FMR 1
#define IRDMA_CQP_COMPL_RQ_WQE_FLUSHED 2
#define IRDMA_CQP_COMPL_SQ_WQE_FLUSHED 3
@ -123,9 +123,6 @@ extern bool irdma_upload_context;
#define IRDMA_DRV_OPT_MCAST_LOGPORT_MAP 0x00000800
#define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types)
#define VSI_RXSWCTRL(_VSI) (0x00205000 + ((_VSI) * 4))
#define VSI_RXSWCTRL_MACVSIPRUNEENABLE_M BIT(8)
#define VSI_RXSWCTRL_SRCPRUNEENABLE_M BIT(13)
#define IRDMA_ROCE_CWND_DEFAULT 0x400
#define IRDMA_ROCE_ACKCREDS_DEFAULT 0x1E
@ -278,6 +275,8 @@ struct irdma_pci_f {
u8 *mem_rsrc;
u8 rdma_ver;
u8 rst_to;
/* Not used in SRIOV VF mode */
u8 pf_id;
enum irdma_protocol_used protocol_used;
bool en_rem_endpoint_trk:1;
bool dcqcn_ena:1;
@ -360,6 +359,7 @@ struct irdma_device {
struct ib_device ibdev;
struct irdma_pci_f *rf;
struct ifnet *netdev;
struct notifier_block nb_netdevice_event;
struct irdma_handler *hdl;
struct workqueue_struct *cleanup_wq;
struct irdma_sc_vsi vsi;
@ -368,7 +368,6 @@ struct irdma_device {
u32 roce_ackcreds;
u32 vendor_id;
u32 vendor_part_id;
u32 device_cap_flags;
u32 push_mode;
u32 rcv_wnd;
u16 mac_ip_table_idx;
@ -376,6 +375,12 @@ struct irdma_device {
u8 rcv_wscale;
u8 iw_status;
u8 rd_fence_rate;
bool override_rcv_wnd:1;
bool override_cwnd:1;
bool override_ackcreds:1;
bool override_ooo:1;
bool override_rd_fence_rate:1;
bool override_rtomin:1;
bool roce_mode:1;
bool roce_dcqcn_en:1;
bool dcb_vlan_mode:1;
@ -508,7 +513,7 @@ void irdma_qp_rem_ref(struct ib_qp *ibqp);
void irdma_free_lsmm_rsrc(struct irdma_qp *iwqp);
struct ib_qp *irdma_get_qp(struct ib_device *ibdev, int qpn);
void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask);
void irdma_manage_arp_cache(struct irdma_pci_f *rf, unsigned char *mac_addr,
void irdma_manage_arp_cache(struct irdma_pci_f *rf, const unsigned char *mac_addr,
u32 *ip_addr, u32 action);
struct irdma_apbvt_entry *irdma_add_apbvt(struct irdma_device *iwdev, u16 port);
void irdma_del_apbvt(struct irdma_device *iwdev,
@ -581,6 +586,10 @@ int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
void *cb_param);
void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request);
bool irdma_cq_empty(struct irdma_cq *iwcq);
int irdma_netdevice_event(struct notifier_block *notifier, unsigned long event,
void *ptr);
void irdma_unregister_notifiers(struct irdma_device *iwdev);
int irdma_register_notifiers(struct irdma_device *iwdev);
void irdma_set_rf_user_cfg_params(struct irdma_pci_f *rf);
void irdma_add_ip(struct irdma_device *iwdev);
void irdma_add_handler(struct irdma_handler *hdl);

View File

@ -265,7 +265,7 @@ add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
return -EINVAL;
chunkmem.size = sizeof(*chunk);
chunkmem.va = kzalloc(chunkmem.size, GFP_ATOMIC);
chunkmem.va = kzalloc(chunkmem.size, GFP_KERNEL);
if (!chunkmem.va)
return -ENOMEM;
@ -394,7 +394,7 @@ get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
lvl2->leaf_cnt = total;
lvl2->leafmem.size = (sizeof(*leaf) * total);
lvl2->leafmem.va = kzalloc(lvl2->leafmem.size, GFP_ATOMIC);
lvl2->leafmem.va = kzalloc(lvl2->leafmem.size, GFP_KERNEL);
if (!lvl2->leafmem.va)
return -ENOMEM;

View File

@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2016 - 2021 Intel Corporation
* Copyright (c) 2016 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -45,8 +45,7 @@
#define CQP_TIMEOUT_THRESHOLD 500
/* init operations */
int irdma_sc_dev_init(enum irdma_vers ver, struct irdma_sc_dev *dev,
struct irdma_device_init_info *info);
int irdma_sc_dev_init(struct irdma_sc_dev *dev, struct irdma_device_init_info *info);
void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp);
__le64 *irdma_sc_cqp_get_next_send_wqe(struct irdma_sc_cqp *cqp, u64 scratch);
int irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
@ -56,7 +55,7 @@ void irdma_init_config_check(struct irdma_config_check *cc,
u8 traffic_class,
u16 qs_handle);
/* HMC/FPM functions */
int irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, u8 hmc_fn_id);
int irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, u16 hmc_fn_id);
/* stats misc */
int irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev,
struct irdma_vsi_pestat *pestat, bool wait);
@ -114,7 +113,7 @@ int irdma_free_dma_mem(struct irdma_hw *hw, struct irdma_dma_mem *mem);
u8 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type);
void irdma_modify_qp_to_err(struct irdma_sc_qp *sc_qp);
int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
u8 hmc_fn_id, bool post_sq,
u16 hmc_fn_id, bool post_sq,
bool poll_registers);
int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count);
int irdma_get_rdma_features(struct irdma_sc_dev *dev);
@ -129,9 +128,9 @@ void dumpcls(struct irdma_sc_dev *dev);
int irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
struct irdma_update_sds_info *info);
int irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
struct irdma_dma_mem *val_mem, u16 hmc_fn_id);
int irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
struct irdma_dma_mem *val_mem, u16 hmc_fn_id);
int irdma_alloc_query_fpm_buf(struct irdma_sc_dev *dev,
struct irdma_dma_mem *mem);
int irdma_cqp_manage_hmc_fcn_cmd(struct irdma_sc_dev *dev,

View File

@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2015 - 2021 Intel Corporation
* Copyright (c) 2015 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -129,17 +129,17 @@ irdma_puda_post_recvbuf(struct irdma_puda_rsrc *rsrc, u32 wqe_idx,
if (!initial)
get_64bit_val(wqe, IRDMA_BYTE_24, &offset24);
offset24 = (offset24) ? 0 : LS_64(1, IRDMAQPSQ_VALID);
offset24 = (offset24) ? 0 : FIELD_PREP(IRDMAQPSQ_VALID, 1);
set_64bit_val(wqe, IRDMA_BYTE_16, 0);
set_64bit_val(wqe, 0, buf->mem.pa);
if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
set_64bit_val(wqe, IRDMA_BYTE_8,
LS_64(buf->mem.size, IRDMAQPSQ_GEN1_FRAG_LEN));
FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, buf->mem.size));
} else {
set_64bit_val(wqe, IRDMA_BYTE_8,
LS_64(buf->mem.size,
IRDMAQPSQ_FRAG_LEN) | (offset24 & IRDMAQPSQ_VALID_M));
FIELD_PREP(IRDMAQPSQ_FRAG_LEN, buf->mem.size) |
offset24);
}
irdma_wmb(); /* make sure WQE is written before valid bit is set */
@ -183,7 +183,7 @@ irdma_puda_alloc_buf(struct irdma_sc_dev *dev,
struct irdma_virt_mem buf_mem;
buf_mem.size = sizeof(struct irdma_puda_buf);
buf_mem.va = kzalloc(buf_mem.size, GFP_ATOMIC);
buf_mem.va = kzalloc(buf_mem.size, GFP_KERNEL);
if (!buf_mem.va)
return NULL;
@ -269,18 +269,18 @@ irdma_puda_poll_info(struct irdma_sc_cq *cq,
cqe = IRDMA_GET_CURRENT_CQ_ELEM(&cq->cq_uk);
get_64bit_val(cqe, IRDMA_BYTE_24, &qword3);
valid_bit = (bool)RS_64(qword3, IRDMA_CQ_VALID);
valid_bit = (bool)FIELD_GET(IRDMA_CQ_VALID, qword3);
if (valid_bit != cq_uk->polarity)
return -ENOENT;
if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
ext_valid = (bool)RS_64(qword3, IRDMA_CQ_EXTCQE);
ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
if (ext_valid) {
peek_head = (cq_uk->cq_ring.head + 1) % cq_uk->cq_ring.size;
ext_cqe = cq_uk->cq_base[peek_head].buf;
get_64bit_val(ext_cqe, IRDMA_BYTE_24, &qword7);
polarity = (u8)RS_64(qword7, IRDMA_CQ_VALID);
polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
if (!peek_head)
polarity ^= 1;
if (polarity != cq_uk->polarity)
@ -298,11 +298,11 @@ irdma_puda_poll_info(struct irdma_sc_cq *cq,
irdma_debug_buf(cq->dev, IRDMA_DEBUG_PUDA, "PUDA EXT-CQE",
ext_cqe, 32);
error = (bool)RS_64(qword3, IRDMA_CQ_ERROR);
error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
if (error) {
irdma_debug(cq->dev, IRDMA_DEBUG_PUDA, "receive error\n");
major_err = (u32)(RS_64(qword3, IRDMA_CQ_MAJERR));
minor_err = (u32)(RS_64(qword3, IRDMA_CQ_MINERR));
major_err = (u32)(FIELD_GET(IRDMA_CQ_MAJERR, qword3));
minor_err = (u32)(FIELD_GET(IRDMA_CQ_MINERR, qword3));
info->compl_error = major_err << 16 | minor_err;
return -EIO;
}
@ -310,23 +310,23 @@ irdma_puda_poll_info(struct irdma_sc_cq *cq,
get_64bit_val(cqe, IRDMA_BYTE_0, &qword0);
get_64bit_val(cqe, IRDMA_BYTE_16, &qword2);
info->q_type = (u8)RS_64(qword3, IRDMA_CQ_SQ);
info->qp_id = (u32)RS_64(qword2, IRDMACQ_QPID);
info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
info->ipv4 = (bool)RS_64(qword3, IRDMACQ_IPV4);
info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
get_64bit_val(cqe, IRDMA_BYTE_8, &comp_ctx);
info->qp = (struct irdma_qp_uk *)(irdma_uintptr) comp_ctx;
info->wqe_idx = (u32)RS_64(qword3, IRDMA_CQ_WQEIDX);
info->wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
if (ext_valid) {
info->vlan_valid = (bool)RS_64(qword7, IRDMA_CQ_UDVLANVALID);
info->vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
if (info->vlan_valid) {
get_64bit_val(ext_cqe, IRDMA_BYTE_16, &qword6);
info->vlan = (u16)RS_64(qword6, IRDMA_CQ_UDVLAN);
info->vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
}
info->smac_valid = (bool)RS_64(qword7, IRDMA_CQ_UDSMACVALID);
info->smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
if (info->smac_valid) {
get_64bit_val(ext_cqe, IRDMA_BYTE_16, &qword6);
info->smac[0] = (u8)((qword6 >> 40) & 0xFF);
@ -339,12 +339,12 @@ irdma_puda_poll_info(struct irdma_sc_cq *cq,
}
if (cq->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
info->vlan_valid = (bool)RS_64(qword3, IRDMA_VLAN_TAG_VALID);
info->l4proto = (u8)RS_64(qword2, IRDMA_UDA_L4PROTO);
info->l3proto = (u8)RS_64(qword2, IRDMA_UDA_L3PROTO);
info->vlan_valid = (bool)FIELD_GET(IRDMA_VLAN_TAG_VALID, qword3);
info->l4proto = (u8)FIELD_GET(IRDMA_UDA_L4PROTO, qword2);
info->l3proto = (u8)FIELD_GET(IRDMA_UDA_L3PROTO, qword2);
}
info->payload_len = (u32)RS_64(qword0, IRDMACQ_PAYLDLEN);
info->payload_len = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
}
return 0;
@ -486,35 +486,36 @@ irdma_puda_send(struct irdma_sc_qp *qp, struct irdma_puda_send_info *info)
if (qp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
hdr[0] = 0; /* Dest_QPN and Dest_QKey only for UD */
hdr[1] = LS_64(IRDMA_OP_TYPE_SEND, IRDMA_UDA_QPSQ_OPCODE) |
LS_64(l4len, IRDMA_UDA_QPSQ_L4LEN) |
LS_64(info->ah_id, IRDMAQPSQ_AHID) |
LS_64(1, IRDMA_UDA_QPSQ_SIGCOMPL) |
LS_64(qp->qp_uk.swqe_polarity, IRDMA_UDA_QPSQ_VALID);
hdr[1] = FIELD_PREP(IRDMA_UDA_QPSQ_OPCODE, IRDMA_OP_TYPE_SEND) |
FIELD_PREP(IRDMA_UDA_QPSQ_L4LEN, l4len) |
FIELD_PREP(IRDMAQPSQ_AHID, info->ah_id) |
FIELD_PREP(IRDMA_UDA_QPSQ_SIGCOMPL, 1) |
FIELD_PREP(IRDMA_UDA_QPSQ_VALID,
qp->qp_uk.swqe_polarity);
/* Fourth line of WQE descriptor */
set_64bit_val(wqe, IRDMA_BYTE_0, info->paddr);
set_64bit_val(wqe, IRDMA_BYTE_8,
LS_64(info->len, IRDMAQPSQ_FRAG_LEN) |
LS_64(qp->qp_uk.swqe_polarity, IRDMA_UDA_QPSQ_VALID));
FIELD_PREP(IRDMAQPSQ_FRAG_LEN, info->len) |
FIELD_PREP(IRDMA_UDA_QPSQ_VALID, qp->qp_uk.swqe_polarity));
} else {
hdr[0] = LS_64((info->maclen >> 1), IRDMA_UDA_QPSQ_MACLEN) |
LS_64(iplen, IRDMA_UDA_QPSQ_IPLEN) |
LS_64(1, IRDMA_UDA_QPSQ_L4T) |
LS_64(iipt, IRDMA_UDA_QPSQ_IIPT) |
LS_64(l4len, IRDMA_GEN1_UDA_QPSQ_L4LEN);
hdr[0] = FIELD_PREP(IRDMA_UDA_QPSQ_MACLEN, info->maclen >> 1) |
FIELD_PREP(IRDMA_UDA_QPSQ_IPLEN, iplen) |
FIELD_PREP(IRDMA_UDA_QPSQ_L4T, 1) |
FIELD_PREP(IRDMA_UDA_QPSQ_IIPT, iipt) |
FIELD_PREP(IRDMA_GEN1_UDA_QPSQ_L4LEN, l4len);
hdr[1] = LS_64(IRDMA_OP_TYPE_SEND, IRDMA_UDA_QPSQ_OPCODE) |
LS_64(1, IRDMA_UDA_QPSQ_SIGCOMPL) |
LS_64(info->do_lpb, IRDMA_UDA_QPSQ_DOLOOPBACK) |
LS_64(qp->qp_uk.swqe_polarity, IRDMA_UDA_QPSQ_VALID);
hdr[1] = FIELD_PREP(IRDMA_UDA_QPSQ_OPCODE, IRDMA_OP_TYPE_SEND) |
FIELD_PREP(IRDMA_UDA_QPSQ_SIGCOMPL, 1) |
FIELD_PREP(IRDMA_UDA_QPSQ_DOLOOPBACK, info->do_lpb) |
FIELD_PREP(IRDMA_UDA_QPSQ_VALID, qp->qp_uk.swqe_polarity);
/* Fourth line of WQE descriptor */
set_64bit_val(wqe, IRDMA_BYTE_0, info->paddr);
set_64bit_val(wqe, IRDMA_BYTE_8,
LS_64(info->len, IRDMAQPSQ_GEN1_FRAG_LEN));
FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, info->len));
}
set_64bit_val(wqe, IRDMA_BYTE_16, hdr[0]);
@ -606,27 +607,27 @@ irdma_puda_qp_setctx(struct irdma_puda_rsrc *rsrc)
set_64bit_val(qp_ctx, IRDMA_BYTE_8, qp->sq_pa);
set_64bit_val(qp_ctx, IRDMA_BYTE_16, qp->rq_pa);
set_64bit_val(qp_ctx, IRDMA_BYTE_24,
LS_64(qp->hw_rq_size, IRDMAQPC_RQSIZE) |
LS_64(qp->hw_sq_size, IRDMAQPC_SQSIZE));
FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size));
set_64bit_val(qp_ctx, IRDMA_BYTE_48,
LS_64(rsrc->buf_size, IRDMAQPC_SNDMSS));
FIELD_PREP(IRDMAQPC_SNDMSS, rsrc->buf_size));
set_64bit_val(qp_ctx, IRDMA_BYTE_56, 0);
if (qp->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
set_64bit_val(qp_ctx, IRDMA_BYTE_64, 1);
set_64bit_val(qp_ctx, IRDMA_BYTE_136,
LS_64(rsrc->cq_id, IRDMAQPC_TXCQNUM) |
LS_64(rsrc->cq_id, IRDMAQPC_RXCQNUM));
FIELD_PREP(IRDMAQPC_TXCQNUM, rsrc->cq_id) |
FIELD_PREP(IRDMAQPC_RXCQNUM, rsrc->cq_id));
set_64bit_val(qp_ctx, IRDMA_BYTE_144,
LS_64(rsrc->stats_idx, IRDMAQPC_STAT_INDEX));
FIELD_PREP(IRDMAQPC_STAT_INDEX, rsrc->stats_idx));
set_64bit_val(qp_ctx, IRDMA_BYTE_160,
LS_64(1, IRDMAQPC_PRIVEN) |
LS_64(rsrc->stats_idx_valid, IRDMAQPC_USESTATSINSTANCE));
FIELD_PREP(IRDMAQPC_PRIVEN, 1) |
FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, rsrc->stats_idx_valid));
set_64bit_val(qp_ctx, IRDMA_BYTE_168,
LS_64((uintptr_t)qp, IRDMAQPC_QPCOMPCTX));
FIELD_PREP(IRDMAQPC_QPCOMPCTX, (uintptr_t)qp));
set_64bit_val(qp_ctx, IRDMA_BYTE_176,
LS_64(qp->sq_tph_val, IRDMAQPC_SQTPHVAL) |
LS_64(qp->rq_tph_val, IRDMAQPC_RQTPHVAL) |
LS_64(qp->qs_handle, IRDMAQPC_QSHANDLE));
FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle));
irdma_debug_buf(rsrc->dev, IRDMA_DEBUG_PUDA, "PUDA QP CONTEXT", qp_ctx,
IRDMA_QP_CTX_SIZE);
@ -655,11 +656,11 @@ irdma_puda_qp_wqe(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
set_64bit_val(wqe, IRDMA_BYTE_40, qp->shadow_area_pa);
hdr = qp->qp_uk.qp_id |
LS_64(IRDMA_CQP_OP_CREATE_QP, IRDMA_CQPSQ_OPCODE) |
LS_64(IRDMA_QP_TYPE_UDA, IRDMA_CQPSQ_QP_QPTYPE) |
LS_64(1, IRDMA_CQPSQ_QP_CQNUMVALID) |
LS_64(2, IRDMA_CQPSQ_QP_NEXTIWSTATE) |
LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_QP) |
FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, IRDMA_QP_TYPE_UDA) |
FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, 1) |
FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, 2) |
FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
@ -768,20 +769,19 @@ irdma_puda_cq_wqe(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
set_64bit_val(wqe, IRDMA_BYTE_0, cq->cq_uk.cq_size);
set_64bit_val(wqe, IRDMA_BYTE_8, RS_64_1(cq, 1));
set_64bit_val(wqe, IRDMA_BYTE_16,
LS_64(cq->shadow_read_threshold,
IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, cq->shadow_read_threshold));
set_64bit_val(wqe, IRDMA_BYTE_32, cq->cq_pa);
set_64bit_val(wqe, IRDMA_BYTE_40, cq->shadow_area_pa);
set_64bit_val(wqe, IRDMA_BYTE_56,
LS_64(cq->tph_val, IRDMA_CQPSQ_TPHVAL) |
LS_64(cq->vsi->vsi_idx, IRDMA_CQPSQ_VSIIDX));
FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));
hdr = cq->cq_uk.cq_id |
LS_64(IRDMA_CQP_OP_CREATE_CQ, IRDMA_CQPSQ_OPCODE) |
LS_64(1, IRDMA_CQPSQ_CQ_CHKOVERFLOW) |
LS_64(1, IRDMA_CQPSQ_CQ_ENCEQEMASK) |
LS_64(1, IRDMA_CQPSQ_CQ_CEQIDVALID) |
LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CQ) |
FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, 1) |
FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, 1) |
FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, 1) |
FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
@ -962,13 +962,13 @@ irdma_puda_dele_rsrc(struct irdma_sc_vsi *vsi, enum puda_rsrc_type type,
case PUDA_QP_CREATED:
irdma_qp_rem_qos(&rsrc->qp);
if (!(reset || dev->no_cqp))
if (!reset)
irdma_puda_free_qp(rsrc);
irdma_free_dma_mem(dev->hw, &rsrc->qpmem);
/* fallthrough */
case PUDA_CQ_CREATED:
if (!(reset || dev->no_cqp))
if (!reset)
irdma_puda_free_cq(rsrc);
irdma_free_dma_mem(dev->hw, &rsrc->cqmem);
@ -1007,7 +1007,7 @@ irdma_puda_allocbufs(struct irdma_puda_rsrc *rsrc, u32 count)
unsigned long flags;
buf_mem.size = count * sizeof(struct irdma_puda_buf);
buf_mem.va = kzalloc(buf_mem.size, GFP_ATOMIC);
buf_mem.va = kzalloc(buf_mem.size, GFP_KERNEL);
if (!buf_mem.va) {
irdma_debug(rsrc->dev, IRDMA_DEBUG_PUDA,
"error virt_mem for buf\n");
@ -1115,7 +1115,7 @@ irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi,
return -EOPNOTSUPP;
}
vmem->size = pudasize + sqwridsize + rqwridsize;
vmem->va = kzalloc(vmem->size, GFP_ATOMIC);
vmem->va = kzalloc(vmem->size, GFP_KERNEL);
if (!vmem->va)
return -ENOMEM;
@ -1224,16 +1224,16 @@ irdma_ilq_putback_rcvbuf(struct irdma_sc_qp *qp,
if (qp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
get_64bit_val(wqe, IRDMA_BYTE_8, &offset8);
if (offset24)
offset8 &= ~LS_64(1, IRDMAQPSQ_VALID);
offset8 &= ~FIELD_PREP(IRDMAQPSQ_VALID, 1);
else
offset8 |= LS_64(1, IRDMAQPSQ_VALID);
offset8 |= FIELD_PREP(IRDMAQPSQ_VALID, 1);
set_64bit_val(wqe, IRDMA_BYTE_8, offset8);
irdma_wmb(); /* make sure WQE is written before valid bit is set */
}
if (offset24)
offset24 = 0;
else
offset24 = LS_64(1, IRDMAQPSQ_VALID);
offset24 = FIELD_PREP(IRDMAQPSQ_VALID, 1);
set_64bit_val(wqe, IRDMA_BYTE_24, offset24);
}

View File

@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2015 - 2021 Intel Corporation
* Copyright (c) 2015 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -35,7 +35,9 @@
#ifndef IRDMA_TYPE_H
#define IRDMA_TYPE_H
#include "osdep.h"
#include "irdma.h"
#include "irdma_user.h"
#include "irdma_hmc.h"
@ -132,11 +134,6 @@ enum irdma_term_mpa_errors {
MPA_REQ_RSP = 0x04,
};
enum irdma_qp_event_type {
IRDMA_QP_EVENT_CATASTROPHIC,
IRDMA_QP_EVENT_ACCESS_ERR,
};
enum irdma_hw_stats_index {
/* gen1 - 32-bit */
IRDMA_HW_STAT_INDEX_IP4RXDISCARD = 0,
@ -176,22 +173,21 @@ enum irdma_hw_stats_index {
IRDMA_HW_STAT_INDEX_RDMATXWRS = 33,
IRDMA_HW_STAT_INDEX_RDMAVBND = 34,
IRDMA_HW_STAT_INDEX_RDMAVINV = 35,
IRDMA_HW_STAT_INDEX_IP4RXMCOCTS = 36,
IRDMA_HW_STAT_INDEX_IP4TXMCOCTS = 37,
IRDMA_HW_STAT_INDEX_IP6RXMCOCTS = 38,
IRDMA_HW_STAT_INDEX_IP6TXMCOCTS = 39,
IRDMA_HW_STAT_INDEX_UDPRXPKTS = 40,
IRDMA_HW_STAT_INDEX_UDPTXPKTS = 41,
IRDMA_HW_STAT_INDEX_MAX_GEN_1 = 42, /* Must be same value as next entry */
IRDMA_HW_STAT_INDEX_MAX_GEN_1 = 36, /* Must be same value as next entry */
/* gen2 - 64-bit */
IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS = 42,
/* gen2 - 32-bit */
IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED = 36,
IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED = 37,
IRDMA_HW_STAT_INDEX_TXNPCNPSENT = 38,
/* gen2 - 64-bit */
IRDMA_HW_STAT_INDEX_IP4RXMCOCTS = 39,
IRDMA_HW_STAT_INDEX_IP4TXMCOCTS = 40,
IRDMA_HW_STAT_INDEX_IP6RXMCOCTS = 41,
IRDMA_HW_STAT_INDEX_IP6TXMCOCTS = 42,
IRDMA_HW_STAT_INDEX_UDPRXPKTS = 43,
IRDMA_HW_STAT_INDEX_UDPTXPKTS = 44,
IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS = 45,
IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED = 43,
IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED = 44,
IRDMA_HW_STAT_INDEX_TXNPCNPSENT = 45,
IRDMA_HW_STAT_INDEX_MAX_GEN_2 = 46,
};
@ -331,8 +327,8 @@ struct irdma_hw_stat_map {
struct irdma_stats_gather_info {
bool use_hmc_fcn_index:1;
bool use_stats_inst:1;
u8 hmc_fcn_index;
u8 stats_inst_index;
u16 hmc_fcn_index;
u16 stats_inst_index;
struct irdma_dma_mem stats_buff_mem;
void *gather_stats_va;
void *last_gather_stats_va;
@ -524,14 +520,14 @@ struct irdma_sc_qp {
struct irdma_stats_inst_info {
bool use_hmc_fcn_index;
u8 hmc_fn_id;
u8 stats_idx;
u16 hmc_fn_id;
u16 stats_idx;
};
struct irdma_up_info {
u8 map[8];
u8 cnp_up_override;
u8 hmc_fcn_idx;
u16 hmc_fcn_idx;
bool use_vlan:1;
bool use_cnp_up_override:1;
};
@ -585,7 +581,7 @@ struct irdma_config_check {
u16 qs_handle;
};
#define IRDMA_INVALID_FCN_ID 0xff
#define IRDMA_INVALID_STATS_IDX 0xff
struct irdma_sc_vsi {
u16 vsi_idx;
struct irdma_sc_dev *dev;
@ -598,11 +594,10 @@ struct irdma_sc_vsi {
struct irdma_puda_rsrc *ieq;
u32 exception_lan_q;
u16 mtu;
u16 vm_id;
u8 fcn_id;
enum irdma_vm_vf_type vm_vf_type;
bool stats_fcn_id_alloc:1;
bool stats_inst_alloc:1;
bool tc_change_pending:1;
bool mtu_change_pending:1;
struct irdma_vsi_pestat *pestat;
ATOMIC qp_suspend_reqs;
int (*register_qset)(struct irdma_sc_vsi *vsi,
@ -611,18 +606,17 @@ struct irdma_sc_vsi {
struct irdma_ws_node *tc_node);
struct irdma_config_check cfg_check[IRDMA_MAX_USER_PRIORITY];
bool tc_print_warning[IRDMA_MAX_TRAFFIC_CLASS];
struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY];
u8 qos_rel_bw;
u8 qos_prio_type;
u16 stats_idx;
u8 dscp_map[IRDMA_DSCP_NUM_VAL];
struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY];
bool dscp_mode:1;
};
struct irdma_sc_dev {
struct list_head cqp_cmd_head; /* head of the CQP command list */
bool volatile no_cqp;
spinlock_t cqp_lock; /* protect CQP list access */
bool fcn_id_array[IRDMA_MAX_STATS_COUNT];
struct irdma_dma_mem vf_fpm_query_buf[IRDMA_MAX_PE_ENA_VF_COUNT];
u64 fpm_query_buf_pa;
u64 fpm_commit_buf_pa;
@ -640,7 +634,7 @@ struct irdma_sc_dev {
u32 IOMEM *hw_regs[IRDMA_MAX_REGS];
u32 ceq_itr; /* Interrupt throttle, usecs between interrupts: 0 disabled. 2 - 8160 */
u64 hw_masks[IRDMA_MAX_MASKS];
u64 hw_shifts[IRDMA_MAX_SHIFTS];
u8 hw_shifts[IRDMA_MAX_SHIFTS];
const struct irdma_hw_stat_map *hw_stats_map;
u64 feature_info[IRDMA_MAX_FEATURES];
u64 cqp_cmd_stats[IRDMA_MAX_CQP_OPS];
@ -656,7 +650,7 @@ struct irdma_sc_dev {
struct mutex ws_mutex; /* ws tree mutex */
u32 debug_mask;
u16 num_vfs;
u8 hmc_fn_id;
u16 hmc_fn_id;
u8 vf_id;
bool vchnl_up:1;
bool ceq_valid:1;
@ -750,7 +744,6 @@ struct irdma_vsi_init_info {
u16 exception_lan_q;
u16 pf_data_vsi_num;
enum irdma_vm_vf_type vm_vf_type;
u16 vm_id;
int (*register_qset)(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *tc_node);
void (*unregister_qset)(struct irdma_sc_vsi *vsi,
@ -760,7 +753,7 @@ struct irdma_vsi_init_info {
struct irdma_vsi_stats_info {
struct irdma_vsi_pestat *pestat;
u8 fcn_id;
bool alloc_fcn_id;
bool alloc_stats_inst;
};
struct irdma_device_init_info {
@ -771,7 +764,7 @@ struct irdma_device_init_info {
struct irdma_hw *hw;
void IOMEM *bar0;
u16 max_vfs;
u8 hmc_fn_id;
u16 hmc_fn_id;
u32 debug_mask;
};
@ -852,10 +845,9 @@ struct irdma_udp_offload_info {
struct irdma_roce_offload_info {
u16 p_key;
u16 err_rq_idx;
u32 err_rq_idx;
u32 qkey;
u32 dest_qp;
u32 local_qp;
u8 roce_tver;
u8 ack_credits;
u8 err_rq_idx_valid;
@ -888,7 +880,7 @@ struct irdma_iwarp_offload_info {
u8 ddp_ver;
u8 rdmap_ver;
u8 iwarp_mode;
u16 err_rq_idx;
u32 err_rq_idx;
u32 pd_id;
u16 ord_size;
u16 ird_size;
@ -976,7 +968,7 @@ struct irdma_qp_host_ctx_info {
u32 send_cq_num;
u32 rcv_cq_num;
u32 rem_endpoint_idx;
u8 stats_idx;
u16 stats_idx;
bool srq_valid:1;
bool tcp_info_valid:1;
bool iwarp_info_valid:1;
@ -987,8 +979,8 @@ struct irdma_qp_host_ctx_info {
struct irdma_aeqe_info {
u64 compl_ctx;
u32 qp_cq_id;
u32 wqe_idx;
u16 ae_id;
u16 wqe_idx;
u8 tcp_state;
u8 iwarp_state;
bool qp:1;
@ -1013,7 +1005,8 @@ struct irdma_allocate_stag_info {
bool remote_access:1;
bool use_hmc_fcn_index:1;
bool use_pf_rid:1;
u8 hmc_fcn_index;
bool all_memory:1;
u16 hmc_fcn_index;
};
struct irdma_mw_alloc_info {
@ -1038,8 +1031,9 @@ struct irdma_reg_ns_stag_info {
u32 pd_id;
irdma_stag_key stag_key;
bool use_hmc_fcn_index:1;
u8 hmc_fcn_index;
u16 hmc_fcn_index;
bool use_pf_rid:1;
bool all_memory:1;
};
struct irdma_fast_reg_stag_info {
@ -1061,7 +1055,7 @@ struct irdma_fast_reg_stag_info {
bool signaled:1;
bool push_wqe:1;
bool use_hmc_fcn_index:1;
u8 hmc_fcn_index;
u16 hmc_fcn_index;
bool use_pf_rid:1;
bool defer_flag:1;
};
@ -1231,7 +1225,7 @@ void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable);
void irdma_check_cqp_progress(struct irdma_cqp_timeout *cqp_timeout,
struct irdma_sc_dev *dev);
int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err);
int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp);
int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp, bool free_hwcqp);
int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
struct irdma_cqp_init_info *info);
void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp);
@ -1249,10 +1243,10 @@ int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info);
int irdma_sc_qp_modify(struct irdma_sc_qp *qp,
struct irdma_modify_qp_info *info, u64 scratch,
bool post_sq);
int irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
irdma_stag);
int irdma_sc_send_lsmm_nostag(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size);
int irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read);
void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
irdma_stag stag);
void irdma_sc_send_lsmm_nostag(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size);
void irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read);
void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
struct irdma_qp_host_ctx_info *info);
void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
@ -1260,8 +1254,9 @@ void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
int irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq);
int irdma_sc_cq_init(struct irdma_sc_cq *cq, struct irdma_cq_init_info *info);
void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info);
int irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq, u64 scratch, bool post_sq);
int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
u8 hmc_fn_id, bool post_sq,
u16 hmc_fn_id, bool post_sq,
bool poll_registers);
void sc_vsi_update_stats(struct irdma_sc_vsi *vsi);
@ -1411,7 +1406,7 @@ struct cqp_info {
struct irdma_sc_cqp *cqp;
void *fpm_val_va;
u64 fpm_val_pa;
u8 hmc_fn_id;
u16 hmc_fn_id;
u64 scratch;
} query_fpm_val;
@ -1419,7 +1414,7 @@ struct cqp_info {
struct irdma_sc_cqp *cqp;
void *fpm_val_va;
u64 fpm_val_pa;
u8 hmc_fn_id;
u16 hmc_fn_id;
u64 scratch;
} commit_fpm_val;

View File

@ -66,35 +66,35 @@ irdma_sc_access_ah(struct irdma_sc_cqp *cqp, struct irdma_ah_info *info,
LS_64_1(info->mac_addr[1], 48) |
LS_64_1(info->mac_addr[0], 56));
qw1 = LS_64(info->pd_idx, IRDMA_UDA_CQPSQ_MAV_PDINDEXLO) |
LS_64(info->tc_tos, IRDMA_UDA_CQPSQ_MAV_TC) |
LS_64(info->vlan_tag, IRDMA_UDAQPC_VLANTAG);
qw1 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXLO, info->pd_idx) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_TC, info->tc_tos) |
FIELD_PREP(IRDMA_UDAQPC_VLANTAG, info->vlan_tag);
qw2 = LS_64(info->dst_arpindex, IRDMA_UDA_CQPSQ_MAV_ARPINDEX) |
LS_64(info->flow_label, IRDMA_UDA_CQPSQ_MAV_FLOWLABEL) |
LS_64(info->hop_ttl, IRDMA_UDA_CQPSQ_MAV_HOPLIMIT) |
LS_64(info->pd_idx >> 16, IRDMA_UDA_CQPSQ_MAV_PDINDEXHI);
qw2 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ARPINDEX, info->dst_arpindex) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_FLOWLABEL, info->flow_label) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_HOPLIMIT, info->hop_ttl) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXHI, info->pd_idx >> 16);
if (!info->ipv4_valid) {
set_64bit_val(wqe, IRDMA_BYTE_40,
LS_64(info->dest_ip_addr[0], IRDMA_UDA_CQPSQ_MAV_ADDR0) |
LS_64(info->dest_ip_addr[1], IRDMA_UDA_CQPSQ_MAV_ADDR1));
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->dest_ip_addr[0]) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->dest_ip_addr[1]));
set_64bit_val(wqe, IRDMA_BYTE_32,
LS_64(info->dest_ip_addr[2], IRDMA_UDA_CQPSQ_MAV_ADDR2) |
LS_64(info->dest_ip_addr[3], IRDMA_UDA_CQPSQ_MAV_ADDR3));
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->dest_ip_addr[2]) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[3]));
set_64bit_val(wqe, IRDMA_BYTE_56,
LS_64(info->src_ip_addr[0], IRDMA_UDA_CQPSQ_MAV_ADDR0) |
LS_64(info->src_ip_addr[1], IRDMA_UDA_CQPSQ_MAV_ADDR1));
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->src_ip_addr[0]) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->src_ip_addr[1]));
set_64bit_val(wqe, IRDMA_BYTE_48,
LS_64(info->src_ip_addr[2], IRDMA_UDA_CQPSQ_MAV_ADDR2) |
LS_64(info->src_ip_addr[3], IRDMA_UDA_CQPSQ_MAV_ADDR3));
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->src_ip_addr[2]) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->src_ip_addr[3]));
} else {
set_64bit_val(wqe, IRDMA_BYTE_32,
LS_64(info->dest_ip_addr[0], IRDMA_UDA_CQPSQ_MAV_ADDR3));
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[0]));
set_64bit_val(wqe, IRDMA_BYTE_48,
LS_64(info->src_ip_addr[0], IRDMA_UDA_CQPSQ_MAV_ADDR3));
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->src_ip_addr[0]));
}
set_64bit_val(wqe, IRDMA_BYTE_8, qw1);
@ -104,13 +104,12 @@ irdma_sc_access_ah(struct irdma_sc_cqp *cqp, struct irdma_ah_info *info,
set_64bit_val(
wqe, IRDMA_BYTE_24,
LS_64(cqp->polarity, IRDMA_UDA_CQPSQ_MAV_WQEVALID) |
LS_64(op, IRDMA_UDA_CQPSQ_MAV_OPCODE) |
LS_64(info->do_lpbk, IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK) |
LS_64(info->ipv4_valid, IRDMA_UDA_CQPSQ_MAV_IPV4VALID) |
LS_64(info->ah_idx, IRDMA_UDA_CQPSQ_MAV_AVIDX) |
LS_64(info->insert_vlan_tag,
IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG));
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_WQEVALID, cqp->polarity) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_OPCODE, op) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK, info->do_lpbk) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_IPV4VALID, info->ipv4_valid) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_AVIDX, info->ah_idx) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG, info->insert_vlan_tag));
irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_AH WQE", wqe,
IRDMA_CQP_WQE_SIZE * 8);
@ -137,9 +136,9 @@ irdma_create_mg_ctx(struct irdma_mcast_grp_info *info)
if (entry_info->valid_entry) {
set_64bit_val((__le64 *) info->dma_mem_mc.va,
ctx_idx * sizeof(u64),
LS_64(entry_info->dest_port, IRDMA_UDA_MGCTX_DESTPORT) |
LS_64(entry_info->valid_entry, IRDMA_UDA_MGCTX_VALIDENT) |
LS_64(entry_info->qp_id, IRDMA_UDA_MGCTX_QPID));
FIELD_PREP(IRDMA_UDA_MGCTX_DESTPORT, entry_info->dest_port) |
FIELD_PREP(IRDMA_UDA_MGCTX_VALIDENT, entry_info->valid_entry) |
FIELD_PREP(IRDMA_UDA_MGCTX_QPID, entry_info->qp_id));
ctx_idx++;
}
}
@ -179,8 +178,8 @@ irdma_access_mcast_grp(struct irdma_sc_cqp *cqp,
set_64bit_val(wqe, IRDMA_BYTE_32, info->dma_mem_mc.pa);
set_64bit_val(wqe, IRDMA_BYTE_16,
LS_64(info->vlan_id, IRDMA_UDA_CQPSQ_MG_VLANID) |
LS_64(info->qs_handle, IRDMA_UDA_CQPSQ_QS_HANDLE));
FIELD_PREP(IRDMA_UDA_CQPSQ_MG_VLANID, info->vlan_id) |
FIELD_PREP(IRDMA_UDA_CQPSQ_QS_HANDLE, info->qs_handle));
set_64bit_val(wqe, IRDMA_BYTE_0, LS_64_1(info->dest_mac_addr[5], 0) |
LS_64_1(info->dest_mac_addr[4], 8) |
LS_64_1(info->dest_mac_addr[3], 16) |
@ -188,28 +187,28 @@ irdma_access_mcast_grp(struct irdma_sc_cqp *cqp,
LS_64_1(info->dest_mac_addr[1], 32) |
LS_64_1(info->dest_mac_addr[0], 40));
set_64bit_val(wqe, IRDMA_BYTE_8,
LS_64(info->hmc_fcn_id, IRDMA_UDA_CQPSQ_MG_HMC_FCN_ID));
FIELD_PREP(IRDMA_UDA_CQPSQ_MG_HMC_FCN_ID, info->hmc_fcn_id));
if (!info->ipv4_valid) {
set_64bit_val(wqe, IRDMA_BYTE_56,
LS_64(info->dest_ip_addr[0], IRDMA_UDA_CQPSQ_MAV_ADDR0) |
LS_64(info->dest_ip_addr[1], IRDMA_UDA_CQPSQ_MAV_ADDR1));
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->dest_ip_addr[0]) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->dest_ip_addr[1]));
set_64bit_val(wqe, IRDMA_BYTE_48,
LS_64(info->dest_ip_addr[2], IRDMA_UDA_CQPSQ_MAV_ADDR2) |
LS_64(info->dest_ip_addr[3], IRDMA_UDA_CQPSQ_MAV_ADDR3));
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->dest_ip_addr[2]) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[3]));
} else {
set_64bit_val(wqe, IRDMA_BYTE_48,
LS_64(info->dest_ip_addr[0], IRDMA_UDA_CQPSQ_MAV_ADDR3));
FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[0]));
}
irdma_wmb(); /* write memory barrier needed before writing the WQE header. */
set_64bit_val(wqe, IRDMA_BYTE_24,
LS_64(cqp->polarity, IRDMA_UDA_CQPSQ_MG_WQEVALID) |
LS_64(op, IRDMA_UDA_CQPSQ_MG_OPCODE) |
LS_64(info->mg_id, IRDMA_UDA_CQPSQ_MG_MGIDX) |
LS_64(info->vlan_valid, IRDMA_UDA_CQPSQ_MG_VLANVALID) |
LS_64(info->ipv4_valid, IRDMA_UDA_CQPSQ_MG_IPV4VALID));
FIELD_PREP(IRDMA_UDA_CQPSQ_MG_WQEVALID, cqp->polarity) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MG_OPCODE, op) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MG_MGIDX, info->mg_id) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MG_VLANVALID, info->vlan_valid) |
FIELD_PREP(IRDMA_UDA_CQPSQ_MG_IPV4VALID, info->ipv4_valid));
irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_MCG WQE", wqe,
IRDMA_CQP_WQE_SIZE * 8);

View File

@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2019 Intel Corporation
* Copyright (c) 2016 - 2021 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -35,381 +35,220 @@
#ifndef IRDMA_UDA_D_H
#define IRDMA_UDA_D_H
/* L4 packet type */
#define IRDMA_E_UDA_SQ_L4T_UNKNOWN 0
#define IRDMA_E_UDA_SQ_L4T_TCP 1
#define IRDMA_E_UDA_SQ_L4T_SCTP 2
#define IRDMA_E_UDA_SQ_L4T_UDP 3
/* Inner IP header type */
#define IRDMA_E_UDA_SQ_IIPT_UNKNOWN 0
#define IRDMA_E_UDA_SQ_IIPT_IPV6 1
#define IRDMA_E_UDA_SQ_IIPT_IPV4_NO_CSUM 2
#define IRDMA_E_UDA_SQ_IIPT_IPV4_CSUM 3
/* UDA defined fields for transmit descriptors */
#define IRDMA_UDA_QPSQ_PUSHWQE_S 56
#define IRDMA_UDA_QPSQ_PUSHWQE_M BIT_ULL(IRDMA_UDA_QPSQ_PUSHWQE_S)
#define IRDMA_UDA_QPSQ_PUSHWQE BIT_ULL(56)
#define IRDMA_UDA_QPSQ_INLINEDATAFLAG_S 57
#define IRDMA_UDA_QPSQ_INLINEDATAFLAG_M \
BIT_ULL(IRDMA_UDA_QPSQ_INLINEDATAFLAG_S)
#define IRDMA_UDA_QPSQ_INLINEDATAFLAG BIT_ULL(57)
#define IRDMA_UDA_QPSQ_INLINEDATALEN_S 48
#define IRDMA_UDA_QPSQ_INLINEDATALEN_M \
((u64)0xff << IRDMA_UDA_QPSQ_INLINEDATALEN_S)
#define IRDMA_UDA_QPSQ_INLINEDATALEN GENMASK_ULL(55, 48)
#define IRDMA_UDA_QPSQ_ADDFRAGCNT_S 38
#define IRDMA_UDA_QPSQ_ADDFRAGCNT_M \
((u64)0x0F << IRDMA_UDA_QPSQ_ADDFRAGCNT_S)
#define IRDMA_UDA_QPSQ_ADDFRAGCNT GENMASK_ULL(41, 38)
#define IRDMA_UDA_QPSQ_IPFRAGFLAGS_S 42
#define IRDMA_UDA_QPSQ_IPFRAGFLAGS_M \
((u64)0x3 << IRDMA_UDA_QPSQ_IPFRAGFLAGS_S)
#define IRDMA_UDA_QPSQ_IPFRAGFLAGS GENMASK_ULL(43, 42)
#define IRDMA_UDA_QPSQ_NOCHECKSUM_S 45
#define IRDMA_UDA_QPSQ_NOCHECKSUM_M \
BIT_ULL(IRDMA_UDA_QPSQ_NOCHECKSUM_S)
#define IRDMA_UDA_QPSQ_NOCHECKSUM BIT_ULL(45)
#define IRDMA_UDA_QPSQ_AHIDXVALID_S 46
#define IRDMA_UDA_QPSQ_AHIDXVALID_M \
BIT_ULL(IRDMA_UDA_QPSQ_AHIDXVALID_S)
#define IRDMA_UDA_QPSQ_AHIDXVALID BIT_ULL(46)
#define IRDMA_UDA_QPSQ_LOCAL_FENCE_S 61
#define IRDMA_UDA_QPSQ_LOCAL_FENCE_M \
BIT_ULL(IRDMA_UDA_QPSQ_LOCAL_FENCE_S)
#define IRDMA_UDA_QPSQ_LOCAL_FENCE BIT_ULL(61)
#define IRDMA_UDA_QPSQ_AHIDX_S 0
#define IRDMA_UDA_QPSQ_AHIDX_M ((u64)0x1ffff << IRDMA_UDA_QPSQ_AHIDX_S)
#define IRDMA_UDA_QPSQ_AHIDX GENMASK_ULL(16, 0)
#define IRDMA_UDA_QPSQ_PROTOCOL_S 16
#define IRDMA_UDA_QPSQ_PROTOCOL_M \
((u64)0xff << IRDMA_UDA_QPSQ_PROTOCOL_S)
#define IRDMA_UDA_QPSQ_PROTOCOL GENMASK_ULL(23, 16)
#define IRDMA_UDA_QPSQ_EXTHDRLEN_S 32
#define IRDMA_UDA_QPSQ_EXTHDRLEN_M \
((u64)0x1ff << IRDMA_UDA_QPSQ_EXTHDRLEN_S)
#define IRDMA_UDA_QPSQ_EXTHDRLEN GENMASK_ULL(40, 32)
#define IRDMA_UDA_QPSQ_MULTICAST_S 63
#define IRDMA_UDA_QPSQ_MULTICAST_M \
BIT_ULL(IRDMA_UDA_QPSQ_MULTICAST_S)
#define IRDMA_UDA_QPSQ_MULTICAST BIT_ULL(63)
#define IRDMA_UDA_QPSQ_MACLEN_S 56
#define IRDMA_UDA_QPSQ_MACLEN_M \
((u64)0x7f << IRDMA_UDA_QPSQ_MACLEN_S)
#define IRDMA_UDA_QPSQ_MACLEN GENMASK_ULL(62, 56)
#define IRDMA_UDA_QPSQ_MACLEN_LINE 2
#define IRDMA_UDA_QPSQ_IPLEN_S 48
#define IRDMA_UDA_QPSQ_IPLEN_M \
((u64)0x7f << IRDMA_UDA_QPSQ_IPLEN_S)
#define IRDMA_UDA_QPSQ_IPLEN GENMASK_ULL(54, 48)
#define IRDMA_UDA_QPSQ_IPLEN_LINE 2
#define IRDMA_UDA_QPSQ_L4T_S 30
#define IRDMA_UDA_QPSQ_L4T_M ((u64)0x3 << IRDMA_UDA_QPSQ_L4T_S)
#define IRDMA_UDA_QPSQ_L4T GENMASK_ULL(31, 30)
#define IRDMA_UDA_QPSQ_L4T_LINE 2
#define IRDMA_UDA_QPSQ_IIPT_S 28
#define IRDMA_UDA_QPSQ_IIPT_M ((u64)0x3 << IRDMA_UDA_QPSQ_IIPT_S)
#define IRDMA_UDA_QPSQ_IIPT GENMASK_ULL(29, 28)
#define IRDMA_UDA_QPSQ_IIPT_LINE 2
#define IRDMA_UDA_QPSQ_DO_LPB_LINE 3
#define IRDMA_UDA_QPSQ_FWD_PROG_CONFIRM_S 45
#define IRDMA_UDA_QPSQ_FWD_PROG_CONFIRM_M \
BIT_ULL(IRDMA_UDA_QPSQ_FWD_PROG_CONFIRM_S)
#define IRDMA_UDA_QPSQ_FWD_PROG_CONFIRM BIT_ULL(45)
#define IRDMA_UDA_QPSQ_FWD_PROG_CONFIRM_LINE 3
#define IRDMA_UDA_QPSQ_IMMDATA_S 0
#define IRDMA_UDA_QPSQ_IMMDATA_M \
((u64)0xffffffffffffffff << IRDMA_UDA_QPSQ_IMMDATA_S)
#define IRDMA_UDA_QPSQ_IMMDATA GENMASK_ULL(63, 0)
/* Byte Offset 0 */
#define IRDMA_UDAQPC_IPV4_S 3
#define IRDMA_UDAQPC_IPV4_M BIT_ULL(IRDMAQPC_IPV4_S)
#define IRDMA_UDAQPC_IPV4 BIT_ULL(3)
#define IRDMA_UDAQPC_INSERTVLANTAG_S 5
#define IRDMA_UDAQPC_INSERTVLANTAG_M BIT_ULL(IRDMA_UDAQPC_INSERTVLANTAG_S)
#define IRDMA_UDAQPC_INSERTVLANTAG BIT_ULL(5)
#define IRDMA_UDAQPC_ISQP1_S 6
#define IRDMA_UDAQPC_ISQP1_M BIT_ULL(IRDMA_UDAQPC_ISQP1_S)
#define IRDMA_UDAQPC_ISQP1 BIT_ULL(6)
#define IRDMA_UDAQPC_RQWQESIZE_S IRDMAQPC_RQWQESIZE_S
#define IRDMA_UDAQPC_RQWQESIZE_M IRDMAQPC_RQWQESIZE_M
#define IRDMA_UDAQPC_RQWQESIZE IRDMAQPC_RQWQESIZE
#define IRDMA_UDAQPC_ECNENABLE_S 14
#define IRDMA_UDAQPC_ECNENABLE_M BIT_ULL(IRDMA_UDAQPC_ECNENABLE_S)
#define IRDMA_UDAQPC_ECNENABLE BIT_ULL(14)
#define IRDMA_UDAQPC_PDINDEXHI_S 20
#define IRDMA_UDAQPC_PDINDEXHI_M ((u64)3 << IRDMA_UDAQPC_PDINDEXHI_S)
#define IRDMA_UDAQPC_PDINDEXHI GENMASK_ULL(21, 20)
#define IRDMA_UDAQPC_DCTCPENABLE_S 25
#define IRDMA_UDAQPC_DCTCPENABLE_M BIT_ULL(IRDMA_UDAQPC_DCTCPENABLE_S)
#define IRDMA_UDAQPC_DCTCPENABLE BIT_ULL(25)
#define IRDMA_UDAQPC_RCVTPHEN_S IRDMAQPC_RCVTPHEN_S
#define IRDMA_UDAQPC_RCVTPHEN_M IRDMAQPC_RCVTPHEN_M
#define IRDMA_UDAQPC_RCVTPHEN IRDMAQPC_RCVTPHEN
#define IRDMA_UDAQPC_XMITTPHEN_S IRDMAQPC_XMITTPHEN_S
#define IRDMA_UDAQPC_XMITTPHEN_M IRDMAQPC_XMITTPHEN_M
#define IRDMA_UDAQPC_XMITTPHEN IRDMAQPC_XMITTPHEN
#define IRDMA_UDAQPC_RQTPHEN_S IRDMAQPC_RQTPHEN_S
#define IRDMA_UDAQPC_RQTPHEN_M IRDMAQPC_RQTPHEN_M
#define IRDMA_UDAQPC_RQTPHEN IRDMAQPC_RQTPHEN
#define IRDMA_UDAQPC_SQTPHEN_S IRDMAQPC_SQTPHEN_S
#define IRDMA_UDAQPC_SQTPHEN_M IRDMAQPC_SQTPHEN_M
#define IRDMA_UDAQPC_SQTPHEN IRDMAQPC_SQTPHEN
#define IRDMA_UDAQPC_PPIDX_S IRDMAQPC_PPIDX_S
#define IRDMA_UDAQPC_PPIDX_M IRDMAQPC_PPIDX_M
#define IRDMA_UDAQPC_PPIDX IRDMAQPC_PPIDX
#define IRDMA_UDAQPC_PMENA_S IRDMAQPC_PMENA_S
#define IRDMA_UDAQPC_PMENA_M IRDMAQPC_PMENA_M
#define IRDMA_UDAQPC_PMENA IRDMAQPC_PMENA
#define IRDMA_UDAQPC_INSERTTAG2_S 11
#define IRDMA_UDAQPC_INSERTTAG2_M BIT_ULL(IRDMA_UDAQPC_INSERTTAG2_S)
#define IRDMA_UDAQPC_INSERTTAG2 BIT_ULL(11)
#define IRDMA_UDAQPC_INSERTTAG3_S 14
#define IRDMA_UDAQPC_INSERTTAG3_M BIT_ULL(IRDMA_UDAQPC_INSERTTAG3_S)
#define IRDMA_UDAQPC_INSERTTAG3 BIT_ULL(14)
#define IRDMA_UDAQPC_RQSIZE_S IRDMAQPC_RQSIZE_S
#define IRDMA_UDAQPC_RQSIZE_M IRDMAQPC_RQSIZE_M
#define IRDMA_UDAQPC_RQSIZE IRDMAQPC_RQSIZE
#define IRDMA_UDAQPC_SQSIZE_S IRDMAQPC_SQSIZE_S
#define IRDMA_UDAQPC_SQSIZE_M IRDMAQPC_SQSIZE_M
#define IRDMA_UDAQPC_SQSIZE IRDMAQPC_SQSIZE
#define IRDMA_UDAQPC_TXCQNUM_S IRDMAQPC_TXCQNUM_S
#define IRDMA_UDAQPC_TXCQNUM_M IRDMAQPC_TXCQNUM_M
#define IRDMA_UDAQPC_TXCQNUM IRDMAQPC_TXCQNUM
#define IRDMA_UDAQPC_RXCQNUM_S IRDMAQPC_RXCQNUM_S
#define IRDMA_UDAQPC_RXCQNUM_M IRDMAQPC_RXCQNUM_M
#define IRDMA_UDAQPC_RXCQNUM IRDMAQPC_RXCQNUM
#define IRDMA_UDAQPC_QPCOMPCTX_S IRDMAQPC_QPCOMPCTX_S
#define IRDMA_UDAQPC_QPCOMPCTX_M IRDMAQPC_QPCOMPCTX_M
#define IRDMA_UDAQPC_QPCOMPCTX IRDMAQPC_QPCOMPCTX
#define IRDMA_UDAQPC_SQTPHVAL_S IRDMAQPC_SQTPHVAL_S
#define IRDMA_UDAQPC_SQTPHVAL_M IRDMAQPC_SQTPHVAL_M
#define IRDMA_UDAQPC_SQTPHVAL IRDMAQPC_SQTPHVAL
#define IRDMA_UDAQPC_RQTPHVAL_S IRDMAQPC_RQTPHVAL_S
#define IRDMA_UDAQPC_RQTPHVAL_M IRDMAQPC_RQTPHVAL_M
#define IRDMA_UDAQPC_RQTPHVAL IRDMAQPC_RQTPHVAL
#define IRDMA_UDAQPC_QSHANDLE_S IRDMAQPC_QSHANDLE_S
#define IRDMA_UDAQPC_QSHANDLE_M IRDMAQPC_QSHANDLE_M
#define IRDMA_UDAQPC_QSHANDLE IRDMAQPC_QSHANDLE
#define IRDMA_UDAQPC_RQHDRRINGBUFSIZE_S 48
#define IRDMA_UDAQPC_RQHDRRINGBUFSIZE_M \
((u64)0x3 << IRDMA_UDAQPC_RQHDRRINGBUFSIZE_S)
#define IRDMA_UDAQPC_RQHDRRINGBUFSIZE GENMASK_ULL(49, 48)
#define IRDMA_UDAQPC_SQHDRRINGBUFSIZE_S 32
#define IRDMA_UDAQPC_SQHDRRINGBUFSIZE_M \
((u64)0x3 << IRDMA_UDAQPC_SQHDRRINGBUFSIZE_S)
#define IRDMA_UDAQPC_SQHDRRINGBUFSIZE GENMASK_ULL(33, 32)
#define IRDMA_UDAQPC_PRIVILEGEENABLE_S 25
#define IRDMA_UDAQPC_PRIVILEGEENABLE_M \
BIT_ULL(IRDMA_UDAQPC_PRIVILEGEENABLE_S)
#define IRDMA_UDAQPC_PRIVILEGEENABLE BIT_ULL(25)
#define IRDMA_UDAQPC_USE_STATISTICS_INSTANCE_S 26
#define IRDMA_UDAQPC_USE_STATISTICS_INSTANCE_M \
BIT_ULL(IRDMA_UDAQPC_USE_STATISTICS_INSTANCE_S)
#define IRDMA_UDAQPC_USE_STATISTICS_INSTANCE BIT_ULL(26)
#define IRDMA_UDAQPC_STATISTICS_INSTANCE_INDEX_S 0
#define IRDMA_UDAQPC_STATISTICS_INSTANCE_INDEX_M \
((u64)0x7F << IRDMA_UDAQPC_STATISTICS_INSTANCE_INDEX_S)
#define IRDMA_UDAQPC_STATISTICS_INSTANCE_INDEX GENMASK_ULL(6, 0)
#define IRDMA_UDAQPC_PRIVHDRGENENABLE_S 0
#define IRDMA_UDAQPC_PRIVHDRGENENABLE_M \
BIT_ULL(IRDMA_UDAQPC_PRIVHDRGENENABLE_S)
#define IRDMA_UDAQPC_PRIVHDRGENENABLE BIT_ULL(0)
#define IRDMA_UDAQPC_RQHDRSPLITENABLE_S 3
#define IRDMA_UDAQPC_RQHDRSPLITENABLE_M \
BIT_ULL(IRDMA_UDAQPC_RQHDRSPLITENABLE_S)
#define IRDMA_UDAQPC_RQHDRSPLITENABLE BIT_ULL(3)
#define IRDMA_UDAQPC_RQHDRRINGBUFENABLE_S 2
#define IRDMA_UDAQPC_RQHDRRINGBUFENABLE_M \
BIT_ULL(IRDMA_UDAQPC_RQHDRRINGBUFENABLE_S)
#define IRDMA_UDAQPC_RQHDRRINGBUFENABLE BIT_ULL(2)
#define IRDMA_UDAQPC_SQHDRRINGBUFENABLE_S 1
#define IRDMA_UDAQPC_SQHDRRINGBUFENABLE_M \
BIT_ULL(IRDMA_UDAQPC_SQHDRRINGBUFENABLE_S)
#define IRDMA_UDAQPC_SQHDRRINGBUFENABLE BIT_ULL(1)
#define IRDMA_UDAQPC_IPID_S 32
#define IRDMA_UDAQPC_IPID_M ((u64)0xffff << IRDMA_UDAQPC_IPID_S)
#define IRDMA_UDAQPC_IPID GENMASK_ULL(47, 32)
#define IRDMA_UDAQPC_SNDMSS_S 16
#define IRDMA_UDAQPC_SNDMSS_M ((u64)0x3fff << IRDMA_UDAQPC_SNDMSS_S)
#define IRDMA_UDAQPC_SNDMSS GENMASK_ULL(29, 16)
#define IRDMA_UDAQPC_VLANTAG_S 0
#define IRDMA_UDAQPC_VLANTAG_M ((u64)0xffff << IRDMA_UDAQPC_VLANTAG_S)
/* Address Handle */
#define IRDMA_UDAQPC_VLANTAG GENMASK_ULL(15, 0)
#define IRDMA_UDA_CQPSQ_MAV_PDINDEXHI_S 20
#define IRDMA_UDA_CQPSQ_MAV_PDINDEXHI_M \
((u64)0x3 << IRDMA_UDA_CQPSQ_MAV_PDINDEXHI_S)
#define IRDMA_UDA_CQPSQ_MAV_PDINDEXHI GENMASK_ULL(21, 20)
#define IRDMA_UDA_CQPSQ_MAV_PDINDEXLO_S 48
#define IRDMA_UDA_CQPSQ_MAV_PDINDEXLO_M \
((u64)0xffff << IRDMA_UDA_CQPSQ_MAV_PDINDEXLO_S)
#define IRDMA_UDA_CQPSQ_MAV_PDINDEXLO GENMASK_ULL(63, 48)
#define IRDMA_UDA_CQPSQ_MAV_SRCMACADDRINDEX_S 24
#define IRDMA_UDA_CQPSQ_MAV_SRCMACADDRINDEX_M \
((u64)0x3f << IRDMA_UDA_CQPSQ_MAV_SRCMACADDRINDEX_S)
#define IRDMA_UDA_CQPSQ_MAV_SRCMACADDRINDEX GENMASK_ULL(29, 24)
#define IRDMA_UDA_CQPSQ_MAV_ARPINDEX_S 48
#define IRDMA_UDA_CQPSQ_MAV_ARPINDEX_M \
((u64)0xffff << IRDMA_UDA_CQPSQ_MAV_ARPINDEX_S)
#define IRDMA_UDA_CQPSQ_MAV_ARPINDEX GENMASK_ULL(63, 48)
#define IRDMA_UDA_CQPSQ_MAV_TC_S 32
#define IRDMA_UDA_CQPSQ_MAV_TC_M ((u64)0xff << IRDMA_UDA_CQPSQ_MAV_TC_S)
#define IRDMA_UDA_CQPSQ_MAV_TC GENMASK_ULL(39, 32)
#define IRDMA_UDA_CQPSQ_MAV_HOPLIMIT_S 32
#define IRDMA_UDA_CQPSQ_MAV_HOPLIMIT_M \
((u64)0xff << IRDMA_UDA_CQPSQ_MAV_HOPLIMIT_S)
#define IRDMA_UDA_CQPSQ_MAV_HOPLIMIT GENMASK_ULL(39, 32)
#define IRDMA_UDA_CQPSQ_MAV_FLOWLABEL_S 0
#define IRDMA_UDA_CQPSQ_MAV_FLOWLABEL_M \
((u64)0xfffff << IRDMA_UDA_CQPSQ_MAV_FLOWLABEL_S)
#define IRDMA_UDA_CQPSQ_MAV_FLOWLABEL GENMASK_ULL(19, 0)
#define IRDMA_UDA_CQPSQ_MAV_ADDR0_S 32
#define IRDMA_UDA_CQPSQ_MAV_ADDR0_M \
((u64)0xffffffff << IRDMA_UDA_CQPSQ_MAV_ADDR0_S)
#define IRDMA_UDA_CQPSQ_MAV_ADDR0 GENMASK_ULL(63, 32)
#define IRDMA_UDA_CQPSQ_MAV_ADDR1_S 0
#define IRDMA_UDA_CQPSQ_MAV_ADDR1_M \
((u64)0xffffffff << IRDMA_UDA_CQPSQ_MAV_ADDR1_S)
#define IRDMA_UDA_CQPSQ_MAV_ADDR1 GENMASK_ULL(31, 0)
#define IRDMA_UDA_CQPSQ_MAV_ADDR2_S 32
#define IRDMA_UDA_CQPSQ_MAV_ADDR2_M \
((u64)0xffffffff << IRDMA_UDA_CQPSQ_MAV_ADDR2_S)
#define IRDMA_UDA_CQPSQ_MAV_ADDR2 GENMASK_ULL(63, 32)
#define IRDMA_UDA_CQPSQ_MAV_ADDR3_S 0
#define IRDMA_UDA_CQPSQ_MAV_ADDR3_M \
((u64)0xffffffff << IRDMA_UDA_CQPSQ_MAV_ADDR3_S)
#define IRDMA_UDA_CQPSQ_MAV_ADDR3 GENMASK_ULL(31, 0)
#define IRDMA_UDA_CQPSQ_MAV_WQEVALID_S 63
#define IRDMA_UDA_CQPSQ_MAV_WQEVALID_M \
BIT_ULL(IRDMA_UDA_CQPSQ_MAV_WQEVALID_S)
#define IRDMA_UDA_CQPSQ_MAV_WQEVALID BIT_ULL(63)
#define IRDMA_UDA_CQPSQ_MAV_OPCODE_S 32
#define IRDMA_UDA_CQPSQ_MAV_OPCODE_M \
((u64)0x3f << IRDMA_UDA_CQPSQ_MAV_OPCODE_S)
#define IRDMA_UDA_CQPSQ_MAV_OPCODE GENMASK_ULL(37, 32)
#define IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK_S 62
#define IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK_M \
BIT_ULL(IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK_S)
#define IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK BIT_ULL(62)
#define IRDMA_UDA_CQPSQ_MAV_IPV4VALID_S 59
#define IRDMA_UDA_CQPSQ_MAV_IPV4VALID_M \
BIT_ULL(IRDMA_UDA_CQPSQ_MAV_IPV4VALID_S)
#define IRDMA_UDA_CQPSQ_MAV_IPV4VALID BIT_ULL(59)
#define IRDMA_UDA_CQPSQ_MAV_AVIDX_S 0
#define IRDMA_UDA_CQPSQ_MAV_AVIDX_M \
((u64)0x1ffff << IRDMA_UDA_CQPSQ_MAV_AVIDX_S)
#define IRDMA_UDA_CQPSQ_MAV_AVIDX GENMASK_ULL(16, 0)
#define IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG_S 60
#define IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG_M BIT_ULL(IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG_S)
/* UDA multicast group */
#define IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG BIT_ULL(60)
#define IRDMA_UDA_MGCTX_VFFLAG_S 29
#define IRDMA_UDA_MGCTX_VFFLAG_M BIT_ULL(IRDMA_UDA_MGCTX_VFFLAG_S)
#define IRDMA_UDA_MGCTX_VFFLAG BIT_ULL(29)
#define IRDMA_UDA_MGCTX_DESTPORT_S 32
#define IRDMA_UDA_MGCTX_DESTPORT_M ((u64)0xffff << IRDMA_UDA_MGCTX_DESTPORT_S)
#define IRDMA_UDA_MGCTX_DESTPORT GENMASK_ULL(47, 32)
#define IRDMA_UDA_MGCTX_VFID_S 22
#define IRDMA_UDA_MGCTX_VFID_M ((u64)0x7f << IRDMA_UDA_MGCTX_VFID_S)
#define IRDMA_UDA_MGCTX_VFID GENMASK_ULL(28, 22)
#define IRDMA_UDA_MGCTX_VALIDENT_S 31
#define IRDMA_UDA_MGCTX_VALIDENT_M BIT_ULL(IRDMA_UDA_MGCTX_VALIDENT_S)
#define IRDMA_UDA_MGCTX_VALIDENT BIT_ULL(31)
#define IRDMA_UDA_MGCTX_PFID_S 18
#define IRDMA_UDA_MGCTX_PFID_M ((u64)0xf << IRDMA_UDA_MGCTX_PFID_S)
#define IRDMA_UDA_MGCTX_PFID GENMASK_ULL(21, 18)
#define IRDMA_UDA_MGCTX_FLAGIGNOREDPORT_S 30
#define IRDMA_UDA_MGCTX_FLAGIGNOREDPORT_M \
BIT_ULL(IRDMA_UDA_MGCTX_FLAGIGNOREDPORT_S)
#define IRDMA_UDA_MGCTX_FLAGIGNOREDPORT BIT_ULL(30)
#define IRDMA_UDA_MGCTX_QPID_S 0
#define IRDMA_UDA_MGCTX_QPID_M ((u64)0x3ffff << IRDMA_UDA_MGCTX_QPID_S)
/* multicast group create CQP command */
#define IRDMA_UDA_MGCTX_QPID GENMASK_ULL(17, 0)
#define IRDMA_UDA_CQPSQ_MG_WQEVALID_S 63
#define IRDMA_UDA_CQPSQ_MG_WQEVALID_M \
BIT_ULL(IRDMA_UDA_CQPSQ_MG_WQEVALID_S)
#define IRDMA_UDA_CQPSQ_MG_WQEVALID BIT_ULL(63)
#define IRDMA_UDA_CQPSQ_MG_OPCODE_S 32
#define IRDMA_UDA_CQPSQ_MG_OPCODE_M ((u64)0x3f << IRDMA_UDA_CQPSQ_MG_OPCODE_S)
#define IRDMA_UDA_CQPSQ_MG_OPCODE GENMASK_ULL(37, 32)
#define IRDMA_UDA_CQPSQ_MG_MGIDX_S 0
#define IRDMA_UDA_CQPSQ_MG_MGIDX_M ((u64)0x1fff << IRDMA_UDA_CQPSQ_MG_MGIDX_S)
#define IRDMA_UDA_CQPSQ_MG_MGIDX GENMASK_ULL(12, 0)
#define IRDMA_UDA_CQPSQ_MG_IPV4VALID_S 60
#define IRDMA_UDA_CQPSQ_MG_IPV4VALID_M BIT_ULL(IRDMA_UDA_CQPSQ_MG_IPV4VALID_S)
#define IRDMA_UDA_CQPSQ_MG_IPV4VALID BIT_ULL(60)
#define IRDMA_UDA_CQPSQ_MG_VLANVALID_S 59
#define IRDMA_UDA_CQPSQ_MG_VLANVALID_M BIT_ULL(IRDMA_UDA_CQPSQ_MG_VLANVALID_S)
#define IRDMA_UDA_CQPSQ_MG_VLANVALID BIT_ULL(59)
#define IRDMA_UDA_CQPSQ_MG_HMC_FCN_ID_S 0
#define IRDMA_UDA_CQPSQ_MG_HMC_FCN_ID_M ((u64)0x3F << IRDMA_UDA_CQPSQ_MG_HMC_FCN_ID_S)
#define IRDMA_UDA_CQPSQ_MG_HMC_FCN_ID GENMASK_ULL(5, 0)
#define IRDMA_UDA_CQPSQ_MG_VLANID_S 32
#define IRDMA_UDA_CQPSQ_MG_VLANID_M ((u64)0xFFF << IRDMA_UDA_CQPSQ_MG_VLANID_S)
#define IRDMA_UDA_CQPSQ_MG_VLANID GENMASK_ULL(43, 32)
#define IRDMA_UDA_CQPSQ_QS_HANDLE_S 0
#define IRDMA_UDA_CQPSQ_QS_HANDLE_M ((u64)0x3FF << IRDMA_UDA_CQPSQ_QS_HANDLE_S)
/* Quad hash table */
#define IRDMA_UDA_CQPSQ_QS_HANDLE GENMASK_ULL(9, 0)
#define IRDMA_UDA_CQPSQ_QHASH_QPN_S 32
#define IRDMA_UDA_CQPSQ_QHASH_QPN_M \
((u64)0x3ffff << IRDMA_UDA_CQPSQ_QHASH_QPN_S)
#define IRDMA_UDA_CQPSQ_QHASH_QPN GENMASK_ULL(49, 32)
#define IRDMA_UDA_CQPSQ_QHASH__S 0
#define IRDMA_UDA_CQPSQ_QHASH__M BIT_ULL(IRDMA_UDA_CQPSQ_QHASH__S)
#define IRDMA_UDA_CQPSQ_QHASH_ BIT_ULL(0)
#define IRDMA_UDA_CQPSQ_QHASH_SRC_PORT_S 16
#define IRDMA_UDA_CQPSQ_QHASH_SRC_PORT_M \
((u64)0xffff << IRDMA_UDA_CQPSQ_QHASH_SRC_PORT_S)
#define IRDMA_UDA_CQPSQ_QHASH_SRC_PORT GENMASK_ULL(31, 16)
#define IRDMA_UDA_CQPSQ_QHASH_DEST_PORT_S 0
#define IRDMA_UDA_CQPSQ_QHASH_DEST_PORT_M \
((u64)0xffff << IRDMA_UDA_CQPSQ_QHASH_DEST_PORT_S)
#define IRDMA_UDA_CQPSQ_QHASH_DEST_PORT GENMASK_ULL(15, 0)
#define IRDMA_UDA_CQPSQ_QHASH_ADDR0_S 32
#define IRDMA_UDA_CQPSQ_QHASH_ADDR0_M \
((u64)0xffffffff << IRDMA_UDA_CQPSQ_QHASH_ADDR0_S)
#define IRDMA_UDA_CQPSQ_QHASH_ADDR0 GENMASK_ULL(63, 32)
#define IRDMA_UDA_CQPSQ_QHASH_ADDR1_S 0
#define IRDMA_UDA_CQPSQ_QHASH_ADDR1_M \
((u64)0xffffffff << IRDMA_UDA_CQPSQ_QHASH_ADDR1_S)
#define IRDMA_UDA_CQPSQ_QHASH_ADDR1 GENMASK_ULL(31, 0)
#define IRDMA_UDA_CQPSQ_QHASH_ADDR2_S 32
#define IRDMA_UDA_CQPSQ_QHASH_ADDR2_M \
((u64)0xffffffff << IRDMA_UDA_CQPSQ_QHASH_ADDR2_S)
#define IRDMA_UDA_CQPSQ_QHASH_ADDR2 GENMASK_ULL(63, 32)
#define IRDMA_UDA_CQPSQ_QHASH_ADDR3_S 0
#define IRDMA_UDA_CQPSQ_QHASH_ADDR3_M \
((u64)0xffffffff << IRDMA_UDA_CQPSQ_QHASH_ADDR3_S)
#define IRDMA_UDA_CQPSQ_QHASH_ADDR3 GENMASK_ULL(31, 0)
#define IRDMA_UDA_CQPSQ_QHASH_WQEVALID_S 63
#define IRDMA_UDA_CQPSQ_QHASH_WQEVALID_M \
BIT_ULL(IRDMA_UDA_CQPSQ_QHASH_WQEVALID_S)
#define IRDMA_UDA_CQPSQ_QHASH_WQEVALID BIT_ULL(63)
#define IRDMA_UDA_CQPSQ_QHASH_OPCODE_S 32
#define IRDMA_UDA_CQPSQ_QHASH_OPCODE_M \
((u64)0x3f << IRDMA_UDA_CQPSQ_QHASH_OPCODE_S)
#define IRDMA_UDA_CQPSQ_QHASH_OPCODE GENMASK_ULL(37, 32)
#define IRDMA_UDA_CQPSQ_QHASH_MANAGE_S 61
#define IRDMA_UDA_CQPSQ_QHASH_MANAGE_M \
((u64)0x3 << IRDMA_UDA_CQPSQ_QHASH_MANAGE_S)
#define IRDMA_UDA_CQPSQ_QHASH_MANAGE GENMASK_ULL(62, 61)
#define IRDMA_UDA_CQPSQ_QHASH_IPV4VALID_S 60
#define IRDMA_UDA_CQPSQ_QHASH_IPV4VALID_M \
((u64)0x1 << IRDMA_UDA_CQPSQ_QHASH_IPV4VALID_S)
#define IRDMA_UDA_CQPSQ_QHASH_IPV4VALID BIT_ULL(60)
#define IRDMA_UDA_CQPSQ_QHASH_LANFWD_S 59
#define IRDMA_UDA_CQPSQ_QHASH_LANFWD_M \
((u64)0x1 << IRDMA_UDA_CQPSQ_QHASH_LANFWD_S)
#define IRDMA_UDA_CQPSQ_QHASH_LANFWD BIT_ULL(59)
#define IRDMA_UDA_CQPSQ_QHASH_ENTRYTYPE_S 42
#define IRDMA_UDA_CQPSQ_QHASH_ENTRYTYPE_M \
((u64)0x7 << IRDMA_UDA_CQPSQ_QHASH_ENTRYTYPE_S)
#define IRDMA_UDA_CQPSQ_QHASH_ENTRYTYPE GENMASK_ULL(44, 42)
#endif /* IRDMA_UDA_D_H */
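
The header above is a representative slice of the register-define refactor called out in the change summary: the _S/_M shift-and-mask pairs are reworked into GENMASK_ULL(high, low)-style field defines that code elsewhere in this commit reads with FIELD_GET() instead of an open-coded shift. Below is a minimal, self-contained sketch of why one contiguous-mask define is enough, assuming only standard C plus a GCC/Clang builtin; field_get(), the local GENMASK_ULL fallback, and the sample value are illustrations, not the driver's compat helpers.

/*
 * Illustration only (not the driver's compat header): the mask alone carries
 * both the field's position and width, so a FIELD_GET()-style helper can
 * recover the shift from it.
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

/* old style: position and mask kept in two separate defines */
#define IRDMA_UDA_QPSQ_MACLEN_S 56
#define IRDMA_UDA_QPSQ_MACLEN_M ((uint64_t)0x7f << IRDMA_UDA_QPSQ_MACLEN_S)
/* new style: a single contiguous-mask define */
#define IRDMA_UDA_QPSQ_MACLEN GENMASK_ULL(62, 56)

static uint64_t field_get(uint64_t mask, uint64_t val)
{
	/* shift amount = index of the mask's lowest set bit */
	return (val & mask) >> __builtin_ctzll(mask);
}

int main(void)
{
	uint64_t qword = (uint64_t)0x2a << 56;	/* MACLEN field holds 0x2a */
	uint64_t old_way = (qword & IRDMA_UDA_QPSQ_MACLEN_M) >> IRDMA_UDA_QPSQ_MACLEN_S;
	uint64_t new_way = field_get(IRDMA_UDA_QPSQ_MACLEN, qword);

	/* both print 0x2a */
	printf("old=0x%llx new=0x%llx\n",
	       (unsigned long long)old_way, (unsigned long long)new_way);
	return 0;
}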

File diff suppressed because it is too large

View File

@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2015 - 2021 Intel Corporation
* Copyright (c) 2015 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -51,7 +51,7 @@
#define irdma_address_list u64 *
#define irdma_sgl struct irdma_sge *
#define IRDMA_MAX_MR_SIZE 0x200000000000ULL
#define IRDMA_MAX_MR_SIZE 0x200000000000ULL
#define IRDMA_ACCESS_FLAGS_LOCALREAD 0x01
#define IRDMA_ACCESS_FLAGS_LOCALWRITE 0x02
@ -78,7 +78,97 @@
#define IRDMA_OP_TYPE_REC 0x3e
#define IRDMA_OP_TYPE_REC_IMM 0x3f
#define IRDMA_FLUSH_MAJOR_ERR 1
#define IRDMA_FLUSH_MAJOR_ERR 1
#define IRDMA_SRQFLUSH_RSVD_MAJOR_ERR 0xfffe
/* Async Events codes */
#define IRDMA_AE_AMP_UNALLOCATED_STAG 0x0102
#define IRDMA_AE_AMP_INVALID_STAG 0x0103
#define IRDMA_AE_AMP_BAD_QP 0x0104
#define IRDMA_AE_AMP_BAD_PD 0x0105
#define IRDMA_AE_AMP_BAD_STAG_KEY 0x0106
#define IRDMA_AE_AMP_BAD_STAG_INDEX 0x0107
#define IRDMA_AE_AMP_BOUNDS_VIOLATION 0x0108
#define IRDMA_AE_AMP_RIGHTS_VIOLATION 0x0109
#define IRDMA_AE_AMP_TO_WRAP 0x010a
#define IRDMA_AE_AMP_FASTREG_VALID_STAG 0x010c
#define IRDMA_AE_AMP_FASTREG_MW_STAG 0x010d
#define IRDMA_AE_AMP_FASTREG_INVALID_RIGHTS 0x010e
#define IRDMA_AE_AMP_FASTREG_INVALID_LENGTH 0x0110
#define IRDMA_AE_AMP_INVALIDATE_SHARED 0x0111
#define IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS 0x0112
#define IRDMA_AE_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS 0x0113
#define IRDMA_AE_AMP_MWBIND_VALID_STAG 0x0114
#define IRDMA_AE_AMP_MWBIND_OF_MR_STAG 0x0115
#define IRDMA_AE_AMP_MWBIND_TO_ZERO_BASED_STAG 0x0116
#define IRDMA_AE_AMP_MWBIND_TO_MW_STAG 0x0117
#define IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS 0x0118
#define IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS 0x0119
#define IRDMA_AE_AMP_MWBIND_TO_INVALID_PARENT 0x011a
#define IRDMA_AE_AMP_MWBIND_BIND_DISABLED 0x011b
#define IRDMA_AE_PRIV_OPERATION_DENIED 0x011c
#define IRDMA_AE_AMP_INVALIDATE_TYPE1_MW 0x011d
#define IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW 0x011e
#define IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG 0x011f
#define IRDMA_AE_AMP_MWBIND_WRONG_TYPE 0x0120
#define IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH 0x0121
#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG 0x0132
#define IRDMA_AE_UDA_XMIT_BAD_PD 0x0133
#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT 0x0134
#define IRDMA_AE_UDA_L4LEN_INVALID 0x0135
#define IRDMA_AE_BAD_CLOSE 0x0201
#define IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE 0x0202
#define IRDMA_AE_CQ_OPERATION_ERROR 0x0203
#define IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO 0x0205
#define IRDMA_AE_STAG_ZERO_INVALID 0x0206
#define IRDMA_AE_IB_RREQ_AND_Q1_FULL 0x0207
#define IRDMA_AE_IB_INVALID_REQUEST 0x0208
#define IRDMA_AE_WQE_UNEXPECTED_OPCODE 0x020a
#define IRDMA_AE_WQE_INVALID_PARAMETER 0x020b
#define IRDMA_AE_WQE_INVALID_FRAG_DATA 0x020c
#define IRDMA_AE_IB_REMOTE_ACCESS_ERROR 0x020d
#define IRDMA_AE_IB_REMOTE_OP_ERROR 0x020e
#define IRDMA_AE_WQE_LSMM_TOO_LONG 0x0220
#define IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301
#define IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303
#define IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304
#define IRDMA_AE_DDP_UBE_INVALID_MO 0x0305
#define IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE 0x0306
#define IRDMA_AE_DDP_UBE_INVALID_QN 0x0307
#define IRDMA_AE_DDP_NO_L_BIT 0x0308
#define IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION 0x0311
#define IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE 0x0312
#define IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST 0x0313
#define IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP 0x0314
#define IRDMA_AE_ROCE_RSP_LENGTH_ERROR 0x0316
#define IRDMA_AE_ROCE_EMPTY_MCG 0x0380
#define IRDMA_AE_ROCE_BAD_MC_IP_ADDR 0x0381
#define IRDMA_AE_ROCE_BAD_MC_QPID 0x0382
#define IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH 0x0383
#define IRDMA_AE_INVALID_ARP_ENTRY 0x0401
#define IRDMA_AE_INVALID_TCP_OPTION_RCVD 0x0402
#define IRDMA_AE_STALE_ARP_ENTRY 0x0403
#define IRDMA_AE_INVALID_AH_ENTRY 0x0406
#define IRDMA_AE_LLP_CLOSE_COMPLETE 0x0501
#define IRDMA_AE_LLP_CONNECTION_RESET 0x0502
#define IRDMA_AE_LLP_FIN_RECEIVED 0x0503
#define IRDMA_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH 0x0504
#define IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR 0x0505
#define IRDMA_AE_LLP_SEGMENT_TOO_SMALL 0x0507
#define IRDMA_AE_LLP_SYN_RECEIVED 0x0508
#define IRDMA_AE_LLP_TERMINATE_RECEIVED 0x0509
#define IRDMA_AE_LLP_TOO_MANY_RETRIES 0x050a
#define IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b
#define IRDMA_AE_LLP_DOUBT_REACHABILITY 0x050c
#define IRDMA_AE_LLP_CONNECTION_ESTABLISHED 0x050e
#define IRDMA_AE_RESOURCE_EXHAUSTION 0x0520
#define IRDMA_AE_RESET_SENT 0x0601
#define IRDMA_AE_TERMINATE_SENT 0x0602
#define IRDMA_AE_RESET_NOT_SENT 0x0603
#define IRDMA_AE_LCE_QP_CATASTROPHIC 0x0700
#define IRDMA_AE_LCE_FUNCTION_CATASTROPHIC 0x0701
#define IRDMA_AE_LCE_CQ_CATASTROPHIC 0x0702
#define IRDMA_AE_QP_SUSPEND_COMPLETE 0x0900
enum irdma_device_caps_const {
IRDMA_WQE_SIZE = 4,
@ -119,6 +209,7 @@ enum irdma_device_caps_const {
IRDMA_Q2_BUF_SIZE = 256,
IRDMA_QP_CTX_SIZE = 256,
IRDMA_MAX_PDS = 262144,
IRDMA_MIN_WQ_SIZE_GEN2 = 8,
};
enum irdma_addressing_type {
@ -135,9 +226,15 @@ enum irdma_flush_opcode {
FLUSH_REM_OP_ERR,
FLUSH_LOC_LEN_ERR,
FLUSH_FATAL_ERR,
FLUSH_RETRY_EXC_ERR,
FLUSH_MW_BIND_ERR,
FLUSH_REM_INV_REQ_ERR,
FLUSH_RETRY_EXC_ERR,
};
enum irdma_qp_event_type {
IRDMA_QP_EVENT_CATASTROPHIC,
IRDMA_QP_EVENT_ACCESS_ERR,
IRDMA_QP_EVENT_REQ_ERR,
};
enum irdma_cmpl_status {
@ -193,7 +290,7 @@ struct irdma_sge {
struct irdma_ring {
volatile u32 head;
volatile u32 tail;
volatile u32 tail; /* effective tail */
u32 size;
};
@ -213,14 +310,6 @@ struct irdma_post_send {
u32 ah_id;
};
struct irdma_post_inline_send {
void *data;
u32 len;
u32 qkey;
u32 dest_qp;
u32 ah_id;
};
struct irdma_post_rq_info {
u64 wr_id;
irdma_sgl sg_list;
@ -233,12 +322,6 @@ struct irdma_rdma_write {
struct irdma_sge rem_addr;
};
struct irdma_inline_rdma_write {
void *data;
u32 len;
struct irdma_sge rem_addr;
};
struct irdma_rdma_read {
irdma_sgl lo_sg_list;
u32 num_lo_sges;
@ -281,8 +364,6 @@ struct irdma_post_sq_info {
struct irdma_rdma_read rdma_read;
struct irdma_bind_window bind_window;
struct irdma_inv_local_stag inv_local_stag;
struct irdma_inline_rdma_write inline_rdma_write;
struct irdma_post_inline_send inline_send;
} op;
};
@ -290,7 +371,6 @@ struct irdma_cq_poll_info {
u64 wr_id;
irdma_qp_handle qp_handle;
u32 bytes_xfered;
u32 tcp_seq_num_rtt;
u32 qp_id;
u32 ud_src_qpn;
u32 imm_data;
@ -301,6 +381,7 @@ struct irdma_cq_poll_info {
u16 ud_vlan;
u8 ud_smac[6];
u8 op_type;
u8 q_type;
bool stag_invalid_set:1; /* or L_R_Key set */
bool push_dropped:1;
bool error:1;
@ -310,6 +391,17 @@ struct irdma_cq_poll_info {
bool ud_smac_valid:1;
bool imm_valid:1;
bool signaled:1;
union {
u32 tcp_sqn;
u32 roce_psn;
u32 rtt;
u32 raw;
} stat;
};
struct qp_err_code {
enum irdma_flush_opcode flush_code;
enum irdma_qp_event_type event_type;
};
int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
@ -334,7 +426,7 @@ int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
bool post_sq);
struct irdma_wqe_uk_ops {
void (*iw_copy_inline_data)(u8 *dest, u8 *src, u32 len, u8 polarity);
void (*iw_copy_inline_data)(u8 *dest, struct irdma_sge *sge_list, u32 num_sges, u8 polarity);
u16 (*iw_inline_data_size_to_quanta)(u32 data_size);
void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct irdma_sge *sge,
u8 valid);
@ -352,6 +444,12 @@ int irdma_uk_cq_init(struct irdma_cq_uk *cq,
struct irdma_cq_uk_init_info *info);
int irdma_uk_qp_init(struct irdma_qp_uk *qp,
struct irdma_qp_uk_init_info *info);
void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
u8 *rq_shift);
int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
u32 *sq_depth, u8 *sq_shift);
int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
u32 *rq_depth, u8 *rq_shift);
struct irdma_sq_uk_wr_trk_info {
u64 wrid;
u32 wr_len;
@ -405,7 +503,6 @@ struct irdma_qp_uk {
bool destroy_pending:1; /* Indicates the QP is being destroyed */
void *back_qp;
spinlock_t *lock;
bool force_fence;
u8 dbg_rq_flushed;
u16 ord_cnt;
u8 sq_flush_seen;
@ -442,8 +539,12 @@ struct irdma_qp_uk_init_info {
u32 max_sq_frag_cnt;
u32 max_rq_frag_cnt;
u32 max_inline_data;
u32 sq_depth;
u32 rq_depth;
u8 first_sq_wq;
u8 type;
u8 sq_shift;
u8 rq_shift;
u8 rd_fence_rate;
int abi_ver;
bool legacy_mode;
@ -460,7 +561,7 @@ struct irdma_cq_uk_init_info {
};
__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
u16 quanta, u32 total_size,
u16 *quanta, u32 total_size,
struct irdma_post_sq_info *info);
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx);
int irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq);
@ -469,9 +570,81 @@ int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta);
int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size);
void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
u32 inline_data, u8 *shift);
int irdma_get_sqdepth(u32 max_hw_wq_quanta, u32 sq_size, u8 shift, u32 *wqdepth);
int irdma_get_rqdepth(u32 max_hw_rq_quanta, u32 rq_size, u8 shift, u32 *wqdepth);
int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift, u32 *sqdepth);
int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift, u32 *rqdepth);
int irdma_get_srqdepth(struct irdma_uk_attrs *uk_attrs, u32 srq_size, u8 shift, u32 *srqdepth);
void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
u32 wqe_idx, bool post_sq);
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx);
static inline struct qp_err_code irdma_ae_to_qp_err_code(u16 ae_id)
{
struct qp_err_code qp_err = { 0 };
switch (ae_id) {
case IRDMA_AE_AMP_BOUNDS_VIOLATION:
case IRDMA_AE_AMP_INVALID_STAG:
case IRDMA_AE_AMP_RIGHTS_VIOLATION:
case IRDMA_AE_AMP_UNALLOCATED_STAG:
case IRDMA_AE_AMP_BAD_PD:
case IRDMA_AE_AMP_BAD_QP:
case IRDMA_AE_AMP_BAD_STAG_KEY:
case IRDMA_AE_AMP_BAD_STAG_INDEX:
case IRDMA_AE_AMP_TO_WRAP:
case IRDMA_AE_PRIV_OPERATION_DENIED:
qp_err.flush_code = FLUSH_PROT_ERR;
qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
break;
case IRDMA_AE_UDA_XMIT_BAD_PD:
case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
qp_err.flush_code = FLUSH_LOC_QP_OP_ERR;
qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
break;
case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
case IRDMA_AE_UDA_L4LEN_INVALID:
case IRDMA_AE_DDP_UBE_INVALID_MO:
case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
qp_err.flush_code = FLUSH_LOC_LEN_ERR;
qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
break;
case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
qp_err.flush_code = FLUSH_REM_ACCESS_ERR;
qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
break;
case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
case IRDMA_AE_AMP_MWBIND_VALID_STAG:
qp_err.flush_code = FLUSH_MW_BIND_ERR;
qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
break;
case IRDMA_AE_LLP_TOO_MANY_RETRIES:
qp_err.flush_code = FLUSH_RETRY_EXC_ERR;
qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
break;
case IRDMA_AE_IB_INVALID_REQUEST:
qp_err.flush_code = FLUSH_REM_INV_REQ_ERR;
qp_err.event_type = IRDMA_QP_EVENT_REQ_ERR;
break;
case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
case IRDMA_AE_IB_REMOTE_OP_ERROR:
qp_err.flush_code = FLUSH_REM_OP_ERR;
qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
break;
case IRDMA_AE_LCE_QP_CATASTROPHIC:
qp_err.flush_code = FLUSH_FATAL_ERR;
qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
break;
default:
qp_err.flush_code = FLUSH_GENERAL_ERR;
qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
break;
}
return qp_err;
}
#endif /* IRDMA_USER_H */
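
irdma_ae_to_qp_err_code() above gives callers a single lookup that yields both consequences of an asynchronous event: the flush code for the queues and the QP event type to report. The standalone sketch below shows that one-lookup, two-consequences shape; the miniature enums, the two sample AE ids, and main() are illustrations only, not the driver's definitions.

#include <stdio.h>

/* Illustration only: a tiny mirror of the flush-code/event-type pairing. */
enum flush_code { FLUSH_GENERAL_ERR, FLUSH_PROT_ERR, FLUSH_RETRY_EXC_ERR };
enum event_type { EVENT_CATASTROPHIC, EVENT_ACCESS_ERR };

struct err_code {
	enum flush_code flush;
	enum event_type event;
};

/* Map an async event id to both consequences at once. */
static struct err_code ae_to_err_code(unsigned int ae_id)
{
	switch (ae_id) {
	case 0x0108:	/* bounds violation -> access error */
		return (struct err_code){ FLUSH_PROT_ERR, EVENT_ACCESS_ERR };
	case 0x050a:	/* too many retries -> catastrophic */
		return (struct err_code){ FLUSH_RETRY_EXC_ERR, EVENT_CATASTROPHIC };
	default:
		return (struct err_code){ FLUSH_GENERAL_ERR, EVENT_CATASTROPHIC };
	}
}

int main(void)
{
	struct err_code e = ae_to_err_code(0x0108);

	/* one lookup feeds both the flush path and the event to report */
	printf("flush=%d event=%d\n", e.flush, e.event);
	return 0;
}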

View File

@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2015 - 2021 Intel Corporation
* Copyright (c) 2015 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -46,7 +46,7 @@ DEFINE_SPINLOCK(irdma_handler_lock);
* @action: modify, delete or add
*/
int
irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, u8 *mac_addr,
irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, const u8 *mac_addr,
u32 action)
{
unsigned long flags;
@ -110,7 +110,7 @@ irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, u8 *mac_addr,
* @mac: MAC address
*/
int
irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, u8 *mac)
irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, const u8 *mac)
{
int arpidx;
@ -128,6 +128,57 @@ irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, u8 *mac)
return irdma_arp_table(rf, ip, NULL, IRDMA_ARP_RESOLVE);
}
/**
* irdma_netdevice_event - system notifier for netdev events
* @notifier: not used
* @event: event for notifier
* @ptr: netdev
*/
int
irdma_netdevice_event(struct notifier_block *notifier, unsigned long event,
void *ptr)
{
struct irdma_device *iwdev;
struct ifnet *netdev = netdev_notifier_info_to_ifp(ptr);
iwdev = container_of(notifier, struct irdma_device, nb_netdevice_event);
if (iwdev->netdev != netdev)
return NOTIFY_DONE;
iwdev->iw_status = 1;
switch (event) {
case NETDEV_DOWN:
iwdev->iw_status = 0;
/* fallthrough */
case NETDEV_UP:
irdma_port_ibevent(iwdev);
break;
default:
break;
}
return NOTIFY_DONE;
}
void
irdma_unregister_notifiers(struct irdma_device *iwdev)
{
unregister_netdevice_notifier(&iwdev->nb_netdevice_event);
}
int
irdma_register_notifiers(struct irdma_device *iwdev)
{
int ret;
iwdev->nb_netdevice_event.notifier_call = irdma_netdevice_event;
ret = register_netdevice_notifier(&iwdev->nb_netdevice_event);
if (ret) {
ibdev_err(&iwdev->ibdev, "register_netdevice_notifier failed\n");
return ret;
}
return ret;
}
/**
* irdma_alloc_and_get_cqp_request - get cqp struct
* @cqp: device cqp ptr
@ -252,7 +303,7 @@ irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf)
pending_work = IRDMA_RING_USED_QUANTA(cqp->sc_cqp.sq_ring);
wqe_idx = IRDMA_RING_CURRENT_TAIL(cqp->sc_cqp.sq_ring);
for (i = 0; i < pending_work; i++) {
cqp_request = (struct irdma_cqp_request *)(unsigned long)
cqp_request = (struct irdma_cqp_request *)(uintptr_t)
cqp->scratch_array[wqe_idx];
if (cqp_request)
irdma_free_pending_cqp_request(cqp, cqp_request);
@ -278,20 +329,23 @@ irdma_wait_event(struct irdma_pci_f *rf,
struct irdma_cqp_request *cqp_request)
{
struct irdma_cqp_timeout cqp_timeout = {0};
int timeout_threshold = CQP_TIMEOUT_THRESHOLD;
bool cqp_error = false;
int err_code = 0;
cqp_timeout.compl_cqp_cmds = rf->sc_dev.cqp_cmd_stats[IRDMA_OP_CMPL_CMDS];
do {
int wait_time_ms = rf->sc_dev.hw_attrs.max_cqp_compl_wait_time_ms;
irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
if (wait_event_timeout(cqp_request->waitq,
cqp_request->request_done,
msecs_to_jiffies(CQP_COMPL_WAIT_TIME_MS)))
msecs_to_jiffies(wait_time_ms)))
break;
irdma_check_cqp_progress(&cqp_timeout, &rf->sc_dev);
if (cqp_timeout.count < CQP_TIMEOUT_THRESHOLD)
if (cqp_timeout.count < timeout_threshold)
continue;
if (!rf->reset) {
@ -304,11 +358,14 @@ irdma_wait_event(struct irdma_pci_f *rf,
cqp_error = cqp_request->compl_info.error;
if (cqp_error) {
err_code = -EIO;
if (cqp_request->compl_info.maj_err_code == 0xFFFF &&
cqp_request->compl_info.min_err_code == 0x8029) {
if (!rf->reset) {
rf->reset = true;
rf->gen_ops.request_reset(rf);
if (cqp_request->compl_info.maj_err_code == 0xFFFF) {
if (cqp_request->compl_info.min_err_code == 0x8002) {
err_code = -EBUSY;
} else if (cqp_request->compl_info.min_err_code == 0x8029) {
if (!rf->reset) {
rf->reset = true;
rf->gen_ops.request_reset(rf);
}
}
}
}
@ -366,10 +423,12 @@ static const char *const irdma_cqp_cmd_names[IRDMA_MAX_CQP_OPS] = {
};
static const struct irdma_cqp_err_info irdma_noncrit_err_list[] = {
{0xffff, 0x8002, "Invalid State"},
{0xffff, 0x8006, "Flush No Wqe Pending"},
{0xffff, 0x8007, "Modify QP Bad Close"},
{0xffff, 0x8009, "LLP Closed"},
{0xffff, 0x800a, "Reset Not Sent"}
{0xffff, 0x800a, "Reset Not Sent"},
{0xffff, 0x200, "Failover Pending"}
};
/**
@ -521,16 +580,6 @@ irdma_get_qp(struct ib_device *device, int qpn)
return &iwdev->rf->qp_table[qpn]->ibqp;
}
/**
* irdma_get_hw_addr - return hw addr
* @par: points to shared dev
*/
u8 __iomem * irdma_get_hw_addr(void *par){
struct irdma_sc_dev *dev = par;
return dev->hw->hw_addr;
}
/**
* irdma_remove_cqp_head - return head entry and remove
* @dev: device
@ -713,7 +762,7 @@ irdma_terminate_del_timer(struct irdma_sc_qp *qp)
*/
int
irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
struct irdma_dma_mem *val_mem, u8 hmc_fn_id)
struct irdma_dma_mem *val_mem, u16 hmc_fn_id)
{
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
@ -748,7 +797,7 @@ irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
*/
int
irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
struct irdma_dma_mem *val_mem, u8 hmc_fn_id)
struct irdma_dma_mem *val_mem, u16 hmc_fn_id)
{
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
@ -1803,7 +1852,7 @@ irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
sizeofbitmap = (u64)pchunk->size >> pprm->pble_shift;
pchunk->bitmapmem.size = sizeofbitmap >> 3;
pchunk->bitmapmem.va = kzalloc(pchunk->bitmapmem.size, GFP_ATOMIC);
pchunk->bitmapmem.va = kzalloc(pchunk->bitmapmem.size, GFP_KERNEL);
if (!pchunk->bitmapmem.va)
return -ENOMEM;
@ -2056,6 +2105,9 @@ irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event)
case IRDMA_QP_EVENT_ACCESS_ERR:
ibevent.event = IB_EVENT_QP_ACCESS_ERR;
break;
case IRDMA_QP_EVENT_REQ_ERR:
ibevent.event = IB_EVENT_QP_REQ_ERR;
break;
}
ibevent.device = iwqp->ibqp.device;
ibevent.element.qp = &iwqp->ibqp;
@ -2168,7 +2220,7 @@ irdma_cq_empty(struct irdma_cq *iwcq)
ukcq = &iwcq->sc_cq.cq_uk;
cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
get_64bit_val(cqe, 24, &qword3);
polarity = (u8)RS_64(qword3, IRDMA_CQ_VALID);
polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
return polarity != ukcq->polarity;
}
@ -2191,7 +2243,7 @@ irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_
{
struct irdma_cmpl_gen *cmpl;
if (!iwcq || list_empty(&iwcq->cmpl_generated))
if (list_empty(&iwcq->cmpl_generated))
return -ENOENT;
cmpl = list_first_entry_or_null(&iwcq->cmpl_generated, struct irdma_cmpl_gen, list);
list_del(&cmpl->list);
@ -2226,7 +2278,10 @@ irdma_set_cpi_common_values(struct irdma_cq_poll_info *cpi,
static inline void
irdma_comp_handler(struct irdma_cq *cq)
{
if (cq->sc_cq.cq_uk.armed && cq->ibcq.comp_handler)
if (!cq->ibcq.comp_handler)
return;
if (atomic_cmpxchg(&cq->armed, 1, 0))
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
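
In the rewritten irdma_comp_handler above, cq->armed is consumed with atomic_cmpxchg(&cq->armed, 1, 0) (the field itself changes from bool to atomic_t in the irdma_verbs.h portion of this commit), so when several completion paths race to notify the same CQ, e.g. the generated SQ and RQ flush completions just below, only the caller that actually claims the arm invokes the completion handler. A small standalone sketch of that claim-the-arm pattern, using C11 atomics purely for illustration:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for cq->armed: 1 means the consumer re-armed notifications. */
static atomic_int armed = 1;

/* Mirrors atomic_cmpxchg(&cq->armed, 1, 0): true only for the claiming caller. */
static bool claim_arm(void)
{
	int expected = 1;

	return atomic_compare_exchange_strong(&armed, &expected, 0);
}

int main(void)
{
	/* Two completion paths race to notify the same CQ... */
	printf("first caller fires handler:  %d\n", claim_arm());
	/* ...the second finds the arm already spent and stays silent. */
	printf("second caller fires handler: %d\n", claim_arm());
	return 0;
}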
@ -2246,15 +2301,20 @@ irdma_generate_flush_completions(struct irdma_qp *iwqp)
u32 wqe_idx;
u8 compl_generated = 0;
unsigned long flags;
bool reschedule = false;
#define SQ_COMPL_GENERATED (0x01)
#define RQ_COMPL_GENERATED (0x02)
spin_lock_irqsave(&iwqp->iwscq->lock, flags);
if (irdma_cq_empty(iwqp->iwscq)) {
unsigned long flags2;
spin_lock_irqsave(&iwqp->lock, flags2);
while (IRDMA_RING_MORE_WORK(*sq_ring)) {
cmpl = kzalloc(sizeof(*cmpl), GFP_KERNEL);
cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC);
if (!cmpl) {
spin_unlock_irqrestore(&iwqp->lock, flags2);
spin_unlock_irqrestore(&iwqp->iwscq->lock, flags);
return;
}
@ -2266,7 +2326,8 @@ irdma_generate_flush_completions(struct irdma_qp *iwqp)
cmpl->cpi.signaled = qp->sq_wrtrk_array[wqe_idx].signaled;
sw_wqe = qp->sq_base[wqe_idx].elem;
get_64bit_val(sw_wqe, IRDMA_BYTE_24, &wqe_qword);
cmpl->cpi.op_type = (u8)RS_64(wqe_qword, IRDMAQPSQ_OPCODE);
cmpl->cpi.op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword);
cmpl->cpi.q_type = IRDMA_CQE_QTYPE_SQ;
/* remove the SQ WR by moving SQ tail */
IRDMA_RING_SET_TAIL(*sq_ring, sq_ring->tail + qp->sq_wrtrk_array[sq_ring->tail].quanta);
@ -2275,16 +2336,22 @@ irdma_generate_flush_completions(struct irdma_qp *iwqp)
list_add_tail(&cmpl->list, &iwqp->iwscq->cmpl_generated);
compl_generated |= SQ_COMPL_GENERATED;
}
spin_unlock_irqrestore(&iwqp->lock, flags2);
spin_unlock_irqrestore(&iwqp->iwscq->lock, flags);
} else {
mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, IRDMA_FLUSH_DELAY_MS / 2);
spin_unlock_irqrestore(&iwqp->iwscq->lock, flags);
reschedule = true;
}
spin_unlock_irqrestore(&iwqp->iwscq->lock, flags);
spin_lock_irqsave(&iwqp->iwrcq->lock, flags);
if (irdma_cq_empty(iwqp->iwrcq)) {
unsigned long flags2;
spin_lock_irqsave(&iwqp->lock, flags2);
while (IRDMA_RING_MORE_WORK(*rq_ring)) {
cmpl = kzalloc(sizeof(*cmpl), GFP_KERNEL);
cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC);
if (!cmpl) {
spin_unlock_irqrestore(&iwqp->lock, flags2);
spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags);
return;
}
@ -2295,6 +2362,7 @@ irdma_generate_flush_completions(struct irdma_qp *iwqp)
cmpl->cpi.wr_id = qp->rq_wrid_array[wqe_idx];
cmpl->cpi.signaled = 1;
cmpl->cpi.op_type = IRDMA_OP_TYPE_REC;
cmpl->cpi.q_type = IRDMA_CQE_QTYPE_RQ;
/* remove the RQ WR by moving RQ tail */
IRDMA_RING_SET_TAIL(*rq_ring, rq_ring->tail + 1);
irdma_debug(iwqp->sc_qp.dev, IRDMA_DEBUG_DEV,
@ -2304,22 +2372,26 @@ irdma_generate_flush_completions(struct irdma_qp *iwqp)
compl_generated |= RQ_COMPL_GENERATED;
}
spin_unlock_irqrestore(&iwqp->lock, flags2);
spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags);
} else {
mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, IRDMA_FLUSH_DELAY_MS / 2);
spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags);
reschedule = true;
}
spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags);
if (iwqp->iwscq == iwqp->iwrcq) {
if (compl_generated)
if (reschedule)
irdma_sched_qp_flush_work(iwqp);
if (compl_generated) {
if (iwqp->iwscq == iwqp->iwrcq) {
irdma_comp_handler(iwqp->iwscq);
return;
}
if (compl_generated & SQ_COMPL_GENERATED)
irdma_comp_handler(iwqp->iwscq);
if (compl_generated & RQ_COMPL_GENERATED)
irdma_comp_handler(iwqp->iwrcq);
if (compl_generated)
} else {
if (compl_generated & SQ_COMPL_GENERATED)
irdma_comp_handler(iwqp->iwscq);
if (compl_generated & RQ_COMPL_GENERATED)
irdma_comp_handler(iwqp->iwrcq);
}
irdma_debug(iwqp->sc_qp.dev, IRDMA_DEBUG_VERBS,
"0x%X (SQ 0x1, RQ 0x2, both 0x3) completions generated for QP %d\n",
compl_generated, iwqp->ibqp.qp_num);
}
}

File diff suppressed because it is too large

View File

@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2015 - 2021 Intel Corporation
* Copyright (c) 2015 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -37,7 +37,7 @@
#define IRDMA_VERBS_H
#define IRDMA_MAX_SAVED_PHY_PGADDR 4
#define IRDMA_FLUSH_DELAY_MS 1500
#define IRDMA_FLUSH_DELAY_MS 20
#define IRDMA_PKEY_TBL_SZ 1
#define IRDMA_DEFAULT_PKEY 0xFFFF
@ -132,6 +132,8 @@ struct irdma_mr {
struct ib_mw ibmw;
};
struct ib_umem *region;
int access;
u8 is_hwreg;
u16 type;
u32 page_cnt;
u64 page_size;
@ -150,7 +152,7 @@ struct irdma_cq {
u16 cq_size;
u16 cq_num;
bool user_mode;
bool armed;
atomic_t armed;
enum irdma_cmpl_notify last_notify;
u32 polled_cmpls;
u32 cq_mem_size;
@ -224,13 +226,6 @@ struct irdma_qp {
void *pbl_vbase;
dma_addr_t pbl_pbase;
struct page *page;
u8 active_conn : 1;
u8 user_mode : 1;
u8 hte_added : 1;
u8 flush_issued : 1;
u8 sig_all : 1;
u8 pau_mode : 1;
u8 rsvd : 1;
u8 iwarp_state;
u16 term_sq_flush_code;
u16 term_rq_flush_code;
@ -247,6 +242,12 @@ struct irdma_qp {
wait_queue_head_t waitq;
wait_queue_head_t mod_qp_waitq;
u8 rts_ae_rcvd;
u8 active_conn : 1;
u8 user_mode : 1;
u8 hte_added : 1;
u8 flush_issued : 1;
u8 sig_all : 1;
u8 pau_mode : 1;
};
enum irdma_mmap_flag {
@ -262,12 +263,12 @@ struct irdma_user_mmap_entry {
static inline u16 irdma_fw_major_ver(struct irdma_sc_dev *dev)
{
return (u16)RS_64(dev->feature_info[IRDMA_FEATURE_FW_INFO], IRDMA_FW_VER_MAJOR);
return (u16)FIELD_GET(IRDMA_FW_VER_MAJOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
}
static inline u16 irdma_fw_minor_ver(struct irdma_sc_dev *dev)
{
return (u16)RS_64(dev->feature_info[IRDMA_FEATURE_FW_INFO], IRDMA_FW_VER_MINOR);
return (u16)FIELD_GET(IRDMA_FW_VER_MINOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
}
/**
@ -304,10 +305,10 @@ irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset,
enum irdma_mmap_flag mmap_flag, u64 *mmap_offset);
int irdma_ib_register_device(struct irdma_device *iwdev);
void irdma_ib_unregister_device(struct irdma_device *iwdev);
void irdma_ib_dealloc_device(struct ib_device *ibdev);
void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event);
void irdma_generate_flush_completions(struct irdma_qp *iwqp);
void irdma_remove_cmpls_list(struct irdma_cq *iwcq);
int irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info);
void irdma_sched_qp_flush_work(struct irdma_qp *iwqp);
void irdma_flush_worker(struct work_struct *work);
#endif /* IRDMA_VERBS_H */

View File

@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2017 - 2021 Intel Corporation
* Copyright (c) 2017 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -59,7 +59,7 @@ irdma_alloc_node(struct irdma_sc_vsi *vsi,
u16 node_index = 0;
ws_mem.size = sizeof(struct irdma_ws_node);
ws_mem.va = kzalloc(ws_mem.size, GFP_ATOMIC);
ws_mem.va = kzalloc(ws_mem.size, GFP_KERNEL);
if (!ws_mem.va)
return NULL;

View File

@ -133,7 +133,7 @@ do { \
#define irdma_dev_warn(a, b, ...) printf(b, ##__VA_ARGS__) /*dev_warn(a, b)*/
#define irdma_dev_info(a, b, ...) printf(b, ##__VA_ARGS__)
#define irdma_pr_warn printf
#define ibdev_err(ibdev, fmt, ...) irdma_dev_err(&((ibdev)->dev), fmt, ##__VA_ARGS__)
#define ibdev_err(ibdev, fmt, ...) printf("%s:"fmt, (ibdev)->name, ##__VA_ARGS__)
#define dump_struct(s, sz, name) \
do { \
@ -183,8 +183,10 @@ struct irdma_dev_ctx {
#define irdma_usec_delay(x) DELAY(x)
#define mdelay(x) DELAY((x) * 1000)
#define rt_tos2priority(tos) (((tos >> 1) & 0x8 >> 1) | ((tos >> 2) ^ ((tos >> 3) << 1)))
#define rt_tos2priority(tos) (tos >> 5)
#define ah_attr_to_dmac(attr) ((attr).dmac)
#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \
ib_modify_qp_is_ok(cur_state, next_state, type, mask)
#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \
ib_gid_to_network_type(gid_type, gid)
#define irdma_del_timer_compat(tt) del_timer((tt))