irdma: Add RDMA driver for Intel(R) Ethernet Controller E810

This is an initial commit of the RDMA FreeBSD driver for the Intel(R)
Ethernet Controller E810, called irdma.  It supports both the RoCEv2 and
iWARP protocols in a per-PF manner, with RoCEv2 being the default.

Testing has been done using the krping tool, perftest, ucmatose, rping,
ud_pingpong, rc_pingpong, and others.

Signed-off-by: Eric Joyner <erj@FreeBSD.org>

Reviewed by:	#manpages (pauamma_gundo.com) [documentation]
MFC after:	1 week
Relnotes:	yes
Sponsored by:	Intel Corporation
Differential Revision:	https://reviews.freebsd.org/D34690
Bartosz Sobczak 2022-05-23 16:39:27 -07:00 committed by Eric Joyner
parent 8a13362d49
commit cdcd52d41e
59 changed files with 42551 additions and 2 deletions

contrib/ofed/libirdma/abi.h (new file)

@@ -0,0 +1,180 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (C) 2019 - 2020 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#ifndef PROVIDER_IRDMA_ABI_H
#define PROVIDER_IRDMA_ABI_H
#include "irdma.h"
#include <infiniband/kern-abi.h>
#include "irdma-abi.h"
#define IRDMA_MIN_ABI_VERSION 0
#define IRDMA_MAX_ABI_VERSION 5
struct irdma_ualloc_pd_resp {
struct ibv_alloc_pd_resp ibv_resp;
__u32 pd_id;
__u8 rsvd[4];
};
struct irdma_ucreate_cq {
struct ibv_create_cq ibv_cmd;
__aligned_u64 user_cq_buf;
__aligned_u64 user_shadow_area;
};
struct irdma_ucreate_cq_resp {
struct ibv_create_cq_resp ibv_resp;
__u32 cq_id;
__u32 cq_size;
};
struct irdma_ucreate_cq_ex {
struct ibv_create_cq_ex ibv_cmd;
__aligned_u64 user_cq_buf;
__aligned_u64 user_shadow_area;
};
struct irdma_ucreate_cq_ex_resp {
struct ibv_create_cq_resp_ex ibv_resp;
__u32 cq_id;
__u32 cq_size;
};
struct irdma_uresize_cq {
struct ibv_resize_cq ibv_cmd;
__aligned_u64 user_cq_buffer;
};
struct irdma_uresize_cq_resp {
struct ibv_resize_cq_resp ibv_resp;
};
struct irdma_ucreate_qp {
struct ibv_create_qp ibv_cmd;
__aligned_u64 user_wqe_bufs;
__aligned_u64 user_compl_ctx;
};
struct irdma_ucreate_qp_resp {
struct ibv_create_qp_resp ibv_resp;
__u32 qp_id;
__u32 actual_sq_size;
__u32 actual_rq_size;
__u32 irdma_drv_opt;
__u16 push_idx;
__u8 lsmm;
__u8 rsvd;
__u32 qp_caps;
};
struct irdma_umodify_qp_resp {
struct ibv_modify_qp_resp_ex ibv_resp;
__aligned_u64 push_wqe_mmap_key;
__aligned_u64 push_db_mmap_key;
__u16 push_offset;
__u8 push_valid;
__u8 rd_fence_rate;
__u8 rsvd[4];
};
struct irdma_get_context {
struct ibv_get_context ibv_cmd;
__u32 rsvd32;
__u8 userspace_ver;
__u8 rsvd8[3];
};
struct irdma_get_context_resp {
struct ibv_get_context_resp ibv_resp;
__u32 max_pds;
__u32 max_qps;
__u32 wq_size; /* size of the WQs (SQ+RQ) in the mmaped area */
__u8 kernel_ver;
__u8 rsvd[3];
__aligned_u64 feature_flags;
__aligned_u64 db_mmap_key;
__u32 max_hw_wq_frags;
__u32 max_hw_read_sges;
__u32 max_hw_inline;
__u32 max_hw_rq_quanta;
__u32 max_hw_wq_quanta;
__u32 min_hw_cq_size;
__u32 max_hw_cq_size;
__u16 max_hw_sq_chunk;
__u8 hw_rev;
__u8 rsvd2;
};
struct irdma_ureg_mr {
struct ibv_reg_mr ibv_cmd;
__u16 reg_type; /* enum irdma_memreg_type */
__u16 cq_pages;
__u16 rq_pages;
__u16 sq_pages;
};
struct irdma_ucreate_ah_resp {
struct ibv_create_ah_resp ibv_resp;
__u32 ah_id;
__u8 rsvd[4];
};
struct irdma_modify_qp_cmd {
struct ibv_modify_qp_ex ibv_cmd;
__u8 sq_flush;
__u8 rq_flush;
__u8 rsvd[6];
};
struct irdma_query_device_ex {
struct ibv_query_device_ex ibv_cmd;
};
struct irdma_query_device_ex_resp {
struct ibv_query_device_resp_ex ibv_resp;
__u32 comp_mask;
__u32 response_length;
struct ibv_odp_caps_resp odp_caps;
__u64 timestamp_mask;
__u64 hca_core_clock;
__u64 device_cap_flags_ex;
struct ibv_rss_caps_resp rss_caps;
__u32 max_wq_type_rq;
__u32 raw_packet_caps;
struct ibv_tso_caps tso_caps;
};
#endif /* PROVIDER_IRDMA_ABI_H */
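
The command/response wrappers above extend the generic libibverbs
marshalling structs with irdma-specific trailers.  As a minimal sketch
(not the driver's actual irdma_ualloc_pd, and assuming the classic
ibv_cmd_alloc_pd() helper from <infiniband/driver.h>; example_alloc_pd
is a hypothetical name), a provider-side PD allocation pairs them like
this:

#include <infiniband/driver.h>
#include <stdlib.h>
#include "abi.h"

struct ibv_pd *
example_alloc_pd(struct ibv_context *ctx)
{
	struct ibv_alloc_pd cmd;
	struct irdma_ualloc_pd_resp resp = {};
	struct ibv_pd *pd;

	pd = calloc(1, sizeof(*pd));
	if (!pd)
		return NULL;
	/* the kernel fills pd_id in the trailer that follows ibv_resp */
	if (ibv_cmd_alloc_pd(ctx, pd, &cmd, sizeof(cmd),
			     &resp.ibv_resp, sizeof(resp))) {
		free(pd);
		return NULL;
	}
	/* resp.pd_id now names the hardware PD for later requests */
	return pd;
}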

contrib/ofed/libirdma/i40e_devids.h (new file)

@@ -0,0 +1,69 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2015 - 2019 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#ifndef I40E_DEVIDS_H
#define I40E_DEVIDS_H
/* Vendor ID */
#define I40E_INTEL_VENDOR_ID 0x8086
/* Device IDs */
#define I40E_DEV_ID_SFP_XL710 0x1572
#define I40E_DEV_ID_QEMU 0x1574
#define I40E_DEV_ID_KX_B 0x1580
#define I40E_DEV_ID_KX_C 0x1581
#define I40E_DEV_ID_QSFP_A 0x1583
#define I40E_DEV_ID_QSFP_B 0x1584
#define I40E_DEV_ID_QSFP_C 0x1585
#define I40E_DEV_ID_10G_BASE_T 0x1586
#define I40E_DEV_ID_20G_KR2 0x1587
#define I40E_DEV_ID_20G_KR2_A 0x1588
#define I40E_DEV_ID_10G_BASE_T4 0x1589
#define I40E_DEV_ID_25G_B 0x158A
#define I40E_DEV_ID_25G_SFP28 0x158B
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
#define I40E_DEV_ID_X722_A0 0x374C
#define I40E_DEV_ID_X722_A0_VF 0x374D
#define I40E_DEV_ID_KX_X722 0x37CE
#define I40E_DEV_ID_QSFP_X722 0x37CF
#define I40E_DEV_ID_SFP_X722 0x37D0
#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
#define I40E_DEV_ID_SFP_I_X722 0x37D3
#define I40E_DEV_ID_X722_VF 0x37CD
#define I40E_DEV_ID_X722_VF_HV 0x37D9
#endif /* I40E_DEVIDS_H */

contrib/ofed/libirdma/i40iw_hw.h (new file)

@@ -0,0 +1,64 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2015 - 2020 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#ifndef I40IW_HW_H
#define I40IW_HW_H
enum i40iw_device_caps_const {
I40IW_MAX_WQ_FRAGMENT_COUNT = 3,
I40IW_MAX_SGE_RD = 1,
I40IW_MAX_PUSH_PAGE_COUNT = 0,
I40IW_MAX_INLINE_DATA_SIZE = 48,
I40IW_MAX_IRD_SIZE = 63,
I40IW_MAX_ORD_SIZE = 127,
I40IW_MAX_WQ_ENTRIES = 2048,
I40IW_MAX_WQE_SIZE_RQ = 128,
I40IW_MAX_PDS = 32768,
I40IW_MAX_STATS_COUNT = 16,
I40IW_MAX_CQ_SIZE = 1048575,
I40IW_MAX_OUTBOUND_MSG_SIZE = 2147483647,
I40IW_MAX_INBOUND_MSG_SIZE = 2147483647,
};
#define I40IW_QP_WQE_MIN_SIZE 32
#define I40IW_QP_WQE_MAX_SIZE 128
#define I40IW_QP_SW_MIN_WQSIZE 4
#define I40IW_MAX_RQ_WQE_SHIFT 2
#define I40IW_MAX_QUANTA_PER_WR 2
#define I40IW_QP_SW_MAX_SQ_QUANTA 2048
#define I40IW_QP_SW_MAX_RQ_QUANTA 16384
#define I40IW_QP_SW_MAX_WQ_QUANTA 2048
#endif /* I40IW_HW_H */

contrib/ofed/libirdma/ice_devids.h (new file)

@@ -0,0 +1,92 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2019 - 2020 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#ifndef ICE_DEVIDS_H
#define ICE_DEVIDS_H
#define PCI_VENDOR_ID_INTEL 0x8086
/* Device IDs */
/* Intel(R) Ethernet Connection E823-L for backplane */
#define ICE_DEV_ID_E823L_BACKPLANE 0x124C
/* Intel(R) Ethernet Connection E823-L for SFP */
#define ICE_DEV_ID_E823L_SFP 0x124D
/* Intel(R) Ethernet Connection E823-L/X557-AT 10GBASE-T */
#define ICE_DEV_ID_E823L_10G_BASE_T 0x124E
/* Intel(R) Ethernet Connection E823-L 1GbE */
#define ICE_DEV_ID_E823L_1GBE 0x124F
/* Intel(R) Ethernet Connection E823-L for QSFP */
#define ICE_DEV_ID_E823L_QSFP 0x151D
/* Intel(R) Ethernet Controller E810-C for backplane */
#define ICE_DEV_ID_E810C_BACKPLANE 0x1591
/* Intel(R) Ethernet Controller E810-C for QSFP */
#define ICE_DEV_ID_E810C_QSFP 0x1592
/* Intel(R) Ethernet Controller E810-C for SFP */
#define ICE_DEV_ID_E810C_SFP 0x1593
/* Intel(R) Ethernet Controller E810-XXV for backplane */
#define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599
/* Intel(R) Ethernet Controller E810-XXV for QSFP */
#define ICE_DEV_ID_E810_XXV_QSFP 0x159A
/* Intel(R) Ethernet Controller E810-XXV for SFP */
#define ICE_DEV_ID_E810_XXV_SFP 0x159B
/* Intel(R) Ethernet Connection E823-C for backplane */
#define ICE_DEV_ID_E823C_BACKPLANE 0x188A
/* Intel(R) Ethernet Connection E823-C for QSFP */
#define ICE_DEV_ID_E823C_QSFP 0x188B
/* Intel(R) Ethernet Connection E823-C for SFP */
#define ICE_DEV_ID_E823C_SFP 0x188C
/* Intel(R) Ethernet Connection E823-C/X557-AT 10GBASE-T */
#define ICE_DEV_ID_E823C_10G_BASE_T 0x188D
/* Intel(R) Ethernet Connection E823-C 1GbE */
#define ICE_DEV_ID_E823C_SGMII 0x188E
/* Intel(R) Ethernet Connection C822N for backplane */
#define ICE_DEV_ID_C822N_BACKPLANE 0x1890
/* Intel(R) Ethernet Connection C822N for QSFP */
#define ICE_DEV_ID_C822N_QSFP 0x1891
/* Intel(R) Ethernet Connection C822N for SFP */
#define ICE_DEV_ID_C822N_SFP 0x1892
/* Intel(R) Ethernet Connection E822-C/X557-AT 10GBASE-T */
#define ICE_DEV_ID_E822C_10G_BASE_T 0x1893
/* Intel(R) Ethernet Connection E822-C 1GbE */
#define ICE_DEV_ID_E822C_SGMII 0x1894
/* Intel(R) Ethernet Connection E822-L for backplane */
#define ICE_DEV_ID_E822L_BACKPLANE 0x1897
/* Intel(R) Ethernet Connection E822-L for SFP */
#define ICE_DEV_ID_E822L_SFP 0x1898
/* Intel(R) Ethernet Connection E822-L/X557-AT 10GBASE-T */
#define ICE_DEV_ID_E822L_10G_BASE_T 0x1899
/* Intel(R) Ethernet Connection E822-L 1GbE */
#define ICE_DEV_ID_E822L_SGMII 0x189A
#endif /* ICE_DEVIDS_H */

contrib/ofed/libirdma/irdma-abi.h (new file)

@@ -0,0 +1,143 @@
/*-
* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB
*
* Copyright (c) 2006 - 2021 Intel Corporation. All rights reserved.
* Copyright (c) 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#ifndef IRDMA_ABI_H
#define IRDMA_ABI_H
#include <infiniband/types.h>
/* irdma must support legacy GEN_1 i40iw kernel
* and user-space whose last ABI ver is 5
*/
#define IRDMA_ABI_VER 5
enum irdma_memreg_type {
IRDMA_MEMREG_TYPE_MEM = 0,
IRDMA_MEMREG_TYPE_QP = 1,
IRDMA_MEMREG_TYPE_CQ = 2,
};
struct irdma_alloc_ucontext_req {
__u32 rsvd32;
__u8 userspace_ver;
__u8 rsvd8[3];
};
struct irdma_alloc_ucontext_resp {
__u32 max_pds;
__u32 max_qps;
__u32 wq_size; /* size of the WQs (SQ+RQ) in the mmaped area */
__u8 kernel_ver;
__u8 rsvd[3];
__aligned_u64 feature_flags;
__aligned_u64 db_mmap_key;
__u32 max_hw_wq_frags;
__u32 max_hw_read_sges;
__u32 max_hw_inline;
__u32 max_hw_rq_quanta;
__u32 max_hw_wq_quanta;
__u32 min_hw_cq_size;
__u32 max_hw_cq_size;
__u16 max_hw_sq_chunk;
__u8 hw_rev;
__u8 rsvd2;
};
struct irdma_alloc_pd_resp {
__u32 pd_id;
__u8 rsvd[4];
};
struct irdma_resize_cq_req {
__aligned_u64 user_cq_buffer;
};
struct irdma_create_cq_req {
__aligned_u64 user_cq_buf;
__aligned_u64 user_shadow_area;
};
struct irdma_create_qp_req {
__aligned_u64 user_wqe_bufs;
__aligned_u64 user_compl_ctx;
};
struct irdma_mem_reg_req {
__u16 reg_type; /* enum irdma_memreg_type */
__u16 cq_pages;
__u16 rq_pages;
__u16 sq_pages;
};
struct irdma_modify_qp_req {
__u8 sq_flush;
__u8 rq_flush;
__u8 rsvd[6];
};
struct irdma_create_cq_resp {
__u32 cq_id;
__u32 cq_size;
};
struct irdma_create_qp_resp {
__u32 qp_id;
__u32 actual_sq_size;
__u32 actual_rq_size;
__u32 irdma_drv_opt;
__u16 push_idx;
__u8 lsmm;
__u8 rsvd;
__u32 qp_caps;
};
struct irdma_modify_qp_resp {
__aligned_u64 push_wqe_mmap_key;
__aligned_u64 push_db_mmap_key;
__u16 push_offset;
__u8 push_valid;
__u8 rd_fence_rate;
__u8 rsvd[4];
};
struct irdma_create_ah_resp {
__u32 ah_id;
__u8 rsvd[4];
};
#endif /* IRDMA_ABI_H */
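
The reg_type field ties a registration to enum irdma_memreg_type above:
QP and CQ buffers go through the same reg-MR path, with the *_pages
counts telling the kernel how the 4 KB HW pages are split.  A hedged
sketch follows (fill_cq_reg_req is a hypothetical helper; the 4096-byte
page size mirrors IRDMA_HW_PAGE_SIZE from irdma_defs.h):

#include <stddef.h>
#include "irdma-abi.h"

/* round the CQ buffer up to 4 KB HW pages and mark it as a CQ registration */
static void
fill_cq_reg_req(struct irdma_mem_reg_req *req, size_t cq_bytes)
{
	req->reg_type = IRDMA_MEMREG_TYPE_CQ;
	req->cq_pages = (cq_bytes + 4096 - 1) / 4096;
	req->rq_pages = 0;
	req->sq_pages = 0;
}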

contrib/ofed/libirdma/irdma.h (new file)

@@ -0,0 +1,93 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2017 - 2021 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#ifndef IRDMA_H
#define IRDMA_H
#define RDMA_BIT2(type, a) ((u##type) 1UL << a)
#define RDMA_MASK3(type, mask, shift) ((u##type) mask << shift)
#define MAKEMASK(m, s) ((m) << (s))
#define IRDMA_WQEALLOC_WQE_DESC_INDEX_S 20
#define IRDMA_WQEALLOC_WQE_DESC_INDEX_M (0xfff << IRDMA_WQEALLOC_WQE_DESC_INDEX_S)
enum irdma_vers {
IRDMA_GEN_RSVD,
IRDMA_GEN_1,
IRDMA_GEN_2,
};
struct irdma_uk_attrs {
u64 feature_flags;
u32 max_hw_wq_frags;
u32 max_hw_read_sges;
u32 max_hw_inline;
u32 max_hw_rq_quanta;
u32 max_hw_wq_quanta;
u32 min_hw_cq_size;
u32 max_hw_cq_size;
u16 max_hw_sq_chunk;
u16 max_hw_wq_size;
u16 min_sw_wq_size;
u8 hw_rev;
};
struct irdma_hw_attrs {
struct irdma_uk_attrs uk_attrs;
u64 max_hw_outbound_msg_size;
u64 max_hw_inbound_msg_size;
u64 max_mr_size;
u32 min_hw_qp_id;
u32 min_hw_aeq_size;
u32 max_hw_aeq_size;
u32 min_hw_ceq_size;
u32 max_hw_ceq_size;
u32 max_hw_device_pages;
u32 max_hw_vf_fpm_id;
u32 first_hw_vf_fpm_id;
u32 max_hw_ird;
u32 max_hw_ord;
u32 max_hw_wqes;
u32 max_hw_pds;
u32 max_hw_ena_vf_count;
u32 max_qp_wr;
u32 max_pe_ready_count;
u32 max_done_count;
u32 max_sleep_count;
u32 max_cqp_compl_wait_time_ms;
u16 max_stat_inst;
u16 max_stat_idx;
};
#endif /* IRDMA_H */

contrib/ofed/libirdma/irdma_defs.h (new file)

@@ -0,0 +1,554 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2015 - 2021 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#ifndef IRDMA_DEFS_H
#define IRDMA_DEFS_H
#define IRDMA_BYTE_0 0
#define IRDMA_BYTE_8 8
#define IRDMA_BYTE_16 16
#define IRDMA_BYTE_24 24
#define IRDMA_BYTE_32 32
#define IRDMA_BYTE_40 40
#define IRDMA_BYTE_48 48
#define IRDMA_BYTE_56 56
#define IRDMA_BYTE_64 64
#define IRDMA_BYTE_72 72
#define IRDMA_BYTE_80 80
#define IRDMA_BYTE_88 88
#define IRDMA_BYTE_96 96
#define IRDMA_BYTE_104 104
#define IRDMA_BYTE_112 112
#define IRDMA_BYTE_120 120
#define IRDMA_BYTE_128 128
#define IRDMA_BYTE_136 136
#define IRDMA_BYTE_144 144
#define IRDMA_BYTE_152 152
#define IRDMA_BYTE_160 160
#define IRDMA_BYTE_168 168
#define IRDMA_BYTE_176 176
#define IRDMA_BYTE_184 184
#define IRDMA_BYTE_192 192
#define IRDMA_BYTE_200 200
#define IRDMA_BYTE_208 208
#define IRDMA_BYTE_216 216
#define IRDMA_QP_TYPE_IWARP 1
#define IRDMA_QP_TYPE_UDA 2
#define IRDMA_QP_TYPE_ROCE_RC 3
#define IRDMA_QP_TYPE_ROCE_UD 4
#define IRDMA_HW_PAGE_SIZE 4096
#define IRDMA_HW_PAGE_SHIFT 12
#define IRDMA_CQE_QTYPE_RQ 0
#define IRDMA_CQE_QTYPE_SQ 1
#define IRDMA_QP_SW_MIN_WQSIZE 8u /* in WRs */
#define IRDMA_QP_WQE_MIN_SIZE 32
#define IRDMA_QP_WQE_MAX_SIZE 256
#define IRDMA_QP_WQE_MIN_QUANTA 1
#define IRDMA_MAX_RQ_WQE_SHIFT_GEN1 2
#define IRDMA_MAX_RQ_WQE_SHIFT_GEN2 3
#define IRDMA_SQ_RSVD 258
#define IRDMA_RQ_RSVD 1
#define IRDMA_FEATURE_RTS_AE 1ULL
#define IRDMA_FEATURE_CQ_RESIZE 2ULL
#define IRDMA_FEATURE_RELAX_RQ_ORDER 4ULL
#define IRDMAQP_OP_RDMA_WRITE 0x00
#define IRDMAQP_OP_RDMA_READ 0x01
#define IRDMAQP_OP_RDMA_SEND 0x03
#define IRDMAQP_OP_RDMA_SEND_INV 0x04
#define IRDMAQP_OP_RDMA_SEND_SOL_EVENT 0x05
#define IRDMAQP_OP_RDMA_SEND_SOL_EVENT_INV 0x06
#define IRDMAQP_OP_BIND_MW 0x08
#define IRDMAQP_OP_FAST_REGISTER 0x09
#define IRDMAQP_OP_LOCAL_INVALIDATE 0x0a
#define IRDMAQP_OP_RDMA_READ_LOC_INV 0x0b
#define IRDMAQP_OP_NOP 0x0c
#ifndef LS_64_1
#define LS_64_1(val, bits) ((u64)(uintptr_t)(val) << (bits))
#define RS_64_1(val, bits) ((u64)(uintptr_t)(val) >> (bits))
#define LS_32_1(val, bits) ((u32)((val) << (bits)))
#define RS_32_1(val, bits) ((u32)((val) >> (bits)))
#endif
#define LS_64(val, field) (((u64)(val) << field ## _S) & (field ## _M))
#define RS_64(val, field) ((u64)((val) & field ## _M) >> field ## _S)
#define LS_32(val, field) (((val) << field ## _S) & (field ## _M))
#define RS_32(val, field) (((val) & field ## _M) >> field ## _S)
#define IRDMA_CQPHC_QPCTX_S 0
#define IRDMA_CQPHC_QPCTX_M \
(0xffffffffffffffffULL << IRDMA_CQPHC_QPCTX_S)
/* iWARP QP Doorbell shadow area */
#define IRDMA_QP_DBSA_HW_SQ_TAIL_S 0
#define IRDMA_QP_DBSA_HW_SQ_TAIL_M \
(0x7fffULL << IRDMA_QP_DBSA_HW_SQ_TAIL_S)
/* Completion Queue Doorbell shadow area */
#define IRDMA_CQ_DBSA_CQEIDX_S 0
#define IRDMA_CQ_DBSA_CQEIDX_M (0xfffffULL << IRDMA_CQ_DBSA_CQEIDX_S)
#define IRDMA_CQ_DBSA_SW_CQ_SELECT_S 0
#define IRDMA_CQ_DBSA_SW_CQ_SELECT_M \
(0x3fffULL << IRDMA_CQ_DBSA_SW_CQ_SELECT_S)
#define IRDMA_CQ_DBSA_ARM_NEXT_S 14
#define IRDMA_CQ_DBSA_ARM_NEXT_M BIT_ULL(IRDMA_CQ_DBSA_ARM_NEXT_S)
#define IRDMA_CQ_DBSA_ARM_NEXT_SE_S 15
#define IRDMA_CQ_DBSA_ARM_NEXT_SE_M BIT_ULL(IRDMA_CQ_DBSA_ARM_NEXT_SE_S)
#define IRDMA_CQ_DBSA_ARM_SEQ_NUM_S 16
#define IRDMA_CQ_DBSA_ARM_SEQ_NUM_M \
(0x3ULL << IRDMA_CQ_DBSA_ARM_SEQ_NUM_S)
/* CQP and iWARP Completion Queue */
#define IRDMA_CQ_QPCTX_S IRDMA_CQPHC_QPCTX_S
#define IRDMA_CQ_QPCTX_M IRDMA_CQPHC_QPCTX_M
#define IRDMA_CQ_MINERR_S 0
#define IRDMA_CQ_MINERR_M (0xffffULL << IRDMA_CQ_MINERR_S)
#define IRDMA_CQ_MAJERR_S 16
#define IRDMA_CQ_MAJERR_M (0xffffULL << IRDMA_CQ_MAJERR_S)
#define IRDMA_CQ_WQEIDX_S 32
#define IRDMA_CQ_WQEIDX_M (0x7fffULL << IRDMA_CQ_WQEIDX_S)
#define IRDMA_CQ_EXTCQE_S 50
#define IRDMA_CQ_EXTCQE_M BIT_ULL(IRDMA_CQ_EXTCQE_S)
#define IRDMA_OOO_CMPL_S 54
#define IRDMA_OOO_CMPL_M BIT_ULL(IRDMA_OOO_CMPL_S)
#define IRDMA_CQ_ERROR_S 55
#define IRDMA_CQ_ERROR_M BIT_ULL(IRDMA_CQ_ERROR_S)
#define IRDMA_CQ_SQ_S 62
#define IRDMA_CQ_SQ_M BIT_ULL(IRDMA_CQ_SQ_S)
#define IRDMA_CQ_VALID_S 63
#define IRDMA_CQ_VALID_M BIT_ULL(IRDMA_CQ_VALID_S)
#define IRDMA_CQ_IMMVALID_S 62
#define IRDMA_CQ_IMMVALID_M BIT_ULL(IRDMA_CQ_IMMVALID_S)
#define IRDMA_CQ_UDSMACVALID_S 61
#define IRDMA_CQ_UDSMACVALID_M BIT_ULL(IRDMA_CQ_UDSMACVALID_S)
#define IRDMA_CQ_UDVLANVALID_S 60
#define IRDMA_CQ_UDVLANVALID_M BIT_ULL(IRDMA_CQ_UDVLANVALID_S)
#define IRDMA_CQ_UDSMAC_S 0
#define IRDMA_CQ_UDSMAC_M (0xffffffffffffULL << IRDMA_CQ_UDSMAC_S)
#define IRDMA_CQ_UDVLAN_S 48
#define IRDMA_CQ_UDVLAN_M (0xffffULL << IRDMA_CQ_UDVLAN_S)
#define IRDMA_CQ_IMMDATA_S 0
#define IRDMA_CQ_IMMDATA_M (0xffffffffffffffffULL << IRDMA_CQ_IMMVALID_S)
#define IRDMA_CQ_IMMDATALOW32_S 0
#define IRDMA_CQ_IMMDATALOW32_M (0xffffffffULL << IRDMA_CQ_IMMDATALOW32_S)
#define IRDMA_CQ_IMMDATAUP32_S 32
#define IRDMA_CQ_IMMDATAUP32_M (0xffffffffULL << IRDMA_CQ_IMMDATAUP32_S)
#define IRDMACQ_PAYLDLEN_S 0
#define IRDMACQ_PAYLDLEN_M (0xffffffffULL << IRDMACQ_PAYLDLEN_S)
#define IRDMACQ_TCPSEQNUMRTT_S 32
#define IRDMACQ_TCPSEQNUMRTT_M (0xffffffffULL << IRDMACQ_TCPSEQNUMRTT_S)
#define IRDMACQ_INVSTAG_S 0
#define IRDMACQ_INVSTAG_M (0xffffffffULL << IRDMACQ_INVSTAG_S)
#define IRDMACQ_QPID_S 32
#define IRDMACQ_QPID_M (0xffffffULL << IRDMACQ_QPID_S)
#define IRDMACQ_UDSRCQPN_S 0
#define IRDMACQ_UDSRCQPN_M (0xffffffffULL << IRDMACQ_UDSRCQPN_S)
#define IRDMACQ_PSHDROP_S 51
#define IRDMACQ_PSHDROP_M BIT_ULL(IRDMACQ_PSHDROP_S)
#define IRDMACQ_STAG_S 53
#define IRDMACQ_STAG_M BIT_ULL(IRDMACQ_STAG_S)
#define IRDMACQ_IPV4_S 53
#define IRDMACQ_IPV4_M BIT_ULL(IRDMACQ_IPV4_S)
#define IRDMACQ_SOEVENT_S 54
#define IRDMACQ_SOEVENT_M BIT_ULL(IRDMACQ_SOEVENT_S)
#define IRDMACQ_OP_S 56
#define IRDMACQ_OP_M (0x3fULL << IRDMACQ_OP_S)
/* Manage Push Page - MPP */
#define IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1 0xffff
#define IRDMA_INVALID_PUSH_PAGE_INDEX 0xffffffff
/* iwarp QP SQ WQE common fields */
#define IRDMAQPSQ_OPCODE_S 32
#define IRDMAQPSQ_OPCODE_M (0x3fULL << IRDMAQPSQ_OPCODE_S)
#define IRDMAQPSQ_COPY_HOST_PBL_S 43
#define IRDMAQPSQ_COPY_HOST_PBL_M BIT_ULL(IRDMAQPSQ_COPY_HOST_PBL_S)
#define IRDMAQPSQ_ADDFRAGCNT_S 38
#define IRDMAQPSQ_ADDFRAGCNT_M (0xfULL << IRDMAQPSQ_ADDFRAGCNT_S)
#define IRDMAQPSQ_PUSHWQE_S 56
#define IRDMAQPSQ_PUSHWQE_M BIT_ULL(IRDMAQPSQ_PUSHWQE_S)
#define IRDMAQPSQ_STREAMMODE_S 58
#define IRDMAQPSQ_STREAMMODE_M BIT_ULL(IRDMAQPSQ_STREAMMODE_S)
#define IRDMAQPSQ_WAITFORRCVPDU_S 59
#define IRDMAQPSQ_WAITFORRCVPDU_M BIT_ULL(IRDMAQPSQ_WAITFORRCVPDU_S)
#define IRDMAQPSQ_READFENCE_S 60
#define IRDMAQPSQ_READFENCE_M BIT_ULL(IRDMAQPSQ_READFENCE_S)
#define IRDMAQPSQ_LOCALFENCE_S 61
#define IRDMAQPSQ_LOCALFENCE_M BIT_ULL(IRDMAQPSQ_LOCALFENCE_S)
#define IRDMAQPSQ_UDPHEADER_S 61
#define IRDMAQPSQ_UDPHEADER_M BIT_ULL(IRDMAQPSQ_UDPHEADER_S)
#define IRDMAQPSQ_L4LEN_S 42
#define IRDMAQPSQ_L4LEN_M ((u64)0xF << IRDMAQPSQ_L4LEN_S)
#define IRDMAQPSQ_SIGCOMPL_S 62
#define IRDMAQPSQ_SIGCOMPL_M BIT_ULL(IRDMAQPSQ_SIGCOMPL_S)
#define IRDMAQPSQ_VALID_S 63
#define IRDMAQPSQ_VALID_M BIT_ULL(IRDMAQPSQ_VALID_S)
#define IRDMAQPSQ_FRAG_TO_S IRDMA_CQPHC_QPCTX_S
#define IRDMAQPSQ_FRAG_TO_M IRDMA_CQPHC_QPCTX_M
#define IRDMAQPSQ_FRAG_VALID_S 63
#define IRDMAQPSQ_FRAG_VALID_M BIT_ULL(IRDMAQPSQ_FRAG_VALID_S)
#define IRDMAQPSQ_FRAG_LEN_S 32
#define IRDMAQPSQ_FRAG_LEN_M (0x7fffffffULL << IRDMAQPSQ_FRAG_LEN_S)
#define IRDMAQPSQ_FRAG_STAG_S 0
#define IRDMAQPSQ_FRAG_STAG_M (0xffffffffULL << IRDMAQPSQ_FRAG_STAG_S)
#define IRDMAQPSQ_GEN1_FRAG_LEN_S 0
#define IRDMAQPSQ_GEN1_FRAG_LEN_M (0xffffffffULL << IRDMAQPSQ_GEN1_FRAG_LEN_S)
#define IRDMAQPSQ_GEN1_FRAG_STAG_S 32
#define IRDMAQPSQ_GEN1_FRAG_STAG_M (0xffffffffULL << IRDMAQPSQ_GEN1_FRAG_STAG_S)
#define IRDMAQPSQ_REMSTAGINV_S 0
#define IRDMAQPSQ_REMSTAGINV_M (0xffffffffULL << IRDMAQPSQ_REMSTAGINV_S)
#define IRDMAQPSQ_DESTQKEY_S 0
#define IRDMAQPSQ_DESTQKEY_M (0xffffffffULL << IRDMAQPSQ_DESTQKEY_S)
#define IRDMAQPSQ_DESTQPN_S 32
#define IRDMAQPSQ_DESTQPN_M (0x00ffffffULL << IRDMAQPSQ_DESTQPN_S)
#define IRDMAQPSQ_AHID_S 0
#define IRDMAQPSQ_AHID_M (0x0001ffffULL << IRDMAQPSQ_AHID_S)
#define IRDMAQPSQ_INLINEDATAFLAG_S 57
#define IRDMAQPSQ_INLINEDATAFLAG_M BIT_ULL(IRDMAQPSQ_INLINEDATAFLAG_S)
#define IRDMA_INLINE_VALID_S 7
#define IRDMAQPSQ_INLINEDATALEN_S 48
#define IRDMAQPSQ_INLINEDATALEN_M \
(0xffULL << IRDMAQPSQ_INLINEDATALEN_S)
#define IRDMAQPSQ_IMMDATAFLAG_S 47
#define IRDMAQPSQ_IMMDATAFLAG_M \
BIT_ULL(IRDMAQPSQ_IMMDATAFLAG_S)
#define IRDMAQPSQ_REPORTRTT_S 46
#define IRDMAQPSQ_REPORTRTT_M \
BIT_ULL(IRDMAQPSQ_REPORTRTT_S)
#define IRDMAQPSQ_IMMDATA_S 0
#define IRDMAQPSQ_IMMDATA_M \
(0xffffffffffffffffULL << IRDMAQPSQ_IMMDATA_S)
/* rdma write */
#define IRDMAQPSQ_REMSTAG_S 0
#define IRDMAQPSQ_REMSTAG_M (0xffffffffULL << IRDMAQPSQ_REMSTAG_S)
#define IRDMAQPSQ_REMTO_S IRDMA_CQPHC_QPCTX_S
#define IRDMAQPSQ_REMTO_M IRDMA_CQPHC_QPCTX_M
/* memory window */
#define IRDMAQPSQ_STAGRIGHTS_S 48
#define IRDMAQPSQ_STAGRIGHTS_M (0x1fULL << IRDMAQPSQ_STAGRIGHTS_S)
#define IRDMAQPSQ_VABASEDTO_S 53
#define IRDMAQPSQ_VABASEDTO_M BIT_ULL(IRDMAQPSQ_VABASEDTO_S)
#define IRDMAQPSQ_MEMWINDOWTYPE_S 54
#define IRDMAQPSQ_MEMWINDOWTYPE_M BIT_ULL(IRDMAQPSQ_MEMWINDOWTYPE_S)
#define IRDMAQPSQ_MWLEN_S IRDMA_CQPHC_QPCTX_S
#define IRDMAQPSQ_MWLEN_M IRDMA_CQPHC_QPCTX_M
#define IRDMAQPSQ_PARENTMRSTAG_S 32
#define IRDMAQPSQ_PARENTMRSTAG_M \
(0xffffffffULL << IRDMAQPSQ_PARENTMRSTAG_S)
#define IRDMAQPSQ_MWSTAG_S 0
#define IRDMAQPSQ_MWSTAG_M (0xffffffffULL << IRDMAQPSQ_MWSTAG_S)
#define IRDMAQPSQ_BASEVA_TO_FBO_S IRDMA_CQPHC_QPCTX_S
#define IRDMAQPSQ_BASEVA_TO_FBO_M IRDMA_CQPHC_QPCTX_M
/* Local Invalidate */
#define IRDMAQPSQ_LOCSTAG_S 0
#define IRDMAQPSQ_LOCSTAG_M (0xffffffffULL << IRDMAQPSQ_LOCSTAG_S)
/* iwarp QP RQ WQE common fields */
#define IRDMAQPRQ_ADDFRAGCNT_S IRDMAQPSQ_ADDFRAGCNT_S
#define IRDMAQPRQ_ADDFRAGCNT_M IRDMAQPSQ_ADDFRAGCNT_M
#define IRDMAQPRQ_VALID_S IRDMAQPSQ_VALID_S
#define IRDMAQPRQ_VALID_M IRDMAQPSQ_VALID_M
#define IRDMAQPRQ_COMPLCTX_S IRDMA_CQPHC_QPCTX_S
#define IRDMAQPRQ_COMPLCTX_M IRDMA_CQPHC_QPCTX_M
#define IRDMAQPRQ_FRAG_LEN_S IRDMAQPSQ_FRAG_LEN_S
#define IRDMAQPRQ_FRAG_LEN_M IRDMAQPSQ_FRAG_LEN_M
#define IRDMAQPRQ_STAG_S IRDMAQPSQ_FRAG_STAG_S
#define IRDMAQPRQ_STAG_M IRDMAQPSQ_FRAG_STAG_M
#define IRDMAQPRQ_TO_S IRDMAQPSQ_FRAG_TO_S
#define IRDMAQPRQ_TO_M IRDMAQPSQ_FRAG_TO_M
#define IRDMAPFINT_OICR_HMC_ERR_M BIT(26)
#define IRDMAPFINT_OICR_PE_PUSH_M BIT(27)
#define IRDMAPFINT_OICR_PE_CRITERR_M BIT(28)
#define IRDMA_GET_CURRENT_CQ_ELEM(_cq) \
( \
(_cq)->cq_base[IRDMA_RING_CURRENT_HEAD((_cq)->cq_ring)].buf \
)
#define IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(_cq) \
( \
((struct irdma_extended_cqe *) \
((_cq)->cq_base))[IRDMA_RING_CURRENT_HEAD((_cq)->cq_ring)].buf \
)
#define IRDMA_RING_INIT(_ring, _size) \
{ \
(_ring).head = 0; \
(_ring).tail = 0; \
(_ring).size = (_size); \
}
#define IRDMA_RING_SIZE(_ring) ((_ring).size)
#define IRDMA_RING_CURRENT_HEAD(_ring) ((_ring).head)
#define IRDMA_RING_CURRENT_TAIL(_ring) ((_ring).tail)
#define IRDMA_RING_MOVE_HEAD(_ring, _retcode) \
{ \
register u32 size; \
size = (_ring).size; \
if (!IRDMA_RING_FULL_ERR(_ring)) { \
(_ring).head = ((_ring).head + 1) % size; \
(_retcode) = 0; \
} else { \
(_retcode) = ENOSPC; \
} \
}
#define IRDMA_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \
{ \
register u32 size; \
size = (_ring).size; \
if ((IRDMA_RING_USED_QUANTA(_ring) + (_count)) < size) { \
(_ring).head = ((_ring).head + (_count)) % size; \
(_retcode) = 0; \
} else { \
(_retcode) = ENOSPC; \
} \
}
#define IRDMA_SQ_RING_MOVE_HEAD(_ring, _retcode) \
{ \
register u32 size; \
size = (_ring).size; \
if (!IRDMA_SQ_RING_FULL_ERR(_ring)) { \
(_ring).head = ((_ring).head + 1) % size; \
(_retcode) = 0; \
} else { \
(_retcode) = ENOSPC; \
} \
}
#define IRDMA_SQ_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \
{ \
register u32 size; \
size = (_ring).size; \
if ((IRDMA_RING_USED_QUANTA(_ring) + (_count)) < (size - 256)) { \
(_ring).head = ((_ring).head + (_count)) % size; \
(_retcode) = 0; \
} else { \
(_retcode) = ENOSPC; \
} \
}
#define IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(_ring, _count) \
(_ring).head = ((_ring).head + (_count)) % (_ring).size
#define IRDMA_RING_MOVE_TAIL(_ring) \
(_ring).tail = ((_ring).tail + 1) % (_ring).size
#define IRDMA_RING_MOVE_HEAD_NOCHECK(_ring) \
(_ring).head = ((_ring).head + 1) % (_ring).size
#define IRDMA_RING_MOVE_TAIL_BY_COUNT(_ring, _count) \
(_ring).tail = ((_ring).tail + (_count)) % (_ring).size
#define IRDMA_RING_SET_TAIL(_ring, _pos) \
(_ring).tail = (_pos) % (_ring).size
#define IRDMA_RING_FULL_ERR(_ring) \
( \
(IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 1)) \
)
#define IRDMA_ERR_RING_FULL2(_ring) \
( \
(IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 2)) \
)
#define IRDMA_ERR_RING_FULL3(_ring) \
( \
(IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 3)) \
)
#define IRDMA_SQ_RING_FULL_ERR(_ring) \
( \
(IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 257)) \
)
#define IRDMA_ERR_SQ_RING_FULL2(_ring) \
( \
(IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 258)) \
)
#define IRDMA_ERR_SQ_RING_FULL3(_ring) \
( \
(IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 259)) \
)
#define IRDMA_RING_MORE_WORK(_ring) \
( \
(IRDMA_RING_USED_QUANTA(_ring) != 0) \
)
#define IRDMA_RING_USED_QUANTA(_ring) \
( \
(((_ring).head + (_ring).size - (_ring).tail) % (_ring).size) \
)
#define IRDMA_RING_FREE_QUANTA(_ring) \
( \
((_ring).size - IRDMA_RING_USED_QUANTA(_ring) - 1) \
)
#define IRDMA_SQ_RING_FREE_QUANTA(_ring) \
( \
((_ring).size - IRDMA_RING_USED_QUANTA(_ring) - 257) \
)
#define IRDMA_ATOMIC_RING_MOVE_HEAD(_ring, index, _retcode) \
{ \
index = IRDMA_RING_CURRENT_HEAD(_ring); \
IRDMA_RING_MOVE_HEAD(_ring, _retcode); \
}
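
The ring macros above implement a classic head/tail ring: the producer
advances head, the consumer advances tail, and at least one quantum is
kept unused so that head == tail unambiguously means empty; the SQ
variants additionally hold back extra quanta (see IRDMA_SQ_RSVD).  A
minimal sketch of the discipline, assuming struct irdma_ring from
irdma_user.h (demo_ring is hypothetical):

#include <errno.h>		/* ENOSPC, returned by the MOVE_HEAD macros */
#include "irdma_defs.h"
#include "irdma_user.h"		/* struct irdma_ring */

static int
demo_ring(void)
{
	struct irdma_ring ring;
	int ret;

	IRDMA_RING_INIT(ring, 8);		/* 8 quanta, 7 usable */
	IRDMA_RING_MOVE_HEAD(ring, ret);	/* producer claims one quantum */
	if (ret)
		return ret;			/* ENOSPC when the ring is full */
	/* one quantum is now outstanding between head and tail */
	IRDMA_RING_MOVE_TAIL(ring);		/* consumer retires it */
	return IRDMA_RING_USED_QUANTA(ring);	/* back to 0 */
}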
enum irdma_qp_wqe_size {
IRDMA_WQE_SIZE_32 = 32,
IRDMA_WQE_SIZE_64 = 64,
IRDMA_WQE_SIZE_96 = 96,
IRDMA_WQE_SIZE_128 = 128,
IRDMA_WQE_SIZE_256 = 256,
};
/**
* set_64bit_val - set 64 bit value to hw wqe
* @wqe_words: wqe addr to write
* @byte_index: index in wqe
* @val: value to write
**/
static inline void set_64bit_val(__le64 *wqe_words, u32 byte_index, u64 val)
{
wqe_words[byte_index >> 3] = htole64(val);
}
/**
* set_32bit_val - set 32 bit value to hw wqe
* @wqe_words: wqe addr to write
* @byte_index: index in wqe
* @val: value to write
**/
static inline void set_32bit_val(__le32 *wqe_words, u32 byte_index, u32 val)
{
wqe_words[byte_index >> 2] = htole32(val);
}
/**
* get_64bit_val - read 64 bit value from wqe
* @wqe_words: wqe addr
* @byte_index: index to read from
* @val: read value
**/
static inline void get_64bit_val(__le64 *wqe_words, u32 byte_index, u64 *val)
{
*val = le64toh(wqe_words[byte_index >> 3]);
}
/**
* get_32bit_val - read 32 bit value from wqe
* @wqe_words: wqe addr
* @byte_index: index to read from
* @val: return 32 bit value
**/
static inline void get_32bit_val(__le32 *wqe_words, u32 byte_index, u32 *val)
{
*val = le32toh(wqe_words[byte_index >> 2]);
}
#endif /* IRDMA_DEFS_H */
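
Taken together, the LS_64()/set_64bit_val() pair is how WQE words are
composed: shift-and-mask a field into a u64, then store it little-endian
at a byte offset.  An illustrative sketch only (example_set_wqe_hdr is
hypothetical; real posting code also orders the valid-bit store after
the rest of the WQE is written):

#include "irdma_defs.h"	/* LS_64(), IRDMA_BYTE_24, set_64bit_val() */

static inline void
example_set_wqe_hdr(__le64 *wqe, u8 opcode, u8 polarity)
{
	u64 hdr = LS_64(opcode, IRDMAQPSQ_OPCODE) |
		  LS_64(polarity, IRDMAQPSQ_VALID);

	/* the common SQ WQE header sits in the last 8 bytes of the
	 * first 32-byte quantum; the valid bit carries the polarity
	 */
	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
}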

[File diff suppressed because it is too large]

contrib/ofed/libirdma/irdma_umain.c (new file)

@@ -0,0 +1,255 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2021 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#include <sys/mman.h>
#include <stdbool.h>
#include <stdlib.h>
#include "irdma_umain.h"
#include "irdma-abi.h"
#include "ice_devids.h"
#include "i40e_devids.h"
#include "abi.h"
/**
* Driver version
*/
char libirdma_version[] = "0.0.51-k";
unsigned int irdma_dbg;
#define INTEL_HCA(d) \
{ .vendor = PCI_VENDOR_ID_INTEL, \
.device = d }
struct hca_info {
unsigned vendor;
unsigned device;
};
static const struct hca_info hca_table[] = {
INTEL_HCA(ICE_DEV_ID_E823L_BACKPLANE),
INTEL_HCA(ICE_DEV_ID_E823L_SFP),
INTEL_HCA(ICE_DEV_ID_E823L_10G_BASE_T),
INTEL_HCA(ICE_DEV_ID_E823L_1GBE),
INTEL_HCA(ICE_DEV_ID_E823L_QSFP),
INTEL_HCA(ICE_DEV_ID_E810C_BACKPLANE),
INTEL_HCA(ICE_DEV_ID_E810C_QSFP),
INTEL_HCA(ICE_DEV_ID_E810C_SFP),
INTEL_HCA(ICE_DEV_ID_E810_XXV_BACKPLANE),
INTEL_HCA(ICE_DEV_ID_E810_XXV_QSFP),
INTEL_HCA(ICE_DEV_ID_E810_XXV_SFP),
INTEL_HCA(ICE_DEV_ID_E823C_BACKPLANE),
INTEL_HCA(ICE_DEV_ID_E823C_QSFP),
INTEL_HCA(ICE_DEV_ID_E823C_SFP),
INTEL_HCA(ICE_DEV_ID_E823C_10G_BASE_T),
INTEL_HCA(ICE_DEV_ID_E823C_SGMII),
INTEL_HCA(ICE_DEV_ID_C822N_BACKPLANE),
INTEL_HCA(ICE_DEV_ID_C822N_QSFP),
INTEL_HCA(ICE_DEV_ID_C822N_SFP),
INTEL_HCA(ICE_DEV_ID_E822C_10G_BASE_T),
INTEL_HCA(ICE_DEV_ID_E822C_SGMII),
INTEL_HCA(ICE_DEV_ID_E822L_BACKPLANE),
INTEL_HCA(ICE_DEV_ID_E822L_SFP),
INTEL_HCA(ICE_DEV_ID_E822L_10G_BASE_T),
INTEL_HCA(ICE_DEV_ID_E822L_SGMII),
};
static struct ibv_context_ops irdma_ctx_ops = {
.query_device = irdma_uquery_device,
.query_port = irdma_uquery_port,
.alloc_pd = irdma_ualloc_pd,
.dealloc_pd = irdma_ufree_pd,
.reg_mr = irdma_ureg_mr,
.rereg_mr = NULL,
.dereg_mr = irdma_udereg_mr,
.alloc_mw = irdma_ualloc_mw,
.dealloc_mw = irdma_udealloc_mw,
.bind_mw = irdma_ubind_mw,
.create_cq = irdma_ucreate_cq,
.poll_cq = irdma_upoll_cq,
.req_notify_cq = irdma_uarm_cq,
.cq_event = irdma_cq_event,
.resize_cq = irdma_uresize_cq,
.destroy_cq = irdma_udestroy_cq,
.create_qp = irdma_ucreate_qp,
.query_qp = irdma_uquery_qp,
.modify_qp = irdma_umodify_qp,
.destroy_qp = irdma_udestroy_qp,
.post_send = irdma_upost_send,
.post_recv = irdma_upost_recv,
.create_ah = irdma_ucreate_ah,
.destroy_ah = irdma_udestroy_ah,
.attach_mcast = irdma_uattach_mcast,
.detach_mcast = irdma_udetach_mcast,
};
static int
irdma_init_context(struct verbs_device *vdev,
struct ibv_context *ctx, int cmd_fd)
{
struct irdma_uvcontext *iwvctx;
struct irdma_get_context cmd = {};
struct irdma_get_context_resp resp = {};
struct ibv_pd *ibv_pd;
u64 mmap_key;
iwvctx = container_of(ctx, struct irdma_uvcontext, ibv_ctx);
iwvctx->ibv_ctx.cmd_fd = cmd_fd;
cmd.userspace_ver = IRDMA_ABI_VER;
if (ibv_cmd_get_context(&iwvctx->ibv_ctx, &cmd.ibv_cmd, sizeof(cmd),
&resp.ibv_resp, sizeof(resp))) {
/* failed first attempt */
printf("%s %s get context failure\n", __FILE__, __func__);
return -1;
}
iwvctx->uk_attrs.feature_flags = resp.feature_flags;
iwvctx->uk_attrs.hw_rev = resp.hw_rev;
iwvctx->uk_attrs.max_hw_wq_frags = resp.max_hw_wq_frags;
iwvctx->uk_attrs.max_hw_read_sges = resp.max_hw_read_sges;
iwvctx->uk_attrs.max_hw_inline = resp.max_hw_inline;
iwvctx->uk_attrs.max_hw_rq_quanta = resp.max_hw_rq_quanta;
iwvctx->uk_attrs.max_hw_wq_quanta = resp.max_hw_wq_quanta;
iwvctx->uk_attrs.max_hw_sq_chunk = resp.max_hw_sq_chunk;
iwvctx->uk_attrs.max_hw_cq_size = resp.max_hw_cq_size;
iwvctx->uk_attrs.min_hw_cq_size = resp.min_hw_cq_size;
iwvctx->abi_ver = IRDMA_ABI_VER;
mmap_key = resp.db_mmap_key;
iwvctx->db = mmap(NULL, IRDMA_HW_PAGE_SIZE, PROT_WRITE | PROT_READ,
MAP_SHARED, cmd_fd, mmap_key);
if (iwvctx->db == MAP_FAILED)
goto err_free;
iwvctx->ibv_ctx.ops = irdma_ctx_ops;
ibv_pd = irdma_ualloc_pd(&iwvctx->ibv_ctx);
if (!ibv_pd) {
munmap(iwvctx->db, IRDMA_HW_PAGE_SIZE);
goto err_free;
}
ibv_pd->context = &iwvctx->ibv_ctx;
iwvctx->iwupd = container_of(ibv_pd, struct irdma_upd, ibv_pd);
return 0;
err_free:
printf("%s %s failure\n", __FILE__, __func__);
return -1;
}
static void
irdma_cleanup_context(struct verbs_device *device,
struct ibv_context *ibctx)
{
struct irdma_uvcontext *iwvctx;
printf("%s %s CALL\n", __FILE__, __func__);
iwvctx = container_of(ibctx, struct irdma_uvcontext, ibv_ctx);
irdma_ufree_pd(&iwvctx->iwupd->ibv_pd);
munmap(iwvctx->db, IRDMA_HW_PAGE_SIZE);
}
static struct verbs_device_ops irdma_dev_ops = {
.init_context = irdma_init_context,
.uninit_context = irdma_cleanup_context,
};
static struct verbs_device *
irdma_driver_init(const char *uverbs_sys_path,
int abi_version)
{
struct irdma_udevice *dev;
int i = 0;
unsigned int device_found = 0;
unsigned vendor_id, device_id;
unsigned hca_size;
char buf[8];
if (ibv_read_sysfs_file(uverbs_sys_path, "device/vendor",
buf, sizeof(buf)) < 0)
return NULL;
sscanf(buf, "%i", &vendor_id);
if (vendor_id != PCI_VENDOR_ID_INTEL)
return NULL;
if (ibv_read_sysfs_file(uverbs_sys_path, "device/device",
buf, sizeof(buf)) < 0)
return NULL;
sscanf(buf, "%i", &device_id);
hca_size = sizeof(hca_table) / sizeof(struct hca_info);
while (i < hca_size && !device_found) {
if (device_id == hca_table[i].device)
device_found = 1;
++i;
}
if (!device_found)
return NULL;
if (abi_version < IRDMA_MIN_ABI_VERSION ||
abi_version > IRDMA_MAX_ABI_VERSION) {
printf("Invalid ABI version: %d of %s\n",
abi_version, uverbs_sys_path);
return NULL;
}
dev = calloc(1, sizeof(struct irdma_udevice));
if (!dev) {
printf("Device creation for %s failed\n", uverbs_sys_path);
return NULL;
}
dev->ibv_dev.ops = &irdma_dev_ops;
dev->ibv_dev.sz = sizeof(*dev);
dev->ibv_dev.size_of_context = sizeof(struct irdma_uvcontext) -
sizeof(struct ibv_context);
return &dev->ibv_dev;
}
static __attribute__((constructor))
void
irdma_register_driver(void)
{
verbs_register_driver("irdma", irdma_driver_init);
}

contrib/ofed/libirdma/irdma_umain.h (new file)

@@ -0,0 +1,218 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (C) 2019 - 2020 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#ifndef IRDMA_UMAIN_H
#define IRDMA_UMAIN_H
#include <sys/queue.h>
#include <infiniband/verbs.h>
#include <infiniband/driver.h>
#include "osdep.h"
#include "irdma.h"
#include "irdma_defs.h"
#include "i40iw_hw.h"
#include "irdma_user.h"
#ifndef likely
#define likely(x) __builtin_expect((x), 1)
#endif
#ifndef unlikely
#define unlikely(x) __builtin_expect((x), 0)
#endif
#define PFX "libirdma-"
#define IRDMA_BASE_PUSH_PAGE 1
#define IRDMA_U_MINCQ_SIZE 4
#define IRDMA_DB_SHADOW_AREA_SIZE 64
#define IRDMA_DB_CQ_OFFSET 64
LIST_HEAD(list_head, irdma_cq_buf);
LIST_HEAD(list_head_cmpl, irdma_cmpl_gen);
enum irdma_supported_wc_flags {
IRDMA_CQ_SUPPORTED_WC_FLAGS = IBV_WC_EX_WITH_BYTE_LEN
| IBV_WC_EX_WITH_IMM
| IBV_WC_EX_WITH_QP_NUM
| IBV_WC_EX_WITH_SRC_QP
| IBV_WC_EX_WITH_SL
| IBV_WC_EX_WITH_COMPLETION_TIMESTAMP,
};
struct irdma_udevice {
struct verbs_device ibv_dev;
};
struct irdma_uah {
struct ibv_ah ibv_ah;
uint32_t ah_id;
struct ibv_global_route grh;
};
struct irdma_upd {
struct ibv_pd ibv_pd;
void *arm_cq_page;
void *arm_cq;
uint32_t pd_id;
};
struct irdma_uvcontext {
struct ibv_context ibv_ctx;
struct irdma_upd *iwupd;
struct irdma_uk_attrs uk_attrs;
void *db;
int abi_ver;
bool legacy_mode;
};
struct irdma_uqp;
struct irdma_cq_buf {
LIST_ENTRY(irdma_cq_buf) list;
struct irdma_cq_uk cq;
struct verbs_mr vmr;
};
struct verbs_cq {
union {
struct ibv_cq cq;
struct ibv_cq_ex cq_ex;
};
};
struct irdma_cmpl_gen {
LIST_ENTRY(irdma_cmpl_gen) list;
struct irdma_cq_poll_info cpi;
};
struct irdma_ucq {
struct verbs_cq verbs_cq;
struct verbs_mr vmr;
struct verbs_mr vmr_shadow_area;
pthread_spinlock_t lock;
size_t buf_size;
bool is_armed;
bool skip_arm;
bool arm_sol;
bool skip_sol;
int comp_vector;
uint32_t report_rtt;
struct irdma_uqp *uqp;
struct irdma_cq_uk cq;
struct list_head resize_list;
/* for extended CQ completion fields */
struct irdma_cq_poll_info cur_cqe;
struct list_head_cmpl cmpl_generated;
};
struct irdma_uqp {
struct ibv_qp ibv_qp;
struct ibv_qp_attr attr;
struct irdma_ucq *send_cq;
struct irdma_ucq *recv_cq;
struct verbs_mr vmr;
size_t buf_size;
uint32_t irdma_drv_opt;
pthread_spinlock_t lock;
uint16_t sq_sig_all;
uint16_t qperr;
uint16_t rsvd;
uint32_t pending_rcvs;
uint32_t wq_size;
struct ibv_recv_wr *pend_rx_wr;
struct irdma_qp_uk qp;
enum ibv_qp_type qp_type;
enum ibv_qp_attr_mask attr_mask;
struct irdma_sge *recv_sges;
pthread_t flush_thread;
};
struct irdma_umr {
struct verbs_mr vmr;
uint32_t acc_flags;
};
/* irdma_uverbs.c */
int irdma_uquery_device_ex(struct ibv_context *context,
const struct ibv_query_device_ex_input *input,
struct ibv_device_attr_ex *attr, size_t attr_size);
int irdma_uquery_port(struct ibv_context *context, uint8_t port,
struct ibv_port_attr *attr);
struct ibv_pd *irdma_ualloc_pd(struct ibv_context *context);
int irdma_ufree_pd(struct ibv_pd *pd);
int irdma_uquery_device(struct ibv_context *, struct ibv_device_attr *);
struct ibv_mr *irdma_ureg_mr(struct ibv_pd *pd, void *addr, size_t length,
int access);
int irdma_udereg_mr(struct ibv_mr *mr);
struct ibv_mw *irdma_ualloc_mw(struct ibv_pd *pd, enum ibv_mw_type type);
int irdma_ubind_mw(struct ibv_qp *qp, struct ibv_mw *mw,
struct ibv_mw_bind *mw_bind);
int irdma_udealloc_mw(struct ibv_mw *mw);
struct ibv_cq *irdma_ucreate_cq(struct ibv_context *context, int cqe,
struct ibv_comp_channel *channel,
int comp_vector);
struct ibv_cq_ex *irdma_ucreate_cq_ex(struct ibv_context *context,
struct ibv_cq_init_attr_ex *attr_ex);
void irdma_ibvcq_ex_fill_priv_funcs(struct irdma_ucq *iwucq,
struct ibv_cq_init_attr_ex *attr_ex);
int irdma_uresize_cq(struct ibv_cq *cq, int cqe);
int irdma_udestroy_cq(struct ibv_cq *cq);
int irdma_upoll_cq(struct ibv_cq *cq, int entries, struct ibv_wc *entry);
int irdma_uarm_cq(struct ibv_cq *cq, int solicited);
void irdma_cq_event(struct ibv_cq *cq);
struct ibv_qp *irdma_ucreate_qp(struct ibv_pd *pd,
struct ibv_qp_init_attr *attr);
int irdma_uquery_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, int attr_mask,
struct ibv_qp_init_attr *init_attr);
int irdma_umodify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
int attr_mask);
int irdma_udestroy_qp(struct ibv_qp *qp);
int irdma_upost_send(struct ibv_qp *ib_qp, struct ibv_send_wr *ib_wr,
struct ibv_send_wr **bad_wr);
int irdma_upost_recv(struct ibv_qp *ib_qp, struct ibv_recv_wr *ib_wr,
struct ibv_recv_wr **bad_wr);
struct ibv_ah *irdma_ucreate_ah(struct ibv_pd *ibpd, struct ibv_ah_attr *attr);
int irdma_udestroy_ah(struct ibv_ah *ibah);
int irdma_uattach_mcast(struct ibv_qp *qp, const union ibv_gid *gid,
uint16_t lid);
int irdma_udetach_mcast(struct ibv_qp *qp, const union ibv_gid *gid,
uint16_t lid);
void irdma_async_event(struct ibv_context *context,
struct ibv_async_event *event);
void irdma_set_hw_attrs(struct irdma_hw_attrs *attrs);
void *irdma_mmap(int fd, off_t offset);
void irdma_munmap(void *map);
void *irdma_flush_thread(void *arg);
#endif /* IRDMA_UMAIN_H */

contrib/ofed/libirdma/irdma_user.h (new file)

@@ -0,0 +1,479 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2015 - 2021 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#ifndef IRDMA_USER_H
#define IRDMA_USER_H
#include "osdep.h"
#define irdma_handle void *
#define irdma_adapter_handle irdma_handle
#define irdma_qp_handle irdma_handle
#define irdma_cq_handle irdma_handle
#define irdma_pd_id irdma_handle
#define irdma_stag_handle irdma_handle
#define irdma_stag_index u32
#define irdma_stag u32
#define irdma_stag_key u8
#define irdma_tagged_offset u64
#define irdma_access_privileges u32
#define irdma_physical_fragment u64
#define irdma_address_list u64 *
#define irdma_sgl struct irdma_sge *
#define IRDMA_MAX_MR_SIZE 0x200000000000ULL
#define IRDMA_ACCESS_FLAGS_LOCALREAD 0x01
#define IRDMA_ACCESS_FLAGS_LOCALWRITE 0x02
#define IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY 0x04
#define IRDMA_ACCESS_FLAGS_REMOTEREAD 0x05
#define IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY 0x08
#define IRDMA_ACCESS_FLAGS_REMOTEWRITE 0x0a
#define IRDMA_ACCESS_FLAGS_BIND_WINDOW 0x10
#define IRDMA_ACCESS_FLAGS_ZERO_BASED 0x20
#define IRDMA_ACCESS_FLAGS_ALL 0x3f
#define IRDMA_OP_TYPE_RDMA_WRITE 0x00
#define IRDMA_OP_TYPE_RDMA_READ 0x01
#define IRDMA_OP_TYPE_SEND 0x03
#define IRDMA_OP_TYPE_SEND_INV 0x04
#define IRDMA_OP_TYPE_SEND_SOL 0x05
#define IRDMA_OP_TYPE_SEND_SOL_INV 0x06
#define IRDMA_OP_TYPE_RDMA_WRITE_SOL 0x0d
#define IRDMA_OP_TYPE_BIND_MW 0x08
#define IRDMA_OP_TYPE_FAST_REG_NSMR 0x09
#define IRDMA_OP_TYPE_INV_STAG 0x0a
#define IRDMA_OP_TYPE_RDMA_READ_INV_STAG 0x0b
#define IRDMA_OP_TYPE_NOP 0x0c
#define IRDMA_OP_TYPE_REC 0x3e
#define IRDMA_OP_TYPE_REC_IMM 0x3f
#define IRDMA_FLUSH_MAJOR_ERR 1
enum irdma_device_caps_const {
IRDMA_WQE_SIZE = 4,
IRDMA_CQP_WQE_SIZE = 8,
IRDMA_CQE_SIZE = 4,
IRDMA_EXTENDED_CQE_SIZE = 8,
IRDMA_AEQE_SIZE = 2,
IRDMA_CEQE_SIZE = 1,
IRDMA_CQP_CTX_SIZE = 8,
IRDMA_SHADOW_AREA_SIZE = 8,
IRDMA_GATHER_STATS_BUF_SIZE = 1024,
IRDMA_MIN_IW_QP_ID = 0,
IRDMA_QUERY_FPM_BUF_SIZE = 176,
IRDMA_COMMIT_FPM_BUF_SIZE = 176,
IRDMA_MAX_IW_QP_ID = 262143,
IRDMA_MIN_CEQID = 0,
IRDMA_MAX_CEQID = 1023,
IRDMA_CEQ_MAX_COUNT = IRDMA_MAX_CEQID + 1,
IRDMA_MIN_CQID = 0,
IRDMA_MAX_CQID = 524287,
IRDMA_MIN_AEQ_ENTRIES = 1,
IRDMA_MAX_AEQ_ENTRIES = 524287,
IRDMA_MIN_CEQ_ENTRIES = 1,
IRDMA_MAX_CEQ_ENTRIES = 262143,
IRDMA_MIN_CQ_SIZE = 1,
IRDMA_MAX_CQ_SIZE = 1048575,
IRDMA_DB_ID_ZERO = 0,
/* 64K + 1 */
IRDMA_MAX_OUTBOUND_MSG_SIZE = 65537,
/* 64K + 1 */
IRDMA_MAX_INBOUND_MSG_SIZE = 65537,
IRDMA_MAX_PUSH_PAGE_COUNT = 1024,
IRDMA_MAX_PE_ENA_VF_COUNT = 32,
IRDMA_MAX_VF_FPM_ID = 47,
IRDMA_MAX_SQ_PAYLOAD_SIZE = 2145386496,
IRDMA_MAX_INLINE_DATA_SIZE = 101,
IRDMA_MAX_WQ_ENTRIES = 32768,
IRDMA_Q2_BUF_SIZE = 256,
IRDMA_QP_CTX_SIZE = 256,
IRDMA_MAX_PDS = 262144,
};
enum irdma_addressing_type {
IRDMA_ADDR_TYPE_ZERO_BASED = 0,
IRDMA_ADDR_TYPE_VA_BASED = 1,
};
enum irdma_flush_opcode {
FLUSH_INVALID = 0,
FLUSH_GENERAL_ERR,
FLUSH_PROT_ERR,
FLUSH_REM_ACCESS_ERR,
FLUSH_LOC_QP_OP_ERR,
FLUSH_REM_OP_ERR,
FLUSH_LOC_LEN_ERR,
FLUSH_FATAL_ERR,
FLUSH_MW_BIND_ERR,
FLUSH_REM_INV_REQ_ERR,
FLUSH_RETRY_EXC_ERR,
};
enum irdma_cmpl_status {
IRDMA_COMPL_STATUS_SUCCESS = 0,
IRDMA_COMPL_STATUS_FLUSHED,
IRDMA_COMPL_STATUS_INVALID_WQE,
IRDMA_COMPL_STATUS_QP_CATASTROPHIC,
IRDMA_COMPL_STATUS_REMOTE_TERMINATION,
IRDMA_COMPL_STATUS_INVALID_STAG,
IRDMA_COMPL_STATUS_BASE_BOUND_VIOLATION,
IRDMA_COMPL_STATUS_ACCESS_VIOLATION,
IRDMA_COMPL_STATUS_INVALID_PD_ID,
IRDMA_COMPL_STATUS_WRAP_ERROR,
IRDMA_COMPL_STATUS_STAG_INVALID_PDID,
IRDMA_COMPL_STATUS_RDMA_READ_ZERO_ORD,
IRDMA_COMPL_STATUS_QP_NOT_PRIVLEDGED,
IRDMA_COMPL_STATUS_STAG_NOT_INVALID,
IRDMA_COMPL_STATUS_INVALID_PHYS_BUF_SIZE,
IRDMA_COMPL_STATUS_INVALID_PHYS_BUF_ENTRY,
IRDMA_COMPL_STATUS_INVALID_FBO,
IRDMA_COMPL_STATUS_INVALID_LEN,
IRDMA_COMPL_STATUS_INVALID_ACCESS,
IRDMA_COMPL_STATUS_PHYS_BUF_LIST_TOO_LONG,
IRDMA_COMPL_STATUS_INVALID_VIRT_ADDRESS,
IRDMA_COMPL_STATUS_INVALID_REGION,
IRDMA_COMPL_STATUS_INVALID_WINDOW,
IRDMA_COMPL_STATUS_INVALID_TOTAL_LEN,
IRDMA_COMPL_STATUS_UNKNOWN,
};
enum irdma_cmpl_notify {
IRDMA_CQ_COMPL_EVENT = 0,
IRDMA_CQ_COMPL_SOLICITED = 1,
};
enum irdma_qp_caps {
IRDMA_WRITE_WITH_IMM = 1,
IRDMA_SEND_WITH_IMM = 2,
IRDMA_ROCE = 4,
IRDMA_PUSH_MODE = 8,
};
struct irdma_qp_uk;
struct irdma_cq_uk;
struct irdma_qp_uk_init_info;
struct irdma_cq_uk_init_info;
struct irdma_sge {
irdma_tagged_offset tag_off;
u32 len;
irdma_stag stag;
};
struct irdma_ring {
volatile u32 head;
volatile u32 tail;
u32 size;
};
struct irdma_cqe {
__le64 buf[IRDMA_CQE_SIZE];
};
struct irdma_extended_cqe {
__le64 buf[IRDMA_EXTENDED_CQE_SIZE];
};
struct irdma_post_send {
irdma_sgl sg_list;
u32 num_sges;
u32 qkey;
u32 dest_qp;
u32 ah_id;
};
struct irdma_post_inline_send {
void *data;
u32 len;
u32 qkey;
u32 dest_qp;
u32 ah_id;
};
struct irdma_post_rq_info {
u64 wr_id;
irdma_sgl sg_list;
u32 num_sges;
};
struct irdma_rdma_write {
irdma_sgl lo_sg_list;
u32 num_lo_sges;
struct irdma_sge rem_addr;
};
struct irdma_inline_rdma_write {
void *data;
u32 len;
struct irdma_sge rem_addr;
};
struct irdma_rdma_read {
irdma_sgl lo_sg_list;
u32 num_lo_sges;
struct irdma_sge rem_addr;
};
struct irdma_bind_window {
irdma_stag mr_stag;
u64 bind_len;
void *va;
enum irdma_addressing_type addressing_type;
bool ena_reads:1;
bool ena_writes:1;
irdma_stag mw_stag;
bool mem_window_type_1:1;
};
struct irdma_inv_local_stag {
irdma_stag target_stag;
};
struct irdma_post_sq_info {
u64 wr_id;
u8 op_type;
u8 l4len;
bool signaled:1;
bool read_fence:1;
bool local_fence:1;
bool inline_data:1;
bool imm_data_valid:1;
bool push_wqe:1;
bool report_rtt:1;
bool udp_hdr:1;
bool defer_flag:1;
u32 imm_data;
u32 stag_to_inv;
union {
struct irdma_post_send send;
struct irdma_rdma_write rdma_write;
struct irdma_rdma_read rdma_read;
struct irdma_bind_window bind_window;
struct irdma_inv_local_stag inv_local_stag;
struct irdma_inline_rdma_write inline_rdma_write;
struct irdma_post_inline_send inline_send;
} op;
};
struct irdma_cq_poll_info {
u64 wr_id;
irdma_qp_handle qp_handle;
u32 bytes_xfered;
u32 tcp_seq_num_rtt;
u32 qp_id;
u32 ud_src_qpn;
u32 imm_data;
irdma_stag inv_stag; /* or L_R_Key */
enum irdma_cmpl_status comp_status;
u16 major_err;
u16 minor_err;
u16 ud_vlan;
u8 ud_smac[6];
u8 op_type;
bool stag_invalid_set:1; /* or L_R_Key set */
bool push_dropped:1;
bool error:1;
bool solicited_event:1;
bool ipv4:1;
bool ud_vlan_valid:1;
bool ud_smac_valid:1;
bool imm_valid:1;
bool signaled:1;
};
int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
struct irdma_post_sq_info *info, bool post_sq);
int irdma_uk_inline_send(struct irdma_qp_uk *qp,
struct irdma_post_sq_info *info, bool post_sq);
int irdma_uk_mw_bind(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
bool post_sq);
int irdma_uk_post_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled,
bool post_sq);
int irdma_uk_post_receive(struct irdma_qp_uk *qp,
struct irdma_post_rq_info *info);
void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp);
int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
bool inv_stag, bool post_sq);
int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
bool post_sq);
int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
bool post_sq);
int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
struct irdma_post_sq_info *info,
bool post_sq);
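/*
 * A minimal posting sketch built on the declarations above.  It assumes
 * the QP was initialized with irdma_uk_qp_init(), that irdma_sgl is a
 * pointer to struct irdma_sge (as its use in this header suggests), and
 * that IRDMA_OP_TYPE_RDMA_WRITE is the write opcode defined elsewhere in
 * this header; laddr/lkey/raddr/rkey are hypothetical caller values.
 */
static inline int
irdma_example_post_write(struct irdma_qp_uk *qp, void *laddr, u32 len,
			 irdma_stag lkey, u64 raddr, irdma_stag rkey)
{
	struct irdma_sge lsge = {
		.tag_off = (irdma_tagged_offset)(uintptr_t)laddr,
		.len = len,
		.stag = lkey,
	};
	struct irdma_post_sq_info info = {
		.wr_id = 0x1234, /* opaque cookie echoed back in the CQE */
		.op_type = IRDMA_OP_TYPE_RDMA_WRITE,
		.signaled = true,
	};

	info.op.rdma_write.lo_sg_list = &lsge;
	info.op.rdma_write.num_lo_sges = 1;
	info.op.rdma_write.rem_addr.tag_off = raddr;
	info.op.rdma_write.rem_addr.stag = rkey;

	/* post_sq == true also rings the doorbell */
	return irdma_uk_rdma_write(qp, &info, true);
}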
struct irdma_wqe_uk_ops {
void (*iw_copy_inline_data)(u8 *dest, u8 *src, u32 len, u8 polarity);
u16 (*iw_inline_data_size_to_quanta)(u32 data_size);
void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct irdma_sge *sge,
u8 valid);
void (*iw_set_mw_bind_wqe)(__le64 *wqe,
struct irdma_bind_window *op_info);
};
int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
struct irdma_cq_poll_info *info);
void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
enum irdma_cmpl_notify cq_notify);
void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int size);
void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *cq, u16 cnt);
int irdma_uk_cq_init(struct irdma_cq_uk *cq,
struct irdma_cq_uk_init_info *info);
int irdma_uk_qp_init(struct irdma_qp_uk *qp,
struct irdma_qp_uk_init_info *info);
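/*
 * A polling-loop sketch for the CQ helpers above.  The return convention
 * assumed here is 0 when a completion was consumed and a negative errno
 * (-ENOENT) when the CQ is empty; both assumptions should be checked
 * against irdma_uk_cq_poll_cmpl() itself.
 */
static inline int
irdma_example_drain_cq(struct irdma_cq_uk *cq)
{
	struct irdma_cq_poll_info info;
	int nerr = 0;

	while (irdma_uk_cq_poll_cmpl(cq, &info) == 0) {
		if (info.error)
			nerr++;	/* comp_status/major_err/minor_err hold details */
	}
	/* re-arm so the next completion raises an event */
	irdma_uk_cq_request_notification(cq, IRDMA_CQ_COMPL_EVENT);
	return nerr;
}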
struct irdma_sq_uk_wr_trk_info {
u64 wrid;
u32 wr_len;
u16 quanta;
u8 signaled;
u8 reserved[1];
};
struct irdma_qp_quanta {
__le64 elem[IRDMA_WQE_SIZE];
};
struct irdma_qp_uk {
struct irdma_qp_quanta *sq_base;
struct irdma_qp_quanta *rq_base;
struct irdma_uk_attrs *uk_attrs;
u32 IOMEM *wqe_alloc_db;
struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;
struct irdma_sig_wr_trk_info *sq_sigwrtrk_array;
u64 *rq_wrid_array;
__le64 *shadow_area;
__le32 *push_db;
__le64 *push_wqe;
struct irdma_ring sq_ring;
struct irdma_ring sq_sig_ring;
struct irdma_ring rq_ring;
struct irdma_ring initial_ring;
u32 qp_id;
u32 qp_caps;
u32 sq_size;
u32 rq_size;
u32 max_sq_frag_cnt;
u32 max_rq_frag_cnt;
u32 max_inline_data;
u32 last_rx_cmpl_idx;
u32 last_tx_cmpl_idx;
struct irdma_wqe_uk_ops wqe_ops;
u16 conn_wqes;
u8 qp_type;
u8 swqe_polarity;
u8 swqe_polarity_deferred;
u8 rwqe_polarity;
u8 rq_wqe_size;
u8 rq_wqe_size_multiplier;
bool deferred_flag:1;
bool push_mode:1; /* whether the last post wqe was pushed */
bool push_dropped:1;
bool first_sq_wq:1;
bool sq_flush_complete:1; /* Indicates flush was seen and SQ was empty after the flush */
bool rq_flush_complete:1; /* Indicates flush was seen and RQ was empty after the flush */
bool destroy_pending:1; /* Indicates the QP is being destroyed */
void *back_qp;
pthread_spinlock_t *lock;
bool force_fence;
u8 dbg_rq_flushed;
u16 ord_cnt;
u8 sq_flush_seen;
u8 rq_flush_seen;
u8 rd_fence_rate;
};
struct irdma_cq_uk {
struct irdma_cqe *cq_base;
u32 IOMEM *cqe_alloc_db;
u32 IOMEM *cq_ack_db;
__le64 *shadow_area;
u32 cq_id;
u32 cq_size;
struct irdma_ring cq_ring;
u8 polarity;
bool armed:1;
bool avoid_mem_cflct:1;
};
struct irdma_qp_uk_init_info {
struct irdma_qp_quanta *sq;
struct irdma_qp_quanta *rq;
struct irdma_uk_attrs *uk_attrs;
u32 IOMEM *wqe_alloc_db;
__le64 *shadow_area;
struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;
struct irdma_sig_wr_trk_info *sq_sigwrtrk_array;
u64 *rq_wrid_array;
u32 qp_id;
u32 qp_caps;
u32 sq_size;
u32 rq_size;
u32 max_sq_frag_cnt;
u32 max_rq_frag_cnt;
u32 max_inline_data;
u8 first_sq_wq;
u8 type;
u8 rd_fence_rate;
int abi_ver;
bool legacy_mode;
};
struct irdma_cq_uk_init_info {
u32 IOMEM *cqe_alloc_db;
u32 IOMEM *cq_ack_db;
struct irdma_cqe *cq_base;
__le64 *shadow_area;
u32 cq_size;
u32 cq_id;
bool avoid_mem_cflct;
};
__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
u16 quanta, u32 total_size,
struct irdma_post_sq_info *info);
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx);
int irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq);
int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq);
int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta);
int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size);
void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
u32 inline_data, u8 *shift);
int irdma_get_sqdepth(u32 max_hw_wq_quanta, u32 sq_size, u8 shift, u32 *wqdepth);
int irdma_get_rqdepth(u32 max_hw_rq_quanta, u32 rq_size, u8 shift, u32 *wqdepth);
void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
u32 wqe_idx, bool post_sq);
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx);
#endif /* IRDMA_USER_H */


@ -0,0 +1,10 @@
/* Export symbols should be added below according to
Documentation/versioning.md document. */
IRDMA_1.0 {
global: *;
local: *;
};
IRDMA_1.1 {
global: *;
} IRDMA_1.0;


@ -0,0 +1,213 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2021 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#ifndef _ICRDMA_OSDEP_H_
#define _ICRDMA_OSDEP_H_
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <infiniband/types.h>
#include <infiniband/verbs.h>
#include <infiniband/udma_barrier.h>
#include <sys/bus.h>
#include <sys/bus_dma.h>
#include <sys/endian.h>
#define ATOMIC atomic_t
#define IOMEM
#define IRDMA_NTOHL(a) ntohl(a)
#define IRDMA_NTOHS(a) ntohs(a)
#define MAKEMASK(m, s) ((m) << (s))
#define OS_TIMER timer_list
#define OS_LIST_HEAD list_head
#define OS_LIST_ENTRY list_head
#define DECLARE_HASHTABLE(n, b) struct hlist_head (n)[1 << (b)]
#define HASH_MIN(v, b) (sizeof(v) <= 4 ? hash_32(v, b) : hash_long(v, b))
#define HASH_FOR_EACH_RCU(n, b, o, m) for ((b) = 0, o = NULL; o == NULL && (b) < ARRAY_SIZE(n);\
(b)++)\
hlist_for_each_entry_rcu(o, &n[(b)], m)
#define HASH_FOR_EACH_POSSIBLE_RCU(n, o, m, k) \
hlist_for_each_entry_rcu(o, &n[jhash(&k, sizeof(k), 0) >> (32 - ilog2(ARRAY_SIZE(n)))],\
m)
#define HASH_FOR_EACH_POSSIBLE(n, o, m, k) \
hlist_for_each_entry(o, &n[jhash(&k, sizeof(k), 0) >> (32 - ilog2(ARRAY_SIZE(n)))],\
m)
#define HASH_ADD_RCU(h, n, k) \
hlist_add_head_rcu(n, &h[jhash(&k, sizeof(k), 0) >> (32 - ilog2(ARRAY_SIZE(h)))])
#define HASH_DEL_RCU(tbl, node) hlist_del_rcu(node)
#define HASH_ADD(h, n, k) \
hlist_add_head(n, &h[jhash(&k, sizeof(k), 0) >> (32 - ilog2(ARRAY_SIZE(h)))])
#define HASH_DEL(tbl, node) hlist_del(node)
#define WQ_UNBOUND_MAX_ACTIVE max_t(int, 512, num_possible_cpus() * 4)
#define if_addr_rlock(x)
#define if_addr_runlock(x)
/* constants */
#define STATS_TIMER_DELAY 60000
/* a couple of linux size defines */
#define SZ_128 128
#define SZ_2K (SZ_128 * 16)
#define SZ_1G (SZ_1K * SZ_1K * SZ_1K)
#define SPEED_1000 1000
#define SPEED_10000 10000
#define SPEED_20000 20000
#define SPEED_25000 25000
#define SPEED_40000 40000
#define SPEED_100000 100000
#define BIT_ULL(a) (1ULL << (a))
#define __aligned_u64 uint64_t __aligned(8)
#define VLAN_PRIO_SHIFT 13
/*
* debug definition section
*/
#define irdma_print(S, ...) printf("%s:%d "S, __FUNCTION__, __LINE__, ##__VA_ARGS__)
#define irdma_debug_buf(dev, mask, desc, buf, size) \
do { \
u32 i; \
if (!((mask) & (dev)->debug_mask)) { \
break; \
} \
irdma_debug(dev, mask, "%s\n", desc); \
irdma_debug(dev, mask, "starting address virt=%p phy=%lxh\n", buf, irdma_get_virt_to_phy(buf)); \
for (i = 0; i < size ; i += 8) \
irdma_debug(dev, mask, "index %03d val: %016lx\n", i, ((unsigned long *)buf)[i / 8]); \
} while(0)
#define irdma_debug(h, m, s, ...) \
do { \
if (!(h)) { \
if ((m) == IRDMA_DEBUG_INIT) \
printf("irdma INIT " s, ##__VA_ARGS__); \
} else if (((m) & (h)->debug_mask)) { \
printf("irdma " s, ##__VA_ARGS__); \
} \
} while (0)
extern unsigned int irdma_dbg;
#define libirdma_debug(fmt, args...) \
do { \
if (irdma_dbg) \
printf("libirdma-%s: " fmt, __func__, ##args); \
} while (0)
#define irdma_dev_err(a, b, ...) printf(b, ##__VA_ARGS__)
#define irdma_dev_warn(a, b, ...) printf(b, ##__VA_ARGS__) /*dev_warn(a, b)*/
#define irdma_dev_info(a, b, ...) printf(b, ##__VA_ARGS__)
#define irdma_pr_warn printf
#define ibdev_err(ibdev, fmt, ...) dev_err(&((ibdev)->dev), fmt, ##__VA_ARGS__)
#define dump_struct(s, sz, name) \
do { \
unsigned char *a; \
printf("%s %u", (name), (unsigned int)(sz)); \
for (a = (unsigned char*)(s); a < (unsigned char *)(s) + (sz) ; a ++) { \
if ((u64)a % 8 == 0) \
printf("\n%p ", a); \
printf("%2x ", *a); \
} \
printf("\n"); \
} while (0)
/*
* debug definition end
*/
typedef __be16 BE16;
typedef __be32 BE32;
typedef uintptr_t irdma_uintptr;
struct irdma_hw;
struct irdma_pci_f;
struct irdma_sc_dev;
struct irdma_sc_qp;
struct irdma_sc_vsi;
#define irdma_pr_info(fmt, args ...) printf("%s: WARN "fmt, __func__, ## args)
#define irdma_pr_err(fmt, args ...) printf("%s: ERR "fmt, __func__, ## args)
#define irdma_memcpy(a, b, c) memcpy((a), (b), (c))
#define irdma_memset(a, b, c) memset((a), (b), (c))
#define irdma_usec_delay(x) DELAY(x)
#define mdelay(x) DELAY((x) * 1000)
#define rt_tos2priority(tos) (((tos >> 1) & 0x8 >> 1) | ((tos >> 2) ^ ((tos >> 3) << 1)))
#define ah_attr_to_dmac(attr) ((attr).dmac)
#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \
ib_gid_to_network_type(gid_type, gid)
#define irdma_del_timer_compat(tt) del_timer((tt))
#define IRDMA_TAILQ_FOREACH CK_STAILQ_FOREACH
#define IRDMA_TAILQ_FOREACH_SAFE CK_STAILQ_FOREACH_SAFE
#define between(a, b, c) (bool)((c) - (a) >= (b) - (a))
static inline void db_wr32(__u32 val, __u32 *wqe_word)
{
*wqe_word = val;
}
void *hw_to_dev(struct irdma_hw *hw);
struct irdma_dma_mem {
void *va;
u64 pa;
bus_dma_tag_t tag;
bus_dmamap_t map;
bus_dma_segment_t seg;
bus_size_t size;
int nseg;
int flags;
};
struct irdma_virt_mem {
void *va;
u32 size;
};
#ifndef verbs_mr
enum ibv_mr_type {
IBV_MR_TYPE_MR,
IBV_MR_TYPE_NULL_MR,
};
struct verbs_mr {
struct ibv_mr ibv_mr;
enum ibv_mr_type mr_type;
};
#define verbs_get_mr(mr) container_of((mr), struct verbs_mr, ibv_mr)
#endif
#endif /* _ICRDMA_OSDEP_H_ */


@ -12,6 +12,7 @@ SUBDIR= \
libcxgb4 \
libmlx4 \
libmlx5 \
libirdma \
libibnetdisc \
libopensm
@ -22,6 +23,7 @@ SUBDIR_DEPEND_libvendor= libibumad
SUBDIR_DEPEND_libcxgb4= libibverbs
SUBDIR_DEPEND_libmlx4= libibverbs
SUBDIR_DEPEND_libmlx5= libibverbs
SUBDIR_DEPEND_libirdma= libibverbs
SUBDIR_DEPEND_libibnetdisc= libibmad libibumad complib
SUBDIR_PARALLEL=


@ -0,0 +1,23 @@
# $FreeBSD: releng/12.1/lib/ofed/libirdma/Makefile 336568 2018-07-20 23:49:57Z kib $
_spath= ${SRCTOP}/contrib/ofed/libirdma
_ipath= ${SRCTOP}/contrib/ofed/include
lin_inc=/usr/src/sys/compat/linuxkpi/common/include
.PATH: ${_spath}
SHLIBDIR?= /lib
LIB= irdma
SHLIB_MAJOR= 1
MK_PROFILE= no
SRCS= \
irdma_umain.c \
irdma_uverbs.c \
irdma_uk.c
LIBADD= ibverbs pthread
CFLAGS+= -I${_spath} -I${SRCTOP}/contrib/ofed/libibverbs
VERSION_MAP= ${_spath}/libirdma.map
CFLAGS+= -ferror-limit=1000
.include <bsd.lib.mk>

share/man/man4/irdma.4

@ -0,0 +1,233 @@
.\" Copyright(c) 2016 - 2022 Intel Corporation
.\" All rights reserved.
.\"
.\" This software is available to you under a choice of one of two
.\" licenses. You may choose to be licensed under the terms of the GNU
.\" General Public License (GPL) Version 2, available from the file
.\" COPYING in the main directory of this source tree, or the
.\" OpenFabrics.org BSD license below:
.\"
.\" Redistribution and use in source and binary forms, with or
.\" without modification, are permitted provided that the following
.\" conditions are met:
.\"
.\" - Redistributions of source code must retain the above
.\" copyright notice, this list of conditions and the following
.\" disclaimer.
.\"
.\" - Redistributions in binary form must reproduce the above
.\" copyright notice, this list of conditions and the following
.\" disclaimer in the documentation and/or other materials
.\" provided with the distribution.
.\"
.\" THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
.\" EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
.\" MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
.\" NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
.\" BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
.\" ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
.\" CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
.\" SOFTWARE.
.\"
.\" $FreeBSD$
.\"
.Dd March 30, 2022
.Dt IRDMA 4
.Os
.Sh NAME
.Nm irdma
.Nd RDMA FreeBSD driver for Intel(R) Ethernet Controller E810
.Sh SYNOPSIS
This module relies on
.Xr if_ice 4
.Bl -tag -width indent
.It The following kernel options should be included in the configuration:
.Cd options OFED
.Cd options OFED_DEBUG_INIT
.Cd options COMPAT_LINUXKPI
.Cd options SDP
.Cd options IPOIB_CM
.El
.Sh DESCRIPTION
.Ss Features
The
.Nm
driver provides RDMA protocol support on RDMA-capable Intel Ethernet 800 Series NICs which are supported by
.Xr if_ice 4 .
.Pp
The driver supports both iWARP and RoCEv2 protocols.
.Sh CONFIGURATION
.Ss TUNABLES
Tunables can be set at the
.Xr loader 8
prompt before booting the kernel or stored in
.Xr loader.conf 5 .
.Bl -tag -width indent
.It Va dev.irdma<interface_number>.roce_enable
enables RoCEv2 protocol usage on the <interface_number> interface.
.Pp
By default, the RoCEv2 protocol is used.
.It Va dev.irdma<interface_number>.dcqcn_cc_cfg_valid
indicates that all DCQCN parameters are valid and should be updated in registers or QP context.
.Pp
Setting this parameter to 1 means that settings in
.Em dcqcn_min_dec_factor, dcqcn_min_rate_MBps, dcqcn_F, dcqcn_T,
.Em dcqcn_B, dcqcn_rai_factor, dcqcn_hai_factor, dcqcn_rreduce_mperiod
are taken into account. Otherwise default values are used.
.Pp
Note: "roce_enable" must also be set for this tunable to take effect.
.It Va dev.irdma<interface_number>.dcqcn_min_dec_factor
The minimum factor by which the current transmit rate can be changed when processing a CNP. Value is given as a percentage (1-100).
.Pp
Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable to take effect.
.It Va dev.irdma<interface_number>.dcqcn_min_rate_MBps
The minimum rate limit, in Mbits per second.
.Pp
Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable to take effect.
.It Va dev.irdma<interface_number>.dcqcn_F
The number of times to stay in each stage of bandwidth recovery.
.Pp
Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable to take effect.
.It Va dev.irdma<interface_number>.dcqcn_T
The number of microseconds that should elapse before increasing the CWND in DCQCN mode.
.Pp
Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable to take effect.
.It Va dev.irdma<interface_number>.dcqcn_B
The number of bytes to transmit before updating CWND in DCQCN mode.
.Pp
Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable to take effect.
.It Va dev.irdma<interface_number>.dcqcn_rai_factor
The number of MSS to add to the congestion window in additive increase mode.
.Pp
Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable to take effect.
.It Va dev.irdma<interface_number>.dcqcn_hai_factor
The number of MSS to add to the congestion window in hyperactive increase mode.
.Pp
Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable to take effect.
.It Va dev.irdma<interface_number>.dcqcn_rreduce_mperiod
The minimum time between 2 consecutive rate reductions for a single flow. Rate reduction will occur only if a CNP is received during the relevant time interval.
.Pp
Note: "roce_enable" and "dcqcn_cc_cfg_valid" must also be set for this tunable to take effect.
.Ss SYSCTL PROCEDURES
Sysctl controls are available for runtime adjustments.
.Bl -tag -width indent
.It Va dev.irdma<interface_number>.debug
defines the level of debug messages.
.Pp
Typical value: 1 for errors only, 0x7fffffff for full debug.
.It Va dev.irdma<interface_number>.dcqcn_enable
enables the DCQCN algorithm for RoCEv2.
.Pp
Note: "roce_enable" must also be set for this sysctl to take effect.
.Pp
Note: The change may be set at any time, but it will be applied only to newly created QPs.
.El
.Ss TESTING
.Bl -enum
.It
To load the irdma driver, run:
.Bl -tag -width indent
.It
kldload irdma
.El
If if_ice is not already loaded, the system loads it automatically. If the irdma driver does not load, check whether the sysctl
.Va hw.ice.irdma
is set to 1. To change the value, add:
.Bl -tag -width indent
.It
hw.ice.irdma=1
.El
to
.Pa /boot/loader.conf
and reboot.
.It
To check that the driver was loaded, run:
.Bl -tag -width indent
.It
sysctl -a | grep infiniband
.El
If the driver loaded correctly, around 190 entries per PF will appear.
.It
Each interface of the card may work in either iWARP or RoCEv2 mode. To enable RoCEv2 compatibility, add:
.Bl -tag -width indent
.It
dev.irdma<interface_number>.roce_enable=1
.El
where <interface_number> is the number of the ice interface on which the
RoCEv2 protocol is to be enabled, to:
.Bl -tag -width indent
.It
.Pa /boot/loader.conf
.El
for instance:
.Bl -tag -width indent
.It
dev.irdma0.roce_enable=0
.It
dev.irdma1.roce_enable=1
.El
will keep iWARP mode on ice0 and enable RoCEv2 mode on interface ice1. The RoCEv2 mode is the default.
.Pp
To check irdma roce_enable status, run:
.Bl -tag -width indent
.It
sysctl dev.irdma<interface_number>.roce_enable
.El
for instance:
.Bl -tag -width indent
.It
sysctl dev.irdma2.roce_enable
.El
where a returned value of '0' indicates iWARP mode, and '1' indicates RoCEv2 mode.
.Pp
Note: An interface configured in one mode will not be able to connect
to a node configured in another mode.
.Pp
Note: RoCEv2 support is currently limited and intended for functional testing only.
DCB and Priority Flow Control (PFC) are not currently supported, which
may lead to significant performance loss or connectivity issues.
.It
Enable flow control in the ice driver:
.Bl -tag -width indent
.It
sysctl dev.ice.<interface_number>.fc=3
.El
Enable flow control on the switch your system is connected to. See your
switch documentation for details.
.It
The source code for krping software is provided with the kernel in
/usr/src/sys/contrib/rdma/krping/. To compile the software, change
directory to /usr/src/sys/modules/rdma/krping/ and invoke the following:
.Bl -tag -width indent
.It
make clean
.It
make
.It
make install
.El
.It
Start a krping server on one machine:
.Bl -tag -width indent
.It
echo size=64,count=1,port=6601,addr=100.0.0.189,server > /dev/krping
.El
.It
Connect a client from another machine:
.Bl -tag -width indent
.It
echo size=64,count=1,port=6601,addr=100.0.0.189,client > /dev/krping
.El
.El
.Sh SUPPORT
For general information and support, go to the Intel support website at:
.Lk http://support.intel.com/ .
.Pp
If an issue is identified with this driver with a supported adapter, email all the specific information related to the issue to
.Mt freebsd@intel.com .
.Sh SEE ALSO
.Xr if_ice 4
.Sh AUTHORS
.An -nosplit
The
.Nm
driver was prepared by
.An Bartosz Sobczak Aq Mt bartosz.sobczak@intel.com .


@ -83,6 +83,7 @@ LIBIBVERBS?= ${LIBDESTDIR}${LIBDIR_BASE}/libibverbs.a
LIBICP?= ${LIBDESTDIR}${LIBDIR_BASE}/libicp.a
LIBIPSEC?= ${LIBDESTDIR}${LIBDIR_BASE}/libipsec.a
LIBIPT?= ${LIBDESTDIR}${LIBDIR_BASE}/libipt.a
LIBIRDMA?= ${LIBDESTDIR}${LIBDIR_BASE}/libirdma.a
LIBISCSIUTIL?= ${LIBDESTDIR}${LIBDIR_BASE}/libiscsiutil.a
LIBJAIL?= ${LIBDESTDIR}${LIBDIR_BASE}/libjail.a
LIBKADM5CLNT?= ${LIBDESTDIR}${LIBDIR_BASE}/libkadm5clnt.a


@ -243,6 +243,7 @@ _LIBRARIES+= \
ibnetdisc \
ibumad \
ibverbs \
irdma \
mlx4 \
mlx5 \
rdmacm \
@ -431,6 +432,7 @@ _DP_ibmad= ibumad
_DP_ibnetdisc= osmcomp ibmad ibumad
_DP_ibumad=
_DP_ibverbs=
_DP_irdma= ibverbs pthread
_DP_mlx4= ibverbs pthread
_DP_mlx5= ibverbs pthread
_DP_rdmacm= ibverbs
@ -677,6 +679,7 @@ LIBIBMADDIR= ${OBJTOP}/lib/ofed/libibmad
LIBIBNETDISCDIR=${OBJTOP}/lib/ofed/libibnetdisc
LIBIBUMADDIR= ${OBJTOP}/lib/ofed/libibumad
LIBIBVERBSDIR= ${OBJTOP}/lib/ofed/libibverbs
LIBIRDMADIR= ${OBJTOP}/lib/ofed/libirdma
LIBMLX4DIR= ${OBJTOP}/lib/ofed/libmlx4
LIBMLX5DIR= ${OBJTOP}/lib/ofed/libmlx5
LIBRDMACMDIR= ${OBJTOP}/lib/ofed/librdmacm


@ -292,6 +292,8 @@ device cpufreq
# igc: Intel I225 2.5Gb Ethernet adapter
# ipw: Intel PRO/Wireless 2100 IEEE 802.11 adapter
# Requires the ipw firmware module
# irdma: Intel 800 Series RDMA driver
# Requires the ice module
# iwi: Intel PRO/Wireless 2200BG/2225BG/2915ABG IEEE 802.11 adapters
# Requires the iwi firmware module
# iwn: Intel Wireless WiFi Link 1000/105/135/2000/4965/5000/6000/6050 abgn
@ -319,6 +321,7 @@ device ixl # Intel 700 Series Physical Function
device iavf # Intel Adaptive Virtual Function
device ice # Intel 800 Series Physical Function
device ice_ddp # Intel 800 Series DDP Package
device irdma # Intel 800 Series RDMA driver
device mthca # Mellanox HCA InfiniBand
device mlx4 # Shared code module between IB and Ethernet
device mlx4ib # Mellanox ConnectX HCA InfiniBand


@ -4715,6 +4715,37 @@ ofed/drivers/infiniband/ulp/sdp/sdp_cma.c optional sdp inet \
ofed/drivers/infiniband/ulp/sdp/sdp_tx.c optional sdp inet \
compile-with "${OFED_C} -I$S/ofed/drivers/infiniband/ulp/sdp/ ${NO_WUNUSED_BUT_SET_VARIABLE}"
dev/irdma/icrdma.c optional irdma ice pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_cm.c optional irdma ice pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_ctrl.c optional irdma ice pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_hmc.c optional irdma ice pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_hw.c optional irdma ice pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/icrdma_hw.c optional irdma ice pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/fbsd_kcompat.c optional irdma ice pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_kcompat.c optional irdma ice pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_pble.c optional irdma ice pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_puda.c optional irdma ice pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_uda.c optional irdma ice pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_uk.c optional irdma ice pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_utils.c optional irdma ice pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_verbs.c optional irdma ice pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/irdma/irdma_ws.c optional irdma ice pci ofed \
compile-with "${OFED_C} -I$S/dev/ice/"
dev/mthca/mthca_allocator.c optional mthca pci ofed \
compile-with "${OFED_C}"
dev/mthca/mthca_av.c optional mthca pci ofed \


@ -0,0 +1,736 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2021 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#include "osdep.h"
#include "ice_rdma.h"
#include "irdma_di_if.h"
#include "irdma_main.h"
#include <sys/gsb_crc32.h>
#include <netinet/in_fib.h>
#include <netinet6/in6_fib.h>
#include <net/route/nhop.h>
/* additional QP debugging option. Keep false unless needed */
bool irdma_upload_context = false;
inline u32
irdma_rd32(struct irdma_dev_ctx *dev_ctx, u32 reg)
{
KASSERT(reg < dev_ctx->mem_bus_space_size,
("irdma: register offset %#jx too large (max is %#jx)",
(uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));
return (bus_space_read_4(dev_ctx->mem_bus_space_tag,
dev_ctx->mem_bus_space_handle, reg));
}
inline void
irdma_wr32(struct irdma_dev_ctx *dev_ctx, u32 reg, u32 value)
{
KASSERT(reg < dev_ctx->mem_bus_space_size,
("irdma: register offset %#jx too large (max is %#jx)",
(uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));
bus_space_write_4(dev_ctx->mem_bus_space_tag,
dev_ctx->mem_bus_space_handle, reg, value);
}
inline u64
irdma_rd64(struct irdma_dev_ctx *dev_ctx, u32 reg)
{
KASSERT(reg < dev_ctx->mem_bus_space_size,
("irdma: register offset %#jx too large (max is %#jx)",
(uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));
return (bus_space_read_8(dev_ctx->mem_bus_space_tag,
dev_ctx->mem_bus_space_handle, reg));
}
inline void
irdma_wr64(struct irdma_dev_ctx *dev_ctx, u32 reg, u64 value)
{
KASSERT(reg < dev_ctx->mem_bus_space_size,
("irdma: register offset %#jx too large (max is %#jx)",
(uintmax_t)reg, (uintmax_t)dev_ctx->mem_bus_space_size));
bus_space_write_8(dev_ctx->mem_bus_space_tag,
dev_ctx->mem_bus_space_handle, reg, value);
}
int
irdma_register_qset(struct irdma_sc_vsi *vsi, struct irdma_ws_node *tc_node)
{
struct irdma_device *iwdev = vsi->back_vsi;
struct ice_rdma_peer *peer = iwdev->rf->peer_info;
struct ice_rdma_request req = {0};
struct ice_rdma_qset_update *res = &req.res;
req.type = ICE_RDMA_EVENT_QSET_REGISTER;
res->cnt_req = 1;
res->res_type = ICE_RDMA_QSET_ALLOC;
res->qsets.qs_handle = tc_node->qs_handle;
res->qsets.tc = tc_node->traffic_class;
res->qsets.vsi_id = vsi->vsi_idx;
IRDMA_DI_REQ_HANDLER(peer, &req);
tc_node->l2_sched_node_id = res->qsets.teid;
vsi->qos[tc_node->user_pri].l2_sched_node_id =
res->qsets.teid;
return 0;
}
void
irdma_unregister_qset(struct irdma_sc_vsi *vsi, struct irdma_ws_node *tc_node)
{
struct irdma_device *iwdev = vsi->back_vsi;
struct ice_rdma_peer *peer = iwdev->rf->peer_info;
struct ice_rdma_request req = {0};
struct ice_rdma_qset_update *res = &req.res;
req.type = ICE_RDMA_EVENT_QSET_REGISTER;
res->res_allocated = 1;
res->res_type = ICE_RDMA_QSET_FREE;
res->qsets.vsi_id = vsi->vsi_idx;
res->qsets.teid = tc_node->l2_sched_node_id;
res->qsets.qs_handle = tc_node->qs_handle;
IRDMA_DI_REQ_HANDLER(peer, &req);
}
void *
hw_to_dev(struct irdma_hw *hw)
{
struct irdma_pci_f *rf;
rf = container_of(hw, struct irdma_pci_f, hw);
return rf->pcidev;
}
void
irdma_free_hash_desc(void *desc)
{
return;
}
int
irdma_init_hash_desc(void **desc)
{
return 0;
}
int
irdma_ieq_check_mpacrc(void *desc,
void *addr, u32 len, u32 val)
{
u32 crc = calculate_crc32c(0xffffffff, addr, len) ^ 0xffffffff;
int ret_code = 0;
if (crc != val) {
irdma_pr_err("mpa crc check fail %x %x\n", crc, val);
ret_code = -EINVAL;
}
printf("%s: result crc=%x value=%x\n", __func__, crc, val);
return ret_code;
}
/**
* irdma_add_ipv6_addr - add ipv6 address to the hw arp table
* @iwdev: irdma device
* @ifp: interface network device pointer
*/
static void
irdma_add_ipv6_addr(struct irdma_device *iwdev, struct ifnet *ifp)
{
struct ifaddr *ifa, *tmp;
struct sockaddr_in6 *sin6;
u32 local_ipaddr6[4];
u8 *mac_addr;
char ip6buf[INET6_ADDRSTRLEN];
if_addr_rlock(ifp);
IRDMA_TAILQ_FOREACH_SAFE(ifa, &ifp->if_addrhead, ifa_link, tmp) {
sin6 = (struct sockaddr_in6 *)ifa->ifa_addr;
if (sin6->sin6_family != AF_INET6)
continue;
irdma_copy_ip_ntohl(local_ipaddr6, (u32 *)&sin6->sin6_addr);
mac_addr = IF_LLADDR(ifp);
printf("%s:%d IP=%s, MAC=%02x:%02x:%02x:%02x:%02x:%02x\n",
__func__, __LINE__,
ip6_sprintf(ip6buf, &sin6->sin6_addr),
mac_addr[0], mac_addr[1], mac_addr[2],
mac_addr[3], mac_addr[4], mac_addr[5]);
irdma_manage_arp_cache(iwdev->rf, mac_addr, local_ipaddr6,
IRDMA_ARP_ADD);
}
if_addr_runlock(ifp);
}
/**
* irdma_add_ipv4_addr - add ipv4 address to the hw arp table
* @iwdev: irdma device
* @ifp: interface network device pointer
*/
static void
irdma_add_ipv4_addr(struct irdma_device *iwdev, struct ifnet *ifp)
{
struct ifaddr *ifa;
struct sockaddr_in *sin;
u32 ip_addr[4] = {};
u8 *mac_addr;
if_addr_rlock(ifp);
IRDMA_TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
sin = (struct sockaddr_in *)ifa->ifa_addr;
if (sin->sin_family != AF_INET)
continue;
ip_addr[0] = ntohl(sin->sin_addr.s_addr);
mac_addr = IF_LLADDR(ifp);
printf("%s:%d IP=%d.%d.%d.%d, MAC=%02x:%02x:%02x:%02x:%02x:%02x\n",
__func__, __LINE__,
ip_addr[0] >> 24,
(ip_addr[0] >> 16) & 0xFF,
(ip_addr[0] >> 8) & 0xFF,
ip_addr[0] & 0xFF,
mac_addr[0], mac_addr[1], mac_addr[2],
mac_addr[3], mac_addr[4], mac_addr[5]);
irdma_manage_arp_cache(iwdev->rf, mac_addr, ip_addr,
IRDMA_ARP_ADD);
}
if_addr_runlock(ifp);
}
/**
* irdma_add_ip - add ip addresses
* @iwdev: irdma device
*
* Add ipv4/ipv6 addresses to the arp cache
*/
void
irdma_add_ip(struct irdma_device *iwdev)
{
struct ifnet *ifp = iwdev->netdev;
struct ifnet *ifv;
int i;
irdma_add_ipv4_addr(iwdev, ifp);
irdma_add_ipv6_addr(iwdev, ifp);
for (i = 0; ifp->if_vlantrunk != NULL && i < VLAN_N_VID; ++i) {
ifv = VLAN_DEVAT(ifp, i);
if (!ifv)
continue;
irdma_add_ipv4_addr(iwdev, ifv);
irdma_add_ipv6_addr(iwdev, ifv);
}
}
static void
irdma_ifaddrevent_handler(void *arg, struct ifnet *ifp, struct ifaddr *ifa, int event)
{
struct irdma_pci_f *rf = arg;
struct ifnet *ifv = NULL;
struct sockaddr_in *sin;
struct epoch_tracker et;
int arp_index = 0, i = 0;
u32 ip[4] = {};
if (!ifa || !ifa->ifa_addr || !ifp)
return;
if (rf->iwdev->netdev != ifp) {
for (i = 0; rf->iwdev->netdev->if_vlantrunk != NULL && i < VLAN_N_VID; ++i) {
NET_EPOCH_ENTER(et);
ifv = VLAN_DEVAT(rf->iwdev->netdev, i);
NET_EPOCH_EXIT(et);
if (ifv == ifp)
break;
}
if (ifv != ifp)
return;
}
sin = (struct sockaddr_in *)ifa->ifa_addr;
switch (event) {
case IFADDR_EVENT_ADD:
if (sin->sin_family == AF_INET)
irdma_add_ipv4_addr(rf->iwdev, ifp);
else if (sin->sin_family == AF_INET6)
irdma_add_ipv6_addr(rf->iwdev, ifp);
break;
case IFADDR_EVENT_DEL:
if (sin->sin_family == AF_INET) {
ip[0] = ntohl(sin->sin_addr.s_addr);
} else if (sin->sin_family == AF_INET6) {
irdma_copy_ip_ntohl(ip, (u32 *)&((struct sockaddr_in6 *)sin)->sin6_addr);
} else {
break;
}
for_each_set_bit(arp_index, rf->allocated_arps, rf->arp_table_size) {
if (!memcmp(rf->arp_table[arp_index].ip_addr, ip, sizeof(ip))) {
irdma_manage_arp_cache(rf, rf->arp_table[arp_index].mac_addr,
rf->arp_table[arp_index].ip_addr,
IRDMA_ARP_DELETE);
}
}
break;
default:
break;
}
}
void
irdma_reg_ipaddr_event_cb(struct irdma_pci_f *rf)
{
rf->irdma_ifaddr_event = EVENTHANDLER_REGISTER(ifaddr_event_ext,
irdma_ifaddrevent_handler,
rf,
EVENTHANDLER_PRI_ANY);
}
void
irdma_dereg_ipaddr_event_cb(struct irdma_pci_f *rf)
{
EVENTHANDLER_DEREGISTER(ifaddr_event_ext, rf->irdma_ifaddr_event);
}
static int
irdma_get_route_ifp(struct sockaddr *dst_sin, struct ifnet *netdev,
struct ifnet **ifp, struct sockaddr **nexthop, bool *gateway)
{
struct nhop_object *nh;
if (dst_sin->sa_family == AF_INET6)
nh = fib6_lookup(RT_DEFAULT_FIB, &((struct sockaddr_in6 *)dst_sin)->sin6_addr, 0, NHR_NONE, 0);
else
nh = fib4_lookup(RT_DEFAULT_FIB, ((struct sockaddr_in *)dst_sin)->sin_addr, 0, NHR_NONE, 0);
if (!nh || (nh->nh_ifp != netdev &&
rdma_vlan_dev_real_dev(nh->nh_ifp) != netdev))
goto rt_not_found;
*gateway = (nh->nh_flags & NHF_GATEWAY) ? true : false;
*nexthop = (*gateway) ? &nh->gw_sa : dst_sin;
*ifp = nh->nh_ifp;
return 0;
rt_not_found:
pr_err("irdma: route not found\n");
return -ENETUNREACH;
}
/**
* irdma_get_dst_mac - get destination mac address
* @cm_node: connection's node
* @dst_sin: destination address information
* @dst_mac: mac address array to return
*/
int
irdma_get_dst_mac(struct irdma_cm_node *cm_node, struct sockaddr *dst_sin, u8 *dst_mac)
{
struct ifnet *netdev = cm_node->iwdev->netdev;
#ifdef VIMAGE
struct rdma_cm_id *rdma_id = (struct rdma_cm_id *)cm_node->cm_id->context;
struct vnet *vnet = rdma_id->route.addr.dev_addr.net;
#endif
struct ifnet *ifp;
struct llentry *lle;
struct sockaddr *nexthop;
struct epoch_tracker et;
int err;
bool gateway;
NET_EPOCH_ENTER(et);
CURVNET_SET_QUIET(vnet);
err = irdma_get_route_ifp(dst_sin, netdev, &ifp, &nexthop, &gateway);
if (err)
goto get_route_fail;
if (dst_sin->sa_family == AF_INET) {
err = arpresolve(ifp, gateway, NULL, nexthop, dst_mac, NULL, &lle);
} else if (dst_sin->sa_family == AF_INET6) {
err = nd6_resolve(ifp, gateway, NULL, nexthop, dst_mac, NULL, &lle);
} else {
err = -EPROTONOSUPPORT;
}
get_route_fail:
CURVNET_RESTORE();
NET_EPOCH_EXIT(et);
if (err) {
pr_err("failed to resolve neighbor address (err=%d)\n",
err);
return -ENETUNREACH;
}
return 0;
}
/**
* irdma_addr_resolve_neigh - resolve neighbor address
* @cm_node: connection's node
* @dst_ip: remote ip address
* @arpindex: if there is an arp entry
*/
int
irdma_addr_resolve_neigh(struct irdma_cm_node *cm_node,
u32 dst_ip, int arpindex)
{
struct irdma_device *iwdev = cm_node->iwdev;
struct sockaddr_in dst_sin = {};
int err;
u32 ip[4] = {};
u8 dst_mac[MAX_ADDR_LEN];
dst_sin.sin_len = sizeof(dst_sin);
dst_sin.sin_family = AF_INET;
dst_sin.sin_port = 0;
dst_sin.sin_addr.s_addr = htonl(dst_ip);
err = irdma_get_dst_mac(cm_node, (struct sockaddr *)&dst_sin, dst_mac);
if (err)
return arpindex;
ip[0] = dst_ip;
return irdma_add_arp(iwdev->rf, ip, dst_mac);
}
/**
* irdma_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address
* @cm_node: connection's node
* @dest: remote ip address
* @arpindex: if there is an arp entry
*/
int
irdma_addr_resolve_neigh_ipv6(struct irdma_cm_node *cm_node,
u32 *dest, int arpindex)
{
struct irdma_device *iwdev = cm_node->iwdev;
struct sockaddr_in6 dst_addr = {};
int err;
u8 dst_mac[MAX_ADDR_LEN];
dst_addr.sin6_family = AF_INET6;
dst_addr.sin6_len = sizeof(dst_addr);
dst_addr.sin6_scope_id = iwdev->netdev->if_index;
irdma_copy_ip_htonl(dst_addr.sin6_addr.__u6_addr.__u6_addr32, dest);
err = irdma_get_dst_mac(cm_node, (struct sockaddr *)&dst_addr, dst_mac);
if (err)
return arpindex;
return irdma_add_arp(iwdev->rf, dest, dst_mac);
}
int
irdma_resolve_neigh_lpb_chk(struct irdma_device *iwdev, struct irdma_cm_node *cm_node,
struct irdma_cm_info *cm_info)
{
int arpindex;
int oldarpindex;
if ((cm_node->ipv4 &&
irdma_ipv4_is_lpb(cm_node->loc_addr[0], cm_node->rem_addr[0])) ||
(!cm_node->ipv4 &&
irdma_ipv6_is_lpb(cm_node->loc_addr, cm_node->rem_addr))) {
cm_node->do_lpb = true;
arpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr,
NULL,
IRDMA_ARP_RESOLVE);
} else {
oldarpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr,
NULL,
IRDMA_ARP_RESOLVE);
if (cm_node->ipv4)
arpindex = irdma_addr_resolve_neigh(cm_node,
cm_info->rem_addr[0],
oldarpindex);
else
arpindex = irdma_addr_resolve_neigh_ipv6(cm_node,
cm_info->rem_addr,
oldarpindex);
}
return arpindex;
}
/**
* irdma_add_handler - add a handler to the list
* @hdl: handler to be added to the handler list
*/
void
irdma_add_handler(struct irdma_handler *hdl)
{
unsigned long flags;
spin_lock_irqsave(&irdma_handler_lock, flags);
list_add(&hdl->list, &irdma_handlers);
spin_unlock_irqrestore(&irdma_handler_lock, flags);
}
/**
* irdma_del_handler - delete a handler from the list
* @hdl: handler to be deleted from the handler list
*/
void
irdma_del_handler(struct irdma_handler *hdl)
{
unsigned long flags;
spin_lock_irqsave(&irdma_handler_lock, flags);
list_del(&hdl->list);
spin_unlock_irqrestore(&irdma_handler_lock, flags);
}
/**
* irdma_set_rf_user_cfg_params - apply user configurable settings
* @rf: RDMA PCI function
*/
void
irdma_set_rf_user_cfg_params(struct irdma_pci_f *rf)
{
int en_rem_endpoint_trk = 0;
int limits_sel = 4;
rf->en_rem_endpoint_trk = en_rem_endpoint_trk;
rf->limits_sel = limits_sel;
rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
/* Enable DCQCN algorithm by default */
rf->dcqcn_ena = true;
}
/**
* irdma_sysctl_dcqcn_update - handle dcqcn_ena sysctl update
* @arg1: pointer to rf
* @arg2: unused
* @oidp: sysctl oid structure
* @req: sysctl request pointer
*/
static int
irdma_sysctl_dcqcn_update(SYSCTL_HANDLER_ARGS)
{
struct irdma_pci_f *rf = (struct irdma_pci_f *)arg1;
int ret;
u8 dcqcn_ena = rf->dcqcn_ena;
ret = sysctl_handle_8(oidp, &dcqcn_ena, 0, req);
if ((ret) || (req->newptr == NULL))
return ret;
if (dcqcn_ena == 0)
rf->dcqcn_ena = false;
else
rf->dcqcn_ena = true;
return 0;
}
/**
* irdma_dcqcn_tunables_init - create tunables for dcqcn settings
* @rf: RDMA PCI function
*
* Create DCQCN related sysctls for the driver.
 * dcqcn_ena is a writable setting, applied at the next QP creation or
 * context setting.
 * All other settings are of RDTUN type (read on driver load) and apply
 * only to CQP creation.
*/
void
irdma_dcqcn_tunables_init(struct irdma_pci_f *rf)
{
struct sysctl_oid_list *irdma_sysctl_oid_list;
irdma_sysctl_oid_list = SYSCTL_CHILDREN(rf->tun_info.irdma_sysctl_tree);
SYSCTL_ADD_PROC(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
OID_AUTO, "dcqcn_enable", CTLFLAG_RW | CTLTYPE_U8, rf, 0,
irdma_sysctl_dcqcn_update, "A",
"enables DCQCN algorithm for RoCEv2 on all ports, default=true");
SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
OID_AUTO, "dcqcn_cc_cfg_valid", CTLFLAG_RDTUN,
&rf->dcqcn_params.cc_cfg_valid, 0,
"set DCQCN parameters to be valid, default=false");
rf->dcqcn_params.min_dec_factor = 1;
SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
OID_AUTO, "dcqcn_min_dec_factor", CTLFLAG_RDTUN,
&rf->dcqcn_params.min_dec_factor, 0,
"set minimum percentage factor by which tx rate can be changed for CNP, Range: 1-100, default=1");
SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
OID_AUTO, "dcqcn_min_rate_MBps", CTLFLAG_RDTUN,
&rf->dcqcn_params.min_rate, 0,
"set minimum rate limit value, in MBits per second, default=0");
SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
OID_AUTO, "dcqcn_F", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_f, 0,
"set number of times to stay in each stage of bandwidth recovery, default=0");
SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
OID_AUTO, "dcqcn_T", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_t, 0,
"set number of usecs that should elapse before increasing the CWND in DCQCN mode, default=0");
SYSCTL_ADD_U32(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
OID_AUTO, "dcqcn_B", CTLFLAG_RDTUN, &rf->dcqcn_params.dcqcn_b, 0,
"set number of MSS to add to the congestion window in additive increase mode, default=0");
SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
OID_AUTO, "dcqcn_rai_factor", CTLFLAG_RDTUN,
&rf->dcqcn_params.rai_factor, 0,
"set number of MSS to add to the congestion window in additive increase mode, default=0");
SYSCTL_ADD_U16(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
OID_AUTO, "dcqcn_hai_factor", CTLFLAG_RDTUN,
&rf->dcqcn_params.hai_factor, 0,
"set number of MSS to add to the congestion window in hyperactive increase mode, default=0");
SYSCTL_ADD_U32(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
OID_AUTO, "dcqcn_rreduce_mperiod", CTLFLAG_RDTUN,
&rf->dcqcn_params.rreduce_mperiod, 0,
"set minimum time between 2 consecutive rate reductions for a single flow, default=0");
}
/**
* irdma_dmamap_cb - callback for bus_dmamap_load
*/
static void
irdma_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
{
if (error)
return;
*(bus_addr_t *) arg = segs->ds_addr;
return;
}
/**
* irdma_allocate_dma_mem - allocate dma memory
* @hw: pointer to hw structure
* @mem: structure holding memory information
* @size: requested size
* @alignment: requested alignment
*/
void *
irdma_allocate_dma_mem(struct irdma_hw *hw, struct irdma_dma_mem *mem,
u64 size, u32 alignment)
{
struct irdma_dev_ctx *dev_ctx = (struct irdma_dev_ctx *)hw->dev_context;
device_t dev = dev_ctx->dev;
void *va;
int ret;
ret = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
alignment, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
size, /* maxsize */
1, /* nsegments */
size, /* maxsegsize */
BUS_DMA_ALLOCNOW, /* flags */
NULL, /* lockfunc */
NULL, /* lockfuncarg */
&mem->tag);
if (ret != 0) {
device_printf(dev, "%s: bus_dma_tag_create failed, error %u\n",
__func__, ret);
goto fail_0;
}
ret = bus_dmamem_alloc(mem->tag, (void **)&va,
BUS_DMA_NOWAIT | BUS_DMA_ZERO, &mem->map);
if (ret != 0) {
device_printf(dev, "%s: bus_dmamem_alloc failed, error %u\n",
__func__, ret);
goto fail_1;
}
ret = bus_dmamap_load(mem->tag, mem->map, va, size,
irdma_dmamap_cb, &mem->pa, BUS_DMA_NOWAIT);
if (ret != 0) {
device_printf(dev, "%s: bus_dmamap_load failed, error %u\n",
__func__, ret);
goto fail_2;
}
mem->nseg = 1;
mem->size = size;
bus_dmamap_sync(mem->tag, mem->map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
return va;
fail_2:
bus_dmamem_free(mem->tag, va, mem->map);
fail_1:
bus_dma_tag_destroy(mem->tag);
fail_0:
mem->map = NULL;
mem->tag = NULL;
return NULL;
}
/**
* irdma_free_dma_mem - Memory free helper fn
* @hw: pointer to hw structure
* @mem: ptr to mem struct to free
*/
int
irdma_free_dma_mem(struct irdma_hw *hw, struct irdma_dma_mem *mem)
{
if (!mem)
return -EINVAL;
bus_dmamap_sync(mem->tag, mem->map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(mem->tag, mem->map);
if (!mem->va)
return -ENOMEM;
bus_dmamem_free(mem->tag, mem->va, mem->map);
bus_dma_tag_destroy(mem->tag);
mem->va = NULL;
return 0;
}
inline void
irdma_prm_rem_bitmapmem(struct irdma_hw *hw, struct irdma_chunk *chunk)
{
kfree(chunk->bitmapmem.va);
}


@ -0,0 +1,251 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2021 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#ifndef FBSD_KCOMPAT_H
#define FBSD_KCOMPAT_H
#include "ice_rdma.h"
#define TASKLET_DATA_TYPE unsigned long
#define TASKLET_FUNC_TYPE void (*)(TASKLET_DATA_TYPE)
#define tasklet_setup(tasklet, callback) \
tasklet_init((tasklet), (TASKLET_FUNC_TYPE)(callback), \
(TASKLET_DATA_TYPE)(tasklet))
#define from_tasklet(var, callback_tasklet, tasklet_fieldname) \
container_of(callback_tasklet, typeof(*var), tasklet_fieldname)
#define IRDMA_SET_RDMA_OBJ_SIZE(ib_struct, drv_struct, member) \
(sizeof(struct drv_struct) + \
BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) + \
BUILD_BUG_ON_ZERO( \
!__same_type(((struct drv_struct *)NULL)->member, \
struct ib_struct)))
#define set_ibdev_dma_device(ibdev, dev) \
ibdev.dma_device = (dev)
#define set_max_sge(props, rf) \
((props)->max_sge = (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags)
#define kc_set_props_ip_gid_caps(props) \
((props)->port_cap_flags |= IB_PORT_IP_BASED_GIDS)
#define rdma_query_gid(ibdev, port, index, gid) \
ib_get_cached_gid(ibdev, port, index, gid, NULL)
#define kmap(pg) page_address(pg)
#define kmap_local_page(pg) page_address(pg)
#define kunmap(pg)
#define kunmap_local(pg)
#define kc_free_lsmm_dereg_mr(iwdev, iwqp) \
((iwdev)->ibdev.dereg_mr((iwqp)->lsmm_mr, NULL))
#define IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION IB_CQ_FLAGS_TIMESTAMP_COMPLETION
#define kc_irdma_destroy_qp(ibqp, udata) irdma_destroy_qp(ibqp, udata)
#ifndef IB_QP_ATTR_STANDARD_BITS
#define IB_QP_ATTR_STANDARD_BITS GENMASK(20, 0)
#endif
#define IRDMA_QOS_MODE_VLAN 0x0
#define IRDMA_QOS_MODE_DSCP 0x1
void kc_set_roce_uverbs_cmd_mask(struct irdma_device *iwdev);
void kc_set_rdma_uverbs_cmd_mask(struct irdma_device *iwdev);
struct irdma_tunable_info {
struct sysctl_ctx_list irdma_sysctl_ctx;
struct sysctl_oid *irdma_sysctl_tree;
u8 roce_ena;
};
static inline int irdma_iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
u16 *pkey)
{
*pkey = 0;
return 0;
}
static inline int cq_validate_flags(u32 flags, u8 hw_rev)
{
/* GEN1 does not support CQ create flags */
if (hw_rev == IRDMA_GEN_1)
return flags ? -EOPNOTSUPP : 0;
return flags & ~IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION ? -EOPNOTSUPP : 0;
}
static inline u64 *irdma_next_pbl_addr(u64 *pbl, struct irdma_pble_info **pinfo,
u32 *idx)
{
*idx += 1;
if (!(*pinfo) || *idx != (*pinfo)->cnt)
return ++pbl;
*idx = 0;
(*pinfo)++;
return (*pinfo)->addr;
}
int irdma_create_cq(struct ib_cq *ibcq,
const struct ib_cq_init_attr *attr,
struct ib_udata *udata);
struct ib_qp *irdma_create_qp(struct ib_pd *ibpd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata);
int irdma_create_ah(struct ib_ah *ib_ah,
struct ib_ah_attr *attr, u32 flags,
struct ib_udata *udata);
int irdma_create_ah_stub(struct ib_ah *ib_ah,
struct ib_ah_attr *attr, u32 flags,
struct ib_udata *udata);
void irdma_ether_copy(u8 *dmac, struct ib_ah_attr *attr);
void irdma_destroy_ah(struct ib_ah *ibah, u32 flags);
void irdma_destroy_ah_stub(struct ib_ah *ibah, u32 flags);
int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
void irdma_get_eth_speed_and_width(u32 link_speed, u8 *active_speed,
u8 *active_width);
enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev,
u8 port_num);
int irdma_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
struct ib_port_immutable *immutable);
int irdma_iw_port_immutable(struct ib_device *ibdev, u8 port_num,
struct ib_port_immutable *immutable);
int irdma_query_gid(struct ib_device *ibdev, u8 port, int index,
union ib_gid *gid);
int irdma_query_gid_roce(struct ib_device *ibdev, u8 port, int index,
union ib_gid *gid);
int irdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
u16 *pkey);
int irdma_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props);
struct rdma_hw_stats *irdma_alloc_hw_stats(struct ib_device *ibdev, u8 port_num);
int irdma_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *stats, u8 port_num,
int index);
int irdma_register_qset(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *tc_node);
void irdma_unregister_qset(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *tc_node);
void ib_unregister_device(struct ib_device *ibdev);
void irdma_disassociate_ucontext(struct ib_ucontext *context);
int kc_irdma_set_roce_cm_info(struct irdma_qp *iwqp,
struct ib_qp_attr *attr,
u16 *vlan_id);
struct irdma_device *kc_irdma_get_device(struct ifnet *netdev);
void kc_irdma_put_device(struct irdma_device *iwdev);
void kc_set_loc_seq_num_mss(struct irdma_cm_node *cm_node);
void irdma_get_dev_fw_str(struct ib_device *dev, char *str, size_t str_len);
int irdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
struct ib_port_modify *props);
int irdma_get_dst_mac(struct irdma_cm_node *cm_node, struct sockaddr *dst_sin,
u8 *dst_mac);
int irdma_resolve_neigh_lpb_chk(struct irdma_device *iwdev, struct irdma_cm_node *cm_node,
struct irdma_cm_info *cm_info);
int irdma_addr_resolve_neigh(struct irdma_cm_node *cm_node, u32 dst_ip,
int arpindex);
int irdma_addr_resolve_neigh_ipv6(struct irdma_cm_node *cm_node, u32 *dest,
int arpindex);
void irdma_dcqcn_tunables_init(struct irdma_pci_f *rf);
u32 irdma_create_stag(struct irdma_device *iwdev);
void irdma_free_stag(struct irdma_device *iwdev, u32 stag);
struct irdma_mr;
struct irdma_cq;
struct irdma_cq_buf;
struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg, struct ib_udata *udata);
int irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr);
struct ib_mw *irdma_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
struct ib_udata *udata);
int irdma_hw_alloc_stag(struct irdma_device *iwdev, struct irdma_mr *iwmr);
void irdma_cq_free_rsrc(struct irdma_pci_f *rf, struct irdma_cq *iwcq);
int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
struct irdma_device *iwdev);
void irdma_setup_virt_qp(struct irdma_device *iwdev,
struct irdma_qp *iwqp,
struct irdma_qp_init_info *init_info);
int irdma_setup_kmode_qp(struct irdma_device *iwdev,
struct irdma_qp *iwqp,
struct irdma_qp_init_info *info,
struct ib_qp_init_attr *init_attr);
void irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
struct irdma_qp_host_ctx_info *ctx_info);
void irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
struct irdma_qp_host_ctx_info *ctx_info);
int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp);
void irdma_dealloc_push_page(struct irdma_pci_f *rf,
struct irdma_sc_qp *qp);
int irdma_process_resize_list(struct irdma_cq *iwcq, struct irdma_device *iwdev,
struct irdma_cq_buf *lcqe_buf);
void irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
int irdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
void irdma_dealloc_ucontext(struct ib_ucontext *context);
int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
void irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
int irdma_add_gid(struct ib_device *, u8, unsigned int, const union ib_gid *,
const struct ib_gid_attr *, void **);
int irdma_del_gid(struct ib_device *, u8, unsigned int, void **);
struct ib_device *ib_device_get_by_netdev(struct ifnet *ndev, int driver_id);
void ib_device_put(struct ib_device *device);
void ib_unregister_device_put(struct ib_device *device);
enum ib_mtu ib_mtu_int_to_enum(int mtu);
struct irdma_pbl *irdma_get_pbl(unsigned long va, struct list_head *pbl_list);
void irdma_clean_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq);
void irdma_remove_push_mmap_entries(struct irdma_qp *iwqp);
struct irdma_ucontext;
void irdma_del_memlist(struct irdma_mr *iwmr, struct irdma_ucontext *ucontext);
void irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl,
enum irdma_pble_level level);
void irdma_reg_ipaddr_event_cb(struct irdma_pci_f *rf);
void irdma_dereg_ipaddr_event_cb(struct irdma_pci_f *rf);
/* Introduced in this series https://lore.kernel.org/linux-rdma/0-v2-270386b7e60b+28f4-umem_1_jgg@nvidia.com/
 * An irdma helper that does the same for older code, with the difference
 * that the iova is passed in explicitly rather than derived from
 * umem->iova.
*/
static inline size_t irdma_ib_umem_num_dma_blocks(struct ib_umem *umem, unsigned long pgsz, u64 iova)
{
/* some older OFED distros do not have ALIGN_DOWN */
#ifndef ALIGN_DOWN
#define ALIGN_DOWN(x, a) ALIGN((x) - ((a) - 1), (a))
#endif
return (size_t)((ALIGN(iova + umem->length, pgsz) -
ALIGN_DOWN(iova, pgsz))) / pgsz;
}
#endif /* FBSD_KCOMPAT_H */


@ -0,0 +1,92 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2019 - 2020 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#ifndef ICE_DEVIDS_H
#define ICE_DEVIDS_H
#define PCI_VENDOR_ID_INTEL 0x8086
/* Device IDs */
/* Intel(R) Ethernet Connection E823-L for backplane */
#define ICE_DEV_ID_E823L_BACKPLANE 0x124C
/* Intel(R) Ethernet Connection E823-L for SFP */
#define ICE_DEV_ID_E823L_SFP 0x124D
/* Intel(R) Ethernet Connection E823-L/X557-AT 10GBASE-T */
#define ICE_DEV_ID_E823L_10G_BASE_T 0x124E
/* Intel(R) Ethernet Connection E823-L 1GbE */
#define ICE_DEV_ID_E823L_1GBE 0x124F
/* Intel(R) Ethernet Connection E823-L for QSFP */
#define ICE_DEV_ID_E823L_QSFP 0x151D
/* Intel(R) Ethernet Controller E810-C for backplane */
#define ICE_DEV_ID_E810C_BACKPLANE 0x1591
/* Intel(R) Ethernet Controller E810-C for QSFP */
#define ICE_DEV_ID_E810C_QSFP 0x1592
/* Intel(R) Ethernet Controller E810-C for SFP */
#define ICE_DEV_ID_E810C_SFP 0x1593
/* Intel(R) Ethernet Controller E810-XXV for backplane */
#define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599
/* Intel(R) Ethernet Controller E810-XXV for QSFP */
#define ICE_DEV_ID_E810_XXV_QSFP 0x159A
/* Intel(R) Ethernet Controller E810-XXV for SFP */
#define ICE_DEV_ID_E810_XXV_SFP 0x159B
/* Intel(R) Ethernet Connection E823-C for backplane */
#define ICE_DEV_ID_E823C_BACKPLANE 0x188A
/* Intel(R) Ethernet Connection E823-C for QSFP */
#define ICE_DEV_ID_E823C_QSFP 0x188B
/* Intel(R) Ethernet Connection E823-C for SFP */
#define ICE_DEV_ID_E823C_SFP 0x188C
/* Intel(R) Ethernet Connection E823-C/X557-AT 10GBASE-T */
#define ICE_DEV_ID_E823C_10G_BASE_T 0x188D
/* Intel(R) Ethernet Connection E823-C 1GbE */
#define ICE_DEV_ID_E823C_SGMII 0x188E
/* Intel(R) Ethernet Connection C822N for backplane */
#define ICE_DEV_ID_C822N_BACKPLANE 0x1890
/* Intel(R) Ethernet Connection C822N for QSFP */
#define ICE_DEV_ID_C822N_QSFP 0x1891
/* Intel(R) Ethernet Connection C822N for SFP */
#define ICE_DEV_ID_C822N_SFP 0x1892
/* Intel(R) Ethernet Connection E822-C/X557-AT 10GBASE-T */
#define ICE_DEV_ID_E822C_10G_BASE_T 0x1893
/* Intel(R) Ethernet Connection E822-C 1GbE */
#define ICE_DEV_ID_E822C_SGMII 0x1894
/* Intel(R) Ethernet Connection E822-L for backplane */
#define ICE_DEV_ID_E822L_BACKPLANE 0x1897
/* Intel(R) Ethernet Connection E822-L for SFP */
#define ICE_DEV_ID_E822L_SFP 0x1898
/* Intel(R) Ethernet Connection E822-L/X557-AT 10GBASE-T */
#define ICE_DEV_ID_E822L_10G_BASE_T 0x1899
/* Intel(R) Ethernet Connection E822-L 1GbE */
#define ICE_DEV_ID_E822L_SGMII 0x189A
#endif /* ICE_DEVIDS_H */

sys/dev/irdma/icrdma.c Normal file

@ -0,0 +1,704 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2021 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <machine/bus.h>
#include <linux/device.h>
#include <sys/rman.h>
#include "ice_rdma.h"
#include "irdma_main.h"
#include "icrdma_hw.h"
#include "irdma_if.h"
#include "irdma_di_if.h"
/**
* Driver version
*/
char irdma_driver_version[] = "0.0.51-k";
#define pf_if_d(peer) ((peer)->ifp->if_dunit)
/**
* irdma_init_tunable - prepare tunables
* @rf: RDMA PCI function
* @pf_id: ID of the PF
*/
static void
irdma_init_tunable(struct irdma_pci_f *rf, uint8_t pf_id)
{
struct sysctl_oid_list *irdma_sysctl_oid_list;
char pf_name[16];
snprintf(pf_name, sizeof(pf_name), "irdma%d", pf_id);
sysctl_ctx_init(&rf->tun_info.irdma_sysctl_ctx);
rf->tun_info.irdma_sysctl_tree = SYSCTL_ADD_NODE(&rf->tun_info.irdma_sysctl_ctx,
SYSCTL_STATIC_CHILDREN(_dev),
OID_AUTO, pf_name, CTLFLAG_RD,
NULL, "");
irdma_sysctl_oid_list = SYSCTL_CHILDREN(rf->tun_info.irdma_sysctl_tree);
/*
* debug mask setting
*/
SYSCTL_ADD_S32(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list,
OID_AUTO, "debug", CTLFLAG_RWTUN, &rf->sc_dev.debug_mask,
0, "irdma debug");
/*
* RoCEv2/iWARP protocol selection; RoCEv2 is the default mode
*/
rf->tun_info.roce_ena = 1;
SYSCTL_ADD_U8(&rf->tun_info.irdma_sysctl_ctx, irdma_sysctl_oid_list, OID_AUTO,
"roce_enable", CTLFLAG_RDTUN, &rf->tun_info.roce_ena, 0,
"RoCEv2 mode enable");
rf->protocol_used = IRDMA_IWARP_PROTOCOL_ONLY;
if (rf->tun_info.roce_ena == 1)
rf->protocol_used = IRDMA_ROCE_PROTOCOL_ONLY;
else if (rf->tun_info.roce_ena != 0)
printf("%s:%d wrong roce_enable value (%d), using iWARP\n",
__func__, __LINE__, rf->tun_info.roce_ena);
printf("%s:%d protocol: %s, roce_enable value: %d\n", __func__, __LINE__,
(rf->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) ? "iWARP" : "RoCEv2",
rf->tun_info.roce_ena);
irdma_dcqcn_tunables_init(rf);
}
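Because roce_enable is created with CTLFLAG_RDTUN, it only takes effect as a loader tunable read before the module attaches, whereas the debug mask (CTLFLAG_RWTUN) can also be changed at runtime. A hedged usage sketch, assuming PF unit 0 (the node name follows the irdma%d pattern above):

# /boot/loader.conf: select iWARP instead of the RoCEv2 default for PF 0
dev.irdma0.roce_enable="0"
# at runtime: raise the debug mask for the same PF
sysctl dev.irdma0.debug=1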
/**
* irdma_find_handler - obtain hdl object to identify pf
* @p_dev: the peer interface structure
*/
static struct irdma_handler *
irdma_find_handler(struct ice_rdma_peer *p_dev)
{
struct irdma_handler *hdl;
unsigned long flags;
spin_lock_irqsave(&irdma_handler_lock, flags);
list_for_each_entry(hdl, &irdma_handlers, list) {
if (!hdl)
continue;
if (!hdl->iwdev->rf->peer_info)
continue;
if (hdl->iwdev->rf->peer_info->dev == p_dev->dev) {
spin_unlock_irqrestore(&irdma_handler_lock, flags);
return hdl;
}
}
spin_unlock_irqrestore(&irdma_handler_lock, flags);
return NULL;
}
/**
* peer_to_iwdev - return iwdev based on peer
* @peer: the peer interface structure
*/
static struct irdma_device *
peer_to_iwdev(struct ice_rdma_peer *peer)
{
struct irdma_handler *hdl;
hdl = irdma_find_handler(peer);
if (!hdl) {
printf("%s:%d rdma handler not found\n", __func__, __LINE__);
return NULL;
}
return hdl->iwdev;
}
/**
* irdma_get_qos_info - save qos info from parameters to internal struct
* @l2params: destination, qos, tc, mtu info structure
* @qos_info: source, DCB settings structure
*/
static void
irdma_get_qos_info(struct irdma_l2params *l2params, struct ice_qos_params *qos_info)
{
int i;
l2params->num_tc = qos_info->num_tc;
l2params->num_apps = qos_info->num_apps;
l2params->vsi_prio_type = qos_info->vsi_priority_type;
l2params->vsi_rel_bw = qos_info->vsi_relative_bw;
for (i = 0; i < l2params->num_tc; i++) {
l2params->tc_info[i].egress_virt_up =
qos_info->tc_info[i].egress_virt_up;
l2params->tc_info[i].ingress_virt_up =
qos_info->tc_info[i].ingress_virt_up;
l2params->tc_info[i].prio_type = qos_info->tc_info[i].prio_type;
l2params->tc_info[i].rel_bw = qos_info->tc_info[i].rel_bw;
l2params->tc_info[i].tc_ctx = qos_info->tc_info[i].tc_ctx;
}
for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
l2params->up2tc[i] = qos_info->up2tc[i];
if (qos_info->pfc_mode == IRDMA_QOS_MODE_DSCP) {
l2params->dscp_mode = true;
memcpy(l2params->dscp_map, qos_info->dscp_map, sizeof(l2params->dscp_map));
}
printf("%s:%d: l2params settings:\n num_tc %d,\n num_apps %d,\n",
__func__, __LINE__, l2params->num_tc, l2params->num_apps);
printf(" vsi_prio_type %d,\n vsi_rel_bw %d,\n egress_virt_up:",
l2params->vsi_prio_type, l2params->vsi_rel_bw);
for (i = 0; i < l2params->num_tc; i++)
printf(" %d", l2params->tc_info[i].egress_virt_up);
printf("\n ingress_virt_up:");
for (i = 0; i < l2params->num_tc; i++)
printf(" %d", l2params->tc_info[i].ingress_virt_up);
printf("\n prio_type:");
for (i = 0; i < l2params->num_tc; i++)
printf(" %d", l2params->tc_info[i].prio_type);
printf("\n rel_bw:");
for (i = 0; i < l2params->num_tc; i++)
printf(" %d", l2params->tc_info[i].rel_bw);
printf("\n tc_ctx:");
for (i = 0; i < l2params->num_tc; i++)
printf(" %lu", l2params->tc_info[i].tc_ctx);
printf("\n up2tc:");
for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
printf(" %d", l2params->up2tc[i]);
printf(" dscp_mode: %d,\n", l2params->dscp_mode);
for (i = 0; i < IRDMA_DSCP_NUM_VAL; i++)
printf(" %d", l2params->dscp_map[i]);
printf("\n");
dump_struct(l2params, sizeof(*l2params), "l2params");
}
/**
* irdma_log_invalid_mtu - check mtu setting validity
* @mtu: mtu value
* @dev: hardware control device structure
*/
static void
irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev)
{
if (mtu < IRDMA_MIN_MTU_IPV4)
irdma_dev_warn(dev, "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 576 for IPv4\n", mtu);
else if (mtu < IRDMA_MIN_MTU_IPV6)
irdma_dev_warn(dev, "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 1280 for IPv6\\n", mtu);
}
/**
* irdma_event_handler - handle events from the LAN driver
* @peer: the peer interface structure
* @event: event info structure
*/
static void
irdma_event_handler(struct ice_rdma_peer *peer, struct ice_rdma_event *event)
{
struct irdma_device *iwdev;
struct irdma_l2params l2params = {};
printf("%s:%d event_handler %s (%x) on pf %d (%d)\n", __func__, __LINE__,
(event->type == 1) ? "LINK CHANGE" :
(event->type == 2) ? "MTU CHANGE" :
(event->type == 3) ? "TC CHANGE" : "UNKNOWN",
event->type, peer->pf_id, pf_if_d(peer));
iwdev = peer_to_iwdev(peer);
if (!iwdev) {
printf("%s:%d rdma device not found\n", __func__, __LINE__);
return;
}
switch (event->type) {
case ICE_RDMA_EVENT_LINK_CHANGE:
printf("%s:%d PF: %x (%x), state: %d, speed: %lu\n", __func__, __LINE__,
peer->pf_id, pf_if_d(peer), event->linkstate, event->baudrate);
break;
case ICE_RDMA_EVENT_MTU_CHANGE:
if (iwdev->vsi.mtu != event->mtu) {
l2params.mtu = event->mtu;
l2params.mtu_changed = true;
irdma_log_invalid_mtu(l2params.mtu, &iwdev->rf->sc_dev);
irdma_change_l2params(&iwdev->vsi, &l2params);
}
break;
case ICE_RDMA_EVENT_TC_CHANGE:
/*
* 1. Check whether this is the pre or post stage. 2. Check whether a TC change is already in progress.
*/
if (event->prep == iwdev->vsi.tc_change_pending) {
printf("%s:%d can't process %s TC change if TC change is %spending\n",
__func__, __LINE__,
event->prep ? "pre" : "post",
event->prep ? " " : "not ");
goto done;
}
if (event->prep) {
iwdev->vsi.tc_change_pending = true;
irdma_sc_suspend_resume_qps(&iwdev->vsi, IRDMA_OP_SUSPEND);
wait_event_timeout(iwdev->suspend_wq,
!atomic_read(&iwdev->vsi.qp_suspend_reqs),
IRDMA_EVENT_TIMEOUT_MS * 10);
irdma_ws_reset(&iwdev->vsi);
printf("%s:%d TC change preparation done\n", __func__, __LINE__);
} else {
l2params.tc_changed = true;
irdma_get_qos_info(&l2params, &event->port_qos);
if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
iwdev->dcb_vlan_mode = l2params.num_tc > 1 && !l2params.dscp_mode;
irdma_check_fc_for_tc_update(&iwdev->vsi, &l2params);
irdma_change_l2params(&iwdev->vsi, &l2params);
printf("%s:%d TC change done\n", __func__, __LINE__);
}
break;
case ICE_RDMA_EVENT_CRIT_ERR:
printf("%s:%d event type received: %d\n", __func__, __LINE__, event->type);
break;
default:
printf("%s:%d event type unsupported: %d\n", __func__, __LINE__, event->type);
}
done:
return;
}
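For context, the pre/post branches above imply a two-event contract with the LAN driver. A minimal caller-side sketch, assuming a kobj dispatch macro IRDMA_EVENT_HANDLER generated from irdma_if.h (by analogy with IRDMA_CLOSE/IRDMA_REMOVE used later in this file; the macro name is an assumption):

/* Hypothetical ice-side sequence for a TC change; names are assumptions. */
static void
example_tc_change(struct ice_rdma_peer *peer, struct ice_qos_params *new_qos)
{
	struct ice_rdma_event ev = {0};

	ev.type = ICE_RDMA_EVENT_TC_CHANGE;
	ev.prep = true;			/* phase 1: irdma suspends QPs */
	IRDMA_EVENT_HANDLER(peer, &ev);

	/* ... the LAN driver reprograms its scheduler here ... */

	ev.prep = false;		/* phase 2: irdma applies the new QoS map */
	ev.port_qos = *new_qos;
	IRDMA_EVENT_HANDLER(peer, &ev);
}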
/**
* irdma_link_change - Callback for link state change
* @peer: the peer interface structure
* @linkstate: state of the link
* @baudrate: speed of the link
*/
static void
irdma_link_change(struct ice_rdma_peer *peer, int linkstate, uint64_t baudrate)
{
printf("%s:%d PF: %x (%x), state: %d, speed: %lu\n", __func__, __LINE__,
peer->pf_id, pf_if_d(peer), linkstate, baudrate);
}
/**
* irdma_finalize_task - Finish open or close phase in a separate thread
* @context: instance holding peer and iwdev information
*
* Triggered from irdma_open or irdma_close to perform rt_init_hw or
* rt_deinit_hw respectively. Does registration and unregistration of
* the device.
*/
static void
irdma_finalize_task(void *context, int pending)
{
struct irdma_task_arg *task_arg = (struct irdma_task_arg *)context;
struct irdma_device *iwdev = task_arg->iwdev;
struct irdma_pci_f *rf = iwdev->rf;
struct ice_rdma_peer *peer = task_arg->peer;
struct irdma_l2params l2params = {{{0}}};
struct ice_rdma_request req = {0};
int status = 0;
if (iwdev->iw_status) {
irdma_debug(&rf->sc_dev, IRDMA_DEBUG_INIT, "Starting deferred closing %d (%d)\n",
rf->peer_info->pf_id, pf_if_d(peer));
irdma_dereg_ipaddr_event_cb(rf);
irdma_ib_unregister_device(iwdev);
req.type = ICE_RDMA_EVENT_VSI_FILTER_UPDATE;
req.enable_filter = false;
IRDMA_DI_REQ_HANDLER(peer, &req);
irdma_rt_deinit_hw(iwdev);
} else {
irdma_debug(&rf->sc_dev, IRDMA_DEBUG_INIT, "Starting deferred opening %d (%d)\n",
rf->peer_info->pf_id, pf_if_d(peer));
l2params.mtu = peer->mtu;
irdma_get_qos_info(&l2params, &peer->initial_qos_info);
if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
iwdev->dcb_vlan_mode = l2params.num_tc > 1 && !l2params.dscp_mode;
status = irdma_rt_init_hw(iwdev, &l2params);
if (status) {
irdma_pr_err("RT init failed %d\n", status);
ib_dealloc_device(&iwdev->ibdev);
return;
}
status = irdma_ib_register_device(iwdev);
if (status) {
irdma_pr_err("Registration failed %d\n", status);
irdma_rt_deinit_hw(iwdev);
ib_dealloc_device(&iwdev->ibdev);
return;
}
req.type = ICE_RDMA_EVENT_VSI_FILTER_UPDATE;
req.enable_filter = true;
IRDMA_DI_REQ_HANDLER(peer, &req);
irdma_reg_ipaddr_event_cb(rf);
irdma_debug(&rf->sc_dev, IRDMA_DEBUG_INIT, "Deferred opening finished %d (%d)\n",
rf->peer_info->pf_id, pf_if_d(peer));
}
}
/**
* irdma_open - Callback for operation open for RDMA device
* @peer: the new peer interface structure
*
* Callback implementing the RDMA_OPEN function. Called by the ice driver to
* notify the RDMA client driver that a new device has been initialized.
*/
static int
irdma_open(struct ice_rdma_peer *peer)
{
struct ice_rdma_event event = {0};
event.type = ICE_RDMA_EVENT_MTU_CHANGE;
event.mtu = peer->mtu;
irdma_event_handler(peer, &event);
return 0;
}
/**
* irdma_close - Callback to notify that a peer device is down
* @peer: the RDMA peer device being stopped
*
* Callback implementing the RDMA_CLOSE function. Called by the ice driver to
* notify the RDMA client driver that a peer device is being stopped.
*/
static int
irdma_close(struct ice_rdma_peer *peer)
{
/*
* This is called when the interface is brought down (ifconfig down). Kept for
* compatibility with ice; this event might be useful in the future.
*/
return 0;
}
/**
* irdma_alloc_pcidev - allocate memory for pcidev and populate data
* @peer: the new peer interface structure
* @rf: RDMA PCI function
*/
static int
irdma_alloc_pcidev(struct ice_rdma_peer *peer, struct irdma_pci_f *rf)
{
rf->pcidev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
if (!rf->pcidev) {
return -ENOMEM;
}
if (linux_pci_attach_device(rf->dev_ctx.dev, NULL, NULL, rf->pcidev))
return -ENOMEM;
return 0;
}
/**
* irdma_dealloc_pcidev - deallocate memory for pcidev
* @rf: RDMA PCI function
*/
static void
irdma_dealloc_pcidev(struct irdma_pci_f *rf)
{
linux_pci_detach_device(rf->pcidev);
kfree(rf->pcidev);
}
/**
* irdma_fill_device_info - assign initial values to rf variables
* @iwdev: irdma device
* @peer: the peer interface structure
*/
static void
irdma_fill_device_info(struct irdma_device *iwdev,
struct ice_rdma_peer *peer)
{
struct irdma_pci_f *rf = iwdev->rf;
rf->peer_info = peer;
rf->gen_ops.register_qset = irdma_register_qset;
rf->gen_ops.unregister_qset = irdma_unregister_qset;
rf->rdma_ver = IRDMA_GEN_2;
rf->sc_dev.hw_attrs.uk_attrs.hw_rev = IRDMA_GEN_2;
rf->rsrc_profile = IRDMA_HMC_PROFILE_DEFAULT;
rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
rf->check_fc = irdma_check_fc_for_qp;
irdma_set_rf_user_cfg_params(rf);
rf->default_vsi.vsi_idx = peer->pf_vsi_num;
rf->dev_ctx.dev = peer->dev;
rf->dev_ctx.mem_bus_space_tag = rman_get_bustag(peer->pci_mem);
rf->dev_ctx.mem_bus_space_handle = rman_get_bushandle(peer->pci_mem);
rf->dev_ctx.mem_bus_space_size = rman_get_size(peer->pci_mem);
rf->hw.dev_context = &rf->dev_ctx;
rf->hw.hw_addr = (u8 *)rman_get_virtual(peer->pci_mem);
rf->msix_count = peer->msix.count;
rf->msix_info.entry = peer->msix.base;
rf->msix_info.vector = peer->msix.count;
printf("%s:%d msix_info: %d %d %d\n", __func__, __LINE__,
rf->msix_count, rf->msix_info.entry, rf->msix_info.vector);
rf->iwdev = iwdev;
iwdev->netdev = peer->ifp;
iwdev->init_state = INITIAL_STATE;
iwdev->vsi_num = peer->pf_vsi_num;
iwdev->rcv_wnd = IRDMA_CM_DEFAULT_RCV_WND_SCALED;
iwdev->rcv_wscale = IRDMA_CM_DEFAULT_RCV_WND_SCALE;
iwdev->roce_cwnd = IRDMA_ROCE_CWND_DEFAULT;
iwdev->roce_ackcreds = IRDMA_ROCE_ACKCREDS_DEFAULT;
if (rf->protocol_used == IRDMA_ROCE_PROTOCOL_ONLY) {
iwdev->roce_mode = true;
}
}
/**
* irdma_probe - Callback to probe a new RDMA peer device
* @peer: the new peer interface structure
*
* Callback implementing the RDMA_PROBE function. Called by the ice driver to
* notify the RDMA client driver that a new device has been created
*/
static int
irdma_probe(struct ice_rdma_peer *peer)
{
struct irdma_device *iwdev;
struct irdma_pci_f *rf;
struct irdma_handler *hdl;
int err = 0;
irdma_pr_info("probe: irdma-%s peer=%p, peer->pf_id=%d, peer->ifp=%p, peer->ifp->if_dunit=%d, peer->pci_mem->r_bustag=%lx\n",
irdma_driver_version, peer, peer->pf_id, peer->ifp,
pf_if_d(peer), peer->pci_mem->r_bustag);
hdl = irdma_find_handler(peer);
if (hdl)
return -EBUSY;
hdl = kzalloc(sizeof(*hdl), GFP_KERNEL);
if (!hdl)
return -ENOMEM;
iwdev = (struct irdma_device *)ib_alloc_device(sizeof(*iwdev));
if (!iwdev) {
kfree(hdl);
return -ENOMEM;
}
iwdev->rf = kzalloc(sizeof(*rf), GFP_KERNEL);
if (!iwdev->rf) {
ib_dealloc_device(&iwdev->ibdev);
kfree(hdl);
return -ENOMEM;
}
hdl->iwdev = iwdev;
iwdev->hdl = hdl;
irdma_init_tunable(iwdev->rf, pf_if_d(peer));
irdma_fill_device_info(iwdev, peer);
rf = iwdev->rf;
if (irdma_alloc_pcidev(peer, rf)) {
err = -ENOMEM;
goto err_pcidev;
}
irdma_add_handler(hdl);
if (irdma_ctrl_init_hw(rf)) {
err = -EIO;
goto err_ctrl_init;
}
rf->dev_ctx.task_arg.peer = peer;
rf->dev_ctx.task_arg.iwdev = iwdev;
rf->dev_ctx.task_arg.peer = peer;
TASK_INIT(&hdl->deferred_task, 0, irdma_finalize_task, &rf->dev_ctx.task_arg);
hdl->deferred_tq = taskqueue_create_fast("irdma_defer",
M_NOWAIT, taskqueue_thread_enqueue,
&hdl->deferred_tq);
taskqueue_start_threads(&hdl->deferred_tq, 1, PI_NET, "irdma_defer_t");
taskqueue_enqueue(hdl->deferred_tq, &hdl->deferred_task);
return 0;
err_ctrl_init:
irdma_del_handler(hdl);
irdma_dealloc_pcidev(rf);
err_pcidev:
kfree(iwdev->rf);
ib_dealloc_device(&iwdev->ibdev);
kfree(hdl);
return err;
}
/**
* irdma_remove - Callback to remove an RDMA peer device
* @peer: the peer interface structure being removed
*
* Callback implementing the RDMA_REMOVE function. Called by the ice driver to
* notify the RDMA client driver that the device will be deleted.
*/
static int
irdma_remove(struct ice_rdma_peer *peer)
{
struct irdma_handler *hdl;
struct irdma_device *iwdev;
irdma_debug((struct irdma_sc_dev *)NULL, IRDMA_DEBUG_INIT, "removing %s\n", __FUNCTION__);
hdl = irdma_find_handler(peer);
if (!hdl)
return 0;
iwdev = hdl->iwdev;
if (iwdev->vsi.tc_change_pending) {
iwdev->vsi.tc_change_pending = false;
irdma_sc_suspend_resume_qps(&iwdev->vsi, IRDMA_OP_RESUME);
}
taskqueue_enqueue(hdl->deferred_tq, &hdl->deferred_task);
taskqueue_drain(hdl->deferred_tq, &hdl->deferred_task);
taskqueue_free(hdl->deferred_tq);
hdl->iwdev->rf->dev_ctx.task_arg.iwdev = NULL;
hdl->iwdev->rf->dev_ctx.task_arg.peer = NULL;
sysctl_ctx_free(&iwdev->rf->tun_info.irdma_sysctl_ctx);
hdl->iwdev->rf->tun_info.irdma_sysctl_tree = NULL;
irdma_ctrl_deinit_hw(iwdev->rf);
irdma_dealloc_pcidev(iwdev->rf);
irdma_del_handler(iwdev->hdl);
kfree(iwdev->hdl);
kfree(iwdev->rf);
ib_dealloc_device(&iwdev->ibdev);
irdma_pr_info("IRDMA hardware deinitialization complete\n");
return 0;
}
/**
* irdma_prep_for_unregister - ensure the driver is ready to unregister
*/
static void
irdma_prep_for_unregister(void)
{
struct irdma_handler *hdl;
unsigned long flags;
bool hdl_valid;
do {
hdl_valid = false;
spin_lock_irqsave(&irdma_handler_lock, flags);
list_for_each_entry(hdl, &irdma_handlers, list) {
if (!hdl)
continue;
if (!hdl->iwdev->rf->peer_info)
continue;
hdl_valid = true;
break;
}
spin_unlock_irqrestore(&irdma_handler_lock, flags);
if (!hdl || !hdl_valid)
break;
IRDMA_CLOSE(hdl->iwdev->rf->peer_info);
IRDMA_REMOVE(hdl->iwdev->rf->peer_info);
} while (1);
}
static kobj_method_t irdma_methods[] = {
KOBJMETHOD(irdma_probe, irdma_probe),
KOBJMETHOD(irdma_open, irdma_open),
KOBJMETHOD(irdma_close, irdma_close),
KOBJMETHOD(irdma_remove, irdma_remove),
KOBJMETHOD(irdma_link_change, irdma_link_change),
KOBJMETHOD(irdma_event_handler, irdma_event_handler),
KOBJMETHOD_END
};
/* declare irdma_class which extends the ice_rdma_di class */
DEFINE_CLASS_1(irdma, irdma_class, irdma_methods, sizeof(struct ice_rdma_peer), ice_rdma_di_class);
static struct ice_rdma_info irdma_info = {
.major_version = ICE_RDMA_MAJOR_VERSION,
.minor_version = ICE_RDMA_MINOR_VERSION,
.patch_version = ICE_RDMA_PATCH_VERSION,
.rdma_class = &irdma_class,
};
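On MOD_LOAD below, this structure is handed to ice_rdma_register(); the ice side is expected to compare these major/minor/patch values against its own ICE_RDMA_*_VERSION constants before accepting the client (hedged: the exact check lives in the ice driver, not in this file).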
/**
* irdma_module_event_handler - Module event handler callback
* @mod: unused mod argument
* @what: the module event to handle
* @arg: unused module event argument
*
* Callback used by the FreeBSD module stack to notify the driver of module
* events. Used to implement custom handling for certain module events such as
* load and unload.
*/
static int
irdma_module_event_handler(module_t __unused mod, int what, void __unused * arg)
{
switch (what) {
case MOD_LOAD:
printf("Loading irdma module\n");
return ice_rdma_register(&irdma_info);
case MOD_UNLOAD:
printf("Unloading irdma module\n");
irdma_prep_for_unregister();
ice_rdma_unregister();
return (0);
default:
return (EOPNOTSUPP);
}
return (0);
}
static moduledata_t irdma_moduledata = {
"irdma",
irdma_module_event_handler,
NULL
};
DECLARE_MODULE(irdma, irdma_moduledata, SI_SUB_LAST, SI_ORDER_ANY);
MODULE_VERSION(irdma, 1);
MODULE_DEPEND(irdma, ice, 1, 1, 1);
MODULE_DEPEND(irdma, ibcore, 1, 1, 1);
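With these dependencies declared, loading the module (for example with kldload irdma, or an irdma_load="YES" entry in loader.conf) pulls in ice and ibcore first, and MOD_LOAD then hands irdma_info to ice_rdma_register().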

sys/dev/irdma/icrdma_hw.c Normal file

@ -0,0 +1,418 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2017 - 2021 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#include "osdep.h"
#include "irdma_type.h"
#include "icrdma_hw.h"
void disable_prefetch(struct irdma_hw *hw);
void disable_tx_spad(struct irdma_hw *hw);
void rdpu_ackreqpmthresh(struct irdma_hw *hw);
static u32 icrdma_regs[IRDMA_MAX_REGS] = {
PFPE_CQPTAIL,
PFPE_CQPDB,
PFPE_CCQPSTATUS,
PFPE_CCQPHIGH,
PFPE_CCQPLOW,
PFPE_CQARM,
PFPE_CQACK,
PFPE_AEQALLOC,
PFPE_CQPERRCODES,
PFPE_WQEALLOC,
GLINT_DYN_CTL(0),
ICRDMA_DB_ADDR_OFFSET,
GLPCI_LBARCTRL,
GLPE_CPUSTATUS0,
GLPE_CPUSTATUS1,
GLPE_CPUSTATUS2,
PFINT_AEQCTL,
GLINT_CEQCTL(0),
VSIQF_PE_CTL1(0),
PFHMC_PDINV,
GLHMC_VFPDINV(0),
GLPE_CRITERR,
GLINT_RATE(0),
};
static u64 icrdma_masks[IRDMA_MAX_MASKS] = {
ICRDMA_CCQPSTATUS_CCQP_DONE_M,
ICRDMA_CCQPSTATUS_CCQP_ERR_M,
ICRDMA_CQPSQ_STAG_PDID_M,
ICRDMA_CQPSQ_CQ_CEQID_M,
ICRDMA_CQPSQ_CQ_CQID_M,
ICRDMA_COMMIT_FPM_CQCNT_M,
};
static u64 icrdma_shifts[IRDMA_MAX_SHIFTS] = {
ICRDMA_CCQPSTATUS_CCQP_DONE_S,
ICRDMA_CCQPSTATUS_CCQP_ERR_S,
ICRDMA_CQPSQ_STAG_PDID_S,
ICRDMA_CQPSQ_CQ_CEQID_S,
ICRDMA_CQPSQ_CQ_CQID_S,
ICRDMA_COMMIT_FPM_CQCNT_S,
};
/**
* icrdma_ena_irq - Enable interrupt
* @dev: pointer to the device structure
* @idx: vector index
*/
static void
icrdma_ena_irq(struct irdma_sc_dev *dev, u32 idx)
{
u32 val;
u32 interval = 0;
if (dev->ceq_itr && dev->aeq->msix_idx != idx)
interval = dev->ceq_itr >> 1; /* 2 usec units */
val = LS_64(0, IRDMA_GLINT_DYN_CTL_ITR_INDX) |
LS_64(interval, IRDMA_GLINT_DYN_CTL_INTERVAL) |
IRDMA_GLINT_DYN_CTL_INTENA_M | IRDMA_GLINT_DYN_CTL_CLEARPBA_M;
writel(val, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + idx);
}
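Note the right shift: the GLINT_DYN_CTL interval field counts 2-microsecond units, so a ceq_itr of, say, 8 microseconds is written as an interval value of 4.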
/**
* icrdma_disable_irq - Disable interrupt
* @dev: pointer to the device structure
* @idx: vector index
*/
static void
icrdma_disable_irq(struct irdma_sc_dev *dev, u32 idx)
{
writel(0, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + idx);
}
/**
* icrdma_cfg_ceq - Configure CEQ interrupt
* @dev: pointer to the device structure
* @ceq_id: Completion Event Queue ID
* @idx: vector index
* @enable: true to enable, false to disable
*/
static void
icrdma_cfg_ceq(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
bool enable)
{
u32 reg_val;
reg_val = enable ? IRDMA_GLINT_CEQCTL_CAUSE_ENA_M : 0;
reg_val |= (idx << IRDMA_GLINT_CEQCTL_MSIX_INDX_S) |
IRDMA_GLINT_CEQCTL_ITR_INDX_M;
writel(reg_val, dev->hw_regs[IRDMA_GLINT_CEQCTL] + ceq_id);
}
static const struct irdma_irq_ops icrdma_irq_ops = {
.irdma_cfg_aeq = irdma_cfg_aeq,
.irdma_cfg_ceq = icrdma_cfg_ceq,
.irdma_dis_irq = icrdma_disable_irq,
.irdma_en_irq = icrdma_ena_irq,
};
static const struct irdma_hw_stat_map icrdma_hw_stat_map[] = {
[IRDMA_HW_STAT_INDEX_RXVLANERR] = {0, 32, IRDMA_MAX_STATS_24},
[IRDMA_HW_STAT_INDEX_IP4RXOCTS] = {8, 0, IRDMA_MAX_STATS_48},
[IRDMA_HW_STAT_INDEX_IP4RXPKTS] = {16, 0, IRDMA_MAX_STATS_48},
[IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = {24, 32, IRDMA_MAX_STATS_32},
[IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = {24, 0, IRDMA_MAX_STATS_32},
[IRDMA_HW_STAT_INDEX_IP4RXFRAGS] = {32, 0, IRDMA_MAX_STATS_48},
[IRDMA_HW_STAT_INDEX_IP4RXMCOCTS] = {40, 0, IRDMA_MAX_STATS_48},
[IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] = {48, 0, IRDMA_MAX_STATS_48},
[IRDMA_HW_STAT_INDEX_IP6RXOCTS] = {56, 0, IRDMA_MAX_STATS_48},
[IRDMA_HW_STAT_INDEX_IP6RXPKTS] = {64, 0, IRDMA_MAX_STATS_48},
[IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = {72, 32, IRDMA_MAX_STATS_32},
[IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = {72, 0, IRDMA_MAX_STATS_32},
[IRDMA_HW_STAT_INDEX_IP6RXFRAGS] = {80, 0, IRDMA_MAX_STATS_48},
[IRDMA_HW_STAT_INDEX_IP6RXMCOCTS] = {88, 0, IRDMA_MAX_STATS_48},
[IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] = {96, 0, IRDMA_MAX_STATS_48},
[IRDMA_HW_STAT_INDEX_IP4TXOCTS] = {104, 0, IRDMA_MAX_STATS_48},
[IRDMA_HW_STAT_INDEX_IP4TXPKTS] = {112, 0, IRDMA_MAX_STATS_48},
[IRDMA_HW_STAT_INDEX_IP4TXFRAGS] = {120, 0, IRDMA_MAX_STATS_48},
[IRDMA_HW_STAT_INDEX_IP4TXMCOCTS] = {128, 0, IRDMA_MAX_STATS_48},
[IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] = {136, 0, IRDMA_MAX_STATS_48},
[IRDMA_HW_STAT_INDEX_IP6TXOCTS] = {144, 0, IRDMA_MAX_STATS_48},
[IRDMA_HW_STAT_INDEX_IP6TXPKTS] = {152, 0, IRDMA_MAX_STATS_48},
[IRDMA_HW_STAT_INDEX_IP6TXFRAGS] = {160, 0, IRDMA_MAX_STATS_48},
[IRDMA_HW_STAT_INDEX_IP6TXMCOCTS] = {168, 0, IRDMA_MAX_STATS_48},
[IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] = {176, 0, IRDMA_MAX_STATS_48},
[IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = {184, 32, IRDMA_MAX_STATS_24},
[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = {184, 0, IRDMA_MAX_STATS_24},
[IRDMA_HW_STAT_INDEX_TCPRXSEGS] = {192, 32, IRDMA_MAX_STATS_48},
[IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = {200, 32, IRDMA_MAX_STATS_24},
[IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = {200, 0, IRDMA_MAX_STATS_24},
[IRDMA_HW_STAT_INDEX_TCPTXSEG] = {208, 0, IRDMA_MAX_STATS_48},
[IRDMA_HW_STAT_INDEX_TCPRTXSEG] = {216, 32, IRDMA_MAX_STATS_32},
[IRDMA_HW_STAT_INDEX_UDPRXPKTS] = {224, 0, IRDMA_MAX_STATS_48},
[IRDMA_HW_STAT_INDEX_UDPTXPKTS] = {232, 0, IRDMA_MAX_STATS_48},
[IRDMA_HW_STAT_INDEX_RDMARXWRS] = {240, 0, IRDMA_MAX_STATS_48},
[IRDMA_HW_STAT_INDEX_RDMARXRDS] = {248, 0, IRDMA_MAX_STATS_48},
[IRDMA_HW_STAT_INDEX_RDMARXSNDS] = {256, 0, IRDMA_MAX_STATS_48},
[IRDMA_HW_STAT_INDEX_RDMATXWRS] = {264, 0, IRDMA_MAX_STATS_48},
[IRDMA_HW_STAT_INDEX_RDMATXRDS] = {272, 0, IRDMA_MAX_STATS_48},
[IRDMA_HW_STAT_INDEX_RDMATXSNDS] = {280, 0, IRDMA_MAX_STATS_48},
[IRDMA_HW_STAT_INDEX_RDMAVBND] = {288, 0, IRDMA_MAX_STATS_48},
[IRDMA_HW_STAT_INDEX_RDMAVINV] = {296, 0, IRDMA_MAX_STATS_48},
[IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS] = {304, 0, IRDMA_MAX_STATS_48},
[IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] = {312, 32, IRDMA_MAX_STATS_16},
[IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] = {312, 0, IRDMA_MAX_STATS_32},
[IRDMA_HW_STAT_INDEX_TXNPCNPSENT] = {320, 0, IRDMA_MAX_STATS_32},
};
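Each initializer above reads as {byte offset into the PF statistics block, starting bit, width class}. A minimal decode sketch with hypothetical field names (the driver's real struct irdma_hw_stat_map and width constants are defined elsewhere):

/* Illustrative only: field and helper names below are assumptions. */
struct example_stat_map {
	u16 byte_off;	/* byte offset of the 64-bit statistics word */
	u8 bit_off;	/* starting bit within that word */
	u64 mask;	/* e.g. (1ULL << 48) - 1 for a 48-bit counter */
};

static u64
example_read_stat(const u64 *stats_base, const struct example_stat_map *map)
{
	u64 raw = stats_base[map->byte_off / sizeof(u64)];

	return (raw >> map->bit_off) & map->mask;
}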
void
icrdma_init_hw(struct irdma_sc_dev *dev)
{
int i;
u8 IOMEM *hw_addr;
for (i = 0; i < IRDMA_MAX_REGS; ++i) {
hw_addr = dev->hw->hw_addr;
if (i == IRDMA_DB_ADDR_OFFSET)
hw_addr = NULL;
dev->hw_regs[i] = (u32 IOMEM *) (hw_addr + icrdma_regs[i]);
}
dev->hw_attrs.max_hw_vf_fpm_id = IRDMA_MAX_VF_FPM_ID;
dev->hw_attrs.first_hw_vf_fpm_id = IRDMA_FIRST_VF_FPM_ID;
for (i = 0; i < IRDMA_MAX_SHIFTS; ++i)
dev->hw_shifts[i] = icrdma_shifts[i];
for (i = 0; i < IRDMA_MAX_MASKS; ++i)
dev->hw_masks[i] = icrdma_masks[i];
dev->wqe_alloc_db = dev->hw_regs[IRDMA_WQEALLOC];
dev->cq_arm_db = dev->hw_regs[IRDMA_CQARM];
dev->aeq_alloc_db = dev->hw_regs[IRDMA_AEQALLOC];
dev->cqp_db = dev->hw_regs[IRDMA_CQPDB];
dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];
dev->irq_ops = &icrdma_irq_ops;
dev->hw_stats_map = icrdma_hw_stat_map;
dev->hw_attrs.max_hw_ird = ICRDMA_MAX_IRD_SIZE;
dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE;
dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;
dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_2;
dev->hw_attrs.uk_attrs.max_hw_wq_frags = ICRDMA_MAX_WQ_FRAGMENT_COUNT;
dev->hw_attrs.uk_attrs.max_hw_read_sges = ICRDMA_MAX_SGE_RD;
dev->hw_attrs.uk_attrs.max_hw_wq_size = IRDMA_QP_WQE_MAX_SIZE;
dev->hw_attrs.uk_attrs.min_sw_wq_size = IRDMA_QP_SW_MIN_WQSIZE;
dev->hw_attrs.uk_attrs.max_hw_sq_chunk = IRDMA_MAX_QUANTA_PER_WR;
disable_tx_spad(dev->hw);
disable_prefetch(dev->hw);
rdpu_ackreqpmthresh(dev->hw);
dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_RELAX_RQ_ORDER;
dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_RTS_AE |
IRDMA_FEATURE_CQ_RESIZE;
}
void
irdma_init_config_check(struct irdma_config_check *cc, u8 traffic_class, u16 qs_handle)
{
cc->config_ok = false;
cc->traffic_class = traffic_class;
cc->qs_handle = qs_handle;
cc->lfc_set = 0;
cc->pfc_set = 0;
}
static bool
irdma_is_lfc_set(struct irdma_config_check *cc, struct irdma_sc_vsi *vsi)
{
u32 lfc = 1;
u8 fn_id = vsi->dev->hmc_fn_id;
lfc &= (rd32(vsi->dev->hw,
PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_0 + 4 * fn_id) >> 8);
lfc &= (rd32(vsi->dev->hw,
PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_0 + 4 * fn_id) >> 8);
lfc &= rd32(vsi->dev->hw,
PRTMAC_HSEC_CTL_RX_ENABLE_GPP_0 + 4 * vsi->dev->hmc_fn_id);
if (lfc)
return true;
return false;
}
static bool
irdma_check_tc_has_pfc(struct irdma_sc_vsi *vsi, u64 reg_offset, u16 traffic_class)
{
u32 value, pfc = 0;
u32 i;
value = rd32(vsi->dev->hw, reg_offset);
for (i = 0; i < 4; i++)
pfc |= (value >> (8 * i + traffic_class)) & 0x1;
if (pfc)
return true;
return false;
}
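The loop above ORs together bit (8 * i + traffic_class) for i = 0..3, i.e. it tests the traffic class's PFC-enable bit in each of the four bytes of the register (GLDCB_TC2PFC at the call site).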
static bool
irdma_is_pfc_set(struct irdma_config_check *cc, struct irdma_sc_vsi *vsi)
{
u32 pause;
u8 fn_id = vsi->dev->hmc_fn_id;
pause = (rd32(vsi->dev->hw,
PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_0 + 4 * fn_id) >>
cc->traffic_class) & BIT(0);
pause &= (rd32(vsi->dev->hw,
PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_0 + 4 * fn_id) >>
cc->traffic_class) & BIT(0);
return irdma_check_tc_has_pfc(vsi, GLDCB_TC2PFC, cc->traffic_class) &&
pause;
}
bool
irdma_is_config_ok(struct irdma_config_check *cc, struct irdma_sc_vsi *vsi)
{
cc->lfc_set = irdma_is_lfc_set(cc, vsi);
cc->pfc_set = irdma_is_pfc_set(cc, vsi);
cc->config_ok = cc->lfc_set || cc->pfc_set;
return cc->config_ok;
}
#define IRDMA_RCV_WND_NO_FC 65536
#define IRDMA_RCV_WND_FC 65536
#define IRDMA_CWND_NO_FC 0x1
#define IRDMA_CWND_FC 0x18
#define IRDMA_ACKCREDS_NO_FC 0x02
#define IRDMA_ACKCREDS_FC 0x06
static void
irdma_check_flow_ctrl(struct irdma_sc_vsi *vsi, u8 user_prio, u8 traffic_class)
{
struct irdma_config_check *cfg_chk = &vsi->cfg_check[user_prio];
if (!irdma_is_config_ok(cfg_chk, vsi)) {
if (vsi->tc_print_warning[traffic_class]) {
irdma_pr_info("INFO: Flow control is disabled for this traffic class (%d) on this vsi.\n", traffic_class);
vsi->tc_print_warning[traffic_class] = false;
}
} else {
if (vsi->tc_print_warning[traffic_class]) {
irdma_pr_info("INFO: Flow control is enabled for this traffic class (%d) on this vsi.\n", traffic_class);
vsi->tc_print_warning[traffic_class] = false;
}
}
}
void
irdma_check_fc_for_tc_update(struct irdma_sc_vsi *vsi,
struct irdma_l2params *l2params)
{
u8 i;
for (i = 0; i < IRDMA_MAX_TRAFFIC_CLASS; i++)
vsi->tc_print_warning[i] = true;
for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
struct irdma_config_check *cfg_chk = &vsi->cfg_check[i];
u8 tc = l2params->up2tc[i];
cfg_chk->traffic_class = tc;
cfg_chk->qs_handle = vsi->qos[i].qs_handle;
irdma_check_flow_ctrl(vsi, i, tc);
}
}
void
irdma_check_fc_for_qp(struct irdma_sc_vsi *vsi, struct irdma_sc_qp *sc_qp)
{
u8 i;
for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
struct irdma_config_check *cfg_chk = &vsi->cfg_check[i];
irdma_init_config_check(cfg_chk,
vsi->qos[i].traffic_class,
vsi->qos[i].qs_handle);
if (sc_qp->qs_handle == cfg_chk->qs_handle)
irdma_check_flow_ctrl(vsi, i, cfg_chk->traffic_class);
}
}
#define GLPE_WQMTXIDXADDR 0x50E000
#define GLPE_WQMTXIDXDATA 0x50E004
void
disable_prefetch(struct irdma_hw *hw)
{
u32 wqm_data;
wr32(hw, GLPE_WQMTXIDXADDR, 0x12);
irdma_mb();
wqm_data = rd32(hw, GLPE_WQMTXIDXDATA);
wqm_data &= ~(1);
wr32(hw, GLPE_WQMTXIDXDATA, wqm_data);
}
void
disable_tx_spad(struct irdma_hw *hw)
{
u32 wqm_data;
wr32(hw, GLPE_WQMTXIDXADDR, 0x12);
irdma_mb();
wqm_data = rd32(hw, GLPE_WQMTXIDXDATA);
wqm_data &= ~(1 << 3);
wr32(hw, GLPE_WQMTXIDXDATA, wqm_data);
}
#define GL_RDPU_CNTRL 0x52054
void
rdpu_ackreqpmthresh(struct irdma_hw *hw)
{
u32 val;
val = rd32(hw, GL_RDPU_CNTRL);
val &= ~(0x3f << 10);
val |= (3 << 10);
wr32(hw, GL_RDPU_CNTRL, val);
}
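In other words, the routine clears the 6-bit field at bits 15:10 of GL_RDPU_CNTRL and writes 3 there, leaving all other bits intact; for example a read value of 0x0000fc00 would be written back as 0x00000c00.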

sys/dev/irdma/icrdma_hw.h Normal file

@ -0,0 +1,137 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2019 - 2020 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#ifndef ICRDMA_HW_H
#define ICRDMA_HW_H
#include "irdma.h"
#define VFPE_CQPTAIL1 0x0000a000
#define VFPE_CQPDB1 0x0000bc00
#define VFPE_CCQPSTATUS1 0x0000b800
#define VFPE_CCQPHIGH1 0x00009800
#define VFPE_CCQPLOW1 0x0000ac00
#define VFPE_CQARM1 0x0000b400
#define VFPE_CQACK1 0x0000b000
#define VFPE_AEQALLOC1 0x0000a400
#define VFPE_CQPERRCODES1 0x00009c00
#define VFPE_WQEALLOC1 0x0000c000
#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4)) /* _i=0...63 */
#define PFPE_CQPTAIL 0x00500880
#define PFPE_CQPDB 0x00500800
#define PFPE_CCQPSTATUS 0x0050a000
#define PFPE_CCQPHIGH 0x0050a100
#define PFPE_CCQPLOW 0x0050a080
#define PFPE_CQARM 0x00502c00
#define PFPE_CQACK 0x00502c80
#define PFPE_AEQALLOC 0x00502d00
#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4)) /* _i=0...2047 */
#define GLPCI_LBARCTRL 0x0009de74
#define GLPE_CPUSTATUS0 0x0050ba5c
#define GLPE_CPUSTATUS1 0x0050ba60
#define GLPE_CPUSTATUS2 0x0050ba64
#define PFINT_AEQCTL 0x0016cb00
#define PFPE_CQPERRCODES 0x0050a200
#define PFPE_WQEALLOC 0x00504400
#define GLINT_CEQCTL(_INT) (0x0015c000 + ((_INT) * 4)) /* _i=0...2047 */
#define VSIQF_PE_CTL1(_VSI) (0x00414000 + ((_VSI) * 4)) /* _i=0...767 */
#define PFHMC_PDINV 0x00520300
#define GLHMC_VFPDINV(_i) (0x00528300 + ((_i) * 4)) /* _i=0...31 */
#define GLPE_CRITERR 0x00534000
#define GLINT_RATE(_INT) (0x0015A000 + ((_INT) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */
#define PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_0 0x001e3180
#define PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_1 0x001e3184
#define PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_2 0x001e3188
#define PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_3 0x001e318c
#define PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_0 0x001e31a0
#define PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_1 0x001e31a4
#define PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_2 0x001e31a8
#define PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_3 0x001e31ac
#define PRTMAC_HSEC_CTL_RX_ENABLE_GPP_0 0x001e34c0
#define PRTMAC_HSEC_CTL_RX_ENABLE_GPP_1 0x001e34c4
#define PRTMAC_HSEC_CTL_RX_ENABLE_GPP_2 0x001e34c8
#define PRTMAC_HSEC_CTL_RX_ENABLE_GPP_3 0x001e34cc
#define PRTMAC_HSEC_CTL_RX_ENABLE_PPP_0 0x001e35c0
#define PRTMAC_HSEC_CTL_RX_ENABLE_PPP_1 0x001e35c4
#define PRTMAC_HSEC_CTL_RX_ENABLE_PPP_2 0x001e35c8
#define PRTMAC_HSEC_CTL_RX_ENABLE_PPP_3 0x001e35cc
#define GLDCB_TC2PFC 0x001d2694
#define PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001e31c0
#define ICRDMA_DB_ADDR_OFFSET (8 * 1024 * 1024 - 64 * 1024)
#define ICRDMA_VF_DB_ADDR_OFFSET (64 * 1024)
/* CCQSTATUS */
#define ICRDMA_CCQPSTATUS_CCQP_DONE_S 0
#define ICRDMA_CCQPSTATUS_CCQP_DONE_M (0x1ULL << ICRDMA_CCQPSTATUS_CCQP_DONE_S)
#define ICRDMA_CCQPSTATUS_CCQP_ERR_S 31
#define ICRDMA_CCQPSTATUS_CCQP_ERR_M (0x1ULL << ICRDMA_CCQPSTATUS_CCQP_ERR_S)
#define ICRDMA_CQPSQ_STAG_PDID_S 46
#define ICRDMA_CQPSQ_STAG_PDID_M (0x3ffffULL << ICRDMA_CQPSQ_STAG_PDID_S)
#define ICRDMA_CQPSQ_CQ_CEQID_S 22
#define ICRDMA_CQPSQ_CQ_CEQID_M (0x3ffULL << ICRDMA_CQPSQ_CQ_CEQID_S)
#define ICRDMA_CQPSQ_CQ_CQID_S 0
#define ICRDMA_CQPSQ_CQ_CQID_M (0x7ffffULL << ICRDMA_CQPSQ_CQ_CQID_S)
#define ICRDMA_COMMIT_FPM_CQCNT_S 0
#define ICRDMA_COMMIT_FPM_CQCNT_M (0xfffffULL << ICRDMA_COMMIT_FPM_CQCNT_S)
enum icrdma_device_caps_const {
ICRDMA_MAX_WQ_FRAGMENT_COUNT = 13,
ICRDMA_MAX_SGE_RD = 13,
ICRDMA_MAX_STATS_COUNT = 128,
ICRDMA_MAX_IRD_SIZE = 127,
ICRDMA_MAX_ORD_SIZE = 255,
};
void icrdma_init_hw(struct irdma_sc_dev *dev);
void irdma_init_config_check(struct irdma_config_check *cc,
u8 traffic_class,
u16 qs_handle);
bool irdma_is_config_ok(struct irdma_config_check *cc, struct irdma_sc_vsi *vsi);
void irdma_check_fc_for_tc_update(struct irdma_sc_vsi *vsi,
struct irdma_l2params *l2params);
void irdma_check_fc_for_qp(struct irdma_sc_vsi *vsi, struct irdma_sc_qp *sc_qp);
#endif /* ICRDMA_HW_H*/

sys/dev/irdma/irdma-abi.h Normal file

@ -0,0 +1,143 @@
/*-
* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB
*
* Copyright (c) 2006 - 2021 Intel Corporation. All rights reserved.
* Copyright (c) 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#ifndef IRDMA_ABI_H
#define IRDMA_ABI_H
#include <linux/types.h>
/* irdma must support legacy GEN_1 i40iw kernel
* and user-space whose last ABI ver is 5
*/
#define IRDMA_ABI_VER 5
enum irdma_memreg_type {
IRDMA_MEMREG_TYPE_MEM = 0,
IRDMA_MEMREG_TYPE_QP = 1,
IRDMA_MEMREG_TYPE_CQ = 2,
};
struct irdma_alloc_ucontext_req {
__u32 rsvd32;
__u8 userspace_ver;
__u8 rsvd8[3];
};
struct irdma_alloc_ucontext_resp {
__u32 max_pds;
__u32 max_qps;
__u32 wq_size; /* size of the WQs (SQ+RQ) in the mmapped area */
__u8 kernel_ver;
__u8 rsvd[3];
__aligned_u64 feature_flags;
__aligned_u64 db_mmap_key;
__u32 max_hw_wq_frags;
__u32 max_hw_read_sges;
__u32 max_hw_inline;
__u32 max_hw_rq_quanta;
__u32 max_hw_wq_quanta;
__u32 min_hw_cq_size;
__u32 max_hw_cq_size;
__u16 max_hw_sq_chunk;
__u8 hw_rev;
__u8 rsvd2;
};
struct irdma_alloc_pd_resp {
__u32 pd_id;
__u8 rsvd[4];
};
struct irdma_resize_cq_req {
__aligned_u64 user_cq_buffer;
};
struct irdma_create_cq_req {
__aligned_u64 user_cq_buf;
__aligned_u64 user_shadow_area;
};
struct irdma_create_qp_req {
__aligned_u64 user_wqe_bufs;
__aligned_u64 user_compl_ctx;
};
struct irdma_mem_reg_req {
__u16 reg_type; /* enum irdma_memreg_type */
__u16 cq_pages;
__u16 rq_pages;
__u16 sq_pages;
};
struct irdma_modify_qp_req {
__u8 sq_flush;
__u8 rq_flush;
__u8 rsvd[6];
};
struct irdma_create_cq_resp {
__u32 cq_id;
__u32 cq_size;
};
struct irdma_create_qp_resp {
__u32 qp_id;
__u32 actual_sq_size;
__u32 actual_rq_size;
__u32 irdma_drv_opt;
__u16 push_idx;
__u8 lsmm;
__u8 rsvd;
__u32 qp_caps;
};
struct irdma_modify_qp_resp {
__aligned_u64 push_wqe_mmap_key;
__aligned_u64 push_db_mmap_key;
__u16 push_offset;
__u8 push_valid;
__u8 rd_fence_rate;
__u8 rsvd[4];
};
struct irdma_create_ah_resp {
__u32 ah_id;
__u8 rsvd[4];
};
#endif /* IRDMA_ABI_H */

sys/dev/irdma/irdma.h Normal file

@ -0,0 +1,238 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2017 - 2021 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#ifndef IRDMA_H
#define IRDMA_H
#define RDMA_BIT2(type, a) ((u##type) 1UL << a)
#define RDMA_MASK3(type, mask, shift) ((u##type) mask << shift)
#define MAKEMASK(m, s) ((m) << (s))
#define IRDMA_WQEALLOC_WQE_DESC_INDEX_S 20
#define IRDMA_WQEALLOC_WQE_DESC_INDEX_M (0xfff << IRDMA_WQEALLOC_WQE_DESC_INDEX_S)
#define IRDMA_CQPTAIL_WQTAIL_S 0
#define IRDMA_CQPTAIL_WQTAIL_M (0x7ff << IRDMA_CQPTAIL_WQTAIL_S)
#define IRDMA_CQPTAIL_CQP_OP_ERR_S 31
#define IRDMA_CQPTAIL_CQP_OP_ERR_M (0x1 << IRDMA_CQPTAIL_CQP_OP_ERR_S)
#define IRDMA_CQPERRCODES_CQP_MINOR_CODE_S 0
#define IRDMA_CQPERRCODES_CQP_MINOR_CODE_M (0xffff << IRDMA_CQPERRCODES_CQP_MINOR_CODE_S)
#define IRDMA_CQPERRCODES_CQP_MAJOR_CODE_S 16
#define IRDMA_CQPERRCODES_CQP_MAJOR_CODE_M (0xffff << IRDMA_CQPERRCODES_CQP_MAJOR_CODE_S)
#define IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE_S 4
#define IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE_M (0x3 << IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE_S)
#define IRDMA_GLINT_RATE_INTERVAL_S 0
#define IRDMA_GLINT_RATE_INTERVAL_M (0x3c << IRDMA_GLINT_RATE_INTERVAL_S)
#define IRDMA_GLINT_RATE_INTRL_ENA_S 6
#define IRDMA_GLINT_RATE_INTRL_ENA_M BIT(6)
#define IRDMA_GLINT_DYN_CTL_INTENA_S 0
#define IRDMA_GLINT_DYN_CTL_INTENA_M (0x1 << IRDMA_GLINT_DYN_CTL_INTENA_S)
#define IRDMA_GLINT_DYN_CTL_CLEARPBA_S 1
#define IRDMA_GLINT_DYN_CTL_CLEARPBA_M (0x1 << IRDMA_GLINT_DYN_CTL_CLEARPBA_S)
#define IRDMA_GLINT_DYN_CTL_ITR_INDX_S 3
#define IRDMA_GLINT_DYN_CTL_ITR_INDX_M (0x3 << IRDMA_GLINT_DYN_CTL_ITR_INDX_S)
#define IRDMA_GLINT_DYN_CTL_INTERVAL_S 5
#define IRDMA_GLINT_DYN_CTL_INTERVAL_M (0xfff << IRDMA_GLINT_DYN_CTL_INTERVAL_S)
#define IRDMA_GLINT_CEQCTL_ITR_INDX_S 11
#define IRDMA_GLINT_CEQCTL_ITR_INDX_M (0x3 << IRDMA_GLINT_CEQCTL_ITR_INDX_S)
#define IRDMA_GLINT_CEQCTL_CAUSE_ENA_S 30
#define IRDMA_GLINT_CEQCTL_CAUSE_ENA_M (0x1 << IRDMA_GLINT_CEQCTL_CAUSE_ENA_S)
#define IRDMA_GLINT_CEQCTL_MSIX_INDX_S 0
#define IRDMA_GLINT_CEQCTL_MSIX_INDX_M (0x7ff << IRDMA_GLINT_CEQCTL_MSIX_INDX_S)
#define IRDMA_PFINT_AEQCTL_MSIX_INDX_S 0
#define IRDMA_PFINT_AEQCTL_MSIX_INDX_M (0x7ff << IRDMA_PFINT_AEQCTL_MSIX_INDX_S)
#define IRDMA_PFINT_AEQCTL_ITR_INDX_S 11
#define IRDMA_PFINT_AEQCTL_ITR_INDX_M (0x3 << IRDMA_PFINT_AEQCTL_ITR_INDX_S)
#define IRDMA_PFINT_AEQCTL_CAUSE_ENA_S 30
#define IRDMA_PFINT_AEQCTL_CAUSE_ENA_M (0x1 << IRDMA_PFINT_AEQCTL_CAUSE_ENA_S)
#define IRDMA_PFHMC_PDINV_PMSDIDX_S 0
#define IRDMA_PFHMC_PDINV_PMSDIDX_M (0xfff << IRDMA_PFHMC_PDINV_PMSDIDX_S)
#define IRDMA_PFHMC_PDINV_PMSDPARTSEL_S 15
#define IRDMA_PFHMC_PDINV_PMSDPARTSEL_M (0x1 << IRDMA_PFHMC_PDINV_PMSDPARTSEL_S)
#define IRDMA_PFHMC_PDINV_PMPDIDX_S 16
#define IRDMA_PFHMC_PDINV_PMPDIDX_M (0x1ff << IRDMA_PFHMC_PDINV_PMPDIDX_S)
#define IRDMA_PFHMC_SDDATALOW_PMSDVALID_S 0
#define IRDMA_PFHMC_SDDATALOW_PMSDVALID_M (0x1 << IRDMA_PFHMC_SDDATALOW_PMSDVALID_S)
#define IRDMA_PFHMC_SDDATALOW_PMSDTYPE_S 1
#define IRDMA_PFHMC_SDDATALOW_PMSDTYPE_M (0x1 << IRDMA_PFHMC_SDDATALOW_PMSDTYPE_S)
#define IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT_S 2
#define IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT_M (0x3ff << IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT_S)
#define IRDMA_PFHMC_SDDATALOW_PMSDDATALOW_S 12
#define IRDMA_PFHMC_SDDATALOW_PMSDDATALOW_M (0xfffff << IRDMA_PFHMC_SDDATALOW_PMSDDATALOW_S)
#define IRDMA_PFHMC_SDCMD_PMSDWR_S 31
#define IRDMA_PFHMC_SDCMD_PMSDWR_M (0x1 << IRDMA_PFHMC_SDCMD_PMSDWR_S)
#define IRDMA_INVALID_CQ_IDX 0xffffffff
enum irdma_registers {
IRDMA_CQPTAIL,
IRDMA_CQPDB,
IRDMA_CCQPSTATUS,
IRDMA_CCQPHIGH,
IRDMA_CCQPLOW,
IRDMA_CQARM,
IRDMA_CQACK,
IRDMA_AEQALLOC,
IRDMA_CQPERRCODES,
IRDMA_WQEALLOC,
IRDMA_GLINT_DYN_CTL,
IRDMA_DB_ADDR_OFFSET,
IRDMA_GLPCI_LBARCTRL,
IRDMA_GLPE_CPUSTATUS0,
IRDMA_GLPE_CPUSTATUS1,
IRDMA_GLPE_CPUSTATUS2,
IRDMA_PFINT_AEQCTL,
IRDMA_GLINT_CEQCTL,
IRDMA_VSIQF_PE_CTL1,
IRDMA_PFHMC_PDINV,
IRDMA_GLHMC_VFPDINV,
IRDMA_GLPE_CRITERR,
IRDMA_GLINT_RATE,
IRDMA_MAX_REGS, /* Must be last entry */
};
enum irdma_shifts {
IRDMA_CCQPSTATUS_CCQP_DONE_S,
IRDMA_CCQPSTATUS_CCQP_ERR_S,
IRDMA_CQPSQ_STAG_PDID_S,
IRDMA_CQPSQ_CQ_CEQID_S,
IRDMA_CQPSQ_CQ_CQID_S,
IRDMA_COMMIT_FPM_CQCNT_S,
IRDMA_MAX_SHIFTS,
};
enum irdma_masks {
IRDMA_CCQPSTATUS_CCQP_DONE_M,
IRDMA_CCQPSTATUS_CCQP_ERR_M,
IRDMA_CQPSQ_STAG_PDID_M,
IRDMA_CQPSQ_CQ_CEQID_M,
IRDMA_CQPSQ_CQ_CQID_M,
IRDMA_COMMIT_FPM_CQCNT_M,
IRDMA_MAX_MASKS, /* Must be last entry */
};
#define IRDMA_MAX_MGS_PER_CTX 8
struct irdma_mcast_grp_ctx_entry_info {
u32 qp_id;
bool valid_entry;
u16 dest_port;
u32 use_cnt;
};
struct irdma_mcast_grp_info {
u8 dest_mac_addr[ETH_ALEN];
u16 vlan_id;
u8 hmc_fcn_id;
bool ipv4_valid:1;
bool vlan_valid:1;
u16 mg_id;
u32 no_of_mgs;
u32 dest_ip_addr[4];
u16 qs_handle;
struct irdma_dma_mem dma_mem_mc;
struct irdma_mcast_grp_ctx_entry_info mg_ctx_info[IRDMA_MAX_MGS_PER_CTX];
};
enum irdma_vers {
IRDMA_GEN_RSVD,
IRDMA_GEN_1,
IRDMA_GEN_2,
};
struct irdma_uk_attrs {
u64 feature_flags;
u32 max_hw_wq_frags;
u32 max_hw_read_sges;
u32 max_hw_inline;
u32 max_hw_rq_quanta;
u32 max_hw_wq_quanta;
u32 min_hw_cq_size;
u32 max_hw_cq_size;
u16 max_hw_sq_chunk;
u16 max_hw_wq_size;
u16 min_sw_wq_size;
u8 hw_rev;
};
struct irdma_hw_attrs {
struct irdma_uk_attrs uk_attrs;
u64 max_hw_outbound_msg_size;
u64 max_hw_inbound_msg_size;
u64 max_mr_size;
u32 min_hw_qp_id;
u32 min_hw_aeq_size;
u32 max_hw_aeq_size;
u32 min_hw_ceq_size;
u32 max_hw_ceq_size;
u32 max_hw_device_pages;
u32 max_hw_vf_fpm_id;
u32 first_hw_vf_fpm_id;
u32 max_hw_ird;
u32 max_hw_ord;
u32 max_hw_wqes;
u32 max_hw_pds;
u32 max_hw_ena_vf_count;
u32 max_qp_wr;
u32 max_pe_ready_count;
u32 max_done_count;
u32 max_sleep_count;
u32 max_cqp_compl_wait_time_ms;
u16 max_stat_inst;
u16 max_stat_idx;
};
void icrdma_init_hw(struct irdma_sc_dev *dev);
void irdma_check_fc_for_qp(struct irdma_sc_vsi *vsi, struct irdma_sc_qp *sc_qp);
#endif /* IRDMA_H*/

sys/dev/irdma/irdma_cm.c Normal file (4253 lines)

File diff suppressed because it is too large

sys/dev/irdma/irdma_cm.h Normal file

@ -0,0 +1,453 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2015 - 2021 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#ifndef IRDMA_CM_H
#define IRDMA_CM_H
#define IRDMA_MPA_REQUEST_ACCEPT 1
#define IRDMA_MPA_REQUEST_REJECT 2
/* IETF MPA -- defines */
#define IEFT_MPA_KEY_REQ "MPA ID Req Frame"
#define IEFT_MPA_KEY_REP "MPA ID Rep Frame"
#define IETF_MPA_KEY_SIZE 16
#define IETF_MPA_VER 1
#define IETF_MAX_PRIV_DATA_LEN 512
#define IETF_MPA_FRAME_SIZE 20
#define IETF_RTR_MSG_SIZE 4
#define IETF_MPA_V2_FLAG 0x10
#define SNDMARKER_SEQNMASK 0x000001ff
#define IRDMA_MAX_IETF_SIZE 32
/* IETF RTR MSG Fields */
#define IETF_PEER_TO_PEER 0x8000
#define IETF_FLPDU_ZERO_LEN 0x4000
#define IETF_RDMA0_WRITE 0x8000
#define IETF_RDMA0_READ 0x4000
#define IETF_NO_IRD_ORD 0x3fff
#define MAX_PORTS 65536
#define IRDMA_PASSIVE_STATE_INDICATED 0
#define IRDMA_DO_NOT_SEND_RESET_EVENT 1
#define IRDMA_SEND_RESET_EVENT 2
#define MAX_IRDMA_IFS 4
#define SET_ACK 1
#define SET_SYN 2
#define SET_FIN 4
#define SET_RST 8
#define TCP_OPTIONS_PADDING 3
#define IRDMA_DEFAULT_RETRYS 64
#define IRDMA_DEFAULT_RETRANS 8
#define IRDMA_DEFAULT_TTL 0x40
#define IRDMA_DEFAULT_RTT_VAR 6
#define IRDMA_DEFAULT_SS_THRESH 0x3fffffff
#define IRDMA_DEFAULT_REXMIT_THRESH 8
#define IRDMA_RETRY_TIMEOUT HZ
#define IRDMA_SHORT_TIME 10
#define IRDMA_LONG_TIME (2 * HZ)
#define IRDMA_MAX_TIMEOUT ((unsigned long)(12 * HZ))
#define IRDMA_CM_HASHTABLE_SIZE 1024
#define IRDMA_CM_TCP_TIMER_INTERVAL 3000
#define IRDMA_CM_DEFAULT_MTU 1540
#define IRDMA_CM_DEFAULT_FRAME_CNT 10
#define IRDMA_CM_THREAD_STACK_SIZE 256
#define IRDMA_CM_DEFAULT_RCV_WND 64240
#define IRDMA_CM_DEFAULT_RCV_WND_SCALED 0x3FFFC
#define IRDMA_CM_DEFAULT_RCV_WND_SCALE 2
#define IRDMA_CM_DEFAULT_FREE_PKTS 10
#define IRDMA_CM_FREE_PKT_LO_WATERMARK 2
#define IRDMA_CM_DEFAULT_MSS 536
#define IRDMA_CM_DEFAULT_MPA_VER 2
#define IRDMA_CM_DEFAULT_SEQ 0x159bf75f
#define IRDMA_CM_DEFAULT_LOCAL_ID 0x3b47
#define IRDMA_CM_DEFAULT_SEQ2 0x18ed5740
#define IRDMA_CM_DEFAULT_LOCAL_ID2 0xb807
#define IRDMA_MAX_CM_BUF (IRDMA_MAX_IETF_SIZE + IETF_MAX_PRIV_DATA_LEN)
enum ietf_mpa_flags {
IETF_MPA_FLAGS_REJECT = 0x20,
IETF_MPA_FLAGS_CRC = 0x40,
IETF_MPA_FLAGS_MARKERS = 0x80,
};
enum irdma_timer_type {
IRDMA_TIMER_TYPE_SEND,
IRDMA_TIMER_TYPE_CLOSE,
};
enum option_nums {
OPTION_NUM_EOL,
OPTION_NUM_NONE,
OPTION_NUM_MSS,
OPTION_NUM_WINDOW_SCALE,
OPTION_NUM_SACK_PERM,
OPTION_NUM_SACK,
OPTION_NUM_WRITE0 = 0xbc,
};
/* cm node transition states */
enum irdma_cm_node_state {
IRDMA_CM_STATE_UNKNOWN,
IRDMA_CM_STATE_INITED,
IRDMA_CM_STATE_LISTENING,
IRDMA_CM_STATE_SYN_RCVD,
IRDMA_CM_STATE_SYN_SENT,
IRDMA_CM_STATE_ONE_SIDE_ESTABLISHED,
IRDMA_CM_STATE_ESTABLISHED,
IRDMA_CM_STATE_ACCEPTING,
IRDMA_CM_STATE_MPAREQ_SENT,
IRDMA_CM_STATE_MPAREQ_RCVD,
IRDMA_CM_STATE_MPAREJ_RCVD,
IRDMA_CM_STATE_OFFLOADED,
IRDMA_CM_STATE_FIN_WAIT1,
IRDMA_CM_STATE_FIN_WAIT2,
IRDMA_CM_STATE_CLOSE_WAIT,
IRDMA_CM_STATE_TIME_WAIT,
IRDMA_CM_STATE_LAST_ACK,
IRDMA_CM_STATE_CLOSING,
IRDMA_CM_STATE_LISTENER_DESTROYED,
IRDMA_CM_STATE_CLOSED,
};
enum mpa_frame_ver {
IETF_MPA_V1 = 1,
IETF_MPA_V2 = 2,
};
enum mpa_frame_key {
MPA_KEY_REQUEST,
MPA_KEY_REPLY,
};
enum send_rdma0 {
SEND_RDMA_READ_ZERO = 1,
SEND_RDMA_WRITE_ZERO = 2,
};
enum irdma_tcpip_pkt_type {
IRDMA_PKT_TYPE_UNKNOWN,
IRDMA_PKT_TYPE_SYN,
IRDMA_PKT_TYPE_SYNACK,
IRDMA_PKT_TYPE_ACK,
IRDMA_PKT_TYPE_FIN,
IRDMA_PKT_TYPE_RST,
};
enum irdma_cm_listener_state {
IRDMA_CM_LISTENER_PASSIVE_STATE = 1,
IRDMA_CM_LISTENER_ACTIVE_STATE = 2,
IRDMA_CM_LISTENER_EITHER_STATE = 3,
};
/* CM event codes */
enum irdma_cm_event_type {
IRDMA_CM_EVENT_UNKNOWN,
IRDMA_CM_EVENT_ESTABLISHED,
IRDMA_CM_EVENT_MPA_REQ,
IRDMA_CM_EVENT_MPA_CONNECT,
IRDMA_CM_EVENT_MPA_ACCEPT,
IRDMA_CM_EVENT_MPA_REJECT,
IRDMA_CM_EVENT_MPA_ESTABLISHED,
IRDMA_CM_EVENT_CONNECTED,
IRDMA_CM_EVENT_RESET,
IRDMA_CM_EVENT_ABORTED,
};
struct irdma_bth { /* Base Transport Header */
u8 opcode;
u8 flags;
__be16 pkey;
__be32 qpn;
__be32 apsn;
};
struct ietf_mpa_v1 {
u8 key[IETF_MPA_KEY_SIZE];
u8 flags;
u8 rev;
__be16 priv_data_len;
u8 priv_data[];
};
struct ietf_rtr_msg {
__be16 ctrl_ird;
__be16 ctrl_ord;
};
struct ietf_mpa_v2 {
u8 key[IETF_MPA_KEY_SIZE];
u8 flags;
u8 rev;
__be16 priv_data_len;
struct ietf_rtr_msg rtr_msg;
u8 priv_data[];
};
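A quick size check against the constants at the top of the file: struct ietf_mpa_v1 is 16 + 1 + 1 + 2 = 20 bytes (IETF_MPA_FRAME_SIZE, with priv_data a flexible array member) and struct ietf_rtr_msg is 4 bytes (IETF_RTR_MSG_SIZE). An illustrative assert sketch, not present in the driver itself:

/* Illustrative size checks; not part of the driver. */
_Static_assert(sizeof(struct ietf_mpa_v1) == IETF_MPA_FRAME_SIZE,
    "MPA v1 header must be 20 bytes on the wire");
_Static_assert(sizeof(struct ietf_rtr_msg) == IETF_RTR_MSG_SIZE,
    "RTR message must be 4 bytes on the wire");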
struct option_base {
u8 optionnum;
u8 len;
};
struct option_mss {
u8 optionnum;
u8 len;
__be16 mss;
};
struct option_windowscale {
u8 optionnum;
u8 len;
u8 shiftcount;
};
union all_known_options {
char eol;
struct option_base base;
struct option_mss mss;
struct option_windowscale windowscale;
};
struct irdma_timer_entry {
struct list_head list;
unsigned long timetosend; /* jiffies */
struct irdma_puda_buf *sqbuf;
u32 type;
u32 retrycount;
u32 retranscount;
u32 context;
u32 send_retrans;
int close_when_complete;
};
/* CM context params */
struct irdma_cm_tcp_context {
u8 client;
u32 loc_seq_num;
u32 loc_ack_num;
u32 rem_ack_num;
u32 rcv_nxt;
u32 loc_id;
u32 rem_id;
u32 snd_wnd;
u32 max_snd_wnd;
u32 rcv_wnd;
u32 mss;
u8 snd_wscale;
u8 rcv_wscale;
};
struct irdma_apbvt_entry {
struct hlist_node hlist;
u32 use_cnt;
u16 port;
};
struct irdma_cm_listener {
struct list_head list;
struct iw_cm_id *cm_id;
struct irdma_cm_core *cm_core;
struct irdma_device *iwdev;
struct list_head child_listen_list;
struct irdma_apbvt_entry *apbvt_entry;
enum irdma_cm_listener_state listener_state;
atomic_t refcnt;
atomic_t pend_accepts_cnt;
u32 loc_addr[4];
u32 reused_node;
int backlog;
u16 loc_port;
u16 vlan_id;
u8 loc_mac[ETH_ALEN];
u8 user_pri;
u8 tos;
bool qhash_set:1;
bool ipv4:1;
};
struct irdma_kmem_info {
void *addr;
u32 size;
};
struct irdma_mpa_priv_info {
const void *addr;
u32 size;
};
struct irdma_cm_node {
struct irdma_qp *iwqp;
struct irdma_device *iwdev;
struct irdma_sc_dev *dev;
struct irdma_cm_tcp_context tcp_cntxt;
struct irdma_cm_core *cm_core;
struct irdma_timer_entry *send_entry;
struct irdma_timer_entry *close_entry;
struct irdma_cm_listener *listener;
struct list_head timer_entry;
struct list_head reset_entry;
struct list_head teardown_entry;
struct irdma_apbvt_entry *apbvt_entry;
struct rcu_head rcu_head;
struct irdma_mpa_priv_info pdata;
struct irdma_sc_ah *ah;
struct irdma_kmem_info mpa_hdr;
struct iw_cm_id *cm_id;
struct hlist_node list;
struct completion establish_comp;
spinlock_t retrans_list_lock; /* protect CM node rexmit updates*/
atomic_t passive_state;
atomic_t refcnt;
enum irdma_cm_node_state state;
enum send_rdma0 send_rdma0_op;
enum mpa_frame_ver mpa_frame_rev;
u32 loc_addr[4], rem_addr[4];
u16 loc_port, rem_port;
int apbvt_set;
int accept_pend;
u16 vlan_id;
u16 ird_size;
u16 ord_size;
u16 mpav2_ird_ord;
u16 lsmm_size;
u8 pdata_buf[IETF_MAX_PRIV_DATA_LEN];
u8 loc_mac[ETH_ALEN];
u8 rem_mac[ETH_ALEN];
u8 user_pri;
u8 tos;
bool ack_rcvd:1;
bool qhash_set:1;
bool ipv4:1;
bool snd_mark_en:1;
bool rcv_mark_en:1;
bool do_lpb:1;
bool accelerated:1;
struct ietf_mpa_v2 mpa_v2_frame;
};
/* Used by internal CM APIs to pass CM information*/
struct irdma_cm_info {
struct iw_cm_id *cm_id;
u16 loc_port;
u16 rem_port;
u32 loc_addr[4];
u32 rem_addr[4];
u32 qh_qpid;
u16 vlan_id;
int backlog;
u8 user_pri;
u8 tos;
bool ipv4;
};
struct irdma_cm_event {
enum irdma_cm_event_type type;
struct irdma_cm_info cm_info;
struct work_struct event_work;
struct irdma_cm_node *cm_node;
};
struct irdma_cm_core {
struct irdma_device *iwdev;
struct irdma_sc_dev *dev;
struct list_head listen_list;
DECLARE_HASHTABLE(cm_hash_tbl, 8);
DECLARE_HASHTABLE(apbvt_hash_tbl, 8);
struct timer_list tcp_timer;
struct workqueue_struct *event_wq;
spinlock_t ht_lock; /* protect CM node (active side) list */
spinlock_t listen_list_lock; /* protect listener list */
spinlock_t apbvt_lock; /* serialize apbvt add/del entries */
u64 stats_nodes_created;
u64 stats_nodes_destroyed;
u64 stats_listen_created;
u64 stats_listen_destroyed;
u64 stats_listen_nodes_created;
u64 stats_listen_nodes_destroyed;
u64 stats_lpbs;
u64 stats_accepts;
u64 stats_rejects;
u64 stats_connect_errs;
u64 stats_passive_errs;
u64 stats_pkt_retrans;
u64 stats_backlog_drops;
struct irdma_puda_buf *(*form_cm_frame)(struct irdma_cm_node *cm_node,
struct irdma_kmem_info *options,
struct irdma_kmem_info *hdr,
struct irdma_mpa_priv_info *pdata,
u8 flags);
int (*cm_create_ah)(struct irdma_cm_node *cm_node, bool wait);
void (*cm_free_ah)(struct irdma_cm_node *cm_node);
};
int irdma_schedule_cm_timer(struct irdma_cm_node *cm_node,
struct irdma_puda_buf *sqbuf,
enum irdma_timer_type type, int send_retrans,
int close_when_complete);
static inline u8 irdma_tos2dscp(u8 tos)
{
#define IRDMA_DSCP_S 2
#define IRDMA_DSCP_M (0x3f << IRDMA_DSCP_S)
return RS_32(tos, IRDMA_DSCP);
}
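/*
 * Worked example (assuming RS_32 extracts (val & IRDMA_DSCP_M) >>
 * IRDMA_DSCP_S): a ToS of 0xb8 yields (0xb8 & 0xfc) >> 2 = 0x2e,
 * i.e. DSCP 46 (EF).
 */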
int irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int irdma_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
int irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int irdma_create_listen(struct iw_cm_id *cm_id, int backlog);
int irdma_destroy_listen(struct iw_cm_id *cm_id);
int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, u8 *mac);
void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
struct irdma_cm_info *nfo,
bool disconnect_all);
int irdma_cm_start(struct irdma_device *dev);
int irdma_cm_stop(struct irdma_device *dev);
bool irdma_ipv4_is_lpb(u32 loc_addr, u32 rem_addr);
bool irdma_ipv6_is_lpb(u32 *loc_addr, u32 *rem_addr);
int irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr,
u8 *mac_addr, u32 action);
bool irdma_port_in_use(struct irdma_cm_core *cm_core, u16 port);
void irdma_send_ack(struct irdma_cm_node *cm_node);
void irdma_lpb_nop(struct irdma_sc_qp *qp);
void irdma_rem_ref_cm_node(struct irdma_cm_node *cm_node);
void irdma_add_conn_est_qh(struct irdma_cm_node *cm_node);
#endif /* IRDMA_CM_H */

5644
sys/dev/irdma/irdma_ctrl.c Normal file

File diff suppressed because it is too large

2337
sys/dev/irdma/irdma_defs.h Normal file

File diff suppressed because it is too large

734
sys/dev/irdma/irdma_hmc.c Normal file

@@ -0,0 +1,734 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2015 - 2021 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#include "osdep.h"
#include "irdma_hmc.h"
#include "irdma_defs.h"
#include "irdma_type.h"
#include "irdma_protos.h"
/**
* irdma_find_sd_index_limit - finds segment descriptor index limit
* @hmc_info: pointer to the HMC configuration information structure
* @type: type of HMC resources we're searching
* @idx: starting index for the object
* @cnt: number of objects we're trying to create
* @sd_idx: pointer to return index of the segment descriptor in question
* @sd_limit: pointer to return the maximum number of segment descriptors
*
* This function calculates the segment descriptor index and index limit
* for the resource defined by irdma_hmc_rsrc_type.
*/
static void
irdma_find_sd_index_limit(struct irdma_hmc_info *hmc_info, u32 type,
u32 idx, u32 cnt, u32 *sd_idx,
u32 *sd_limit)
{
u64 fpm_addr, fpm_limit;
fpm_addr = hmc_info->hmc_obj[type].base +
hmc_info->hmc_obj[type].size * idx;
fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt;
*sd_idx = (u32)(fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE);
*sd_limit = (u32)((fpm_limit - 1) / IRDMA_HMC_DIRECT_BP_SIZE);
*sd_limit += 1;
}
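/*
 * Worked example: with a 2MB direct backing page (IRDMA_HMC_DIRECT_BP_SIZE =
 * 0x200000), an object type of size 0x1000 at base 0 with idx 0 and cnt 1024
 * spans FPM addresses [0, 0x400000), giving *sd_idx = 0 and *sd_limit = 2.
 */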
/**
* irdma_find_pd_index_limit - finds page descriptor index limit
* @hmc_info: pointer to the HMC configuration information struct
* @type: HMC resource type we're examining
* @idx: starting index for the object
* @cnt: number of objects we're trying to create
* @pd_idx: pointer to return page descriptor index
* @pd_limit: pointer to return page descriptor index limit
*
* Calculates the page descriptor index and index limit for the resource
* defined by irdma_hmc_rsrc_type.
*/
static void
irdma_find_pd_index_limit(struct irdma_hmc_info *hmc_info, u32 type,
u32 idx, u32 cnt, u32 *pd_idx,
u32 *pd_limit)
{
u64 fpm_adr, fpm_limit;
fpm_adr = hmc_info->hmc_obj[type].base +
hmc_info->hmc_obj[type].size * idx;
fpm_limit = fpm_adr + hmc_info->hmc_obj[type].size * cnt;
*pd_idx = (u32)(fpm_adr / IRDMA_HMC_PAGED_BP_SIZE);
*pd_limit = (u32)((fpm_limit - 1) / IRDMA_HMC_PAGED_BP_SIZE);
*pd_limit += 1;
}
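/*
 * Worked example: with 4K paged backing pages (IRDMA_HMC_PAGED_BP_SIZE =
 * 4096), the same FPM range [0, 0x400000) gives *pd_idx = 0 and
 * *pd_limit = 1024.
 */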
/**
* irdma_set_sd_entry - setup entry for sd programming
* @pa: physical addr
* @idx: sd index
* @type: paged or direct sd
* @entry: sd entry ptr
*/
static void
irdma_set_sd_entry(u64 pa, u32 idx, enum irdma_sd_entry_type type,
struct irdma_update_sd_entry *entry)
{
entry->data = pa | (IRDMA_HMC_MAX_BP_COUNT << IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT_S) |
(((type == IRDMA_SD_TYPE_PAGED) ? 0 : 1) << IRDMA_PFHMC_SDDATALOW_PMSDTYPE_S) |
(1 << IRDMA_PFHMC_SDDATALOW_PMSDVALID_S);
entry->cmd = (idx | (1 << IRDMA_PFHMC_SDCMD_PMSDWR_S) | (1 << 15));
}
/**
* irdma_clr_sd_entry - setup entry for sd clear
* @idx: sd index
* @type: paged or direct sd
* @entry: sd entry ptr
*/
static void
irdma_clr_sd_entry(u32 idx, enum irdma_sd_entry_type type,
struct irdma_update_sd_entry *entry)
{
entry->data = (IRDMA_HMC_MAX_BP_COUNT << IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT_S) |
(((type == IRDMA_SD_TYPE_PAGED) ? 0 : 1) << IRDMA_PFHMC_SDDATALOW_PMSDTYPE_S);
entry->cmd = (idx | (1 << IRDMA_PFHMC_SDCMD_PMSDWR_S) | (1 << 15));
}
/**
* irdma_invalidate_pf_hmc_pd - Invalidates the pd cache in the hardware for PF
* @dev: pointer to our device struct
* @sd_idx: segment descriptor index
* @pd_idx: page descriptor index
*/
static inline void
irdma_invalidate_pf_hmc_pd(struct irdma_sc_dev *dev, u32 sd_idx,
u32 pd_idx)
{
u32 val = LS_32(sd_idx, IRDMA_PFHMC_PDINV_PMSDIDX) |
LS_32(1, IRDMA_PFHMC_PDINV_PMSDPARTSEL) |
LS_32(pd_idx, IRDMA_PFHMC_PDINV_PMPDIDX);
writel(val, dev->hw_regs[IRDMA_PFHMC_PDINV]);
}
/**
* irdma_hmc_sd_one - setup 1 sd entry for cqp
* @dev: pointer to the device structure
* @hmc_fn_id: hmc's function id
* @pa: physical addr
* @sd_idx: sd index
* @type: paged or direct sd
* @setsd: flag to set or clear sd
*/
int
irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id, u64 pa, u32 sd_idx,
enum irdma_sd_entry_type type, bool setsd)
{
struct irdma_update_sds_info sdinfo;
sdinfo.cnt = 1;
sdinfo.hmc_fn_id = hmc_fn_id;
if (setsd)
irdma_set_sd_entry(pa, sd_idx, type, sdinfo.entry);
else
irdma_clr_sd_entry(sd_idx, type, sdinfo.entry);
return dev->cqp->process_cqp_sds(dev, &sdinfo);
}
/**
* irdma_hmc_sd_grp - setup group of sd entries for cqp
* @dev: pointer to the device structure
* @hmc_info: pointer to the HMC configuration information struct
* @sd_index: sd index
* @sd_cnt: number of sd entries
* @setsd: flag to set or clear sd
*/
static int
irdma_hmc_sd_grp(struct irdma_sc_dev *dev,
struct irdma_hmc_info *hmc_info, u32 sd_index,
u32 sd_cnt, bool setsd)
{
struct irdma_hmc_sd_entry *sd_entry;
struct irdma_update_sds_info sdinfo = {0};
u64 pa;
u32 i;
int ret_code = 0;
sdinfo.hmc_fn_id = hmc_info->hmc_fn_id;
for (i = sd_index; i < sd_index + sd_cnt; i++) {
sd_entry = &hmc_info->sd_table.sd_entry[i];
if (!sd_entry || (!sd_entry->valid && setsd) ||
(sd_entry->valid && !setsd))
continue;
if (setsd) {
pa = (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED) ?
sd_entry->u.pd_table.pd_page_addr.pa :
sd_entry->u.bp.addr.pa;
irdma_set_sd_entry(pa, i, sd_entry->entry_type,
&sdinfo.entry[sdinfo.cnt]);
} else {
irdma_clr_sd_entry(i, sd_entry->entry_type,
&sdinfo.entry[sdinfo.cnt]);
}
sdinfo.cnt++;
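/* a full batch of SD updates goes out as a single CQP operation */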
if (sdinfo.cnt == IRDMA_MAX_SD_ENTRIES) {
ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
if (ret_code) {
irdma_debug(dev, IRDMA_DEBUG_HMC,
"sd_programming failed err=%d\n",
ret_code);
return ret_code;
}
sdinfo.cnt = 0;
}
}
if (sdinfo.cnt)
ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
return ret_code;
}
/**
* irdma_hmc_finish_add_sd_reg - program sd entries for objects
* @dev: pointer to the device structure
* @info: create obj info
*/
static int
irdma_hmc_finish_add_sd_reg(struct irdma_sc_dev *dev,
struct irdma_hmc_create_obj_info *info)
{
if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
return -EINVAL;
if ((info->start_idx + info->count) >
info->hmc_info->hmc_obj[info->rsrc_type].cnt)
return -EINVAL;
if (!info->add_sd_cnt)
return 0;
return irdma_hmc_sd_grp(dev, info->hmc_info,
info->hmc_info->sd_indexes[0], info->add_sd_cnt,
true);
}
/**
* irdma_sc_create_hmc_obj - allocate backing store for hmc objects
* @dev: pointer to the device structure
* @info: pointer to irdma_hmc_create_obj_info struct
*
* This will allocate memory for PDs and backing pages and populate
* the sd and pd entries.
*/
int
irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
struct irdma_hmc_create_obj_info *info)
{
struct irdma_hmc_sd_entry *sd_entry;
u32 sd_idx, sd_lmt;
u32 pd_idx = 0, pd_lmt = 0;
u32 pd_idx1 = 0, pd_lmt1 = 0;
u32 i, j;
bool pd_error = false;
int ret_code = 0;
if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
return -EINVAL;
if ((info->start_idx + info->count) >
info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
irdma_debug(dev, IRDMA_DEBUG_HMC,
"error type %u, start = %u, req cnt %u, cnt = %u\n",
info->rsrc_type, info->start_idx, info->count,
info->hmc_info->hmc_obj[info->rsrc_type].cnt);
return -EINVAL;
}
irdma_find_sd_index_limit(info->hmc_info, info->rsrc_type,
info->start_idx, info->count, &sd_idx,
&sd_lmt);
if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
sd_lmt > info->hmc_info->sd_table.sd_cnt) {
return -EINVAL;
}
irdma_find_pd_index_limit(info->hmc_info, info->rsrc_type,
info->start_idx, info->count, &pd_idx,
&pd_lmt);
for (j = sd_idx; j < sd_lmt; j++) {
ret_code = irdma_add_sd_table_entry(dev->hw, info->hmc_info, j,
info->entry_type,
IRDMA_HMC_DIRECT_BP_SIZE);
if (ret_code)
goto exit_sd_error;
sd_entry = &info->hmc_info->sd_table.sd_entry[j];
if (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED &&
(dev->hmc_info == info->hmc_info &&
info->rsrc_type != IRDMA_HMC_IW_PBLE)) {
pd_idx1 = max(pd_idx, (j * IRDMA_HMC_MAX_BP_COUNT));
pd_lmt1 = min(pd_lmt, (j + 1) * IRDMA_HMC_MAX_BP_COUNT);
for (i = pd_idx1; i < pd_lmt1; i++) {
/* update the pd table entry */
ret_code = irdma_add_pd_table_entry(dev,
info->hmc_info,
i, NULL);
if (ret_code) {
pd_error = true;
break;
}
}
if (pd_error) {
while (i && (i > pd_idx1)) {
irdma_remove_pd_bp(dev, info->hmc_info,
i - 1);
i--;
}
}
}
if (sd_entry->valid)
continue;
info->hmc_info->sd_indexes[info->add_sd_cnt] = (u16)j;
info->add_sd_cnt++;
sd_entry->valid = true;
}
return irdma_hmc_finish_add_sd_reg(dev, info);
exit_sd_error:
while (j && (j > sd_idx)) {
sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
switch (sd_entry->entry_type) {
case IRDMA_SD_TYPE_PAGED:
pd_idx1 = max(pd_idx, (j - 1) * IRDMA_HMC_MAX_BP_COUNT);
pd_lmt1 = min(pd_lmt, (j * IRDMA_HMC_MAX_BP_COUNT));
for (i = pd_idx1; i < pd_lmt1; i++)
irdma_prep_remove_pd_page(info->hmc_info, i);
break;
case IRDMA_SD_TYPE_DIRECT:
irdma_prep_remove_pd_page(info->hmc_info, (j - 1));
break;
default:
ret_code = -EINVAL;
break;
}
j--;
}
return ret_code;
}
/**
* irdma_finish_del_sd_reg - delete sd entries for objects
* @dev: pointer to the device structure
* @info: delete obj info
* @reset: true if called before reset
*/
static int
irdma_finish_del_sd_reg(struct irdma_sc_dev *dev,
struct irdma_hmc_del_obj_info *info,
bool reset)
{
struct irdma_hmc_sd_entry *sd_entry;
int ret_code = 0;
struct irdma_dma_mem *mem;
u32 i, sd_idx;
if (!reset)
ret_code = irdma_hmc_sd_grp(dev, info->hmc_info,
info->hmc_info->sd_indexes[0],
info->del_sd_cnt, false);
if (ret_code)
irdma_debug(dev, IRDMA_DEBUG_HMC, "error cqp sd sd_grp\n");
for (i = 0; i < info->del_sd_cnt; i++) {
sd_idx = info->hmc_info->sd_indexes[i];
sd_entry = &info->hmc_info->sd_table.sd_entry[sd_idx];
mem = (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED) ?
&sd_entry->u.pd_table.pd_page_addr :
&sd_entry->u.bp.addr;
if (!mem || !mem->va)
irdma_debug(dev, IRDMA_DEBUG_HMC, "error cqp sd mem\n");
else
irdma_free_dma_mem(dev->hw, mem);
}
return ret_code;
}
/**
* irdma_sc_del_hmc_obj - remove pe hmc objects
* @dev: pointer to the device structure
* @info: pointer to irdma_hmc_del_obj_info struct
* @reset: true if called before reset
*
* This will de-populate the SDs and PDs. It frees
* the memory for PDs and backing storage. After this function returns, the
* caller should deallocate memory allocated previously for
* book-keeping information about PDs and backing storage.
*/
int
irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
struct irdma_hmc_del_obj_info *info, bool reset)
{
struct irdma_hmc_pd_table *pd_table;
u32 sd_idx, sd_lmt;
u32 pd_idx, pd_lmt, rel_pd_idx;
u32 i, j;
int ret_code = 0;
if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
irdma_debug(dev, IRDMA_DEBUG_HMC,
"error start_idx[%04d] >= [type %04d].cnt[%04d]\n",
info->start_idx, info->rsrc_type,
info->hmc_info->hmc_obj[info->rsrc_type].cnt);
return -EINVAL;
}
if ((info->start_idx + info->count) >
info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
irdma_debug(dev, IRDMA_DEBUG_HMC,
"error start_idx[%04d] + count %04d >= [type %04d].cnt[%04d]\n",
info->start_idx, info->count, info->rsrc_type,
info->hmc_info->hmc_obj[info->rsrc_type].cnt);
return -EINVAL;
}
irdma_find_pd_index_limit(info->hmc_info, info->rsrc_type,
info->start_idx, info->count, &pd_idx,
&pd_lmt);
for (j = pd_idx; j < pd_lmt; j++) {
sd_idx = j / IRDMA_HMC_PD_CNT_IN_SD;
if (!info->hmc_info->sd_table.sd_entry[sd_idx].valid)
continue;
if (info->hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
IRDMA_SD_TYPE_PAGED)
continue;
rel_pd_idx = j % IRDMA_HMC_PD_CNT_IN_SD;
pd_table = &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
if (pd_table->pd_entry &&
pd_table->pd_entry[rel_pd_idx].valid) {
ret_code = irdma_remove_pd_bp(dev, info->hmc_info, j);
if (ret_code) {
irdma_debug(dev, IRDMA_DEBUG_HMC,
"remove_pd_bp error\n");
return ret_code;
}
}
}
irdma_find_sd_index_limit(info->hmc_info, info->rsrc_type,
info->start_idx, info->count, &sd_idx,
&sd_lmt);
if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
sd_lmt > info->hmc_info->sd_table.sd_cnt) {
irdma_debug(dev, IRDMA_DEBUG_HMC, "invalid sd_idx\n");
return -EINVAL;
}
for (i = sd_idx; i < sd_lmt; i++) {
pd_table = &info->hmc_info->sd_table.sd_entry[i].u.pd_table;
if (!info->hmc_info->sd_table.sd_entry[i].valid)
continue;
switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
case IRDMA_SD_TYPE_DIRECT:
ret_code = irdma_prep_remove_sd_bp(info->hmc_info, i);
if (!ret_code) {
info->hmc_info->sd_indexes[info->del_sd_cnt] =
(u16)i;
info->del_sd_cnt++;
}
break;
case IRDMA_SD_TYPE_PAGED:
ret_code = irdma_prep_remove_pd_page(info->hmc_info, i);
if (ret_code)
break;
if (dev->hmc_info != info->hmc_info &&
info->rsrc_type == IRDMA_HMC_IW_PBLE &&
pd_table->pd_entry) {
kfree(pd_table->pd_entry_virt_mem.va);
pd_table->pd_entry = NULL;
}
info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;
info->del_sd_cnt++;
break;
default:
break;
}
}
return irdma_finish_del_sd_reg(dev, info, reset);
}
/**
* irdma_add_sd_table_entry - Adds a segment descriptor to the table
* @hw: pointer to our hw struct
* @hmc_info: pointer to the HMC configuration information struct
* @sd_index: segment descriptor index to manipulate
* @type: what type of segment descriptor we're manipulating
* @direct_mode_sz: size to alloc in direct mode
*/
int
irdma_add_sd_table_entry(struct irdma_hw *hw,
struct irdma_hmc_info *hmc_info, u32 sd_index,
enum irdma_sd_entry_type type, u64 direct_mode_sz)
{
struct irdma_hmc_sd_entry *sd_entry;
struct irdma_dma_mem dma_mem;
u64 alloc_len;
sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
if (!sd_entry->valid) {
if (type == IRDMA_SD_TYPE_PAGED)
alloc_len = IRDMA_HMC_PAGED_BP_SIZE;
else
alloc_len = direct_mode_sz;
/* allocate a 4K pd page or 2M backing page */
dma_mem.size = alloc_len;
dma_mem.va = irdma_allocate_dma_mem(hw, &dma_mem, dma_mem.size,
IRDMA_HMC_PD_BP_BUF_ALIGNMENT);
if (!dma_mem.va)
return -ENOMEM;
if (type == IRDMA_SD_TYPE_PAGED) {
struct irdma_virt_mem *vmem =
&sd_entry->u.pd_table.pd_entry_virt_mem;
vmem->size = sizeof(struct irdma_hmc_pd_entry) * 512;
vmem->va = kzalloc(vmem->size, GFP_ATOMIC);
if (!vmem->va) {
irdma_free_dma_mem(hw, &dma_mem);
return -ENOMEM;
}
sd_entry->u.pd_table.pd_entry = vmem->va;
irdma_memcpy(&sd_entry->u.pd_table.pd_page_addr, &dma_mem,
sizeof(sd_entry->u.pd_table.pd_page_addr));
} else {
irdma_memcpy(&sd_entry->u.bp.addr, &dma_mem,
sizeof(sd_entry->u.bp.addr));
sd_entry->u.bp.sd_pd_index = sd_index;
}
hmc_info->sd_table.sd_entry[sd_index].entry_type = type;
hmc_info->sd_table.use_cnt++;
}
if (sd_entry->entry_type == IRDMA_SD_TYPE_DIRECT)
sd_entry->u.bp.use_cnt++;
return 0;
}
/**
* irdma_add_pd_table_entry - Adds page descriptor to the specified table
* @dev: pointer to our device structure
* @hmc_info: pointer to the HMC configuration information structure
* @pd_index: which page descriptor index to manipulate
* @rsrc_pg: if not NULL, use preallocated page instead of allocating new one.
*
* This function:
* 1. Initializes the pd entry
* 2. Adds the pd_entry to the pd_table
* 3. Marks the entry valid in the irdma_hmc_pd_entry structure
* 4. Initializes the pd_entry's ref count to 1
* assumptions:
* 1. The memory for the pd should be pinned down, physically contiguous,
* aligned on a 4K boundary, and zeroed.
* 2. It should be 4K in size.
*/
int
irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
struct irdma_hmc_info *hmc_info, u32 pd_index,
struct irdma_dma_mem *rsrc_pg)
{
struct irdma_hmc_pd_table *pd_table;
struct irdma_hmc_pd_entry *pd_entry;
struct irdma_dma_mem mem;
struct irdma_dma_mem *page = &mem;
u32 sd_idx, rel_pd_idx;
u64 *pd_addr;
u64 page_desc;
if (pd_index / IRDMA_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt)
return -EINVAL;
sd_idx = (pd_index / IRDMA_HMC_PD_CNT_IN_SD);
if (hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
IRDMA_SD_TYPE_PAGED)
return 0;
rel_pd_idx = (pd_index % IRDMA_HMC_PD_CNT_IN_SD);
pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
pd_entry = &pd_table->pd_entry[rel_pd_idx];
if (!pd_entry->valid) {
if (rsrc_pg) {
pd_entry->rsrc_pg = true;
page = rsrc_pg;
} else {
page->size = IRDMA_HMC_PAGED_BP_SIZE;
page->va = irdma_allocate_dma_mem(dev->hw, page,
page->size,
IRDMA_HMC_PD_BP_BUF_ALIGNMENT);
if (!page->va)
return -ENOMEM;
pd_entry->rsrc_pg = false;
}
irdma_memcpy(&pd_entry->bp.addr, page, sizeof(pd_entry->bp.addr));
pd_entry->bp.sd_pd_index = pd_index;
pd_entry->bp.entry_type = IRDMA_SD_TYPE_PAGED;
page_desc = page->pa | 0x1;
pd_addr = pd_table->pd_page_addr.va;
pd_addr += rel_pd_idx;
irdma_memcpy(pd_addr, &page_desc, sizeof(*pd_addr));
pd_entry->sd_index = sd_idx;
pd_entry->valid = true;
pd_table->use_cnt++;
irdma_invalidate_pf_hmc_pd(dev, sd_idx, rel_pd_idx);
}
pd_entry->bp.use_cnt++;
return 0;
}
/**
* irdma_remove_pd_bp - remove a backing page from a page descriptor
* @dev: pointer to our HW structure
* @hmc_info: pointer to the HMC configuration information structure
* @idx: the page index
*
* This function:
* 1. Marks the entry in the pd table (for paged address mode) or in the sd
* table (for direct address mode) invalid.
* 2. Writes to register PMPDINV to invalidate the backing page in the FV cache
* 3. Decrements the ref count for the pd_entry
* assumptions:
* 1. Caller can deallocate the memory used by backing storage after this
* function returns.
*/
int
irdma_remove_pd_bp(struct irdma_sc_dev *dev,
struct irdma_hmc_info *hmc_info, u32 idx)
{
struct irdma_hmc_pd_entry *pd_entry;
struct irdma_hmc_pd_table *pd_table;
struct irdma_hmc_sd_entry *sd_entry;
u32 sd_idx, rel_pd_idx;
struct irdma_dma_mem *mem;
u64 *pd_addr;
sd_idx = idx / IRDMA_HMC_PD_CNT_IN_SD;
rel_pd_idx = idx % IRDMA_HMC_PD_CNT_IN_SD;
if (sd_idx >= hmc_info->sd_table.sd_cnt)
return -EINVAL;
sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
if (sd_entry->entry_type != IRDMA_SD_TYPE_PAGED)
return -EINVAL;
pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
pd_entry = &pd_table->pd_entry[rel_pd_idx];
if (--pd_entry->bp.use_cnt)
return 0;
pd_entry->valid = false;
pd_table->use_cnt--;
pd_addr = pd_table->pd_page_addr.va;
pd_addr += rel_pd_idx;
irdma_memset(pd_addr, 0, sizeof(u64));
irdma_invalidate_pf_hmc_pd(dev, sd_idx, idx);
if (!pd_entry->rsrc_pg) {
mem = &pd_entry->bp.addr;
if (!mem || !mem->va)
return -EINVAL;
irdma_free_dma_mem(dev->hw, mem);
}
if (!pd_table->use_cnt)
kfree(pd_table->pd_entry_virt_mem.va);
return 0;
}
/**
* irdma_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
* @hmc_info: pointer to the HMC configuration information structure
* @idx: the page index
*/
int
irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info, u32 idx)
{
struct irdma_hmc_sd_entry *sd_entry;
sd_entry = &hmc_info->sd_table.sd_entry[idx];
if (--sd_entry->u.bp.use_cnt)
return -EBUSY;
hmc_info->sd_table.use_cnt--;
sd_entry->valid = false;
return 0;
}
/**
* irdma_prep_remove_pd_page - Prepares to remove a PD page from sd entry.
* @hmc_info: pointer to the HMC configuration information structure
* @idx: segment descriptor index to find the relevant page descriptor
*/
int
irdma_prep_remove_pd_page(struct irdma_hmc_info *hmc_info, u32 idx)
{
struct irdma_hmc_sd_entry *sd_entry;
sd_entry = &hmc_info->sd_table.sd_entry[idx];
if (sd_entry->u.pd_table.use_cnt)
return -EBUSY;
sd_entry->valid = false;
hmc_info->sd_table.use_cnt--;
return 0;
}

202
sys/dev/irdma/irdma_hmc.h Normal file

@@ -0,0 +1,202 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2015 - 2021 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#ifndef IRDMA_HMC_H
#define IRDMA_HMC_H
#include "irdma_defs.h"
#define IRDMA_HMC_MAX_BP_COUNT 512
#define IRDMA_MAX_SD_ENTRIES 11
#define IRDMA_HW_DBG_HMC_INVALID_BP_MARK 0xca
#define IRDMA_HMC_INFO_SIGNATURE 0x484d5347
#define IRDMA_HMC_PD_CNT_IN_SD 512
#define IRDMA_HMC_DIRECT_BP_SIZE 0x200000
#define IRDMA_HMC_MAX_SD_COUNT 8192
#define IRDMA_HMC_PAGED_BP_SIZE 4096
#define IRDMA_HMC_PD_BP_BUF_ALIGNMENT 4096
#define IRDMA_FIRST_VF_FPM_ID 8
#define FPM_MULTIPLIER 1024
enum irdma_hmc_rsrc_type {
IRDMA_HMC_IW_QP = 0,
IRDMA_HMC_IW_CQ = 1,
IRDMA_HMC_IW_RESERVED = 2,
IRDMA_HMC_IW_HTE = 3,
IRDMA_HMC_IW_ARP = 4,
IRDMA_HMC_IW_APBVT_ENTRY = 5,
IRDMA_HMC_IW_MR = 6,
IRDMA_HMC_IW_XF = 7,
IRDMA_HMC_IW_XFFL = 8,
IRDMA_HMC_IW_Q1 = 9,
IRDMA_HMC_IW_Q1FL = 10,
IRDMA_HMC_IW_TIMER = 11,
IRDMA_HMC_IW_FSIMC = 12,
IRDMA_HMC_IW_FSIAV = 13,
IRDMA_HMC_IW_PBLE = 14,
IRDMA_HMC_IW_RRF = 15,
IRDMA_HMC_IW_RRFFL = 16,
IRDMA_HMC_IW_HDR = 17,
IRDMA_HMC_IW_MD = 18,
IRDMA_HMC_IW_OOISC = 19,
IRDMA_HMC_IW_OOISCFFL = 20,
IRDMA_HMC_IW_MAX, /* Must be last entry */
};
enum irdma_sd_entry_type {
IRDMA_SD_TYPE_INVALID = 0,
IRDMA_SD_TYPE_PAGED = 1,
IRDMA_SD_TYPE_DIRECT = 2,
};
struct irdma_hmc_obj_info {
u64 base;
u32 max_cnt;
u32 cnt;
u64 size;
};
struct irdma_hmc_bp {
enum irdma_sd_entry_type entry_type;
struct irdma_dma_mem addr;
u32 sd_pd_index;
u32 use_cnt;
};
struct irdma_hmc_pd_entry {
struct irdma_hmc_bp bp;
u32 sd_index;
bool rsrc_pg:1;
bool valid:1;
};
struct irdma_hmc_pd_table {
struct irdma_dma_mem pd_page_addr;
struct irdma_hmc_pd_entry *pd_entry;
struct irdma_virt_mem pd_entry_virt_mem;
u32 use_cnt;
u32 sd_index;
};
struct irdma_hmc_sd_entry {
enum irdma_sd_entry_type entry_type;
bool valid;
union {
struct irdma_hmc_pd_table pd_table;
struct irdma_hmc_bp bp;
} u;
};
struct irdma_hmc_sd_table {
struct irdma_virt_mem addr;
u32 sd_cnt;
u32 use_cnt;
struct irdma_hmc_sd_entry *sd_entry;
};
struct irdma_hmc_info {
u32 signature;
u8 hmc_fn_id;
u16 first_sd_index;
struct irdma_hmc_obj_info *hmc_obj;
struct irdma_virt_mem hmc_obj_virt_mem;
struct irdma_hmc_sd_table sd_table;
u16 sd_indexes[IRDMA_HMC_MAX_SD_COUNT];
};
struct irdma_update_sd_entry {
u64 cmd;
u64 data;
};
struct irdma_update_sds_info {
u32 cnt;
u8 hmc_fn_id;
struct irdma_update_sd_entry entry[IRDMA_MAX_SD_ENTRIES];
};
struct irdma_ccq_cqe_info;
struct irdma_hmc_fcn_info {
u32 vf_id;
u8 free_fcn;
};
struct irdma_hmc_create_obj_info {
struct irdma_hmc_info *hmc_info;
struct irdma_virt_mem add_sd_virt_mem;
u32 rsrc_type;
u32 start_idx;
u32 count;
u32 add_sd_cnt;
enum irdma_sd_entry_type entry_type;
bool privileged;
};
struct irdma_hmc_del_obj_info {
struct irdma_hmc_info *hmc_info;
struct irdma_virt_mem del_sd_virt_mem;
u32 rsrc_type;
u32 start_idx;
u32 count;
u32 del_sd_cnt;
bool privileged;
};
int irdma_copy_dma_mem(struct irdma_hw *hw, void *dest_buf,
struct irdma_dma_mem *src_mem, u64 src_offset, u64 size);
int irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
struct irdma_hmc_create_obj_info *info);
int irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
struct irdma_hmc_del_obj_info *info, bool reset);
int irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id, u64 pa, u32 sd_idx,
enum irdma_sd_entry_type type,
bool setsd);
int irdma_update_sds_noccq(struct irdma_sc_dev *dev,
struct irdma_update_sds_info *info);
struct irdma_vfdev *irdma_vfdev_from_fpm(struct irdma_sc_dev *dev,
u8 hmc_fn_id);
struct irdma_hmc_info *irdma_vf_hmcinfo_from_fpm(struct irdma_sc_dev *dev,
u8 hmc_fn_id);
int irdma_add_sd_table_entry(struct irdma_hw *hw,
struct irdma_hmc_info *hmc_info, u32 sd_index,
enum irdma_sd_entry_type type, u64 direct_mode_sz);
int irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
struct irdma_hmc_info *hmc_info, u32 pd_index,
struct irdma_dma_mem *rsrc_pg);
int irdma_remove_pd_bp(struct irdma_sc_dev *dev,
struct irdma_hmc_info *hmc_info, u32 idx);
int irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info, u32 idx);
int irdma_prep_remove_pd_page(struct irdma_hmc_info *hmc_info, u32 idx);
#endif /* IRDMA_HMC_H */

2829
sys/dev/irdma/irdma_hw.c Normal file

File diff suppressed because it is too large

File diff suppressed because it is too large

589
sys/dev/irdma/irdma_main.h Normal file

@@ -0,0 +1,589 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2015 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#ifndef IRDMA_MAIN_H
#define IRDMA_MAIN_H
#include <linux/in.h>
#include <netinet/ip6.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <netinet/if_ether.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>
#include <rdma/rdma_cm.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_cache.h>
#include <rdma/uverbs_ioctl.h>
#include "osdep.h"
#include "irdma_defs.h"
#include "irdma_hmc.h"
#include "irdma_type.h"
#include "irdma_ws.h"
#include "irdma_protos.h"
#include "irdma_pble.h"
#include "irdma_cm.h"
#include "fbsd_kcompat.h"
#include "irdma-abi.h"
#include "irdma_verbs.h"
#include "irdma_user.h"
#include "irdma_puda.h"
extern struct list_head irdma_handlers;
extern spinlock_t irdma_handler_lock;
extern bool irdma_upload_context;
#define IRDMA_FW_VER_DEFAULT 2
#define IRDMA_HW_VER 2
#define IRDMA_ARP_ADD 1
#define IRDMA_ARP_DELETE 2
#define IRDMA_ARP_RESOLVE 3
#define IRDMA_MACIP_ADD 1
#define IRDMA_MACIP_DELETE 2
#define IW_CCQ_SIZE (IRDMA_CQP_SW_SQSIZE_2048 + 1)
#define IW_CEQ_SIZE 2048
#define IW_AEQ_SIZE 2048
#define RX_BUF_SIZE (1536 + 8)
#define IW_REG0_SIZE (4 * 1024)
#define IW_TX_TIMEOUT (6 * HZ)
#define IW_FIRST_QPN 1
#define IW_SW_CONTEXT_ALIGN 1024
#define MAX_DPC_ITERATIONS 128
#define IRDMA_EVENT_TIMEOUT_MS 5000
#define IRDMA_VCHNL_EVENT_TIMEOUT_MS 10000
#define IRDMA_RST_TIMEOUT_HZ 4
#define IRDMA_NO_QSET 0xffff
#define IW_CFG_FPM_QP_COUNT 32768
#define IRDMA_MAX_PAGES_PER_FMR 512
#define IRDMA_MIN_PAGES_PER_FMR 1
#define IRDMA_CQP_COMPL_RQ_WQE_FLUSHED 2
#define IRDMA_CQP_COMPL_SQ_WQE_FLUSHED 3
#define IRDMA_Q_TYPE_PE_AEQ 0x80
#define IRDMA_Q_INVALID_IDX 0xffff
#define IRDMA_REM_ENDPOINT_TRK_QPID 3
#define IRDMA_DRV_OPT_ENA_MPA_VER_0 0x00000001
#define IRDMA_DRV_OPT_DISABLE_MPA_CRC 0x00000002
#define IRDMA_DRV_OPT_DISABLE_FIRST_WRITE 0x00000004
#define IRDMA_DRV_OPT_DISABLE_INTF 0x00000008
#define IRDMA_DRV_OPT_ENA_MSI 0x00000010
#define IRDMA_DRV_OPT_DUAL_LOGICAL_PORT 0x00000020
#define IRDMA_DRV_OPT_NO_INLINE_DATA 0x00000080
#define IRDMA_DRV_OPT_DISABLE_INT_MOD 0x00000100
#define IRDMA_DRV_OPT_DISABLE_VIRT_WQ 0x00000200
#define IRDMA_DRV_OPT_ENA_PAU 0x00000400
#define IRDMA_DRV_OPT_MCAST_LOGPORT_MAP 0x00000800
#define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types)
#define VSI_RXSWCTRL(_VSI) (0x00205000 + ((_VSI) * 4))
#define VSI_RXSWCTRL_MACVSIPRUNEENABLE_M BIT(8)
#define VSI_RXSWCTRL_SRCPRUNEENABLE_M BIT(13)
#define IRDMA_ROCE_CWND_DEFAULT 0x400
#define IRDMA_ROCE_ACKCREDS_DEFAULT 0x1E
#define IRDMA_FLUSH_SQ BIT(0)
#define IRDMA_FLUSH_RQ BIT(1)
#define IRDMA_REFLUSH BIT(2)
#define IRDMA_FLUSH_WAIT BIT(3)
enum init_completion_state {
INVALID_STATE = 0,
INITIAL_STATE,
CQP_CREATED,
HMC_OBJS_CREATED,
HW_RSRC_INITIALIZED,
CCQ_CREATED,
CEQ0_CREATED, /* Last state of probe */
ILQ_CREATED,
IEQ_CREATED,
REM_ENDPOINT_TRK_CREATED,
CEQS_CREATED,
PBLE_CHUNK_MEM,
AEQ_CREATED,
IP_ADDR_REGISTERED, /* Last state of open */
};
struct irdma_rsrc_limits {
u32 qplimit;
u32 mrlimit;
u32 cqlimit;
};
struct irdma_cqp_err_info {
u16 maj;
u16 min;
const char *desc;
};
struct irdma_cqp_compl_info {
u32 op_ret_val;
u16 maj_err_code;
u16 min_err_code;
bool error;
u8 op_code;
};
struct irdma_cqp_request {
struct cqp_cmds_info info;
wait_queue_head_t waitq;
struct list_head list;
atomic_t refcnt;
void (*callback_fcn)(struct irdma_cqp_request *cqp_request);
void *param;
struct irdma_cqp_compl_info compl_info;
bool waiting:1;
bool request_done:1;
bool dynamic:1;
};
struct irdma_cqp {
struct irdma_sc_cqp sc_cqp;
spinlock_t req_lock; /* protect CQP request list */
spinlock_t compl_lock; /* protect CQP completion processing */
wait_queue_head_t waitq;
wait_queue_head_t remove_wq;
struct irdma_dma_mem sq;
struct irdma_dma_mem host_ctx;
u64 *scratch_array;
struct irdma_cqp_request *cqp_requests;
struct list_head cqp_avail_reqs;
struct list_head cqp_pending_reqs;
};
struct irdma_ccq {
struct irdma_sc_cq sc_cq;
struct irdma_dma_mem mem_cq;
struct irdma_dma_mem shadow_area;
};
struct irdma_ceq {
struct irdma_sc_ceq sc_ceq;
struct irdma_dma_mem mem;
u32 irq;
u32 msix_idx;
struct irdma_pci_f *rf;
struct tasklet_struct dpc_tasklet;
spinlock_t ce_lock; /* sync cq destroy with cq completion event notification */
};
struct irdma_aeq {
struct irdma_sc_aeq sc_aeq;
struct irdma_dma_mem mem;
struct irdma_pble_alloc palloc;
bool virtual_map;
};
struct irdma_arp_entry {
u32 ip_addr[4];
u8 mac_addr[ETH_ALEN];
};
struct irdma_msix_vector {
u32 idx;
u32 irq;
u32 cpu_affinity;
u32 ceq_id;
struct resource *res;
void *tag;
};
struct irdma_mc_table_info {
u32 mgn;
u32 dest_ip[4];
bool lan_fwd:1;
bool ipv4_valid:1;
};
struct mc_table_list {
struct list_head list;
struct irdma_mc_table_info mc_info;
struct irdma_mcast_grp_info mc_grp_ctx;
};
struct irdma_qv_info {
u32 v_idx; /* msix_vector */
u16 ceq_idx;
u16 aeq_idx;
u8 itr_idx;
};
struct irdma_qvlist_info {
u32 num_vectors;
struct irdma_qv_info qv_info[1];
};
struct irdma_gen_ops {
void (*request_reset)(struct irdma_pci_f *rf);
int (*register_qset)(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *tc_node);
void (*unregister_qset)(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *tc_node);
};
struct irdma_pci_f {
bool reset:1;
bool rsrc_created:1;
bool msix_shared:1;
bool ftype:1;
u8 rsrc_profile;
u8 *hmc_info_mem;
u8 *mem_rsrc;
u8 rdma_ver;
u8 rst_to;
enum irdma_protocol_used protocol_used;
bool en_rem_endpoint_trk:1;
bool dcqcn_ena:1;
u32 sd_type;
u32 msix_count;
u32 max_mr;
u32 max_qp;
u32 max_cq;
u32 max_ah;
u32 next_ah;
u32 max_mcg;
u32 next_mcg;
u32 max_pd;
u32 next_qp;
u32 next_cq;
u32 next_pd;
u32 max_mr_size;
u32 max_cqe;
u32 mr_stagmask;
u32 used_pds;
u32 used_cqs;
u32 used_mrs;
u32 used_qps;
u32 arp_table_size;
u32 next_arp_index;
u32 ceqs_count;
u32 next_ws_node_id;
u32 max_ws_node_id;
u32 limits_sel;
unsigned long *allocated_ws_nodes;
unsigned long *allocated_qps;
unsigned long *allocated_cqs;
unsigned long *allocated_mrs;
unsigned long *allocated_pds;
unsigned long *allocated_mcgs;
unsigned long *allocated_ahs;
unsigned long *allocated_arps;
enum init_completion_state init_state;
struct irdma_sc_dev sc_dev;
struct irdma_dev_ctx dev_ctx;
struct irdma_tunable_info tun_info;
eventhandler_tag irdma_ifaddr_event;
struct irdma_handler *hdl;
struct pci_dev *pcidev;
struct ice_rdma_peer *peer_info;
struct irdma_hw hw;
struct irdma_cqp cqp;
struct irdma_ccq ccq;
struct irdma_aeq aeq;
struct irdma_ceq *ceqlist;
struct irdma_hmc_pble_rsrc *pble_rsrc;
struct irdma_arp_entry *arp_table;
spinlock_t arp_lock; /*protect ARP table access*/
spinlock_t rsrc_lock; /* protect HW resource array access */
spinlock_t qptable_lock; /*protect QP table access*/
spinlock_t cqtable_lock; /*protect CQ table access*/
struct irdma_qp **qp_table;
struct irdma_cq **cq_table;
spinlock_t qh_list_lock; /* protect mc_qht_list */
struct mc_table_list mc_qht_list;
struct irdma_msix_vector *iw_msixtbl;
struct irdma_qvlist_info *iw_qvlist;
struct tasklet_struct dpc_tasklet;
struct msix_entry msix_info;
struct irdma_dma_mem obj_mem;
struct irdma_dma_mem obj_next;
atomic_t vchnl_msgs;
wait_queue_head_t vchnl_waitq;
struct workqueue_struct *cqp_cmpl_wq;
struct work_struct cqp_cmpl_work;
struct irdma_sc_vsi default_vsi;
void *back_fcn;
struct irdma_gen_ops gen_ops;
void (*check_fc)(struct irdma_sc_vsi *vsi, struct irdma_sc_qp *sc_qp);
struct irdma_dcqcn_cc_params dcqcn_params;
struct irdma_device *iwdev;
};
struct irdma_device {
struct ib_device ibdev;
struct irdma_pci_f *rf;
struct ifnet *netdev;
struct irdma_handler *hdl;
struct workqueue_struct *cleanup_wq;
struct irdma_sc_vsi vsi;
struct irdma_cm_core cm_core;
u32 roce_cwnd;
u32 roce_ackcreds;
u32 vendor_id;
u32 vendor_part_id;
u32 device_cap_flags;
u32 push_mode;
u32 rcv_wnd;
u16 mac_ip_table_idx;
u16 vsi_num;
u8 rcv_wscale;
u8 iw_status;
u8 rd_fence_rate;
bool roce_mode:1;
bool roce_dcqcn_en:1;
bool dcb_vlan_mode:1;
bool iw_ooo:1;
enum init_completion_state init_state;
wait_queue_head_t suspend_wq;
};
struct irdma_handler {
struct list_head list;
struct irdma_device *iwdev;
struct task deferred_task;
struct taskqueue *deferred_tq;
bool shared_res_created;
};
static inline struct irdma_device *to_iwdev(struct ib_device *ibdev)
{
return container_of(ibdev, struct irdma_device, ibdev);
}
static inline struct irdma_ucontext *to_ucontext(struct ib_ucontext *ibucontext)
{
return container_of(ibucontext, struct irdma_ucontext, ibucontext);
}
static inline struct irdma_user_mmap_entry *
to_irdma_mmap_entry(struct rdma_user_mmap_entry *rdma_entry)
{
return container_of(rdma_entry, struct irdma_user_mmap_entry,
rdma_entry);
}
static inline struct irdma_pd *to_iwpd(struct ib_pd *ibpd)
{
return container_of(ibpd, struct irdma_pd, ibpd);
}
static inline struct irdma_ah *to_iwah(struct ib_ah *ibah)
{
return container_of(ibah, struct irdma_ah, ibah);
}
static inline struct irdma_mr *to_iwmr(struct ib_mr *ibmr)
{
return container_of(ibmr, struct irdma_mr, ibmr);
}
static inline struct irdma_mr *to_iwmw(struct ib_mw *ibmw)
{
return container_of(ibmw, struct irdma_mr, ibmw);
}
static inline struct irdma_cq *to_iwcq(struct ib_cq *ibcq)
{
return container_of(ibcq, struct irdma_cq, ibcq);
}
static inline struct irdma_qp *to_iwqp(struct ib_qp *ibqp)
{
return container_of(ibqp, struct irdma_qp, ibqp);
}
static inline struct irdma_pci_f *dev_to_rf(struct irdma_sc_dev *dev)
{
return container_of(dev, struct irdma_pci_f, sc_dev);
}
/**
* irdma_alloc_rsrc - allocate a resource
* @rf: RDMA PCI function
* @rsrc_array: resource bit array
* @max_rsrc: maximum resource number
* @req_rsrc_num: where to return the allocated resource number
* @next: next free id
*/
static inline int irdma_alloc_rsrc(struct irdma_pci_f *rf,
unsigned long *rsrc_array, u32 max_rsrc,
u32 *req_rsrc_num, u32 *next)
{
u32 rsrc_num;
unsigned long flags;
spin_lock_irqsave(&rf->rsrc_lock, flags);
rsrc_num = find_next_zero_bit(rsrc_array, max_rsrc, *next);
if (rsrc_num >= max_rsrc) {
rsrc_num = find_first_zero_bit(rsrc_array, max_rsrc);
if (rsrc_num >= max_rsrc) {
spin_unlock_irqrestore(&rf->rsrc_lock, flags);
irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR,
"resource [%d] allocation failed\n",
rsrc_num);
return -EOVERFLOW;
}
}
__set_bit(rsrc_num, rsrc_array);
*next = rsrc_num + 1;
if (*next == max_rsrc)
*next = 0;
*req_rsrc_num = rsrc_num;
spin_unlock_irqrestore(&rf->rsrc_lock, flags);
return 0;
}
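/*
 * Hypothetical usage sketch (using fields declared in struct irdma_pci_f
 * above, not taken from any caller in this file): allocating a QP index out
 * of the QP bitmap might look like
 *
 *	u32 qp_num;
 *	int err = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp,
 *				   &qp_num, &rf->next_qp);
 *
 * with a matching irdma_free_rsrc(rf, rf->allocated_qps, qp_num) on teardown.
 */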
/**
* irdma_free_rsrc - free a resource
* @rf: RDMA PCI function
* @rsrc_array: resource array for the rsrc_num
* @rsrc_num: resource number to free
*/
static inline void irdma_free_rsrc(struct irdma_pci_f *rf,
unsigned long *rsrc_array, u32 rsrc_num)
{
unsigned long flags;
spin_lock_irqsave(&rf->rsrc_lock, flags);
__clear_bit(rsrc_num, rsrc_array);
spin_unlock_irqrestore(&rf->rsrc_lock, flags);
}
int irdma_ctrl_init_hw(struct irdma_pci_f *rf);
void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf);
int irdma_rt_init_hw(struct irdma_device *iwdev,
struct irdma_l2params *l2params);
void irdma_rt_deinit_hw(struct irdma_device *iwdev);
void irdma_qp_add_ref(struct ib_qp *ibqp);
void irdma_qp_rem_ref(struct ib_qp *ibqp);
void irdma_free_lsmm_rsrc(struct irdma_qp *iwqp);
struct ib_qp *irdma_get_qp(struct ib_device *ibdev, int qpn);
void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask);
void irdma_manage_arp_cache(struct irdma_pci_f *rf, unsigned char *mac_addr,
u32 *ip_addr, u32 action);
struct irdma_apbvt_entry *irdma_add_apbvt(struct irdma_device *iwdev, u16 port);
void irdma_del_apbvt(struct irdma_device *iwdev,
struct irdma_apbvt_entry *entry);
struct irdma_cqp_request *irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp,
bool wait);
void irdma_free_cqp_request(struct irdma_cqp *cqp,
struct irdma_cqp_request *cqp_request);
void irdma_put_cqp_request(struct irdma_cqp *cqp,
struct irdma_cqp_request *cqp_request);
int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx);
int irdma_add_local_mac_entry(struct irdma_pci_f *rf, u8 *mac_addr, u16 idx);
void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx);
u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf);
void irdma_port_ibevent(struct irdma_device *iwdev);
void irdma_cm_disconn(struct irdma_qp *qp);
bool irdma_cqp_crit_err(struct irdma_sc_dev *dev, u8 cqp_cmd,
u16 maj_err_code, u16 min_err_code);
int irdma_handle_cqp_op(struct irdma_pci_f *rf,
struct irdma_cqp_request *cqp_request);
int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
struct ib_udata *udata);
int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
void irdma_cq_add_ref(struct ib_cq *ibcq);
void irdma_cq_rem_ref(struct ib_cq *ibcq);
void irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq);
void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf);
int irdma_hw_modify_qp(struct irdma_device *iwdev, struct irdma_qp *iwqp,
struct irdma_modify_qp_info *info, bool wait);
int irdma_qp_suspend_resume(struct irdma_sc_qp *qp, bool suspend);
int irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
enum irdma_quad_entry_type etype,
enum irdma_quad_hash_manage_type mtype, void *cmnode,
bool wait);
void irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf);
void irdma_free_sqbuf(struct irdma_sc_vsi *vsi, void *bufp);
void irdma_free_qp_rsrc(struct irdma_qp *iwqp);
int irdma_setup_cm_core(struct irdma_device *iwdev, u8 ver);
void irdma_cleanup_cm_core(struct irdma_cm_core *cm_core);
void irdma_next_iw_state(struct irdma_qp *iwqp, u8 state, u8 del_hash, u8 term,
u8 term_len);
int irdma_send_syn(struct irdma_cm_node *cm_node, u32 sendack);
int irdma_send_reset(struct irdma_cm_node *cm_node);
struct irdma_cm_node *irdma_find_node(struct irdma_cm_core *cm_core,
u16 rem_port, u32 *rem_addr, u16 loc_port,
u32 *loc_addr, u16 vlan_id);
int irdma_hw_flush_wqes(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
struct irdma_qp_flush_info *info, bool wait);
void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
struct irdma_gen_ae_info *info, bool wait);
void irdma_copy_ip_ntohl(u32 *dst, __be32 *src);
void irdma_copy_ip_htonl(__be32 *dst, u32 *src);
u16 irdma_get_vlan_ipv4(u32 *addr);
struct ifnet *irdma_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac);
struct ib_mr *irdma_reg_phys_mr(struct ib_pd *ib_pd, u64 addr, u64 size,
int acc, u64 *iova_start);
int irdma_upload_qp_context(struct irdma_qp *iwqp, bool freeze, bool raw);
void irdma_del_hmc_objects(struct irdma_sc_dev *dev,
struct irdma_hmc_info *hmc_info, bool privileged,
bool reset, enum irdma_vers vers);
void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq);
int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
bool wait,
void (*callback_fcn)(struct irdma_cqp_request *cqp_request),
void *cb_param);
void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request);
bool irdma_cq_empty(struct irdma_cq *iwcq);
void irdma_set_rf_user_cfg_params(struct irdma_pci_f *rf);
void irdma_add_ip(struct irdma_device *iwdev);
void irdma_add_handler(struct irdma_handler *hdl);
void irdma_del_handler(struct irdma_handler *hdl);
void cqp_compl_worker(struct work_struct *work);
#endif /* IRDMA_MAIN_H */

557
sys/dev/irdma/irdma_pble.c Normal file

@@ -0,0 +1,557 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2015 - 2021 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#include "osdep.h"
#include "irdma_hmc.h"
#include "irdma_defs.h"
#include "irdma_type.h"
#include "irdma_protos.h"
#include "irdma_pble.h"
static int add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc);
/**
* irdma_destroy_pble_prm - destroy prm during module unload
* @pble_rsrc: pble resources
*/
void
irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
{
struct irdma_chunk *chunk;
struct irdma_pble_prm *pinfo = &pble_rsrc->pinfo;
while (!list_empty(&pinfo->clist)) {
chunk = (struct irdma_chunk *)(&pinfo->clist)->next;
list_del(&chunk->list);
if (chunk->type == PBLE_SD_PAGED)
irdma_pble_free_paged_mem(chunk);
if (chunk->bitmapbuf)
irdma_prm_rem_bitmapmem(pble_rsrc->dev->hw, chunk);
kfree(chunk->chunkmem.va);
}
spin_lock_destroy(&pinfo->prm_lock);
mutex_destroy(&pble_rsrc->pble_mutex_lock);
}
/**
* irdma_hmc_init_pble - Initialize pble resources during module load
* @dev: irdma_sc_dev struct
* @pble_rsrc: pble resources
*/
int
irdma_hmc_init_pble(struct irdma_sc_dev *dev,
struct irdma_hmc_pble_rsrc *pble_rsrc)
{
struct irdma_hmc_info *hmc_info;
u32 fpm_idx = 0;
int status = 0;
hmc_info = dev->hmc_info;
pble_rsrc->dev = dev;
pble_rsrc->fpm_base_addr = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].base;
/* Start pbles on a 4K boundary; fpm_idx counts the 8-byte pbles skipped */
if (pble_rsrc->fpm_base_addr & 0xfff)
fpm_idx = (4096 - (pble_rsrc->fpm_base_addr & 0xfff)) >> 3;
pble_rsrc->unallocated_pble =
hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt - fpm_idx;
pble_rsrc->next_fpm_addr = pble_rsrc->fpm_base_addr + (fpm_idx << 3);
pble_rsrc->pinfo.pble_shift = PBLE_SHIFT;
mutex_init(&pble_rsrc->pble_mutex_lock);
spin_lock_init(&pble_rsrc->pinfo.prm_lock);
INIT_LIST_HEAD(&pble_rsrc->pinfo.clist);
if (add_pble_prm(pble_rsrc)) {
irdma_destroy_pble_prm(pble_rsrc);
status = -ENOMEM;
}
return status;
}
/**
* get_sd_pd_idx - Returns sd index, pd index and rel_pd_idx from fpm address
* @pble_rsrc: structure containing fpm address
* @idx: where to return indexes
*/
static void
get_sd_pd_idx(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct sd_pd_idx *idx)
{
idx->sd_idx = (u32)pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr / IRDMA_HMC_PAGED_BP_SIZE);
idx->rel_pd_idx = (idx->pd_idx % IRDMA_HMC_PD_CNT_IN_SD);
}
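/*
 * Example: next_fpm_addr = 0x201000 resolves to sd_idx 1 (2MB SDs),
 * pd_idx 0x201 (4K PDs) and rel_pd_idx 1 within that SD.
 */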
/**
* add_sd_direct - add sd direct for pble
* @pble_rsrc: pble resource ptr
* @info: page info for sd
*/
static int
add_sd_direct(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_add_page_info *info)
{
struct irdma_sc_dev *dev = pble_rsrc->dev;
int ret_code = 0;
struct sd_pd_idx *idx = &info->idx;
struct irdma_chunk *chunk = info->chunk;
struct irdma_hmc_info *hmc_info = info->hmc_info;
struct irdma_hmc_sd_entry *sd_entry = info->sd_entry;
u32 offset = 0;
if (!sd_entry->valid) {
ret_code = irdma_add_sd_table_entry(dev->hw, hmc_info,
info->idx.sd_idx,
IRDMA_SD_TYPE_DIRECT,
IRDMA_HMC_DIRECT_BP_SIZE);
if (ret_code)
return ret_code;
chunk->type = PBLE_SD_CONTIGOUS;
}
offset = idx->rel_pd_idx << HMC_PAGED_BP_SHIFT;
chunk->size = info->pages << HMC_PAGED_BP_SHIFT;
chunk->vaddr = (u8 *)sd_entry->u.bp.addr.va + offset;
chunk->fpm_addr = pble_rsrc->next_fpm_addr;
irdma_debug(dev, IRDMA_DEBUG_PBLE,
"chunk_size[%ld] = 0x%lx vaddr=0x%p fpm_addr = %lx\n",
chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);
return 0;
}
/**
* fpm_to_idx - given fpm address, get pble index
* @pble_rsrc: pble resource management
* @addr: fpm address for index
*/
static u32
fpm_to_idx(struct irdma_hmc_pble_rsrc *pble_rsrc, u64 addr)
{
u64 idx;
idx = (addr - pble_rsrc->fpm_base_addr) >> 3;
return (u32)idx;
}
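/*
 * Each pble is 8 bytes, hence the shift by 3; e.g. an fpm offset of 0x800
 * from fpm_base_addr is pble index 0x100.
 */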
/**
* add_bp_pages - add backing pages for sd
* @pble_rsrc: pble resource management
* @info: page info for sd
*/
static int
add_bp_pages(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_add_page_info *info)
{
struct irdma_sc_dev *dev = pble_rsrc->dev;
u8 *addr;
struct irdma_dma_mem mem;
struct irdma_hmc_pd_entry *pd_entry;
struct irdma_hmc_sd_entry *sd_entry = info->sd_entry;
struct irdma_hmc_info *hmc_info = info->hmc_info;
struct irdma_chunk *chunk = info->chunk;
int status = 0;
u32 rel_pd_idx = info->idx.rel_pd_idx;
u32 pd_idx = info->idx.pd_idx;
u32 i;
if (irdma_pble_get_paged_mem(chunk, info->pages))
return -ENOMEM;
status = irdma_add_sd_table_entry(dev->hw, hmc_info, info->idx.sd_idx,
IRDMA_SD_TYPE_PAGED,
IRDMA_HMC_DIRECT_BP_SIZE);
if (status)
goto error;
addr = chunk->vaddr;
for (i = 0; i < info->pages; i++) {
mem.pa = (u64)chunk->dmainfo.dmaaddrs[i];
mem.size = 4096;
mem.va = addr;
pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx++];
if (!pd_entry->valid) {
status = irdma_add_pd_table_entry(dev, hmc_info,
pd_idx++, &mem);
if (status)
goto error;
addr += 4096;
}
}
chunk->fpm_addr = pble_rsrc->next_fpm_addr;
return 0;
error:
irdma_pble_free_paged_mem(chunk);
return status;
}
/**
* irdma_get_type - add a sd entry type for sd
* @dev: irdma_sc_dev struct
* @idx: index of sd
* @pages: pages in the sd
*/
static enum irdma_sd_entry_type
irdma_get_type(struct irdma_sc_dev *dev,
struct sd_pd_idx *idx, u32 pages)
{
enum irdma_sd_entry_type sd_entry_type;
sd_entry_type = !idx->rel_pd_idx && pages == IRDMA_HMC_PD_CNT_IN_SD ?
IRDMA_SD_TYPE_DIRECT : IRDMA_SD_TYPE_PAGED;
return sd_entry_type;
}
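/*
 * In other words, a direct (2MB) SD is chosen only when the allocation
 * starts exactly on an SD boundary and covers all 512 of its 4K pages;
 * anything smaller or unaligned falls back to a paged SD.
 */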
/**
* add_pble_prm - add an sd entry for the pble resource
* @pble_rsrc: pble resource management
*/
static int
add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
{
struct irdma_sc_dev *dev = pble_rsrc->dev;
struct irdma_hmc_sd_entry *sd_entry;
struct irdma_hmc_info *hmc_info;
struct irdma_chunk *chunk;
struct irdma_add_page_info info;
struct sd_pd_idx *idx = &info.idx;
int ret_code = 0;
enum irdma_sd_entry_type sd_entry_type;
u64 sd_reg_val = 0;
struct irdma_virt_mem chunkmem;
u32 pages;
if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE)
return -ENOMEM;
if (pble_rsrc->next_fpm_addr & 0xfff)
return -EINVAL;
chunkmem.size = sizeof(*chunk);
chunkmem.va = kzalloc(chunkmem.size, GFP_ATOMIC);
if (!chunkmem.va)
return -ENOMEM;
chunk = chunkmem.va;
chunk->chunkmem = chunkmem;
hmc_info = dev->hmc_info;
chunk->dev = dev;
chunk->fpm_addr = pble_rsrc->next_fpm_addr;
get_sd_pd_idx(pble_rsrc, idx);
sd_entry = &hmc_info->sd_table.sd_entry[idx->sd_idx];
pages = (idx->rel_pd_idx) ? (IRDMA_HMC_PD_CNT_IN_SD - idx->rel_pd_idx) :
IRDMA_HMC_PD_CNT_IN_SD;
pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);
info.chunk = chunk;
info.hmc_info = hmc_info;
info.pages = pages;
info.sd_entry = sd_entry;
if (!sd_entry->valid)
sd_entry_type = irdma_get_type(dev, idx, pages);
else
sd_entry_type = sd_entry->entry_type;
irdma_debug(dev, IRDMA_DEBUG_PBLE,
"pages = %d, unallocated_pble[%d] current_fpm_addr = %lx\n",
pages, pble_rsrc->unallocated_pble, pble_rsrc->next_fpm_addr);
irdma_debug(dev, IRDMA_DEBUG_PBLE, "sd_entry_type = %d\n",
sd_entry_type);
if (sd_entry_type == IRDMA_SD_TYPE_DIRECT)
ret_code = add_sd_direct(pble_rsrc, &info);
if (ret_code)
sd_entry_type = IRDMA_SD_TYPE_PAGED;
else
pble_rsrc->stats_direct_sds++;
if (sd_entry_type == IRDMA_SD_TYPE_PAGED) {
ret_code = add_bp_pages(pble_rsrc, &info);
if (ret_code)
goto error;
else
pble_rsrc->stats_paged_sds++;
}
ret_code = irdma_prm_add_pble_mem(&pble_rsrc->pinfo, chunk);
if (ret_code)
goto error;
pble_rsrc->next_fpm_addr += chunk->size;
irdma_debug(dev, IRDMA_DEBUG_PBLE,
"next_fpm_addr = %lx chunk_size[%lu] = 0x%lx\n",
pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
pble_rsrc->unallocated_pble -= (u32)(chunk->size >> 3);
sd_reg_val = (sd_entry_type == IRDMA_SD_TYPE_PAGED) ?
sd_entry->u.pd_table.pd_page_addr.pa :
sd_entry->u.bp.addr.pa;
if (!sd_entry->valid) {
ret_code = irdma_hmc_sd_one(dev, hmc_info->hmc_fn_id, sd_reg_val,
idx->sd_idx, sd_entry->entry_type, true);
if (ret_code)
goto error;
}
sd_entry->valid = true;
list_add(&chunk->list, &pble_rsrc->pinfo.clist);
return 0;
error:
if (chunk->bitmapbuf)
irdma_prm_rem_bitmapmem(pble_rsrc->dev->hw, chunk);
kfree(chunk->chunkmem.va);
return ret_code;
}
/**
* free_lvl2 - free level 2 pble
* @pble_rsrc: pble resource management
* @palloc: level 2 pble allocation
*/
static void
free_lvl2(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_alloc *palloc)
{
u32 i;
struct irdma_pble_level2 *lvl2 = &palloc->level2;
struct irdma_pble_info *root = &lvl2->root;
struct irdma_pble_info *leaf = lvl2->leaf;
for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
if (leaf->addr)
irdma_prm_return_pbles(&pble_rsrc->pinfo,
&leaf->chunkinfo);
else
break;
}
if (root->addr)
irdma_prm_return_pbles(&pble_rsrc->pinfo, &root->chunkinfo);
kfree(lvl2->leafmem.va);
lvl2->leaf = NULL;
}
/**
* get_lvl2_pble - get level 2 pble resource
* @pble_rsrc: pble resource management
* @palloc: level 2 pble allocation
*/
static int
get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_alloc *palloc)
{
u32 lf4k, lflast, total, i;
u32 pblcnt = PBLE_PER_PAGE;
u64 *addr;
struct irdma_pble_level2 *lvl2 = &palloc->level2;
struct irdma_pble_info *root = &lvl2->root;
struct irdma_pble_info *leaf;
int ret_code;
u64 fpm_addr;
/* number of full leaves of 512 pbles (4K) each */
lf4k = palloc->total_cnt >> 9;
lflast = palloc->total_cnt % PBLE_PER_PAGE;
total = (lflast == 0) ? lf4k : lf4k + 1;
lvl2->leaf_cnt = total;
lvl2->leafmem.size = (sizeof(*leaf) * total);
lvl2->leafmem.va = kzalloc(lvl2->leafmem.size, GFP_ATOMIC);
if (!lvl2->leafmem.va)
return -ENOMEM;
lvl2->leaf = lvl2->leafmem.va;
leaf = lvl2->leaf;
ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo, &root->chunkinfo,
total << 3, &root->addr, &fpm_addr);
if (ret_code) {
kfree(lvl2->leafmem.va);
lvl2->leaf = NULL;
return -ENOMEM;
}
root->idx = fpm_to_idx(pble_rsrc, fpm_addr);
root->cnt = total;
addr = root->addr;
for (i = 0; i < total; i++, leaf++) {
pblcnt = (lflast && ((i + 1) == total)) ?
lflast : PBLE_PER_PAGE;
ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo,
&leaf->chunkinfo, pblcnt << 3,
&leaf->addr, &fpm_addr);
if (ret_code)
goto error;
leaf->idx = fpm_to_idx(pble_rsrc, fpm_addr);
leaf->cnt = pblcnt;
*addr = (u64)leaf->idx;
addr++;
}
palloc->level = PBLE_LEVEL_2;
pble_rsrc->stats_lvl2++;
return 0;
error:
free_lvl2(pble_rsrc, palloc);
return -ENOMEM;
}
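/*
 * Worked example (illustrative, not part of the driver): a request of
 * palloc->total_cnt = 1500 pbles gives lf4k = 1500 >> 9 = 2 full
 * 512-pble leaves and lflast = 1500 % 512 = 476, so total = 3 leaf
 * pages; the root page then holds the 3 leaf indices and the last
 * leaf carries only 476 pbles.
 */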
/**
* get_lvl1_pble - get level 1 pble resource
* @pble_rsrc: pble resource management
* @palloc: level 1 pble allocation
*/
static int
get_lvl1_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_alloc *palloc)
{
int ret_code;
u64 fpm_addr;
struct irdma_pble_info *lvl1 = &palloc->level1;
ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo, &lvl1->chunkinfo,
palloc->total_cnt << 3, &lvl1->addr,
&fpm_addr);
if (ret_code)
return -ENOMEM;
palloc->level = PBLE_LEVEL_1;
lvl1->idx = fpm_to_idx(pble_rsrc, fpm_addr);
lvl1->cnt = palloc->total_cnt;
pble_rsrc->stats_lvl1++;
return 0;
}
/**
* get_lvl1_lvl2_pble - calls get_lvl1 and get_lvl2 pble routines
* @pble_rsrc: pble resources
* @palloc: contains all information regarding pble (idx + pble addr)
* @level1_only: flag for a level 1 PBLE
*/
static int
get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_alloc *palloc, bool level1_only)
{
int status = 0;
status = get_lvl1_pble(pble_rsrc, palloc);
if (!status || level1_only || palloc->total_cnt <= PBLE_PER_PAGE)
return status;
status = get_lvl2_pble(pble_rsrc, palloc);
return status;
}
/**
* irdma_get_pble - allocate pbles from the prm
* @pble_rsrc: pble resources
* @palloc: contains all information regarding pble (idx + pble addr)
* @pble_cnt: number of pbles requested
* @level1_only: true to acquire only a level 1 pble
*/
int
irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_alloc *palloc, u32 pble_cnt,
bool level1_only)
{
int status = 0;
int max_sds = 0;
int i;
palloc->total_cnt = pble_cnt;
palloc->level = PBLE_LEVEL_0;
mutex_lock(&pble_rsrc->pble_mutex_lock);
/*
* check first to see if we can get pble's without acquiring additional sd's
*/
status = get_lvl1_lvl2_pble(pble_rsrc, palloc, level1_only);
if (!status)
goto exit;
max_sds = (palloc->total_cnt >> 18) + 1;
for (i = 0; i < max_sds; i++) {
status = add_pble_prm(pble_rsrc);
if (status)
break;
status = get_lvl1_lvl2_pble(pble_rsrc, palloc, level1_only);
/* if level1_only, only go through it once */
if (!status || level1_only)
break;
}
exit:
if (!status) {
pble_rsrc->allocdpbles += pble_cnt;
pble_rsrc->stats_alloc_ok++;
} else {
pble_rsrc->stats_alloc_fail++;
}
mutex_unlock(&pble_rsrc->pble_mutex_lock);
return status;
}
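/*
 * Worked example (illustrative, assuming the 2MB direct SD size used by
 * the HMC code, i.e. 2MB / 8B = 262144 pbles per SD): max_sds =
 * (total_cnt >> 18) + 1 rounds up the number of SDs that may have to be
 * added, so a request for 300000 pbles yields max_sds = 2.
 */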
/**
* irdma_free_pble - put pbles back into prm
* @pble_rsrc: pble resources
* @palloc: contains all information regarding pble resource being freed
*/
void
irdma_free_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_alloc *palloc)
{
pble_rsrc->freedpbles += palloc->total_cnt;
if (palloc->level == PBLE_LEVEL_2)
free_lvl2(pble_rsrc, palloc);
else
irdma_prm_return_pbles(&pble_rsrc->pinfo,
&palloc->level1.chunkinfo);
pble_rsrc->stats_alloc_freed++;
}
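/*
 * Usage sketch (hypothetical caller, not part of this commit): a typical
 * consumer pairs irdma_get_pble() with irdma_free_pble(); "pbl_cnt" and
 * the error handling below are assumptions for illustration only.
 */
#if 0
static int example_alloc_pbles(struct irdma_hmc_pble_rsrc *pble_rsrc,
			       u32 pbl_cnt)
{
	struct irdma_pble_alloc palloc = {0};
	int status;

	/* level1_only = false permits a level 2 allocation when needed */
	status = irdma_get_pble(pble_rsrc, &palloc, pbl_cnt, false);
	if (status)
		return status;

	/* ... program the HW from palloc.level1 / palloc.level2 ... */

	irdma_free_pble(pble_rsrc, &palloc);
	return 0;
}
#endif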

166
sys/dev/irdma/irdma_pble.h Normal file
View File

@ -0,0 +1,166 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2015 - 2021 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#ifndef IRDMA_PBLE_H
#define IRDMA_PBLE_H
#define PBLE_SHIFT 6
#define PBLE_PER_PAGE 512
#define HMC_PAGED_BP_SHIFT 12
#define PBLE_512_SHIFT 9
#define PBLE_INVALID_IDX 0xffffffff
enum irdma_pble_level {
PBLE_LEVEL_0 = 0,
PBLE_LEVEL_1 = 1,
PBLE_LEVEL_2 = 2,
};
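/*
 * PBLE_LEVEL_0 denotes a not-yet-satisfied allocation, PBLE_LEVEL_1 a
 * single contiguous run of pbles, and PBLE_LEVEL_2 a root page whose
 * entries index up to 512-pble leaf pages (see get_lvl2_pble()).
 */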
enum irdma_alloc_type {
PBLE_NO_ALLOC = 0,
PBLE_SD_CONTIGOUS = 1,
PBLE_SD_PAGED = 2,
};
struct irdma_chunk;
struct irdma_pble_chunkinfo {
struct irdma_chunk *pchunk;
u64 bit_idx;
u64 bits_used;
};
struct irdma_pble_info {
u64 *addr;
u32 idx;
u32 cnt;
struct irdma_pble_chunkinfo chunkinfo;
};
struct irdma_pble_level2 {
struct irdma_pble_info root;
struct irdma_pble_info *leaf;
struct irdma_virt_mem leafmem;
u32 leaf_cnt;
};
struct irdma_pble_alloc {
u32 total_cnt;
enum irdma_pble_level level;
union {
struct irdma_pble_info level1;
struct irdma_pble_level2 level2;
};
};
struct sd_pd_idx {
u32 sd_idx;
u32 pd_idx;
u32 rel_pd_idx;
};
struct irdma_add_page_info {
struct irdma_chunk *chunk;
struct irdma_hmc_sd_entry *sd_entry;
struct irdma_hmc_info *hmc_info;
struct sd_pd_idx idx;
u32 pages;
};
struct irdma_chunk {
struct list_head list;
struct irdma_dma_info dmainfo;
void *bitmapbuf;
u32 sizeofbitmap;
u64 size;
void *vaddr;
u64 fpm_addr;
u32 pg_cnt;
enum irdma_alloc_type type;
struct irdma_sc_dev *dev;
struct irdma_virt_mem bitmapmem;
struct irdma_virt_mem chunkmem;
};
struct irdma_pble_prm {
struct list_head clist;
spinlock_t prm_lock; /* protect prm bitmap */
u64 total_pble_alloc;
u64 free_pble_cnt;
u8 pble_shift;
};
struct irdma_hmc_pble_rsrc {
u32 unallocated_pble;
struct mutex pble_mutex_lock; /* protect PBLE resource */
struct irdma_sc_dev *dev;
u64 fpm_base_addr;
u64 next_fpm_addr;
struct irdma_pble_prm pinfo;
u64 allocdpbles;
u64 freedpbles;
u32 stats_direct_sds;
u32 stats_paged_sds;
u64 stats_alloc_ok;
u64 stats_alloc_fail;
u64 stats_alloc_freed;
u64 stats_lvl1;
u64 stats_lvl2;
};
void irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc);
int irdma_hmc_init_pble(struct irdma_sc_dev *dev,
struct irdma_hmc_pble_rsrc *pble_rsrc);
void irdma_free_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_alloc *palloc);
int irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_alloc *palloc, u32 pble_cnt,
bool level1_only);
int irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
struct irdma_chunk *pchunk);
int irdma_prm_get_pbles(struct irdma_pble_prm *pprm,
struct irdma_pble_chunkinfo *chunkinfo, u64 mem_size,
u64 **vaddr, u64 *fpm_addr);
void irdma_prm_return_pbles(struct irdma_pble_prm *pprm,
struct irdma_pble_chunkinfo *chunkinfo);
void irdma_pble_acquire_lock(struct irdma_hmc_pble_rsrc *pble_rsrc,
unsigned long *flags);
void irdma_pble_release_lock(struct irdma_hmc_pble_rsrc *pble_rsrc,
unsigned long *flags);
void irdma_pble_free_paged_mem(struct irdma_chunk *chunk);
int irdma_pble_get_paged_mem(struct irdma_chunk *chunk, u32 pg_cnt);
void irdma_prm_rem_bitmapmem(struct irdma_hw *hw, struct irdma_chunk *chunk);
#endif /* IRDMA_PBLE_H */

141
sys/dev/irdma/irdma_protos.h Normal file
View File

@ -0,0 +1,141 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2016 - 2021 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#ifndef IRDMA_PROTOS_H
#define IRDMA_PROTOS_H
#define PAUSE_TIMER_VAL 0xffff
#define REFRESH_THRESHOLD 0x7fff
#define HIGH_THRESHOLD 0x800
#define LOW_THRESHOLD 0x200
#define ALL_TC2PFC 0xff
#define CQP_COMPL_WAIT_TIME_MS 10
#define CQP_TIMEOUT_THRESHOLD 500
/* init operations */
int irdma_sc_dev_init(enum irdma_vers ver, struct irdma_sc_dev *dev,
struct irdma_device_init_info *info);
void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp);
__le64 *irdma_sc_cqp_get_next_send_wqe(struct irdma_sc_cqp *cqp, u64 scratch);
int irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
struct irdma_fast_reg_stag_info *info,
bool post_sq);
void irdma_init_config_check(struct irdma_config_check *cc,
u8 traffic_class,
u16 qs_handle);
/* HMC/FPM functions */
int irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, u8 hmc_fn_id);
/* stats misc */
int irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev,
struct irdma_vsi_pestat *pestat, bool wait);
int irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd,
struct irdma_ws_node_info *node_info);
int irdma_cqp_up_map_cmd(struct irdma_sc_dev *dev, u8 cmd,
struct irdma_up_info *map_info);
int irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_ceq *sc_ceq,
u8 op);
int irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_aeq *sc_aeq,
u8 op);
int irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd,
struct irdma_stats_inst_info *stats_info);
u16 irdma_alloc_ws_node_id(struct irdma_sc_dev *dev);
void irdma_free_ws_node_id(struct irdma_sc_dev *dev, u16 node_id);
void irdma_update_stats(struct irdma_dev_hw_stats *hw_stats,
struct irdma_gather_stats *gather_stats,
struct irdma_gather_stats *last_gather_stats,
const struct irdma_hw_stat_map *map,
u16 max_stat_idx);
/* vsi functions */
int irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
struct irdma_vsi_stats_info *info);
void irdma_vsi_stats_free(struct irdma_sc_vsi *vsi);
void irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
struct irdma_vsi_init_info *info);
int irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq);
void irdma_sc_remove_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq);
/* misc L2 param change functions */
void irdma_change_l2params(struct irdma_sc_vsi *vsi,
struct irdma_l2params *l2params);
void irdma_sc_suspend_resume_qps(struct irdma_sc_vsi *vsi, u8 suspend);
int irdma_cqp_qp_suspend_resume(struct irdma_sc_qp *qp, u8 cmd);
void irdma_qp_add_qos(struct irdma_sc_qp *qp);
void irdma_qp_rem_qos(struct irdma_sc_qp *qp);
struct irdma_sc_qp *irdma_get_qp_from_list(struct list_head *head,
struct irdma_sc_qp *qp);
void irdma_reinitialize_ieq(struct irdma_sc_vsi *vsi);
u16 irdma_alloc_ws_node_id(struct irdma_sc_dev *dev);
void irdma_free_ws_node_id(struct irdma_sc_dev *dev, u16 node_id);
/* terminate functions*/
void irdma_terminate_send_fin(struct irdma_sc_qp *qp);
void irdma_terminate_connection(struct irdma_sc_qp *qp,
struct irdma_aeqe_info *info);
void irdma_terminate_received(struct irdma_sc_qp *qp,
struct irdma_aeqe_info *info);
/* dynamic memory allocation */
void *irdma_allocate_dma_mem(struct irdma_hw *hw, struct irdma_dma_mem *mem,
u64 size, u32 alignment);
void *irdma_allocate_virt_mem(struct irdma_hw *hw, struct irdma_virt_mem *mem, u32 size);
int irdma_free_dma_mem(struct irdma_hw *hw, struct irdma_dma_mem *mem);
/* misc */
u8 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type);
void irdma_modify_qp_to_err(struct irdma_sc_qp *sc_qp);
int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
u8 hmc_fn_id, bool post_sq,
bool poll_registers);
int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count);
int irdma_get_rdma_features(struct irdma_sc_dev *dev);
void free_sd_mem(struct irdma_sc_dev *dev);
int irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
struct cqp_cmds_info *pcmdinfo);
int irdma_process_bh(struct irdma_sc_dev *dev);
extern void dump_ctx(struct irdma_sc_dev *dev, u32 pf_num, u32 qp_num);
void dumpCSR(struct irdma_sc_dev *dev);
void dumpCSRx(struct irdma_sc_dev *dev);
void dumpcls(struct irdma_sc_dev *dev);
int irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
struct irdma_update_sds_info *info);
int irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
int irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
int irdma_alloc_query_fpm_buf(struct irdma_sc_dev *dev,
struct irdma_dma_mem *mem);
int irdma_cqp_manage_hmc_fcn_cmd(struct irdma_sc_dev *dev,
struct irdma_hmc_fcn_info *hmcfcninfo,
u16 *pmf_idx);
void *irdma_remove_cqp_head(struct irdma_sc_dev *dev);
#endif /* IRDMA_PROTOS_H */

1856
sys/dev/irdma/irdma_puda.c Normal file

File diff suppressed because it is too large

221
sys/dev/irdma/irdma_puda.h Normal file
View File

@ -0,0 +1,221 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2015 - 2021 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#ifndef IRDMA_PUDA_H
#define IRDMA_PUDA_H
#define IRDMA_IEQ_MPA_FRAMING 6
#define IRDMA_TCP_OFFSET 40
#define IRDMA_IPV4_PAD 20
#define IRDMA_MRK_BLK_SZ 512
enum puda_rsrc_type {
IRDMA_PUDA_RSRC_TYPE_ILQ = 1,
IRDMA_PUDA_RSRC_TYPE_IEQ,
IRDMA_PUDA_RSRC_TYPE_MAX, /* Must be last entry */
};
enum puda_rsrc_complete {
PUDA_CQ_CREATED = 1,
PUDA_QP_CREATED,
PUDA_TX_COMPLETE,
PUDA_RX_COMPLETE,
PUDA_HASH_CRC_COMPLETE,
};
struct irdma_sc_dev;
struct irdma_sc_qp;
struct irdma_sc_cq;
struct irdma_puda_cmpl_info {
struct irdma_qp_uk *qp;
u8 q_type;
u8 l3proto;
u8 l4proto;
u16 vlan;
u32 payload_len;
u32 compl_error; /* No_err=0, else major and minor err code */
u32 qp_id;
u32 wqe_idx;
bool ipv4:1;
bool smac_valid:1;
bool vlan_valid:1;
u8 smac[ETH_ALEN];
};
struct irdma_puda_send_info {
u64 paddr; /* Physical address */
u32 len;
u32 ah_id;
u8 tcplen;
u8 maclen;
bool ipv4:1;
bool do_lpb:1;
void *scratch;
};
struct irdma_puda_buf {
struct list_head list; /* MUST be first entry */
struct irdma_dma_mem mem; /* DMA memory for the buffer */
struct irdma_puda_buf *next; /* for alloclist in rsrc struct */
struct irdma_virt_mem buf_mem; /* Buffer memory for this buffer */
void *scratch;
u8 *iph;
u8 *tcph;
u8 *data;
u16 datalen;
u16 vlan_id;
u8 tcphlen; /* tcp length in bytes */
u8 maclen; /* mac length in bytes */
u32 totallen; /* maclen + iphlen + tcphlen + datalen */
atomic_t refcount;
u8 hdrlen;
bool virtdma:1;
bool ipv4:1;
bool vlan_valid:1;
bool do_lpb:1; /* Loopback buffer */
bool smac_valid:1;
u32 seqnum;
u32 ah_id;
u8 smac[ETH_ALEN];
struct irdma_sc_vsi *vsi;
};
struct irdma_puda_rsrc_info {
void (*receive)(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *buf);
void (*xmit_complete)(struct irdma_sc_vsi *vsi, void *sqwrid);
enum puda_rsrc_type type; /* ILQ or IEQ */
u32 count;
u32 pd_id;
u32 cq_id;
u32 qp_id;
u32 sq_size;
u32 rq_size;
u32 tx_buf_cnt; /* total bufs allocated will be rq_size + tx_buf_cnt */
u16 buf_size;
u16 mss; /* FIXME: Windows driver still using this */
u8 stats_idx;
bool stats_idx_valid:1;
int abi_ver;
};
struct irdma_puda_rsrc {
struct irdma_sc_cq cq;
struct irdma_sc_qp qp;
struct irdma_sc_pd sc_pd;
struct irdma_sc_dev *dev;
struct irdma_sc_vsi *vsi;
struct irdma_dma_mem cqmem;
struct irdma_dma_mem qpmem;
struct irdma_virt_mem ilq_mem;
enum puda_rsrc_complete cmpl;
enum puda_rsrc_type type;
u16 buf_size; /* buf must be max datalen + tcpip hdr + mac */
u32 cq_id;
u32 qp_id;
u32 sq_size;
u32 rq_size;
u32 cq_size;
struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;
u64 *rq_wrid_array;
u32 compl_rxwqe_idx;
u32 rx_wqe_idx;
u32 rxq_invalid_cnt;
u32 tx_wqe_avail_cnt;
void *hash_desc;
struct list_head txpend;
struct list_head bufpool; /* free buffers pool list for recv and xmit */
u32 alloc_buf_count;
u32 avail_buf_count; /* snapshot of currently available buffers */
spinlock_t bufpool_lock;
struct irdma_puda_buf *alloclist;
void (*receive)(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *buf);
void (*xmit_complete)(struct irdma_sc_vsi *vsi, void *sqwrid);
/* puda stats */
u64 stats_buf_alloc_fail;
u64 stats_pkt_rcvd;
u64 stats_pkt_sent;
u64 stats_rcvd_pkt_err;
u64 stats_sent_pkt_q;
u64 stats_bad_qp_id;
/* IEQ stats */
u64 fpdu_processed;
u64 bad_seq_num;
u64 crc_err;
u64 pmode_count;
u64 partials_handled;
u16 mss; /* FIXME: Windows driver still using this */
u8 stats_idx;
bool check_crc:1;
bool stats_idx_valid:1;
};
struct irdma_puda_buf *irdma_puda_get_bufpool(struct irdma_puda_rsrc *rsrc);
void irdma_puda_ret_bufpool(struct irdma_puda_rsrc *rsrc,
struct irdma_puda_buf *buf);
void irdma_puda_send_buf(struct irdma_puda_rsrc *rsrc,
struct irdma_puda_buf *buf);
int irdma_puda_send(struct irdma_sc_qp *qp, struct irdma_puda_send_info *info);
int irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi,
struct irdma_puda_rsrc_info *info);
void irdma_puda_dele_rsrc(struct irdma_sc_vsi *vsi, enum puda_rsrc_type type,
bool reset);
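/*
 * Usage sketch (hypothetical, for illustration only): an IEQ resource is
 * brought up by filling struct irdma_puda_rsrc_info and calling
 * irdma_puda_create_rsrc(), and torn down with irdma_puda_dele_rsrc();
 * the callback names below are placeholders.
 *
 *	struct irdma_puda_rsrc_info info = {0};
 *
 *	info.type = IRDMA_PUDA_RSRC_TYPE_IEQ;
 *	info.count = 1;
 *	info.receive = ieq_receive_cb;
 *	info.xmit_complete = ieq_tx_done_cb;
 *	ret = irdma_puda_create_rsrc(vsi, &info);
 *	...
 *	irdma_puda_dele_rsrc(vsi, IRDMA_PUDA_RSRC_TYPE_IEQ, false);
 */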
int irdma_puda_poll_cmpl(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq,
u32 *compl_err);
struct irdma_sc_qp *irdma_ieq_get_qp(struct irdma_sc_dev *dev,
struct irdma_puda_buf *buf);
int irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
struct irdma_puda_buf *buf);
int irdma_ieq_check_mpacrc(void *desc, void *addr, u32 len, u32 val);
int irdma_init_hash_desc(void **desc);
void irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);
void irdma_free_hash_desc(void *desc);
void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len, u32 seqnum);
int irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);
int irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq);
int irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);
void irdma_cqp_cq_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq);
void irdma_puda_ieq_get_ah_info(struct irdma_sc_qp *qp,
struct irdma_ah_info *ah_info);
int irdma_puda_create_ah(struct irdma_sc_dev *dev,
struct irdma_ah_info *ah_info, bool wait,
enum puda_rsrc_type type, void *cb_param,
struct irdma_sc_ah **ah);
void irdma_puda_free_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah);
void irdma_ieq_process_fpdus(struct irdma_sc_qp *qp,
struct irdma_puda_rsrc *ieq);
void irdma_ieq_cleanup_qp(struct irdma_puda_rsrc *ieq, struct irdma_sc_qp *qp);
#endif /* IRDMA_PUDA_H */

1533
sys/dev/irdma/irdma_type.h Normal file

File diff suppressed because it is too large

318
sys/dev/irdma/irdma_uda.c Normal file
View File

@ -0,0 +1,318 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2016 - 2021 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#include "osdep.h"
#include "irdma_hmc.h"
#include "irdma_defs.h"
#include "irdma_type.h"
#include "irdma_protos.h"
#include "irdma_uda.h"
#include "irdma_uda_d.h"
/**
* irdma_sc_access_ah() - Create, modify or delete AH
* @cqp: struct for cqp hw
* @info: ah information
* @op: Operation
* @scratch: u64 saved to be used during cqp completion
*/
int
irdma_sc_access_ah(struct irdma_sc_cqp *cqp, struct irdma_ah_info *info,
u32 op, u64 scratch)
{
__le64 *wqe;
u64 qw1, qw2;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
return -ENOSPC;
set_64bit_val(wqe, IRDMA_BYTE_0, LS_64_1(info->mac_addr[5], 16) |
LS_64_1(info->mac_addr[4], 24) |
LS_64_1(info->mac_addr[3], 32) |
LS_64_1(info->mac_addr[2], 40) |
LS_64_1(info->mac_addr[1], 48) |
LS_64_1(info->mac_addr[0], 56));
qw1 = LS_64(info->pd_idx, IRDMA_UDA_CQPSQ_MAV_PDINDEXLO) |
LS_64(info->tc_tos, IRDMA_UDA_CQPSQ_MAV_TC) |
LS_64(info->vlan_tag, IRDMA_UDAQPC_VLANTAG);
qw2 = LS_64(info->dst_arpindex, IRDMA_UDA_CQPSQ_MAV_ARPINDEX) |
LS_64(info->flow_label, IRDMA_UDA_CQPSQ_MAV_FLOWLABEL) |
LS_64(info->hop_ttl, IRDMA_UDA_CQPSQ_MAV_HOPLIMIT) |
LS_64(info->pd_idx >> 16, IRDMA_UDA_CQPSQ_MAV_PDINDEXHI);
if (!info->ipv4_valid) {
set_64bit_val(wqe, IRDMA_BYTE_40,
LS_64(info->dest_ip_addr[0], IRDMA_UDA_CQPSQ_MAV_ADDR0) |
LS_64(info->dest_ip_addr[1], IRDMA_UDA_CQPSQ_MAV_ADDR1));
set_64bit_val(wqe, IRDMA_BYTE_32,
LS_64(info->dest_ip_addr[2], IRDMA_UDA_CQPSQ_MAV_ADDR2) |
LS_64(info->dest_ip_addr[3], IRDMA_UDA_CQPSQ_MAV_ADDR3));
set_64bit_val(wqe, IRDMA_BYTE_56,
LS_64(info->src_ip_addr[0], IRDMA_UDA_CQPSQ_MAV_ADDR0) |
LS_64(info->src_ip_addr[1], IRDMA_UDA_CQPSQ_MAV_ADDR1));
set_64bit_val(wqe, IRDMA_BYTE_48,
LS_64(info->src_ip_addr[2], IRDMA_UDA_CQPSQ_MAV_ADDR2) |
LS_64(info->src_ip_addr[3], IRDMA_UDA_CQPSQ_MAV_ADDR3));
} else {
set_64bit_val(wqe, IRDMA_BYTE_32,
LS_64(info->dest_ip_addr[0], IRDMA_UDA_CQPSQ_MAV_ADDR3));
set_64bit_val(wqe, IRDMA_BYTE_48,
LS_64(info->src_ip_addr[0], IRDMA_UDA_CQPSQ_MAV_ADDR3));
}
set_64bit_val(wqe, IRDMA_BYTE_8, qw1);
set_64bit_val(wqe, IRDMA_BYTE_16, qw2);
irdma_wmb(); /* need write barrier before writing the WQE header */
set_64bit_val(
wqe, IRDMA_BYTE_24,
LS_64(cqp->polarity, IRDMA_UDA_CQPSQ_MAV_WQEVALID) |
LS_64(op, IRDMA_UDA_CQPSQ_MAV_OPCODE) |
LS_64(info->do_lpbk, IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK) |
LS_64(info->ipv4_valid, IRDMA_UDA_CQPSQ_MAV_IPV4VALID) |
LS_64(info->ah_idx, IRDMA_UDA_CQPSQ_MAV_AVIDX) |
LS_64(info->insert_vlan_tag,
IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG));
irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_AH WQE", wqe,
IRDMA_CQP_WQE_SIZE * 8);
irdma_sc_cqp_post_sq(cqp);
return 0;
}
/**
* irdma_create_mg_ctx() - create a mcg context
* @info: multicast group context info
*/
static int
irdma_create_mg_ctx(struct irdma_mcast_grp_info *info)
{
struct irdma_mcast_grp_ctx_entry_info *entry_info = NULL;
u8 idx = 0; /* index in the array */
u8 ctx_idx = 0; /* index in the MG context */
memset(info->dma_mem_mc.va, 0, IRDMA_MAX_MGS_PER_CTX * sizeof(u64));
for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
entry_info = &info->mg_ctx_info[idx];
if (entry_info->valid_entry) {
set_64bit_val((__le64 *) info->dma_mem_mc.va,
ctx_idx * sizeof(u64),
LS_64(entry_info->dest_port, IRDMA_UDA_MGCTX_DESTPORT) |
LS_64(entry_info->valid_entry, IRDMA_UDA_MGCTX_VALIDENT) |
LS_64(entry_info->qp_id, IRDMA_UDA_MGCTX_QPID));
ctx_idx++;
}
}
return 0;
}
/**
* irdma_access_mcast_grp() - Access mcast group based on op
* @cqp: Control QP
* @info: multicast group context info
* @op: operation to perform
* @scratch: u64 saved to be used during cqp completion
*/
int
irdma_access_mcast_grp(struct irdma_sc_cqp *cqp,
struct irdma_mcast_grp_info *info, u32 op,
u64 scratch)
{
__le64 *wqe;
int ret_code = 0;
if (info->mg_id >= IRDMA_UDA_MAX_FSI_MGS) {
irdma_debug(cqp->dev, IRDMA_DEBUG_WQE, "mg_id out of range\n");
return -EINVAL;
}
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe) {
irdma_debug(cqp->dev, IRDMA_DEBUG_WQE, "ring full\n");
return -ENOSPC;
}
ret_code = irdma_create_mg_ctx(info);
if (ret_code)
return ret_code;
set_64bit_val(wqe, IRDMA_BYTE_32, info->dma_mem_mc.pa);
set_64bit_val(wqe, IRDMA_BYTE_16,
LS_64(info->vlan_id, IRDMA_UDA_CQPSQ_MG_VLANID) |
LS_64(info->qs_handle, IRDMA_UDA_CQPSQ_QS_HANDLE));
set_64bit_val(wqe, IRDMA_BYTE_0, LS_64_1(info->dest_mac_addr[5], 0) |
LS_64_1(info->dest_mac_addr[4], 8) |
LS_64_1(info->dest_mac_addr[3], 16) |
LS_64_1(info->dest_mac_addr[2], 24) |
LS_64_1(info->dest_mac_addr[1], 32) |
LS_64_1(info->dest_mac_addr[0], 40));
set_64bit_val(wqe, IRDMA_BYTE_8,
LS_64(info->hmc_fcn_id, IRDMA_UDA_CQPSQ_MG_HMC_FCN_ID));
if (!info->ipv4_valid) {
set_64bit_val(wqe, IRDMA_BYTE_56,
LS_64(info->dest_ip_addr[0], IRDMA_UDA_CQPSQ_MAV_ADDR0) |
LS_64(info->dest_ip_addr[1], IRDMA_UDA_CQPSQ_MAV_ADDR1));
set_64bit_val(wqe, IRDMA_BYTE_48,
LS_64(info->dest_ip_addr[2], IRDMA_UDA_CQPSQ_MAV_ADDR2) |
LS_64(info->dest_ip_addr[3], IRDMA_UDA_CQPSQ_MAV_ADDR3));
} else {
set_64bit_val(wqe, IRDMA_BYTE_48,
LS_64(info->dest_ip_addr[0], IRDMA_UDA_CQPSQ_MAV_ADDR3));
}
irdma_wmb(); /* need write barrier before writing the WQE header */
set_64bit_val(wqe, IRDMA_BYTE_24,
LS_64(cqp->polarity, IRDMA_UDA_CQPSQ_MG_WQEVALID) |
LS_64(op, IRDMA_UDA_CQPSQ_MG_OPCODE) |
LS_64(info->mg_id, IRDMA_UDA_CQPSQ_MG_MGIDX) |
LS_64(info->vlan_valid, IRDMA_UDA_CQPSQ_MG_VLANVALID) |
LS_64(info->ipv4_valid, IRDMA_UDA_CQPSQ_MG_IPV4VALID));
irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_MCG WQE", wqe,
IRDMA_CQP_WQE_SIZE * 8);
irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MCG_HOST CTX WQE",
info->dma_mem_mc.va, IRDMA_MAX_MGS_PER_CTX * 8);
irdma_sc_cqp_post_sq(cqp);
return 0;
}
/**
* irdma_compare_mgs - Compares two multicast group structures
* @entry1: Multicast group info
* @entry2: Multicast group info in context
*/
static bool
irdma_compare_mgs(struct irdma_mcast_grp_ctx_entry_info *entry1,
struct irdma_mcast_grp_ctx_entry_info *entry2)
{
if (entry1->dest_port == entry2->dest_port &&
entry1->qp_id == entry2->qp_id)
return true;
return false;
}
/**
* irdma_sc_add_mcast_grp - Allocates mcast group entry in ctx
* @ctx: Multicast group context
* @mg: Multicast group info
*/
int
irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx,
struct irdma_mcast_grp_ctx_entry_info *mg)
{
u32 idx;
bool free_entry_found = false;
u32 free_entry_idx = 0;
/* find either an identical or a free entry for a multicast group */
for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
if (ctx->mg_ctx_info[idx].valid_entry) {
if (irdma_compare_mgs(&ctx->mg_ctx_info[idx], mg)) {
ctx->mg_ctx_info[idx].use_cnt++;
return 0;
}
continue;
}
if (!free_entry_found) {
free_entry_found = true;
free_entry_idx = idx;
}
}
if (free_entry_found) {
ctx->mg_ctx_info[free_entry_idx] = *mg;
ctx->mg_ctx_info[free_entry_idx].valid_entry = true;
ctx->mg_ctx_info[free_entry_idx].use_cnt = 1;
ctx->no_of_mgs++;
return 0;
}
return -ENOMEM;
}
/**
* irdma_sc_del_mcast_grp - Delete mcast group
* @ctx: Multicast group context
* @mg: Multicast group info
*
* Finds and removes a specific multicast group from context; all
* parameters must match for the group to be removed.
*/
int
irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx,
struct irdma_mcast_grp_ctx_entry_info *mg)
{
u32 idx;
/* find an entry in multicast group context */
for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
if (!ctx->mg_ctx_info[idx].valid_entry)
continue;
if (irdma_compare_mgs(mg, &ctx->mg_ctx_info[idx])) {
ctx->mg_ctx_info[idx].use_cnt--;
if (!ctx->mg_ctx_info[idx].use_cnt) {
ctx->mg_ctx_info[idx].valid_entry = false;
ctx->no_of_mgs--;
/* Remove gap if element was not the last */
if (idx != ctx->no_of_mgs &&
ctx->no_of_mgs > 0) {
irdma_memcpy(&ctx->mg_ctx_info[idx],
&ctx->mg_ctx_info[ctx->no_of_mgs - 1],
sizeof(ctx->mg_ctx_info[idx]));
ctx->mg_ctx_info[ctx->no_of_mgs - 1].valid_entry = false;
}
}
return 0;
}
}
return -EINVAL;
}
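/*
 * Usage sketch (illustrative only): attaching the same QP/port pair twice
 * only bumps use_cnt, so add and delete calls must balance before the
 * context entry is actually released:
 *
 *	irdma_sc_add_mcast_grp(ctx, &mg);	// use_cnt = 1
 *	irdma_sc_add_mcast_grp(ctx, &mg);	// same QP/port: use_cnt = 2
 *	irdma_sc_del_mcast_grp(ctx, &mg);	// use_cnt = 1, entry kept
 *	irdma_sc_del_mcast_grp(ctx, &mg);	// use_cnt = 0, entry freed
 */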

120
sys/dev/irdma/irdma_uda.h Normal file
View File

@ -0,0 +1,120 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2016 - 2021 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#ifndef IRDMA_UDA_H
#define IRDMA_UDA_H
#define IRDMA_UDA_MAX_FSI_MGS 4096
#define IRDMA_UDA_MAX_PFS 16
#define IRDMA_UDA_MAX_VFS 128
struct irdma_sc_cqp;
struct irdma_ah_info {
struct irdma_sc_vsi *vsi;
u32 pd_idx;
u32 dst_arpindex;
u32 dest_ip_addr[4];
u32 src_ip_addr[4];
u32 flow_label;
u32 ah_idx;
u16 vlan_tag;
u8 insert_vlan_tag;
u8 tc_tos;
u8 hop_ttl;
u8 mac_addr[ETH_ALEN];
bool ah_valid:1;
bool ipv4_valid:1;
bool do_lpbk:1;
};
struct irdma_sc_ah {
struct irdma_sc_dev *dev;
struct irdma_ah_info ah_info;
};
int irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx,
struct irdma_mcast_grp_ctx_entry_info *mg);
int irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx,
struct irdma_mcast_grp_ctx_entry_info *mg);
int irdma_sc_access_ah(struct irdma_sc_cqp *cqp, struct irdma_ah_info *info,
u32 op, u64 scratch);
int irdma_access_mcast_grp(struct irdma_sc_cqp *cqp,
struct irdma_mcast_grp_info *info, u32 op,
u64 scratch);
static inline void irdma_sc_init_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah)
{
ah->dev = dev;
}
static inline int irdma_sc_create_ah(struct irdma_sc_cqp *cqp,
struct irdma_ah_info *info, u64 scratch)
{
return irdma_sc_access_ah(cqp, info, IRDMA_CQP_OP_CREATE_ADDR_HANDLE,
scratch);
}
static inline int irdma_sc_destroy_ah(struct irdma_sc_cqp *cqp,
struct irdma_ah_info *info, u64 scratch)
{
return irdma_sc_access_ah(cqp, info, IRDMA_CQP_OP_DESTROY_ADDR_HANDLE,
scratch);
}
static inline int irdma_sc_create_mcast_grp(struct irdma_sc_cqp *cqp,
struct irdma_mcast_grp_info *info,
u64 scratch)
{
return irdma_access_mcast_grp(cqp, info, IRDMA_CQP_OP_CREATE_MCAST_GRP,
scratch);
}
static inline int irdma_sc_modify_mcast_grp(struct irdma_sc_cqp *cqp,
struct irdma_mcast_grp_info *info,
u64 scratch)
{
return irdma_access_mcast_grp(cqp, info, IRDMA_CQP_OP_MODIFY_MCAST_GRP,
scratch);
}
static inline int irdma_sc_destroy_mcast_grp(struct irdma_sc_cqp *cqp,
struct irdma_mcast_grp_info *info,
u64 scratch)
{
return irdma_access_mcast_grp(cqp, info, IRDMA_CQP_OP_DESTROY_MCAST_GRP,
scratch);
}
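/*
 * Usage sketch (hypothetical caller): the inline wrappers above only
 * select the CQP opcode, so creating and later destroying an address
 * handle reduces to the calls below; pd_idx and scratch are placeholders:
 *
 *	struct irdma_sc_ah ah = {0};
 *
 *	irdma_sc_init_ah(dev, &ah);
 *	ah.ah_info.pd_idx = pd_idx;	// plus the addressing fields
 *	ret = irdma_sc_create_ah(cqp, &ah.ah_info, scratch);
 *	...
 *	ret = irdma_sc_destroy_ah(cqp, &ah.ah_info, scratch);
 */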
#endif /* IRDMA_UDA_H */

415
sys/dev/irdma/irdma_uda_d.h Normal file
View File

@ -0,0 +1,415 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2019 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#ifndef IRDMA_UDA_D_H
#define IRDMA_UDA_D_H
/* L4 packet type */
#define IRDMA_E_UDA_SQ_L4T_UNKNOWN 0
#define IRDMA_E_UDA_SQ_L4T_TCP 1
#define IRDMA_E_UDA_SQ_L4T_SCTP 2
#define IRDMA_E_UDA_SQ_L4T_UDP 3
/* Inner IP header type */
#define IRDMA_E_UDA_SQ_IIPT_UNKNOWN 0
#define IRDMA_E_UDA_SQ_IIPT_IPV6 1
#define IRDMA_E_UDA_SQ_IIPT_IPV4_NO_CSUM 2
#define IRDMA_E_UDA_SQ_IIPT_IPV4_CSUM 3
/* UDA defined fields for transmit descriptors */
#define IRDMA_UDA_QPSQ_PUSHWQE_S 56
#define IRDMA_UDA_QPSQ_PUSHWQE_M BIT_ULL(IRDMA_UDA_QPSQ_PUSHWQE_S)
#define IRDMA_UDA_QPSQ_INLINEDATAFLAG_S 57
#define IRDMA_UDA_QPSQ_INLINEDATAFLAG_M \
BIT_ULL(IRDMA_UDA_QPSQ_INLINEDATAFLAG_S)
#define IRDMA_UDA_QPSQ_INLINEDATALEN_S 48
#define IRDMA_UDA_QPSQ_INLINEDATALEN_M \
((u64)0xff << IRDMA_UDA_QPSQ_INLINEDATALEN_S)
#define IRDMA_UDA_QPSQ_ADDFRAGCNT_S 38
#define IRDMA_UDA_QPSQ_ADDFRAGCNT_M \
((u64)0x0F << IRDMA_UDA_QPSQ_ADDFRAGCNT_S)
#define IRDMA_UDA_QPSQ_IPFRAGFLAGS_S 42
#define IRDMA_UDA_QPSQ_IPFRAGFLAGS_M \
((u64)0x3 << IRDMA_UDA_QPSQ_IPFRAGFLAGS_S)
#define IRDMA_UDA_QPSQ_NOCHECKSUM_S 45
#define IRDMA_UDA_QPSQ_NOCHECKSUM_M \
BIT_ULL(IRDMA_UDA_QPSQ_NOCHECKSUM_S)
#define IRDMA_UDA_QPSQ_AHIDXVALID_S 46
#define IRDMA_UDA_QPSQ_AHIDXVALID_M \
BIT_ULL(IRDMA_UDA_QPSQ_AHIDXVALID_S)
#define IRDMA_UDA_QPSQ_LOCAL_FENCE_S 61
#define IRDMA_UDA_QPSQ_LOCAL_FENCE_M \
BIT_ULL(IRDMA_UDA_QPSQ_LOCAL_FENCE_S)
#define IRDMA_UDA_QPSQ_AHIDX_S 0
#define IRDMA_UDA_QPSQ_AHIDX_M ((u64)0x1ffff << IRDMA_UDA_QPSQ_AHIDX_S)
#define IRDMA_UDA_QPSQ_PROTOCOL_S 16
#define IRDMA_UDA_QPSQ_PROTOCOL_M \
((u64)0xff << IRDMA_UDA_QPSQ_PROTOCOL_S)
#define IRDMA_UDA_QPSQ_EXTHDRLEN_S 32
#define IRDMA_UDA_QPSQ_EXTHDRLEN_M \
((u64)0x1ff << IRDMA_UDA_QPSQ_EXTHDRLEN_S)
#define IRDMA_UDA_QPSQ_MULTICAST_S 63
#define IRDMA_UDA_QPSQ_MULTICAST_M \
BIT_ULL(IRDMA_UDA_QPSQ_MULTICAST_S)
#define IRDMA_UDA_QPSQ_MACLEN_S 56
#define IRDMA_UDA_QPSQ_MACLEN_M \
((u64)0x7f << IRDMA_UDA_QPSQ_MACLEN_S)
#define IRDMA_UDA_QPSQ_MACLEN_LINE 2
#define IRDMA_UDA_QPSQ_IPLEN_S 48
#define IRDMA_UDA_QPSQ_IPLEN_M \
((u64)0x7f << IRDMA_UDA_QPSQ_IPLEN_S)
#define IRDMA_UDA_QPSQ_IPLEN_LINE 2
#define IRDMA_UDA_QPSQ_L4T_S 30
#define IRDMA_UDA_QPSQ_L4T_M ((u64)0x3 << IRDMA_UDA_QPSQ_L4T_S)
#define IRDMA_UDA_QPSQ_L4T_LINE 2
#define IRDMA_UDA_QPSQ_IIPT_S 28
#define IRDMA_UDA_QPSQ_IIPT_M ((u64)0x3 << IRDMA_UDA_QPSQ_IIPT_S)
#define IRDMA_UDA_QPSQ_IIPT_LINE 2
#define IRDMA_UDA_QPSQ_DO_LPB_LINE 3
#define IRDMA_UDA_QPSQ_FWD_PROG_CONFIRM_S 45
#define IRDMA_UDA_QPSQ_FWD_PROG_CONFIRM_M \
BIT_ULL(IRDMA_UDA_QPSQ_FWD_PROG_CONFIRM_S)
#define IRDMA_UDA_QPSQ_FWD_PROG_CONFIRM_LINE 3
#define IRDMA_UDA_QPSQ_IMMDATA_S 0
#define IRDMA_UDA_QPSQ_IMMDATA_M \
((u64)0xffffffffffffffff << IRDMA_UDA_QPSQ_IMMDATA_S)
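/*
 * Illustrative note: each _S/_M pair gives the shift and mask for one WQE
 * field, consumed by the LS_64() helper seen in irdma_uda.c; e.g. packing
 * the L4 type places the 2-bit value at bit 30 of its quad word:
 * LS_64(IRDMA_E_UDA_SQ_L4T_UDP, IRDMA_UDA_QPSQ_L4T) == (u64)3 << 30.
 */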
/* Byte Offset 0 */
#define IRDMA_UDAQPC_IPV4_S 3
#define IRDMA_UDAQPC_IPV4_M BIT_ULL(IRDMAQPC_IPV4_S)
#define IRDMA_UDAQPC_INSERTVLANTAG_S 5
#define IRDMA_UDAQPC_INSERTVLANTAG_M BIT_ULL(IRDMA_UDAQPC_INSERTVLANTAG_S)
#define IRDMA_UDAQPC_ISQP1_S 6
#define IRDMA_UDAQPC_ISQP1_M BIT_ULL(IRDMA_UDAQPC_ISQP1_S)
#define IRDMA_UDAQPC_RQWQESIZE_S IRDMAQPC_RQWQESIZE_S
#define IRDMA_UDAQPC_RQWQESIZE_M IRDMAQPC_RQWQESIZE_M
#define IRDMA_UDAQPC_ECNENABLE_S 14
#define IRDMA_UDAQPC_ECNENABLE_M BIT_ULL(IRDMA_UDAQPC_ECNENABLE_S)
#define IRDMA_UDAQPC_PDINDEXHI_S 20
#define IRDMA_UDAQPC_PDINDEXHI_M ((u64)3 << IRDMA_UDAQPC_PDINDEXHI_S)
#define IRDMA_UDAQPC_DCTCPENABLE_S 25
#define IRDMA_UDAQPC_DCTCPENABLE_M BIT_ULL(IRDMA_UDAQPC_DCTCPENABLE_S)
#define IRDMA_UDAQPC_RCVTPHEN_S IRDMAQPC_RCVTPHEN_S
#define IRDMA_UDAQPC_RCVTPHEN_M IRDMAQPC_RCVTPHEN_M
#define IRDMA_UDAQPC_XMITTPHEN_S IRDMAQPC_XMITTPHEN_S
#define IRDMA_UDAQPC_XMITTPHEN_M IRDMAQPC_XMITTPHEN_M
#define IRDMA_UDAQPC_RQTPHEN_S IRDMAQPC_RQTPHEN_S
#define IRDMA_UDAQPC_RQTPHEN_M IRDMAQPC_RQTPHEN_M
#define IRDMA_UDAQPC_SQTPHEN_S IRDMAQPC_SQTPHEN_S
#define IRDMA_UDAQPC_SQTPHEN_M IRDMAQPC_SQTPHEN_M
#define IRDMA_UDAQPC_PPIDX_S IRDMAQPC_PPIDX_S
#define IRDMA_UDAQPC_PPIDX_M IRDMAQPC_PPIDX_M
#define IRDMA_UDAQPC_PMENA_S IRDMAQPC_PMENA_S
#define IRDMA_UDAQPC_PMENA_M IRDMAQPC_PMENA_M
#define IRDMA_UDAQPC_INSERTTAG2_S 11
#define IRDMA_UDAQPC_INSERTTAG2_M BIT_ULL(IRDMA_UDAQPC_INSERTTAG2_S)
#define IRDMA_UDAQPC_INSERTTAG3_S 14
#define IRDMA_UDAQPC_INSERTTAG3_M BIT_ULL(IRDMA_UDAQPC_INSERTTAG3_S)
#define IRDMA_UDAQPC_RQSIZE_S IRDMAQPC_RQSIZE_S
#define IRDMA_UDAQPC_RQSIZE_M IRDMAQPC_RQSIZE_M
#define IRDMA_UDAQPC_SQSIZE_S IRDMAQPC_SQSIZE_S
#define IRDMA_UDAQPC_SQSIZE_M IRDMAQPC_SQSIZE_M
#define IRDMA_UDAQPC_TXCQNUM_S IRDMAQPC_TXCQNUM_S
#define IRDMA_UDAQPC_TXCQNUM_M IRDMAQPC_TXCQNUM_M
#define IRDMA_UDAQPC_RXCQNUM_S IRDMAQPC_RXCQNUM_S
#define IRDMA_UDAQPC_RXCQNUM_M IRDMAQPC_RXCQNUM_M
#define IRDMA_UDAQPC_QPCOMPCTX_S IRDMAQPC_QPCOMPCTX_S
#define IRDMA_UDAQPC_QPCOMPCTX_M IRDMAQPC_QPCOMPCTX_M
#define IRDMA_UDAQPC_SQTPHVAL_S IRDMAQPC_SQTPHVAL_S
#define IRDMA_UDAQPC_SQTPHVAL_M IRDMAQPC_SQTPHVAL_M
#define IRDMA_UDAQPC_RQTPHVAL_S IRDMAQPC_RQTPHVAL_S
#define IRDMA_UDAQPC_RQTPHVAL_M IRDMAQPC_RQTPHVAL_M
#define IRDMA_UDAQPC_QSHANDLE_S IRDMAQPC_QSHANDLE_S
#define IRDMA_UDAQPC_QSHANDLE_M IRDMAQPC_QSHANDLE_M
#define IRDMA_UDAQPC_RQHDRRINGBUFSIZE_S 48
#define IRDMA_UDAQPC_RQHDRRINGBUFSIZE_M \
((u64)0x3 << IRDMA_UDAQPC_RQHDRRINGBUFSIZE_S)
#define IRDMA_UDAQPC_SQHDRRINGBUFSIZE_S 32
#define IRDMA_UDAQPC_SQHDRRINGBUFSIZE_M \
((u64)0x3 << IRDMA_UDAQPC_SQHDRRINGBUFSIZE_S)
#define IRDMA_UDAQPC_PRIVILEGEENABLE_S 25
#define IRDMA_UDAQPC_PRIVILEGEENABLE_M \
BIT_ULL(IRDMA_UDAQPC_PRIVILEGEENABLE_S)
#define IRDMA_UDAQPC_USE_STATISTICS_INSTANCE_S 26
#define IRDMA_UDAQPC_USE_STATISTICS_INSTANCE_M \
BIT_ULL(IRDMA_UDAQPC_USE_STATISTICS_INSTANCE_S)
#define IRDMA_UDAQPC_STATISTICS_INSTANCE_INDEX_S 0
#define IRDMA_UDAQPC_STATISTICS_INSTANCE_INDEX_M \
((u64)0x7F << IRDMA_UDAQPC_STATISTICS_INSTANCE_INDEX_S)
#define IRDMA_UDAQPC_PRIVHDRGENENABLE_S 0
#define IRDMA_UDAQPC_PRIVHDRGENENABLE_M \
BIT_ULL(IRDMA_UDAQPC_PRIVHDRGENENABLE_S)
#define IRDMA_UDAQPC_RQHDRSPLITENABLE_S 3
#define IRDMA_UDAQPC_RQHDRSPLITENABLE_M \
BIT_ULL(IRDMA_UDAQPC_RQHDRSPLITENABLE_S)
#define IRDMA_UDAQPC_RQHDRRINGBUFENABLE_S 2
#define IRDMA_UDAQPC_RQHDRRINGBUFENABLE_M \
BIT_ULL(IRDMA_UDAQPC_RQHDRRINGBUFENABLE_S)
#define IRDMA_UDAQPC_SQHDRRINGBUFENABLE_S 1
#define IRDMA_UDAQPC_SQHDRRINGBUFENABLE_M \
BIT_ULL(IRDMA_UDAQPC_SQHDRRINGBUFENABLE_S)
#define IRDMA_UDAQPC_IPID_S 32
#define IRDMA_UDAQPC_IPID_M ((u64)0xffff << IRDMA_UDAQPC_IPID_S)
#define IRDMA_UDAQPC_SNDMSS_S 16
#define IRDMA_UDAQPC_SNDMSS_M ((u64)0x3fff << IRDMA_UDAQPC_SNDMSS_S)
#define IRDMA_UDAQPC_VLANTAG_S 0
#define IRDMA_UDAQPC_VLANTAG_M ((u64)0xffff << IRDMA_UDAQPC_VLANTAG_S)
/* Address Handle */
#define IRDMA_UDA_CQPSQ_MAV_PDINDEXHI_S 20
#define IRDMA_UDA_CQPSQ_MAV_PDINDEXHI_M \
((u64)0x3 << IRDMA_UDA_CQPSQ_MAV_PDINDEXHI_S)
#define IRDMA_UDA_CQPSQ_MAV_PDINDEXLO_S 48
#define IRDMA_UDA_CQPSQ_MAV_PDINDEXLO_M \
((u64)0xffff << IRDMA_UDA_CQPSQ_MAV_PDINDEXLO_S)
#define IRDMA_UDA_CQPSQ_MAV_SRCMACADDRINDEX_S 24
#define IRDMA_UDA_CQPSQ_MAV_SRCMACADDRINDEX_M \
((u64)0x3f << IRDMA_UDA_CQPSQ_MAV_SRCMACADDRINDEX_S)
#define IRDMA_UDA_CQPSQ_MAV_ARPINDEX_S 48
#define IRDMA_UDA_CQPSQ_MAV_ARPINDEX_M \
((u64)0xffff << IRDMA_UDA_CQPSQ_MAV_ARPINDEX_S)
#define IRDMA_UDA_CQPSQ_MAV_TC_S 32
#define IRDMA_UDA_CQPSQ_MAV_TC_M ((u64)0xff << IRDMA_UDA_CQPSQ_MAV_TC_S)
#define IRDMA_UDA_CQPSQ_MAV_HOPLIMIT_S 32
#define IRDMA_UDA_CQPSQ_MAV_HOPLIMIT_M \
((u64)0xff << IRDMA_UDA_CQPSQ_MAV_HOPLIMIT_S)
#define IRDMA_UDA_CQPSQ_MAV_FLOWLABEL_S 0
#define IRDMA_UDA_CQPSQ_MAV_FLOWLABEL_M \
((u64)0xfffff << IRDMA_UDA_CQPSQ_MAV_FLOWLABEL_S)
#define IRDMA_UDA_CQPSQ_MAV_ADDR0_S 32
#define IRDMA_UDA_CQPSQ_MAV_ADDR0_M \
((u64)0xffffffff << IRDMA_UDA_CQPSQ_MAV_ADDR0_S)
#define IRDMA_UDA_CQPSQ_MAV_ADDR1_S 0
#define IRDMA_UDA_CQPSQ_MAV_ADDR1_M \
((u64)0xffffffff << IRDMA_UDA_CQPSQ_MAV_ADDR1_S)
#define IRDMA_UDA_CQPSQ_MAV_ADDR2_S 32
#define IRDMA_UDA_CQPSQ_MAV_ADDR2_M \
((u64)0xffffffff << IRDMA_UDA_CQPSQ_MAV_ADDR2_S)
#define IRDMA_UDA_CQPSQ_MAV_ADDR3_S 0
#define IRDMA_UDA_CQPSQ_MAV_ADDR3_M \
((u64)0xffffffff << IRDMA_UDA_CQPSQ_MAV_ADDR3_S)
#define IRDMA_UDA_CQPSQ_MAV_WQEVALID_S 63
#define IRDMA_UDA_CQPSQ_MAV_WQEVALID_M \
BIT_ULL(IRDMA_UDA_CQPSQ_MAV_WQEVALID_S)
#define IRDMA_UDA_CQPSQ_MAV_OPCODE_S 32
#define IRDMA_UDA_CQPSQ_MAV_OPCODE_M \
((u64)0x3f << IRDMA_UDA_CQPSQ_MAV_OPCODE_S)
#define IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK_S 62
#define IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK_M \
BIT_ULL(IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK_S)
#define IRDMA_UDA_CQPSQ_MAV_IPV4VALID_S 59
#define IRDMA_UDA_CQPSQ_MAV_IPV4VALID_M \
BIT_ULL(IRDMA_UDA_CQPSQ_MAV_IPV4VALID_S)
#define IRDMA_UDA_CQPSQ_MAV_AVIDX_S 0
#define IRDMA_UDA_CQPSQ_MAV_AVIDX_M \
((u64)0x1ffff << IRDMA_UDA_CQPSQ_MAV_AVIDX_S)
#define IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG_S 60
#define IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG_M BIT_ULL(IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG_S)
/* UDA multicast group */
#define IRDMA_UDA_MGCTX_VFFLAG_S 29
#define IRDMA_UDA_MGCTX_VFFLAG_M BIT_ULL(IRDMA_UDA_MGCTX_VFFLAG_S)
#define IRDMA_UDA_MGCTX_DESTPORT_S 32
#define IRDMA_UDA_MGCTX_DESTPORT_M ((u64)0xffff << IRDMA_UDA_MGCTX_DESTPORT_S)
#define IRDMA_UDA_MGCTX_VFID_S 22
#define IRDMA_UDA_MGCTX_VFID_M ((u64)0x7f << IRDMA_UDA_MGCTX_VFID_S)
#define IRDMA_UDA_MGCTX_VALIDENT_S 31
#define IRDMA_UDA_MGCTX_VALIDENT_M BIT_ULL(IRDMA_UDA_MGCTX_VALIDENT_S)
#define IRDMA_UDA_MGCTX_PFID_S 18
#define IRDMA_UDA_MGCTX_PFID_M ((u64)0xf << IRDMA_UDA_MGCTX_PFID_S)
#define IRDMA_UDA_MGCTX_FLAGIGNOREDPORT_S 30
#define IRDMA_UDA_MGCTX_FLAGIGNOREDPORT_M \
BIT_ULL(IRDMA_UDA_MGCTX_FLAGIGNOREDPORT_S)
#define IRDMA_UDA_MGCTX_QPID_S 0
#define IRDMA_UDA_MGCTX_QPID_M ((u64)0x3ffff << IRDMA_UDA_MGCTX_QPID_S)
/* multicast group create CQP command */
#define IRDMA_UDA_CQPSQ_MG_WQEVALID_S 63
#define IRDMA_UDA_CQPSQ_MG_WQEVALID_M \
BIT_ULL(IRDMA_UDA_CQPSQ_MG_WQEVALID_S)
#define IRDMA_UDA_CQPSQ_MG_OPCODE_S 32
#define IRDMA_UDA_CQPSQ_MG_OPCODE_M ((u64)0x3f << IRDMA_UDA_CQPSQ_MG_OPCODE_S)
#define IRDMA_UDA_CQPSQ_MG_MGIDX_S 0
#define IRDMA_UDA_CQPSQ_MG_MGIDX_M ((u64)0x1fff << IRDMA_UDA_CQPSQ_MG_MGIDX_S)
#define IRDMA_UDA_CQPSQ_MG_IPV4VALID_S 60
#define IRDMA_UDA_CQPSQ_MG_IPV4VALID_M BIT_ULL(IRDMA_UDA_CQPSQ_MG_IPV4VALID_S)
#define IRDMA_UDA_CQPSQ_MG_VLANVALID_S 59
#define IRDMA_UDA_CQPSQ_MG_VLANVALID_M BIT_ULL(IRDMA_UDA_CQPSQ_MG_VLANVALID_S)
#define IRDMA_UDA_CQPSQ_MG_HMC_FCN_ID_S 0
#define IRDMA_UDA_CQPSQ_MG_HMC_FCN_ID_M ((u64)0x3F << IRDMA_UDA_CQPSQ_MG_HMC_FCN_ID_S)
#define IRDMA_UDA_CQPSQ_MG_VLANID_S 32
#define IRDMA_UDA_CQPSQ_MG_VLANID_M ((u64)0xFFF << IRDMA_UDA_CQPSQ_MG_VLANID_S)
#define IRDMA_UDA_CQPSQ_QS_HANDLE_S 0
#define IRDMA_UDA_CQPSQ_QS_HANDLE_M ((u64)0x3FF << IRDMA_UDA_CQPSQ_QS_HANDLE_S)
/* Quad hash table */
#define IRDMA_UDA_CQPSQ_QHASH_QPN_S 32
#define IRDMA_UDA_CQPSQ_QHASH_QPN_M \
((u64)0x3ffff << IRDMA_UDA_CQPSQ_QHASH_QPN_S)
#define IRDMA_UDA_CQPSQ_QHASH__S 0
#define IRDMA_UDA_CQPSQ_QHASH__M BIT_ULL(IRDMA_UDA_CQPSQ_QHASH__S)
#define IRDMA_UDA_CQPSQ_QHASH_SRC_PORT_S 16
#define IRDMA_UDA_CQPSQ_QHASH_SRC_PORT_M \
((u64)0xffff << IRDMA_UDA_CQPSQ_QHASH_SRC_PORT_S)
#define IRDMA_UDA_CQPSQ_QHASH_DEST_PORT_S 0
#define IRDMA_UDA_CQPSQ_QHASH_DEST_PORT_M \
((u64)0xffff << IRDMA_UDA_CQPSQ_QHASH_DEST_PORT_S)
#define IRDMA_UDA_CQPSQ_QHASH_ADDR0_S 32
#define IRDMA_UDA_CQPSQ_QHASH_ADDR0_M \
((u64)0xffffffff << IRDMA_UDA_CQPSQ_QHASH_ADDR0_S)
#define IRDMA_UDA_CQPSQ_QHASH_ADDR1_S 0
#define IRDMA_UDA_CQPSQ_QHASH_ADDR1_M \
((u64)0xffffffff << IRDMA_UDA_CQPSQ_QHASH_ADDR1_S)
#define IRDMA_UDA_CQPSQ_QHASH_ADDR2_S 32
#define IRDMA_UDA_CQPSQ_QHASH_ADDR2_M \
((u64)0xffffffff << IRDMA_UDA_CQPSQ_QHASH_ADDR2_S)
#define IRDMA_UDA_CQPSQ_QHASH_ADDR3_S 0
#define IRDMA_UDA_CQPSQ_QHASH_ADDR3_M \
((u64)0xffffffff << IRDMA_UDA_CQPSQ_QHASH_ADDR3_S)
#define IRDMA_UDA_CQPSQ_QHASH_WQEVALID_S 63
#define IRDMA_UDA_CQPSQ_QHASH_WQEVALID_M \
BIT_ULL(IRDMA_UDA_CQPSQ_QHASH_WQEVALID_S)
#define IRDMA_UDA_CQPSQ_QHASH_OPCODE_S 32
#define IRDMA_UDA_CQPSQ_QHASH_OPCODE_M \
((u64)0x3f << IRDMA_UDA_CQPSQ_QHASH_OPCODE_S)
#define IRDMA_UDA_CQPSQ_QHASH_MANAGE_S 61
#define IRDMA_UDA_CQPSQ_QHASH_MANAGE_M \
((u64)0x3 << IRDMA_UDA_CQPSQ_QHASH_MANAGE_S)
#define IRDMA_UDA_CQPSQ_QHASH_IPV4VALID_S 60
#define IRDMA_UDA_CQPSQ_QHASH_IPV4VALID_M \
((u64)0x1 << IRDMA_UDA_CQPSQ_QHASH_IPV4VALID_S)
#define IRDMA_UDA_CQPSQ_QHASH_LANFWD_S 59
#define IRDMA_UDA_CQPSQ_QHASH_LANFWD_M \
((u64)0x1 << IRDMA_UDA_CQPSQ_QHASH_LANFWD_S)
#define IRDMA_UDA_CQPSQ_QHASH_ENTRYTYPE_S 42
#define IRDMA_UDA_CQPSQ_QHASH_ENTRYTYPE_M \
((u64)0x7 << IRDMA_UDA_CQPSQ_QHASH_ENTRYTYPE_S)
#endif /* IRDMA_UDA_D_H */

1808
sys/dev/irdma/irdma_uk.c Normal file

File diff suppressed because it is too large

477
sys/dev/irdma/irdma_user.h Normal file
View File

@ -0,0 +1,477 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2015 - 2021 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#ifndef IRDMA_USER_H
#define IRDMA_USER_H
#define irdma_handle void *
#define irdma_adapter_handle irdma_handle
#define irdma_qp_handle irdma_handle
#define irdma_cq_handle irdma_handle
#define irdma_pd_id irdma_handle
#define irdma_stag_handle irdma_handle
#define irdma_stag_index u32
#define irdma_stag u32
#define irdma_stag_key u8
#define irdma_tagged_offset u64
#define irdma_access_privileges u32
#define irdma_physical_fragment u64
#define irdma_address_list u64 *
#define irdma_sgl struct irdma_sge *
#define IRDMA_MAX_MR_SIZE 0x200000000000ULL
#define IRDMA_ACCESS_FLAGS_LOCALREAD 0x01
#define IRDMA_ACCESS_FLAGS_LOCALWRITE 0x02
#define IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY 0x04
#define IRDMA_ACCESS_FLAGS_REMOTEREAD 0x05
#define IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY 0x08
#define IRDMA_ACCESS_FLAGS_REMOTEWRITE 0x0a
#define IRDMA_ACCESS_FLAGS_BIND_WINDOW 0x10
#define IRDMA_ACCESS_FLAGS_ZERO_BASED 0x20
#define IRDMA_ACCESS_FLAGS_ALL 0x3f
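/*
 * Illustrative note: the composite access flags already include the local
 * right they imply, e.g. IRDMA_ACCESS_FLAGS_REMOTEWRITE (0x0a) ==
 * REMOTEWRITE_ONLY | LOCALWRITE, and IRDMA_ACCESS_FLAGS_REMOTEREAD (0x05)
 * == REMOTEREAD_ONLY | LOCALREAD.
 */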
#define IRDMA_OP_TYPE_RDMA_WRITE 0x00
#define IRDMA_OP_TYPE_RDMA_READ 0x01
#define IRDMA_OP_TYPE_SEND 0x03
#define IRDMA_OP_TYPE_SEND_INV 0x04
#define IRDMA_OP_TYPE_SEND_SOL 0x05
#define IRDMA_OP_TYPE_SEND_SOL_INV 0x06
#define IRDMA_OP_TYPE_RDMA_WRITE_SOL 0x0d
#define IRDMA_OP_TYPE_BIND_MW 0x08
#define IRDMA_OP_TYPE_FAST_REG_NSMR 0x09
#define IRDMA_OP_TYPE_INV_STAG 0x0a
#define IRDMA_OP_TYPE_RDMA_READ_INV_STAG 0x0b
#define IRDMA_OP_TYPE_NOP 0x0c
#define IRDMA_OP_TYPE_REC 0x3e
#define IRDMA_OP_TYPE_REC_IMM 0x3f
#define IRDMA_FLUSH_MAJOR_ERR 1
enum irdma_device_caps_const {
IRDMA_WQE_SIZE = 4,
IRDMA_CQP_WQE_SIZE = 8,
IRDMA_CQE_SIZE = 4,
IRDMA_EXTENDED_CQE_SIZE = 8,
IRDMA_AEQE_SIZE = 2,
IRDMA_CEQE_SIZE = 1,
IRDMA_CQP_CTX_SIZE = 8,
IRDMA_SHADOW_AREA_SIZE = 8,
IRDMA_GATHER_STATS_BUF_SIZE = 1024,
IRDMA_MIN_IW_QP_ID = 0,
IRDMA_QUERY_FPM_BUF_SIZE = 176,
IRDMA_COMMIT_FPM_BUF_SIZE = 176,
IRDMA_MAX_IW_QP_ID = 262143,
IRDMA_MIN_CEQID = 0,
IRDMA_MAX_CEQID = 1023,
IRDMA_CEQ_MAX_COUNT = IRDMA_MAX_CEQID + 1,
IRDMA_MIN_CQID = 0,
IRDMA_MAX_CQID = 524287,
IRDMA_MIN_AEQ_ENTRIES = 1,
IRDMA_MAX_AEQ_ENTRIES = 524287,
IRDMA_MIN_CEQ_ENTRIES = 1,
IRDMA_MAX_CEQ_ENTRIES = 262143,
IRDMA_MIN_CQ_SIZE = 1,
IRDMA_MAX_CQ_SIZE = 1048575,
IRDMA_DB_ID_ZERO = 0,
/* 64K + 1 */
IRDMA_MAX_OUTBOUND_MSG_SIZE = 65537,
/* 64K + 1 */
IRDMA_MAX_INBOUND_MSG_SIZE = 65537,
IRDMA_MAX_PUSH_PAGE_COUNT = 1024,
IRDMA_MAX_PE_ENA_VF_COUNT = 32,
IRDMA_MAX_VF_FPM_ID = 47,
IRDMA_MAX_SQ_PAYLOAD_SIZE = 2145386496,
IRDMA_MAX_INLINE_DATA_SIZE = 101,
IRDMA_MAX_WQ_ENTRIES = 32768,
IRDMA_Q2_BUF_SIZE = 256,
IRDMA_QP_CTX_SIZE = 256,
IRDMA_MAX_PDS = 262144,
};
enum irdma_addressing_type {
IRDMA_ADDR_TYPE_ZERO_BASED = 0,
IRDMA_ADDR_TYPE_VA_BASED = 1,
};
enum irdma_flush_opcode {
FLUSH_INVALID = 0,
FLUSH_GENERAL_ERR,
FLUSH_PROT_ERR,
FLUSH_REM_ACCESS_ERR,
FLUSH_LOC_QP_OP_ERR,
FLUSH_REM_OP_ERR,
FLUSH_LOC_LEN_ERR,
FLUSH_FATAL_ERR,
FLUSH_MW_BIND_ERR,
FLUSH_REM_INV_REQ_ERR,
FLUSH_RETRY_EXC_ERR,
};
enum irdma_cmpl_status {
IRDMA_COMPL_STATUS_SUCCESS = 0,
IRDMA_COMPL_STATUS_FLUSHED,
IRDMA_COMPL_STATUS_INVALID_WQE,
IRDMA_COMPL_STATUS_QP_CATASTROPHIC,
IRDMA_COMPL_STATUS_REMOTE_TERMINATION,
IRDMA_COMPL_STATUS_INVALID_STAG,
IRDMA_COMPL_STATUS_BASE_BOUND_VIOLATION,
IRDMA_COMPL_STATUS_ACCESS_VIOLATION,
IRDMA_COMPL_STATUS_INVALID_PD_ID,
IRDMA_COMPL_STATUS_WRAP_ERROR,
IRDMA_COMPL_STATUS_STAG_INVALID_PDID,
IRDMA_COMPL_STATUS_RDMA_READ_ZERO_ORD,
IRDMA_COMPL_STATUS_QP_NOT_PRIVLEDGED,
IRDMA_COMPL_STATUS_STAG_NOT_INVALID,
IRDMA_COMPL_STATUS_INVALID_PHYS_BUF_SIZE,
IRDMA_COMPL_STATUS_INVALID_PHYS_BUF_ENTRY,
IRDMA_COMPL_STATUS_INVALID_FBO,
IRDMA_COMPL_STATUS_INVALID_LEN,
IRDMA_COMPL_STATUS_INVALID_ACCESS,
IRDMA_COMPL_STATUS_PHYS_BUF_LIST_TOO_LONG,
IRDMA_COMPL_STATUS_INVALID_VIRT_ADDRESS,
IRDMA_COMPL_STATUS_INVALID_REGION,
IRDMA_COMPL_STATUS_INVALID_WINDOW,
IRDMA_COMPL_STATUS_INVALID_TOTAL_LEN,
IRDMA_COMPL_STATUS_UNKNOWN,
};
enum irdma_cmpl_notify {
IRDMA_CQ_COMPL_EVENT = 0,
IRDMA_CQ_COMPL_SOLICITED = 1,
};
enum irdma_qp_caps {
IRDMA_WRITE_WITH_IMM = 1,
IRDMA_SEND_WITH_IMM = 2,
IRDMA_ROCE = 4,
IRDMA_PUSH_MODE = 8,
};
struct irdma_qp_uk;
struct irdma_cq_uk;
struct irdma_qp_uk_init_info;
struct irdma_cq_uk_init_info;
struct irdma_sge {
irdma_tagged_offset tag_off;
u32 len;
irdma_stag stag;
};
struct irdma_ring {
volatile u32 head;
volatile u32 tail;
u32 size;
};
struct irdma_cqe {
__le64 buf[IRDMA_CQE_SIZE];
};
struct irdma_extended_cqe {
__le64 buf[IRDMA_EXTENDED_CQE_SIZE];
};
struct irdma_post_send {
irdma_sgl sg_list;
u32 num_sges;
u32 qkey;
u32 dest_qp;
u32 ah_id;
};
struct irdma_post_inline_send {
void *data;
u32 len;
u32 qkey;
u32 dest_qp;
u32 ah_id;
};
struct irdma_post_rq_info {
u64 wr_id;
irdma_sgl sg_list;
u32 num_sges;
};
struct irdma_rdma_write {
irdma_sgl lo_sg_list;
u32 num_lo_sges;
struct irdma_sge rem_addr;
};
struct irdma_inline_rdma_write {
void *data;
u32 len;
struct irdma_sge rem_addr;
};
struct irdma_rdma_read {
irdma_sgl lo_sg_list;
u32 num_lo_sges;
struct irdma_sge rem_addr;
};
struct irdma_bind_window {
irdma_stag mr_stag;
u64 bind_len;
void *va;
enum irdma_addressing_type addressing_type;
bool ena_reads:1;
bool ena_writes:1;
irdma_stag mw_stag;
bool mem_window_type_1:1;
};
struct irdma_inv_local_stag {
irdma_stag target_stag;
};
struct irdma_post_sq_info {
u64 wr_id;
u8 op_type;
u8 l4len;
bool signaled:1;
bool read_fence:1;
bool local_fence:1;
bool inline_data:1;
bool imm_data_valid:1;
bool push_wqe:1;
bool report_rtt:1;
bool udp_hdr:1;
bool defer_flag:1;
u32 imm_data;
u32 stag_to_inv;
union {
struct irdma_post_send send;
struct irdma_rdma_write rdma_write;
struct irdma_rdma_read rdma_read;
struct irdma_bind_window bind_window;
struct irdma_inv_local_stag inv_local_stag;
struct irdma_inline_rdma_write inline_rdma_write;
struct irdma_post_inline_send inline_send;
} op;
};
struct irdma_cq_poll_info {
u64 wr_id;
irdma_qp_handle qp_handle;
u32 bytes_xfered;
u32 tcp_seq_num_rtt;
u32 qp_id;
u32 ud_src_qpn;
u32 imm_data;
irdma_stag inv_stag; /* or L_R_Key */
enum irdma_cmpl_status comp_status;
u16 major_err;
u16 minor_err;
u16 ud_vlan;
u8 ud_smac[6];
u8 op_type;
bool stag_invalid_set:1; /* or L_R_Key set */
bool push_dropped:1;
bool error:1;
bool solicited_event:1;
bool ipv4:1;
bool ud_vlan_valid:1;
bool ud_smac_valid:1;
bool imm_valid:1;
bool signaled:1;
};
int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
struct irdma_post_sq_info *info, bool post_sq);
int irdma_uk_inline_send(struct irdma_qp_uk *qp,
struct irdma_post_sq_info *info, bool post_sq);
int irdma_uk_mw_bind(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
bool post_sq);
int irdma_uk_post_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled,
bool post_sq);
int irdma_uk_post_receive(struct irdma_qp_uk *qp,
struct irdma_post_rq_info *info);
void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp);
int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
bool inv_stag, bool post_sq);
int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
bool post_sq);
int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
bool post_sq);
int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
struct irdma_post_sq_info *info,
bool post_sq);
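/*
 * Usage sketch (hypothetical caller): a signaled RDMA write is posted by
 * filling struct irdma_post_sq_info and handing it to
 * irdma_uk_rdma_write(); sge setup and completion polling are omitted.
 *
 *	struct irdma_post_sq_info info = {0};
 *
 *	info.wr_id = wr_id;
 *	info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
 *	info.signaled = true;
 *	info.op.rdma_write.lo_sg_list = sg_list;
 *	info.op.rdma_write.num_lo_sges = num_sges;
 *	info.op.rdma_write.rem_addr = rem_sge;
 *	ret = irdma_uk_rdma_write(qp, &info, true);
 */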
struct irdma_wqe_uk_ops {
void (*iw_copy_inline_data)(u8 *dest, u8 *src, u32 len, u8 polarity);
u16 (*iw_inline_data_size_to_quanta)(u32 data_size);
void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct irdma_sge *sge,
u8 valid);
void (*iw_set_mw_bind_wqe)(__le64 *wqe,
struct irdma_bind_window *op_info);
};
int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
struct irdma_cq_poll_info *info);
void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
enum irdma_cmpl_notify cq_notify);
void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int size);
void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *qp, u16 cnt);
int irdma_uk_cq_init(struct irdma_cq_uk *cq,
struct irdma_cq_uk_init_info *info);
int irdma_uk_qp_init(struct irdma_qp_uk *qp,
struct irdma_qp_uk_init_info *info);
struct irdma_sq_uk_wr_trk_info {
u64 wrid;
u32 wr_len;
u16 quanta;
u8 signaled;
u8 reserved[1];
};
struct irdma_qp_quanta {
__le64 elem[IRDMA_WQE_SIZE];
};
struct irdma_qp_uk {
struct irdma_qp_quanta *sq_base;
struct irdma_qp_quanta *rq_base;
struct irdma_uk_attrs *uk_attrs;
u32 IOMEM *wqe_alloc_db;
struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;
struct irdma_sig_wr_trk_info *sq_sigwrtrk_array;
u64 *rq_wrid_array;
__le64 *shadow_area;
__le32 *push_db;
__le64 *push_wqe;
struct irdma_ring sq_ring;
struct irdma_ring sq_sig_ring;
struct irdma_ring rq_ring;
struct irdma_ring initial_ring;
u32 qp_id;
u32 qp_caps;
u32 sq_size;
u32 rq_size;
u32 max_sq_frag_cnt;
u32 max_rq_frag_cnt;
u32 max_inline_data;
u32 last_rx_cmpl_idx;
u32 last_tx_cmpl_idx;
struct irdma_wqe_uk_ops wqe_ops;
u16 conn_wqes;
u8 qp_type;
u8 swqe_polarity;
u8 swqe_polarity_deferred;
u8 rwqe_polarity;
u8 rq_wqe_size;
u8 rq_wqe_size_multiplier;
bool deferred_flag:1;
bool push_mode:1; /* whether the last post wqe was pushed */
bool push_dropped:1;
bool first_sq_wq:1;
bool sq_flush_complete:1; /* Indicates flush was seen and SQ was empty after the flush */
bool rq_flush_complete:1; /* Indicates flush was seen and RQ was empty after the flush */
bool destroy_pending:1; /* Indicates the QP is being destroyed */
void *back_qp;
spinlock_t *lock;
bool force_fence;
u8 dbg_rq_flushed;
u16 ord_cnt;
u8 sq_flush_seen;
u8 rq_flush_seen;
u8 rd_fence_rate;
};
struct irdma_cq_uk {
struct irdma_cqe *cq_base;
u32 IOMEM *cqe_alloc_db;
u32 IOMEM *cq_ack_db;
__le64 *shadow_area;
u32 cq_id;
u32 cq_size;
struct irdma_ring cq_ring;
u8 polarity;
bool armed:1;
bool avoid_mem_cflct:1;
};
struct irdma_qp_uk_init_info {
struct irdma_qp_quanta *sq;
struct irdma_qp_quanta *rq;
struct irdma_uk_attrs *uk_attrs;
u32 IOMEM *wqe_alloc_db;
__le64 *shadow_area;
struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;
struct irdma_sig_wr_trk_info *sq_sigwrtrk_array;
u64 *rq_wrid_array;
u32 qp_id;
u32 qp_caps;
u32 sq_size;
u32 rq_size;
u32 max_sq_frag_cnt;
u32 max_rq_frag_cnt;
u32 max_inline_data;
u8 first_sq_wq;
u8 type;
u8 rd_fence_rate;
int abi_ver;
bool legacy_mode;
};
struct irdma_cq_uk_init_info {
u32 IOMEM *cqe_alloc_db;
u32 IOMEM *cq_ack_db;
struct irdma_cqe *cq_base;
__le64 *shadow_area;
u32 cq_size;
u32 cq_id;
bool avoid_mem_cflct;
};
__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
u16 quanta, u32 total_size,
struct irdma_post_sq_info *info);
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx);
int irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq);
int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq);
int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta);
int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size);
void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
u32 inline_data, u8 *shift);
int irdma_get_sqdepth(u32 max_hw_wq_quanta, u32 sq_size, u8 shift, u32 *wqdepth);
int irdma_get_rqdepth(u32 max_hw_rq_quanta, u32 rq_size, u8 shift, u32 *wqdepth);
void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
u32 wqe_idx, bool post_sq);
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx);
#endif /* IRDMA_USER_H */

2325
sys/dev/irdma/irdma_utils.c Normal file

File diff suppressed because it is too large

3364
sys/dev/irdma/irdma_verbs.c Normal file

File diff suppressed because it is too large

313
sys/dev/irdma/irdma_verbs.h Normal file

@ -0,0 +1,313 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2015 - 2021 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#ifndef IRDMA_VERBS_H
#define IRDMA_VERBS_H
#define IRDMA_MAX_SAVED_PHY_PGADDR 4
#define IRDMA_FLUSH_DELAY_MS 1500
#define IRDMA_PKEY_TBL_SZ 1
#define IRDMA_DEFAULT_PKEY 0xFFFF
#define iwdev_to_idev(iwdev) (&(iwdev)->rf->sc_dev)
struct irdma_ucontext {
struct ib_ucontext ibucontext;
struct irdma_device *iwdev;
struct rdma_user_mmap_entry *db_mmap_entry;
struct list_head cq_reg_mem_list;
spinlock_t cq_reg_mem_list_lock; /* protect CQ memory list */
struct list_head qp_reg_mem_list;
spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */
/* FIXME: Move to kcompat ideally. Used < 4.20.0 for old disassociate flow */
struct list_head vma_list;
struct mutex vma_list_mutex; /* protect the vma_list */
int abi_ver;
bool legacy_mode;
};
struct irdma_pd {
struct ib_pd ibpd;
struct irdma_sc_pd sc_pd;
};
struct irdma_av {
u8 macaddr[16];
struct ib_ah_attr attrs;
union {
struct sockaddr saddr;
struct sockaddr_in saddr_in;
struct sockaddr_in6 saddr_in6;
} sgid_addr, dgid_addr;
u8 net_type;
};
struct irdma_ah {
struct ib_ah ibah;
struct irdma_sc_ah sc_ah;
struct irdma_pd *pd;
struct irdma_av av;
u8 sgid_index;
union ib_gid dgid;
};
struct irdma_hmc_pble {
union {
u32 idx;
dma_addr_t addr;
};
};
struct irdma_cq_mr {
struct irdma_hmc_pble cq_pbl;
dma_addr_t shadow;
bool split;
};
struct irdma_qp_mr {
struct irdma_hmc_pble sq_pbl;
struct irdma_hmc_pble rq_pbl;
dma_addr_t shadow;
struct page *sq_page;
};
struct irdma_cq_buf {
struct irdma_dma_mem kmem_buf;
struct irdma_cq_uk cq_uk;
struct irdma_hw *hw;
struct list_head list;
struct work_struct work;
};
struct irdma_pbl {
struct list_head list;
union {
struct irdma_qp_mr qp_mr;
struct irdma_cq_mr cq_mr;
};
bool pbl_allocated:1;
bool on_list:1;
u64 user_base;
struct irdma_pble_alloc pble_alloc;
struct irdma_mr *iwmr;
};
struct irdma_mr {
union {
struct ib_mr ibmr;
struct ib_mw ibmw;
};
struct ib_umem *region;
u16 type;
u32 page_cnt;
u64 page_size;
u64 page_msk;
u32 npages;
u32 stag;
u64 len;
u64 pgaddrmem[IRDMA_MAX_SAVED_PHY_PGADDR];
struct irdma_pbl iwpbl;
};
struct irdma_cq {
struct ib_cq ibcq;
struct irdma_sc_cq sc_cq;
u16 cq_head;
u16 cq_size;
u16 cq_num;
bool user_mode;
bool armed;
enum irdma_cmpl_notify last_notify;
u32 polled_cmpls;
u32 cq_mem_size;
struct irdma_dma_mem kmem;
struct irdma_dma_mem kmem_shadow;
struct completion free_cq;
atomic_t refcnt;
spinlock_t lock; /* for poll cq */
struct irdma_pbl *iwpbl;
struct irdma_pbl *iwpbl_shadow;
struct list_head resize_list;
struct irdma_cq_poll_info cur_cqe;
struct list_head cmpl_generated;
};
struct irdma_cmpl_gen {
struct list_head list;
struct irdma_cq_poll_info cpi;
};
struct disconn_work {
struct work_struct work;
struct irdma_qp *iwqp;
};
struct iw_cm_id;
struct irdma_qp_kmode {
struct irdma_dma_mem dma_mem;
u32 *sig_trk_mem;
struct irdma_sq_uk_wr_trk_info *sq_wrid_mem;
u64 *rq_wrid_mem;
};
struct irdma_qp {
struct ib_qp ibqp;
struct irdma_sc_qp sc_qp;
struct irdma_device *iwdev;
struct irdma_cq *iwscq;
struct irdma_cq *iwrcq;
struct irdma_pd *iwpd;
struct rdma_user_mmap_entry *push_wqe_mmap_entry;
struct rdma_user_mmap_entry *push_db_mmap_entry;
struct irdma_qp_host_ctx_info ctx_info;
union {
struct irdma_iwarp_offload_info iwarp_info;
struct irdma_roce_offload_info roce_info;
};
union {
struct irdma_tcp_offload_info tcp_info;
struct irdma_udp_offload_info udp_info;
};
struct irdma_ah roce_ah;
struct list_head teardown_entry;
atomic_t refcnt;
struct iw_cm_id *cm_id;
struct irdma_cm_node *cm_node;
struct delayed_work dwork_flush;
struct ib_mr *lsmm_mr;
atomic_t hw_mod_qp_pend;
enum ib_qp_state ibqp_state;
u32 qp_mem_size;
u32 last_aeq;
int max_send_wr;
int max_recv_wr;
atomic_t close_timer_started;
spinlock_t lock; /* serialize posting WRs to SQ/RQ */
struct irdma_qp_context *iwqp_context;
void *pbl_vbase;
dma_addr_t pbl_pbase;
struct page *page;
u8 active_conn : 1;
u8 user_mode : 1;
u8 hte_added : 1;
u8 flush_issued : 1;
u8 sig_all : 1;
u8 pau_mode : 1;
u8 rsvd : 1;
u8 iwarp_state;
u16 term_sq_flush_code;
u16 term_rq_flush_code;
u8 hw_iwarp_state;
u8 hw_tcp_state;
struct irdma_qp_kmode kqp;
struct irdma_dma_mem host_ctx;
struct timer_list terminate_timer;
struct irdma_pbl *iwpbl;
struct irdma_sge *sg_list;
struct irdma_dma_mem q2_ctx_mem;
struct irdma_dma_mem ietf_mem;
struct completion free_qp;
wait_queue_head_t waitq;
wait_queue_head_t mod_qp_waitq;
u8 rts_ae_rcvd;
};
enum irdma_mmap_flag {
IRDMA_MMAP_IO_NC,
IRDMA_MMAP_IO_WC,
};
struct irdma_user_mmap_entry {
struct rdma_user_mmap_entry rdma_entry;
u64 bar_offset;
u8 mmap_flag;
};
static inline u16 irdma_fw_major_ver(struct irdma_sc_dev *dev)
{
return (u16)RS_64(dev->feature_info[IRDMA_FEATURE_FW_INFO], IRDMA_FW_VER_MAJOR);
}
static inline u16 irdma_fw_minor_ver(struct irdma_sc_dev *dev)
{
return (u16)RS_64(dev->feature_info[IRDMA_FEATURE_FW_INFO], IRDMA_FW_VER_MINOR);
}
/**
* irdma_mcast_mac_v4 - Get the multicast MAC for an IP address
* @ip_addr: IPv4 address
* @mac: pointer to result MAC address
*
*/
static inline void irdma_mcast_mac_v4(u32 *ip_addr, u8 *mac)
{
u8 *ip = (u8 *)ip_addr;
unsigned char mac4[ETH_ALEN] = {0x01, 0x00, 0x5E, ip[2] & 0x7F, ip[1],
ip[0]};
ether_addr_copy(mac, mac4);
}
/**
* irdma_mcast_mac_v6 - Get the multicast MAC for an IP address
* @ip_addr: IPv6 address
* @mac: pointer to result MAC address
*
*/
static inline void irdma_mcast_mac_v6(u32 *ip_addr, u8 *mac)
{
u8 *ip = (u8 *)ip_addr;
unsigned char mac6[ETH_ALEN] = {0x33, 0x33, ip[3], ip[2], ip[1], ip[0]};
ether_addr_copy(mac, mac6);
}
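Both helpers implement the standard multicast mappings: IPv4 takes the 01:00:5e OUI plus the low 23 bits of the group address (RFC 1112), so 230.1.2.3 maps to 01:00:5e:01:02:03, while IPv6 takes the 33:33 prefix plus the low 32 bits of the address (RFC 2464), so ff02::1:ff00:42 maps to 33:33:ff:00:00:42. The ip[0]..ip[3] indexing assumes the address words arrive in the byte order the driver stores them in, hence the reversed-looking access.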
struct rdma_user_mmap_entry*
irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset,
enum irdma_mmap_flag mmap_flag, u64 *mmap_offset);
int irdma_ib_register_device(struct irdma_device *iwdev);
void irdma_ib_unregister_device(struct irdma_device *iwdev);
void irdma_ib_dealloc_device(struct ib_device *ibdev);
void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event);
void irdma_generate_flush_completions(struct irdma_qp *iwqp);
void irdma_remove_cmpls_list(struct irdma_cq *iwcq);
int irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info);
void irdma_flush_worker(struct work_struct *work);
#endif /* IRDMA_VERBS_H */

447
sys/dev/irdma/irdma_ws.c Normal file

@ -0,0 +1,447 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2017 - 2021 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#include "osdep.h"
#include "irdma_hmc.h"
#include "irdma_defs.h"
#include "irdma_type.h"
#include "irdma_protos.h"
#include "irdma_ws.h"
/**
* irdma_alloc_node - Allocate and initialize a WS node
* @vsi: vsi pointer
* @user_pri: user priority
* @node_type: Type of node, leaf or parent
* @parent: parent node pointer
*/
static struct irdma_ws_node *
irdma_alloc_node(struct irdma_sc_vsi *vsi,
u8 user_pri,
enum irdma_ws_node_type node_type,
struct irdma_ws_node *parent)
{
struct irdma_virt_mem ws_mem;
struct irdma_ws_node *node;
u16 node_index = 0;
ws_mem.size = sizeof(struct irdma_ws_node);
ws_mem.va = kzalloc(ws_mem.size, GFP_ATOMIC);
if (!ws_mem.va)
return NULL;
if (parent) {
node_index = irdma_alloc_ws_node_id(vsi->dev);
if (node_index == IRDMA_WS_NODE_INVALID) {
kfree(ws_mem.va);
return NULL;
}
}
node = ws_mem.va;
node->index = node_index;
node->vsi_index = vsi->vsi_idx;
INIT_LIST_HEAD(&node->child_list_head);
if (node_type == WS_NODE_TYPE_LEAF) {
node->type_leaf = true;
node->traffic_class = vsi->qos[user_pri].traffic_class;
node->user_pri = user_pri;
node->rel_bw = vsi->qos[user_pri].rel_bw;
if (!node->rel_bw)
node->rel_bw = 1;
node->lan_qs_handle = vsi->qos[user_pri].lan_qos_handle;
node->prio_type = IRDMA_PRIO_WEIGHTED_RR;
} else {
node->rel_bw = 1;
node->prio_type = IRDMA_PRIO_WEIGHTED_RR;
node->enable = true;
}
node->parent = parent;
return node;
}
/**
* irdma_free_node - Free a WS node
* @vsi: VSI structure of device
* @node: Pointer to node to free
*/
static void
irdma_free_node(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *node)
{
struct irdma_virt_mem ws_mem;
if (node->index)
irdma_free_ws_node_id(vsi->dev, node->index);
ws_mem.va = node;
ws_mem.size = sizeof(struct irdma_ws_node);
kfree(ws_mem.va);
}
/**
* irdma_ws_cqp_cmd - Post CQP work scheduler node cmd
* @vsi: vsi pointer
* @node: pointer to node
* @cmd: add, remove or modify
*/
static int
irdma_ws_cqp_cmd(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *node, u8 cmd)
{
struct irdma_ws_node_info node_info = {0};
node_info.id = node->index;
node_info.vsi = node->vsi_index;
if (node->parent)
node_info.parent_id = node->parent->index;
else
node_info.parent_id = node_info.id;
node_info.weight = node->rel_bw;
node_info.tc = node->traffic_class;
node_info.prio_type = node->prio_type;
node_info.type_leaf = node->type_leaf;
node_info.enable = node->enable;
if (irdma_cqp_ws_node_cmd(vsi->dev, cmd, &node_info)) {
irdma_debug(vsi->dev, IRDMA_DEBUG_WS, "CQP WS CMD failed\n");
return -ENOMEM;
}
if (node->type_leaf && cmd == IRDMA_OP_WS_ADD_NODE) {
node->qs_handle = node_info.qs_handle;
vsi->qos[node->user_pri].qs_handle = node_info.qs_handle;
}
return 0;
}
/**
* ws_find_node - Find SC WS node based on VSI id or TC
* @parent: parent node of First VSI or TC node
* @match_val: value to match
* @type: match type VSI/TC
*/
static struct irdma_ws_node *
ws_find_node(struct irdma_ws_node *parent,
u16 match_val,
enum irdma_ws_match_type type)
{
struct irdma_ws_node *node;
switch (type) {
case WS_MATCH_TYPE_VSI:
list_for_each_entry(node, &parent->child_list_head, siblings) {
if (node->vsi_index == match_val)
return node;
}
break;
case WS_MATCH_TYPE_TC:
list_for_each_entry(node, &parent->child_list_head, siblings) {
if (node->traffic_class == match_val)
return node;
}
break;
default:
break;
}
return NULL;
}
/**
* irdma_ws_in_use - Checks to see if a leaf node is in use
* @vsi: vsi pointer
* @user_pri: user priority
*/
static bool
irdma_ws_in_use(struct irdma_sc_vsi *vsi, u8 user_pri)
{
int i;
mutex_lock(&vsi->qos[user_pri].qos_mutex);
if (!list_empty(&vsi->qos[user_pri].qplist)) {
mutex_unlock(&vsi->qos[user_pri].qos_mutex);
return true;
}
/*
* Check if the qs handle associated with the given user priority is in use by any other user priority. If so,
* nothing left to do
*/
for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
if (vsi->qos[i].qs_handle == vsi->qos[user_pri].qs_handle &&
!list_empty(&vsi->qos[i].qplist)) {
mutex_unlock(&vsi->qos[user_pri].qos_mutex);
return true;
}
}
mutex_unlock(&vsi->qos[user_pri].qos_mutex);
return false;
}
/**
* irdma_remove_leaf - Remove leaf node unconditionally
* @vsi: vsi pointer
* @user_pri: user priority
*/
static void
irdma_remove_leaf(struct irdma_sc_vsi *vsi, u8 user_pri)
{
struct irdma_ws_node *ws_tree_root, *vsi_node, *tc_node;
u16 qs_handle;
int i;
qs_handle = vsi->qos[user_pri].qs_handle;
for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
if (vsi->qos[i].qs_handle == qs_handle)
vsi->qos[i].valid = false;
ws_tree_root = vsi->dev->ws_tree_root;
if (!ws_tree_root)
return;
vsi_node = ws_find_node(ws_tree_root, vsi->vsi_idx,
WS_MATCH_TYPE_VSI);
if (!vsi_node)
return;
tc_node = ws_find_node(vsi_node,
vsi->qos[user_pri].traffic_class,
WS_MATCH_TYPE_TC);
if (!tc_node)
return;
irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_DELETE_NODE);
vsi->unregister_qset(vsi, tc_node);
list_del(&tc_node->siblings);
irdma_free_node(vsi, tc_node);
/* Check if VSI node can be freed */
if (list_empty(&vsi_node->child_list_head)) {
irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_DELETE_NODE);
list_del(&vsi_node->siblings);
irdma_free_node(vsi, vsi_node);
/* Free the head node if there are no remaining VSI nodes */
if (list_empty(&ws_tree_root->child_list_head)) {
irdma_ws_cqp_cmd(vsi, ws_tree_root,
IRDMA_OP_WS_DELETE_NODE);
irdma_free_node(vsi, ws_tree_root);
vsi->dev->ws_tree_root = NULL;
}
}
}
/**
* irdma_ws_add - Build work scheduler tree, set RDMA qs_handle
* @vsi: vsi pointer
* @user_pri: user priority
*/
int
irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
{
struct irdma_ws_node *ws_tree_root;
struct irdma_ws_node *vsi_node;
struct irdma_ws_node *tc_node;
u16 traffic_class;
int ret = 0;
int i;
mutex_lock(&vsi->dev->ws_mutex);
if (vsi->tc_change_pending) {
ret = -EBUSY;
goto exit;
}
if (vsi->qos[user_pri].valid)
goto exit;
ws_tree_root = vsi->dev->ws_tree_root;
if (!ws_tree_root) {
irdma_debug(vsi->dev, IRDMA_DEBUG_WS, "Creating root node\n");
ws_tree_root = irdma_alloc_node(vsi, user_pri,
WS_NODE_TYPE_PARENT, NULL);
if (!ws_tree_root) {
ret = -ENOMEM;
goto exit;
}
ret = irdma_ws_cqp_cmd(vsi, ws_tree_root, IRDMA_OP_WS_ADD_NODE);
if (ret) {
irdma_free_node(vsi, ws_tree_root);
goto exit;
}
vsi->dev->ws_tree_root = ws_tree_root;
}
/* Find a second tier node that matches the VSI */
vsi_node = ws_find_node(ws_tree_root, vsi->vsi_idx,
WS_MATCH_TYPE_VSI);
/* If VSI node doesn't exist, add one */
if (!vsi_node) {
irdma_debug(vsi->dev, IRDMA_DEBUG_WS,
"Node not found matching VSI %d\n", vsi->vsi_idx);
vsi_node = irdma_alloc_node(vsi, user_pri, WS_NODE_TYPE_PARENT,
ws_tree_root);
if (!vsi_node) {
ret = -ENOMEM;
goto vsi_add_err;
}
ret = irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_ADD_NODE);
if (ret) {
irdma_free_node(vsi, vsi_node);
goto vsi_add_err;
}
list_add(&vsi_node->siblings, &ws_tree_root->child_list_head);
}
irdma_debug(vsi->dev, IRDMA_DEBUG_WS,
"Using node %d which represents VSI %d\n", vsi_node->index,
vsi->vsi_idx);
traffic_class = vsi->qos[user_pri].traffic_class;
tc_node = ws_find_node(vsi_node, traffic_class,
WS_MATCH_TYPE_TC);
if (!tc_node) {
/* Add leaf node */
irdma_debug(vsi->dev, IRDMA_DEBUG_WS,
"Node not found matching VSI %d and TC %d\n",
vsi->vsi_idx, traffic_class);
tc_node = irdma_alloc_node(vsi, user_pri, WS_NODE_TYPE_LEAF,
vsi_node);
if (!tc_node) {
ret = -ENOMEM;
goto leaf_add_err;
}
ret = irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_ADD_NODE);
if (ret) {
irdma_free_node(vsi, tc_node);
goto leaf_add_err;
}
list_add(&tc_node->siblings, &vsi_node->child_list_head);
/*
* callback to LAN to update the LAN tree with our node
*/
ret = vsi->register_qset(vsi, tc_node);
if (ret)
goto reg_err;
tc_node->enable = true;
ret = irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_MODIFY_NODE);
if (ret) {
vsi->unregister_qset(vsi, tc_node);
goto reg_err;
}
}
irdma_debug(vsi->dev, IRDMA_DEBUG_WS,
"Using node %d which represents VSI %d TC %d\n",
tc_node->index, vsi->vsi_idx, traffic_class);
/*
* Iterate through other UPs and update the QS handle if they have a matching traffic class.
*/
for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
if (vsi->qos[i].traffic_class == traffic_class) {
vsi->qos[i].qs_handle = tc_node->qs_handle;
vsi->qos[i].lan_qos_handle = tc_node->lan_qs_handle;
vsi->qos[i].l2_sched_node_id = tc_node->l2_sched_node_id;
vsi->qos[i].valid = true;
}
}
goto exit;
reg_err:
irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_DELETE_NODE);
list_del(&tc_node->siblings);
irdma_free_node(vsi, tc_node);
leaf_add_err:
if (list_empty(&vsi_node->child_list_head)) {
if (irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_DELETE_NODE))
goto exit;
list_del(&vsi_node->siblings);
irdma_free_node(vsi, vsi_node);
}
vsi_add_err:
/* Free the head node if there are no remaining VSI nodes */
if (list_empty(&ws_tree_root->child_list_head)) {
irdma_ws_cqp_cmd(vsi, ws_tree_root, IRDMA_OP_WS_DELETE_NODE);
vsi->dev->ws_tree_root = NULL;
irdma_free_node(vsi, ws_tree_root);
}
exit:
mutex_unlock(&vsi->dev->ws_mutex);
return ret;
}
/**
* irdma_ws_remove - Free WS scheduler node, update WS tree
* @vsi: vsi pointer
* @user_pri: user priority
*/
void
irdma_ws_remove(struct irdma_sc_vsi *vsi, u8 user_pri)
{
mutex_lock(&vsi->dev->ws_mutex);
if (irdma_ws_in_use(vsi, user_pri))
goto exit;
irdma_remove_leaf(vsi, user_pri);
exit:
mutex_unlock(&vsi->dev->ws_mutex);
}
/**
* irdma_ws_reset - Reset entire WS tree
* @vsi: vsi pointer
*/
void
irdma_ws_reset(struct irdma_sc_vsi *vsi)
{
u8 i;
mutex_lock(&vsi->dev->ws_mutex);
for (i = 0; i < IRDMA_MAX_USER_PRIORITY; ++i)
irdma_remove_leaf(vsi, i);
mutex_unlock(&vsi->dev->ws_mutex);
}

74
sys/dev/irdma/irdma_ws.h Normal file

@ -0,0 +1,74 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2015 - 2021 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#ifndef IRDMA_WS_H
#define IRDMA_WS_H
#include "osdep.h"
enum irdma_ws_node_type {
WS_NODE_TYPE_PARENT,
WS_NODE_TYPE_LEAF,
};
enum irdma_ws_match_type {
WS_MATCH_TYPE_VSI,
WS_MATCH_TYPE_TC,
};
struct irdma_ws_node {
struct list_head siblings;
struct list_head child_list_head;
struct irdma_ws_node *parent;
u64 lan_qs_handle; /* opaque handle used by LAN */
u32 l2_sched_node_id;
u16 index;
u16 qs_handle;
u16 vsi_index;
u8 traffic_class;
u8 user_pri;
u8 rel_bw;
u8 abstraction_layer; /* used for splitting a TC */
u8 prio_type;
bool type_leaf:1;
bool enable:1;
};
struct irdma_sc_vsi;
int irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri);
void irdma_ws_remove(struct irdma_sc_vsi *vsi, u8 user_pri);
void irdma_ws_reset(struct irdma_sc_vsi *vsi);
#endif /* IRDMA_WS_H */
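
Taken together with irdma_ws.c above: irdma_ws_add() lazily builds the root, VSI, and leaf nodes the first time a user priority is used and publishes the resulting qs_handle into vsi->qos[], while irdma_ws_remove() frees the leaf (and any now-empty parents) once no QP references that priority. A hedged sketch of the expected calling pattern around QP setup:

/* Hedged sketch: bracket QP setup/teardown with WS tree maintenance. */
static int example_ws_usage(struct irdma_sc_vsi *vsi, u8 user_pri)
{
	int ret;

	ret = irdma_ws_add(vsi, user_pri);	/* builds nodes on first use */
	if (ret)
		return ret;	/* e.g. -EBUSY while a TC change is pending */

	/* ... QPs on this priority now use vsi->qos[user_pri].qs_handle ... */

	irdma_ws_remove(vsi, user_pri);	/* leaf is freed only once unused */
	return 0;
}
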

247
sys/dev/irdma/osdep.h Normal file

@ -0,0 +1,247 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
* Copyright (c) 2021 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*$FreeBSD$*/
#ifndef _ICRDMA_OSDEP_H_
#define _ICRDMA_OSDEP_H_
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if_ether.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <sys/bus.h>
#include <machine/bus.h>
#define ATOMIC atomic_t
#define IOMEM
#define IRDMA_NTOHS(a) ntohs(a)
#define MAKEMASK(m, s) ((m) << (s))
#define OS_TIMER timer_list
#define DECLARE_HASHTABLE(n, b) struct hlist_head (n)[1 << (b)]
#define HASH_MIN(v, b) (sizeof(v) <= 4 ? hash_32(v, b) : hash_long(v, b))
#define HASH_FOR_EACH_RCU(n, b, o, m) for ((b) = 0, o = NULL; o == NULL && (b) < ARRAY_SIZE(n);\
(b)++)\
hlist_for_each_entry_rcu(o, &n[(b)], m)
#define HASH_FOR_EACH_POSSIBLE_RCU(n, o, m, k) \
hlist_for_each_entry_rcu(o, &n[jhash(&k, sizeof(k), 0) >> (32 - ilog2(ARRAY_SIZE(n)))],\
m)
#define HASH_FOR_EACH_POSSIBLE(n, o, m, k) \
hlist_for_each_entry(o, &n[jhash(&k, sizeof(k), 0) >> (32 - ilog2(ARRAY_SIZE(n)))],\
m)
#define HASH_ADD_RCU(h, n, k) \
hlist_add_head_rcu(n, &h[jhash(&k, sizeof(k), 0) >> (32 - ilog2(ARRAY_SIZE(h)))])
#define HASH_DEL_RCU(tbl, node) hlist_del_rcu(node)
#define HASH_ADD(h, n, k) \
hlist_add_head(n, &h[jhash(&k, sizeof(k), 0) >> (32 - ilog2(ARRAY_SIZE(h)))])
#define HASH_DEL(tbl, node) hlist_del(node)
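These compat macros mirror the Linux hashtable helpers on top of hlist. A hedged sketch of their intended use follows; struct conn and its fields are illustrative only, and note that the key argument must be an lvalue, since the macros take its address for jhash.

/* Hedged sketch: a 64-bucket table keyed by a u32 id. */
struct conn {
	u32 id;
	struct hlist_node hnode;
};
static DECLARE_HASHTABLE(conn_tbl, 6);	/* 1 << 6 = 64 buckets */

static void conn_insert(struct conn *c)
{
	HASH_ADD(conn_tbl, &c->hnode, c->id);
}

static struct conn *conn_lookup(u32 id)
{
	struct conn *c;

	HASH_FOR_EACH_POSSIBLE(conn_tbl, c, hnode, id) {
		if (c->id == id)
			return c;
	}
	return NULL;
}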
#define WQ_UNBOUND_MAX_ACTIVE max_t(int, 512, num_possible_cpus() * 4)
#define if_addr_rlock(x)
#define if_addr_runlock(x)
/* constants */
#define STATS_TIMER_DELAY 60000
/* a couple of linux size defines */
#define SZ_128 128
#define SZ_2K (SZ_128 * 16)
#define SZ_1G (SZ_1K * SZ_1K * SZ_1K)
#define SPEED_1000 1000
#define SPEED_10000 10000
#define SPEED_20000 20000
#define SPEED_25000 25000
#define SPEED_40000 40000
#define SPEED_100000 100000
#define irdma_mb() mb()
#define irdma_wmb() wmb()
#define irdma_get_virt_to_phy vtophys
#define __aligned_u64 uint64_t __aligned(8)
#define VLAN_PRIO_SHIFT 13
/*
* debug definition section
*/
#define irdma_print(S, ...) printf("%s:%d "S, __FUNCTION__, __LINE__, ##__VA_ARGS__)
#define irdma_debug_buf(dev, mask, desc, buf, size) \
do { \
u32 i; \
if (!((mask) & (dev)->debug_mask)) { \
break; \
} \
irdma_debug(dev, mask, "%s\n", desc); \
irdma_debug(dev, mask, "starting address virt=%p phy=%lxh\n", buf, irdma_get_virt_to_phy(buf)); \
for (i = 0; i < size ; i += 8) \
irdma_debug(dev, mask, "index %03d val: %016lx\n", i, ((unsigned long *)buf)[i / 8]); \
} while(0)
#define irdma_debug(h, m, s, ...) \
do { \
if (!(h)) { \
if ((m) == IRDMA_DEBUG_INIT) \
printf("irdma INIT " s, ##__VA_ARGS__); \
} else if (((m) & (h)->debug_mask)) { \
printf("irdma " s, ##__VA_ARGS__); \
} \
} while (0)
#define irdma_dev_err(a, b, ...) printf(b, ##__VA_ARGS__)
#define irdma_dev_warn(a, b, ...) printf(b, ##__VA_ARGS__) /*dev_warn(a, b)*/
#define irdma_dev_info(a, b, ...) printf(b, ##__VA_ARGS__)
#define irdma_pr_warn printf
#define ibdev_err(ibdev, fmt, ...) irdma_dev_err(&((ibdev)->dev), fmt, ##__VA_ARGS__)
#define dump_struct(s, sz, name) \
do { \
unsigned char *a; \
printf("%s %u", (name), (unsigned int)(sz)); \
for (a = (unsigned char*)(s); a < (unsigned char *)(s) + (sz) ; a ++) { \
if ((u64)a % 8 == 0) \
printf("\n%p ", a); \
printf("%2x ", *a); \
} \
printf("\n"); \
}while(0)
/*
* debug definition end
*/
typedef __be16 BE16;
typedef __be32 BE32;
typedef uintptr_t irdma_uintptr;
struct irdma_hw;
struct irdma_pci_f;
struct irdma_sc_dev;
struct irdma_sc_qp;
struct irdma_sc_vsi;
struct irdma_task_arg {
struct irdma_device *iwdev;
struct ice_rdma_peer *peer;
atomic_t open_ongoing;
atomic_t close_ongoing;
};
struct irdma_dev_ctx {
bus_space_tag_t mem_bus_space_tag;
bus_space_handle_t mem_bus_space_handle;
bus_size_t mem_bus_space_size;
void *dev;
struct irdma_task_arg task_arg;
};
#define irdma_pr_info(fmt, args ...) printf("%s: INFO "fmt, __func__, ## args)
#define irdma_pr_err(fmt, args ...) printf("%s: ERR "fmt, __func__, ## args)
#define irdma_memcpy(a, b, c) memcpy((a), (b), (c))
#define irdma_memset(a, b, c) memset((a), (b), (c))
#define irdma_usec_delay(x) DELAY(x)
#define mdelay(x) DELAY((x) * 1000)
#define rt_tos2priority(tos) (((tos >> 1) & 0x8 >> 1) | ((tos >> 2) ^ ((tos >> 3) << 1)))
#define ah_attr_to_dmac(attr) ((attr).dmac)
#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \
ib_gid_to_network_type(gid_type, gid)
#define irdma_del_timer_compat(tt) del_timer((tt))
#define IRDMA_TAILQ_FOREACH CK_STAILQ_FOREACH
#define IRDMA_TAILQ_FOREACH_SAFE CK_STAILQ_FOREACH_SAFE
#define between(a, b, c) ((bool)((c) - (a) >= (b) - (a)))
#define rd32(a, reg) irdma_rd32((a)->dev_context, (reg))
#define wr32(a, reg, value) irdma_wr32((a)->dev_context, (reg), (value))
#define rd64(a, reg) irdma_rd64((a)->dev_context, (reg))
#define wr64(a, reg, value) irdma_wr64((a)->dev_context, (reg), (value))
#define db_wr32(value, a) writel((value), (a))
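The rd32/wr32/rd64/wr64 wrappers route all register access through the bus-space tag and handle carried in irdma_dev_ctx, so a caller passes only the object holding dev_context (assumed here to be struct irdma_hw) and a register offset. A hedged read-modify-write sketch; the offset and bit are placeholders, not real E810 registers:

/* Hedged sketch: set one bit in a device register via the accessors above. */
#define EXAMPLE_REG	0x100		/* placeholder offset */
#define EXAMPLE_BIT	BIT(3)		/* placeholder bit */

static void example_set_reg_bit(struct irdma_hw *hw)
{
	u32 val;

	val = rd32(hw, EXAMPLE_REG);
	wr32(hw, EXAMPLE_REG, val | EXAMPLE_BIT);
}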
void *hw_to_dev(struct irdma_hw *hw);
struct irdma_dma_mem {
void *va;
u64 pa;
bus_dma_tag_t tag;
bus_dmamap_t map;
bus_dma_segment_t seg;
bus_size_t size;
int nseg;
int flags;
};
struct irdma_virt_mem {
void *va;
u32 size;
};
struct irdma_dma_info {
dma_addr_t *dmaaddrs;
};
struct list_head;
u32 irdma_rd32(struct irdma_dev_ctx *dev_ctx, u32 reg);
void irdma_wr32(struct irdma_dev_ctx *dev_ctx, u32 reg, u32 value);
u64 irdma_rd64(struct irdma_dev_ctx *dev_ctx, u32 reg);
void irdma_wr64(struct irdma_dev_ctx *dev_ctx, u32 reg, u64 value);
void irdma_term_modify_qp(struct irdma_sc_qp *qp, u8 next_state, u8 term, u8 term_len);
void irdma_terminate_done(struct irdma_sc_qp *qp, int timeout_occurred);
void irdma_terminate_start_timer(struct irdma_sc_qp *qp);
void irdma_terminate_del_timer(struct irdma_sc_qp *qp);
void irdma_hw_stats_start_timer(struct irdma_sc_vsi *vsi);
void irdma_hw_stats_stop_timer(struct irdma_sc_vsi *vsi);
void irdma_send_ieq_ack(struct irdma_sc_qp *qp);
u8 *irdma_get_hw_addr(void *par);
void irdma_unmap_vm_page_list(struct irdma_hw *hw, u64 *pg_arr, u32 pg_cnt);
int irdma_map_vm_page_list(struct irdma_hw *hw, void *va,
u64 *pg_arr, u32 pg_cnt);
struct ib_device *irdma_get_ibdev(struct irdma_sc_dev *dev);
#endif /* _ICRDMA_OSDEP_H_ */


@ -145,6 +145,7 @@ SUBDIR= \
${_ichwd} \
${_ice} \
${_ice_ddp} \
+${_irdma} \
${_ida} \
if_bridge \
${_if_cgem} \
@ -575,6 +576,9 @@ _ice= ice
.if ${MK_SOURCELESS_UCODE} != "no"
_ice_ddp= ice_ddp
.endif
+.if ${MK_OFED} != "no" || defined(ALL_MODULES)
+_irdma= irdma
+.endif
.endif
.if ${MACHINE_CPUARCH} == "aarch64" || ${MACHINE_CPUARCH} == "arm" || \


@ -0,0 +1,23 @@
.include <bsd.own.mk>
OFED_INC_DIR = ${.CURDIR}/../../ofed/include
ICE_DIR = ${.CURDIR}/../../dev/ice
.PATH: ${.CURDIR}/../../dev/irdma
KMOD= irdma
SRCS= icrdma.c
SRCS+= fbsd_kcompat.c irdma_hw.c irdma_verbs.c irdma_utils.c
SRCS+= irdma_cm.c irdma_kcompat.c
SRCS+= irdma_if.h irdma_di_if.h ice_rdma.h vnode_if.h device_if.h bus_if.h opt_inet.h pci_if.h opt_inet6.h
# Shared source
SRCS+= irdma_ctrl.c irdma_hmc.c icrdma_hw.c irdma_pble.c irdma_puda.c irdma_uda.c irdma_uk.c irdma_ws.c
CFLAGS+= -I${ICE_DIR} -I${OFED_INC_DIR}
CFLAGS+= -I${OFED_INC_DIR}/uapi
CFLAGS+= -I${.CURDIR}/../../compat/linuxkpi/common/include
CFLAGS+= -DCONFIG_INFINIBAND_USER_MEM
ice_rdma.h:
cp $(ICE_DIR)/ice_rdma.h .
.include <bsd.kmod.mk>
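
With the Makefile additions above, the driver builds as irdma.ko whenever MK_OFED is enabled (or ALL_MODULES is defined); once installed it can be loaded with kldload irdma, which presumes the ice(4) driver is present to provide the RDMA peer interface.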


@ -5,6 +5,6 @@ _spath=${SRCTOP}/contrib/ofed/libibverbs
BINDIR?= /usr/bin
CFLAGS+= -I${_spath}
-LIBADD+= ibverbs mlx4 mlx5 cxgb4 pthread
+LIBADD+= ibverbs mlx4 mlx5 cxgb4 irdma pthread
WARNS?= 2


@ -5,6 +5,6 @@ _spath=${SRCTOP}/contrib/ofed/librdmacm
BINDIR?= /usr/bin
CFLAGS+= -I${SRCTOP}/contrib/ofed
-LIBADD+= ibverbs rdmacm mlx4 mlx5 cxgb4 pthread
+LIBADD+= ibverbs rdmacm irdma mlx4 mlx5 cxgb4 pthread
WARNS?= 0