net/mlx5/hws: add command layer

This adds the command layer, which is used to communicate with
the FW to query capabilities and to allocate the FW resources
needed for HWS.
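
A minimal caller sketch (not part of this patch), assuming mlx5dr_internal.h
pulls in the definitions added below and that an ibv_context is already open;
the example_hws_cmd_usage name, the table type and the level are illustrative
only:

/* Hypothetical usage sketch: query HWS capabilities, then create an
 * RTC-enabled flow table through the command layer and release it.
 */
#include "mlx5dr_internal.h"

static int example_hws_cmd_usage(struct ibv_context *ibv_ctx)
{
	struct mlx5dr_cmd_ft_create_attr ft_attr = {0};
	struct mlx5dr_cmd_query_caps caps = {0};
	struct mlx5dr_devx_obj *ft;

	if (mlx5dr_cmd_query_caps(ibv_ctx, &caps))
		return -1;

	if (!caps.wqe_based_update)
		return -1; /* Device/FW has no WQE based flow table support */

	ft_attr.type = FS_FT_NIC_RX;	/* FW flow table type (illustrative) */
	ft_attr.level = 1;		/* Illustrative table level */
	ft_attr.rtc_valid = true;	/* Table will later be connected to an RTC */

	ft = mlx5dr_cmd_flow_table_create(ibv_ctx, &ft_attr);
	if (!ft)
		return -1;

	/* ... use the table, then release the FW object */
	return mlx5dr_cmd_destroy_obj(ft);
}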

Signed-off-by: Erez Shitrit <erezsh@nvidia.com>
Signed-off-by: Alex Vesker <valex@nvidia.com>
Erez Shitrit 2022-10-20 18:57:38 +03:00 committed by Raslan Darawsheh
parent dfceca7e7e
commit 365cdf5f8c
3 changed files with 1775 additions and 10 deletions


@@ -289,6 +289,8 @@
/* The alignment needed for CQ buffer. */
#define MLX5_CQE_BUF_ALIGNMENT rte_mem_page_size()
#define MAX_ACTIONS_DATA_IN_HEADER_MODIFY 512
/* Completion mode. */
enum mlx5_completion_mode {
MLX5_COMP_ONLY_ERR = 0x0,
@@ -677,6 +679,10 @@ enum {
MLX5_MODIFICATION_TYPE_SET = 0x1,
MLX5_MODIFICATION_TYPE_ADD = 0x2,
MLX5_MODIFICATION_TYPE_COPY = 0x3,
MLX5_MODIFICATION_TYPE_INSERT = 0x4,
MLX5_MODIFICATION_TYPE_REMOVE = 0x5,
MLX5_MODIFICATION_TYPE_NOP = 0x6,
MLX5_MODIFICATION_TYPE_REMOVE_WORDS = 0x7,
};
/* The field of packet to be modified. */
@@ -1111,6 +1117,10 @@ enum {
MLX5_CMD_OP_QUERY_TIS = 0x915,
MLX5_CMD_OP_CREATE_RQT = 0x916,
MLX5_CMD_OP_MODIFY_RQT = 0x917,
MLX5_CMD_OP_CREATE_FLOW_TABLE = 0x930,
MLX5_CMD_OP_CREATE_FLOW_GROUP = 0x933,
MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x936,
MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c,
MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939,
MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
MLX5_CMD_OP_CREATE_GENERAL_OBJECT = 0xa00,
@@ -1299,6 +1309,7 @@ enum {
MLX5_SET_HCA_CAP_OP_MOD_ESW = 0x9 << 1,
MLX5_GET_HCA_CAP_OP_MOD_VDPA_EMULATION = 0x13 << 1,
MLX5_GET_HCA_CAP_OP_MOD_CRYPTO = 0x1A << 1,
MLX5_GET_HCA_CAP_OP_MOD_WQE_BASED_FLOW_TABLE = 0x1B << 1,
MLX5_GET_HCA_CAP_OP_MOD_PARSE_GRAPH_NODE_CAP = 0x1C << 1,
MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 = 0x20 << 1,
};
@@ -1317,6 +1328,14 @@ enum {
(1ULL << MLX5_GENERAL_OBJ_TYPE_GENEVE_TLV_OPT)
#define MLX5_GENERAL_OBJ_TYPES_CAP_CONN_TRACK_OFFLOAD \
(1ULL << MLX5_GENERAL_OBJ_TYPE_CONN_TRACK_OFFLOAD)
#define MLX5_GENERAL_OBJ_TYPES_CAP_RTC \
(1ULL << MLX5_GENERAL_OBJ_TYPE_RTC)
#define MLX5_GENERAL_OBJ_TYPES_CAP_STC \
(1ULL << MLX5_GENERAL_OBJ_TYPE_STC)
#define MLX5_GENERAL_OBJ_TYPES_CAP_STE \
(1ULL << MLX5_GENERAL_OBJ_TYPE_STE)
#define MLX5_GENERAL_OBJ_TYPES_CAP_DEFINER \
(1ULL << MLX5_GENERAL_OBJ_TYPE_DEFINER)
#define MLX5_GENERAL_OBJ_TYPES_CAP_DEK \
(1ULL << MLX5_GENERAL_OBJ_TYPE_DEK)
#define MLX5_GENERAL_OBJ_TYPES_CAP_IMPORT_KEK \
@@ -1373,6 +1392,11 @@ enum {
#define MLX5_HCA_FLEX_VXLAN_GPE_ENABLED (1UL << 7)
#define MLX5_HCA_FLEX_ICMP_ENABLED (1UL << 8)
#define MLX5_HCA_FLEX_ICMPV6_ENABLED (1UL << 9)
#define MLX5_HCA_FLEX_GTPU_ENABLED (1UL << 11)
#define MLX5_HCA_FLEX_GTPU_DW_2_ENABLED (1UL << 16)
#define MLX5_HCA_FLEX_GTPU_FIRST_EXT_DW_0_ENABLED (1UL << 17)
#define MLX5_HCA_FLEX_GTPU_DW_0_ENABLED (1UL << 18)
#define MLX5_HCA_FLEX_GTPU_TEID_ENABLED (1UL << 19)
/* The device steering logic format. */
#define MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 0x0
@@ -1505,7 +1529,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 wol_u[0x1];
u8 wol_p[0x1];
u8 stat_rate_support[0x10];
u8 reserved_at_1f0[0xc];
u8 reserved_at_1ef[0xb];
u8 wqe_based_flow_table_update_cap[0x1];
u8 cqe_version[0x4];
u8 compact_address_vector[0x1];
u8 striding_rq[0x1];
@@ -1681,7 +1706,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 cqe_compression[0x1];
u8 cqe_compression_timeout[0x10];
u8 cqe_compression_max_num[0x10];
u8 reserved_at_5e0[0x10];
u8 reserved_at_5e0[0x8];
u8 flex_parser_id_gtpu_dw_0[0x4];
u8 reserved_at_5ec[0x4];
u8 tag_matching[0x1];
u8 rndv_offload_rc[0x1];
u8 rndv_offload_dc[0x1];
@@ -1691,17 +1718,38 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 affiliate_nic_vport_criteria[0x8];
u8 native_port_num[0x8];
u8 num_vhca_ports[0x8];
u8 reserved_at_618[0x6];
u8 flex_parser_id_gtpu_teid[0x4];
u8 reserved_at_61c[0x2];
u8 sw_owner_id[0x1];
u8 reserved_at_61f[0x6C];
u8 wait_on_data[0x1];
u8 wait_on_time[0x1];
u8 reserved_at_68d[0xBB];
u8 reserved_at_68d[0x37];
u8 flex_parser_id_geneve_opt_0[0x4];
u8 flex_parser_id_icmp_dw1[0x4];
u8 flex_parser_id_icmp_dw0[0x4];
u8 flex_parser_id_icmpv6_dw1[0x4];
u8 flex_parser_id_icmpv6_dw0[0x4];
u8 flex_parser_id_outer_first_mpls_over_gre[0x4];
u8 flex_parser_id_outer_first_mpls_over_udp_label[0x4];
u8 reserved_at_6e0[0x20];
u8 flex_parser_id_gtpu_dw_2[0x4];
u8 flex_parser_id_gtpu_first_ext_dw_0[0x4];
u8 reserved_at_708[0x40];
u8 dma_mmo_qp[0x1];
u8 regexp_mmo_qp[0x1];
u8 compress_mmo_qp[0x1];
u8 decompress_mmo_qp[0x1];
u8 reserved_at_624[0xd4];
u8 reserved_at_74c[0x14];
u8 reserved_at_760[0x3];
u8 log_max_num_header_modify_argument[0x5];
u8 log_header_modify_argument_granularity_offset[0x4];
u8 log_header_modify_argument_granularity[0x4];
u8 reserved_at_770[0x3];
u8 log_header_modify_argument_max_alloc[0x5];
u8 reserved_at_778[0x8];
u8 reserved_at_780[0x40];
u8 match_definer_format_supported[0x40];
};
struct mlx5_ifc_qos_cap_bits {
@@ -1876,7 +1924,9 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 log_max_ft_sampler_num[8];
u8 metadata_reg_b_width[0x8];
u8 metadata_reg_a_width[0x8];
u8 reserved_at_60[0x18];
u8 reserved_at_60[0xa];
u8 reparse[0x1];
u8 reserved_at_6b[0xd];
u8 log_max_ft_num[0x8];
u8 reserved_at_80[0x10];
u8 log_max_flow_counter[0x8];
@@ -2061,7 +2111,17 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 hairpin_sq_wqe_bb_size[0x5];
u8 hairpin_sq_wq_in_host_mem[0x1];
u8 hairpin_data_buffer_locked[0x1];
u8 reserved_at_16a[0x696];
u8 reserved_at_16a[0x36];
u8 reserved_at_1a0[0xb];
u8 format_select_dw_8_6_ext[0x1];
u8 reserved_at_1ac[0x14];
u8 general_obj_types_127_64[0x40];
u8 reserved_at_200[0x80];
u8 format_select_dw_gtpu_dw_0[0x8];
u8 format_select_dw_gtpu_dw_1[0x8];
u8 format_select_dw_gtpu_dw_2[0x8];
u8 format_select_dw_gtpu_first_ext_dw_0[0x8];
u8 reserved_at_2a0[0x560];
};
struct mlx5_ifc_esw_cap_bits {
@@ -2074,6 +2134,37 @@ struct mlx5_ifc_esw_cap_bits {
u8 reserved_at_80[0x780];
};
struct mlx5_ifc_wqe_based_flow_table_cap_bits {
u8 reserved_at_0[0x3];
u8 log_max_num_ste[0x5];
u8 reserved_at_8[0x3];
u8 log_max_num_stc[0x5];
u8 reserved_at_10[0x3];
u8 log_max_num_rtc[0x5];
u8 reserved_at_18[0x3];
u8 log_max_num_header_modify_pattern[0x5];
u8 reserved_at_20[0x3];
u8 stc_alloc_log_granularity[0x5];
u8 reserved_at_28[0x3];
u8 stc_alloc_log_max[0x5];
u8 reserved_at_30[0x3];
u8 ste_alloc_log_granularity[0x5];
u8 reserved_at_38[0x3];
u8 ste_alloc_log_max[0x5];
u8 reserved_at_40[0xb];
u8 rtc_reparse_mode[0x5];
u8 reserved_at_50[0x3];
u8 rtc_index_mode[0x5];
u8 reserved_at_58[0x3];
u8 rtc_log_depth_max[0x5];
u8 reserved_at_60[0x10];
u8 ste_format[0x10];
u8 stc_action_type[0x80];
u8 header_insert_type[0x10];
u8 header_remove_type[0x10];
u8 trivial_match_definer[0x20];
};
union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
struct mlx5_ifc_cmd_hca_cap_2_bits cmd_hca_cap_2;
@@ -2085,6 +2176,7 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_flow_table_esw_cap_bits flow_table_esw_cap;
struct mlx5_ifc_esw_cap_bits esw_cap;
struct mlx5_ifc_roce_caps_bits roce_caps;
struct mlx5_ifc_wqe_based_flow_table_cap_bits wqe_based_flow_table_cap;
u8 reserved_at_0[0x8000];
};
@@ -2098,6 +2190,20 @@ struct mlx5_ifc_set_action_in_bits {
u8 data[0x20];
};
struct mlx5_ifc_copy_action_in_bits {
u8 action_type[0x4];
u8 src_field[0xc];
u8 reserved_at_10[0x3];
u8 src_offset[0x5];
u8 reserved_at_18[0x3];
u8 length[0x5];
u8 reserved_at_20[0x4];
u8 dst_field[0xc];
u8 reserved_at_30[0x3];
u8 dst_offset[0x5];
u8 reserved_at_38[0x8];
};
struct mlx5_ifc_query_hca_cap_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -2978,6 +3084,7 @@ enum {
MLX5_GENERAL_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b,
MLX5_GENERAL_OBJ_TYPE_DEK = 0x000c,
MLX5_GENERAL_OBJ_TYPE_VIRTQ = 0x000d,
MLX5_GENERAL_OBJ_TYPE_DEFINER = 0x0018,
MLX5_GENERAL_OBJ_TYPE_VIRTIO_Q_COUNTERS = 0x001c,
MLX5_GENERAL_OBJ_TYPE_IMPORT_KEK = 0x001d,
MLX5_GENERAL_OBJ_TYPE_CREDENTIAL = 0x001e,
@@ -2986,6 +3093,11 @@ enum {
MLX5_GENERAL_OBJ_TYPE_FLOW_METER_ASO = 0x0024,
MLX5_GENERAL_OBJ_TYPE_FLOW_HIT_ASO = 0x0025,
MLX5_GENERAL_OBJ_TYPE_CONN_TRACK_OFFLOAD = 0x0031,
MLX5_GENERAL_OBJ_TYPE_ARG = 0x0023,
MLX5_GENERAL_OBJ_TYPE_STC = 0x0040,
MLX5_GENERAL_OBJ_TYPE_RTC = 0x0041,
MLX5_GENERAL_OBJ_TYPE_STE = 0x0042,
MLX5_GENERAL_OBJ_TYPE_MODIFY_HEADER_PATTERN = 0x0043,
};
struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
@@ -2993,9 +3105,14 @@ struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
u8 reserved_at_10[0x20];
u8 obj_type[0x10];
u8 obj_id[0x20];
u8 reserved_at_60[0x3];
u8 log_obj_range[0x5];
u8 reserved_at_58[0x18];
union {
struct {
u8 reserved_at_60[0x3];
u8 log_obj_range[0x5];
u8 reserved_at_58[0x18];
};
u8 obj_offset[0x20];
};
};
struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
@@ -3029,6 +3146,243 @@ struct mlx5_ifc_geneve_tlv_option_bits {
u8 reserved_at_80[0x180];
};
enum mlx5_ifc_rtc_update_mode {
MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH = 0x0,
MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET = 0x1,
};
enum mlx5_ifc_rtc_ste_format {
MLX5_IFC_RTC_STE_FORMAT_8DW = 0x4,
MLX5_IFC_RTC_STE_FORMAT_11DW = 0x5,
};
enum mlx5_ifc_rtc_reparse_mode {
MLX5_IFC_RTC_REPARSE_NEVER = 0x0,
MLX5_IFC_RTC_REPARSE_ALWAYS = 0x1,
};
struct mlx5_ifc_rtc_bits {
u8 modify_field_select[0x40];
u8 reserved_at_40[0x40];
u8 update_index_mode[0x2];
u8 reparse_mode[0x2];
u8 reserved_at_84[0x4];
u8 pd[0x18];
u8 reserved_at_a0[0x13];
u8 log_depth[0x5];
u8 log_hash_size[0x8];
u8 ste_format[0x8];
u8 table_type[0x8];
u8 reserved_at_d0[0x10];
u8 match_definer_id[0x20];
u8 stc_id[0x20];
u8 ste_table_base_id[0x20];
u8 ste_table_offset[0x20];
u8 reserved_at_160[0x8];
u8 miss_flow_table_id[0x18];
u8 reserved_at_180[0x280];
};
enum mlx5_ifc_stc_action_type {
MLX5_IFC_STC_ACTION_TYPE_NOP = 0x00,
MLX5_IFC_STC_ACTION_TYPE_COPY = 0x05,
MLX5_IFC_STC_ACTION_TYPE_SET = 0x06,
MLX5_IFC_STC_ACTION_TYPE_ADD = 0x07,
MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS = 0x08,
MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE = 0x09,
MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT = 0x0b,
MLX5_IFC_STC_ACTION_TYPE_TAG = 0x0c,
MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST = 0x0e,
MLX5_IFC_STC_ACTION_TYPE_ASO = 0x12,
MLX5_IFC_STC_ACTION_TYPE_COUNTER = 0x14,
MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE = 0x80,
MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR = 0x81,
MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT = 0x82,
MLX5_IFC_STC_ACTION_TYPE_DROP = 0x83,
MLX5_IFC_STC_ACTION_TYPE_ALLOW = 0x84,
MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT = 0x85,
MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK = 0x86,
};
struct mlx5_ifc_stc_ste_param_ste_table_bits {
u8 ste_obj_id[0x20];
u8 match_definer_id[0x20];
u8 reserved_at_40[0x3];
u8 log_hash_size[0x5];
u8 reserved_at_48[0x38];
};
struct mlx5_ifc_stc_ste_param_tir_bits {
u8 reserved_at_0[0x8];
u8 tirn[0x18];
u8 reserved_at_20[0x60];
};
struct mlx5_ifc_stc_ste_param_table_bits {
u8 reserved_at_0[0x8];
u8 table_id[0x18];
u8 reserved_at_20[0x60];
};
struct mlx5_ifc_stc_ste_param_flow_counter_bits {
u8 flow_counter_id[0x20];
};
enum {
MLX5_ASO_CT_NUM_PER_OBJ = 1,
MLX5_ASO_METER_NUM_PER_OBJ = 2,
};
struct mlx5_ifc_stc_ste_param_execute_aso_bits {
u8 aso_object_id[0x20];
u8 return_reg_id[0x4];
u8 aso_type[0x4];
u8 reserved_at_28[0x18];
};
struct mlx5_ifc_stc_ste_param_header_modify_list_bits {
u8 header_modify_pattern_id[0x20];
u8 header_modify_argument_id[0x20];
};
enum mlx5_ifc_header_anchors {
MLX5_HEADER_ANCHOR_PACKET_START = 0x0,
MLX5_HEADER_ANCHOR_FIRST_VLAN_START = 0x2,
MLX5_HEADER_ANCHOR_IPV6_IPV4 = 0x07,
MLX5_HEADER_ANCHOR_INNER_MAC = 0x13,
MLX5_HEADER_ANCHOR_INNER_IPV6_IPV4 = 0x19,
};
struct mlx5_ifc_stc_ste_param_remove_bits {
u8 action_type[0x4];
u8 decap[0x1];
u8 reserved_at_5[0x5];
u8 remove_start_anchor[0x6];
u8 reserved_at_10[0x2];
u8 remove_end_anchor[0x6];
u8 reserved_at_18[0x8];
};
struct mlx5_ifc_stc_ste_param_remove_words_bits {
u8 action_type[0x4];
u8 reserved_at_4[0x6];
u8 remove_start_anchor[0x6];
u8 reserved_at_10[0x1];
u8 remove_offset[0x7];
u8 reserved_at_18[0x2];
u8 remove_size[0x6];
};
struct mlx5_ifc_stc_ste_param_insert_bits {
u8 action_type[0x4];
u8 encap[0x1];
u8 inline_data[0x1];
u8 reserved_at_6[0x4];
u8 insert_anchor[0x6];
u8 reserved_at_10[0x1];
u8 insert_offset[0x7];
u8 reserved_at_18[0x1];
u8 insert_size[0x7];
u8 insert_argument[0x20];
};
struct mlx5_ifc_stc_ste_param_vport_bits {
u8 eswitch_owner_vhca_id[0x10];
u8 vport_number[0x10];
u8 eswitch_owner_vhca_id_valid[0x1];
u8 reserved_at_21[0x59];
};
union mlx5_ifc_stc_param_bits {
struct mlx5_ifc_stc_ste_param_ste_table_bits ste_table;
struct mlx5_ifc_stc_ste_param_tir_bits tir;
struct mlx5_ifc_stc_ste_param_table_bits table;
struct mlx5_ifc_stc_ste_param_flow_counter_bits counter;
struct mlx5_ifc_stc_ste_param_header_modify_list_bits modify_header;
struct mlx5_ifc_stc_ste_param_execute_aso_bits aso;
struct mlx5_ifc_stc_ste_param_remove_bits remove_header;
struct mlx5_ifc_stc_ste_param_insert_bits insert_header;
struct mlx5_ifc_set_action_in_bits add;
struct mlx5_ifc_set_action_in_bits set;
struct mlx5_ifc_copy_action_in_bits copy;
struct mlx5_ifc_stc_ste_param_vport_bits vport;
u8 reserved_at_0[0x80];
};
enum {
MLX5_IFC_MODIFY_STC_FIELD_SELECT_NEW_STC = 1 << 0,
};
struct mlx5_ifc_stc_bits {
u8 modify_field_select[0x40];
u8 reserved_at_40[0x48];
u8 table_type[0x8];
u8 ste_action_offset[0x8];
u8 action_type[0x8];
u8 reserved_at_a0[0x60];
union mlx5_ifc_stc_param_bits stc_param;
u8 reserved_at_180[0x280];
};
struct mlx5_ifc_ste_bits {
u8 modify_field_select[0x40];
u8 reserved_at_40[0x48];
u8 table_type[0x8];
u8 reserved_at_90[0x370];
};
enum {
MLX5_IFC_DEFINER_FORMAT_ID_SELECT = 61,
};
struct mlx5_ifc_definer_bits {
u8 modify_field_select[0x40];
u8 reserved_at_40[0x50];
u8 format_id[0x10];
u8 reserved_at_60[0x60];
u8 format_select_dw3[0x8];
u8 format_select_dw2[0x8];
u8 format_select_dw1[0x8];
u8 format_select_dw0[0x8];
u8 format_select_dw7[0x8];
u8 format_select_dw6[0x8];
u8 format_select_dw5[0x8];
u8 format_select_dw4[0x8];
u8 reserved_at_100[0x18];
u8 format_select_dw8[0x8];
u8 reserved_at_120[0x20];
u8 format_select_byte3[0x8];
u8 format_select_byte2[0x8];
u8 format_select_byte1[0x8];
u8 format_select_byte0[0x8];
u8 format_select_byte7[0x8];
u8 format_select_byte6[0x8];
u8 format_select_byte5[0x8];
u8 format_select_byte4[0x8];
u8 reserved_at_180[0x40];
u8 ctrl[0xa0];
u8 match_mask[0x160];
};
struct mlx5_ifc_arg_bits {
u8 rsvd0[0x88];
u8 access_pd[0x18];
};
struct mlx5_ifc_header_modify_pattern_in_bits {
u8 modify_field_select[0x40];
u8 reserved_at_40[0x40];
u8 pattern_length[0x8];
u8 reserved_at_88[0x18];
u8 reserved_at_a0[0x60];
u8 pattern_data[MAX_ACTIONS_DATA_IN_HEADER_MODIFY * 8];
};
struct mlx5_ifc_create_virtio_q_counters_in_bits {
struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
struct mlx5_ifc_virtio_q_counters_bits virtio_q_counters;
@@ -3044,6 +3398,36 @@ struct mlx5_ifc_create_geneve_tlv_option_in_bits {
struct mlx5_ifc_geneve_tlv_option_bits geneve_tlv_opt;
};
struct mlx5_ifc_create_rtc_in_bits {
struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
struct mlx5_ifc_rtc_bits rtc;
};
struct mlx5_ifc_create_stc_in_bits {
struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
struct mlx5_ifc_stc_bits stc;
};
struct mlx5_ifc_create_ste_in_bits {
struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
struct mlx5_ifc_ste_bits ste;
};
struct mlx5_ifc_create_definer_in_bits {
struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
struct mlx5_ifc_definer_bits definer;
};
struct mlx5_ifc_create_arg_in_bits {
struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
struct mlx5_ifc_arg_bits arg;
};
struct mlx5_ifc_create_header_modify_pattern_in_bits {
struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
struct mlx5_ifc_header_modify_pattern_in_bits pattern;
};
enum {
MLX5_CRYPTO_KEY_SIZE_128b = 0x0,
MLX5_CRYPTO_KEY_SIZE_256b = 0x1,
@@ -4253,6 +4637,209 @@ struct mlx5_ifc_query_q_counter_in_bits {
u8 counter_set_id[0x8];
};
enum {
FS_FT_NIC_RX = 0x0,
FS_FT_NIC_TX = 0x1,
FS_FT_FDB = 0x4,
FS_FT_FDB_RX = 0xa,
FS_FT_FDB_TX = 0xb,
};
struct mlx5_ifc_flow_table_context_bits {
u8 reformat_en[0x1];
u8 decap_en[0x1];
u8 sw_owner[0x1];
u8 termination_table[0x1];
u8 table_miss_action[0x4];
u8 level[0x8];
u8 rtc_valid[0x1];
u8 reserved_at_11[0x7];
u8 log_size[0x8];
u8 reserved_at_20[0x8];
u8 table_miss_id[0x18];
u8 reserved_at_40[0x8];
u8 lag_master_next_table_id[0x18];
u8 reserved_at_60[0x60];
u8 rtc_id_0[0x20];
u8 rtc_id_1[0x20];
u8 reserved_at_100[0x40];
};
struct mlx5_ifc_create_flow_table_in_bits {
u8 opcode[0x10];
u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 other_vport[0x1];
u8 reserved_at_41[0xf];
u8 vport_number[0x10];
u8 reserved_at_60[0x20];
u8 table_type[0x8];
u8 reserved_at_88[0x18];
u8 reserved_at_a0[0x20];
struct mlx5_ifc_flow_table_context_bits flow_table_context;
};
struct mlx5_ifc_create_flow_table_out_bits {
u8 status[0x8];
u8 icm_address_63_40[0x18];
u8 syndrome[0x20];
u8 icm_address_39_32[0x8];
u8 table_id[0x18];
u8 icm_address_31_0[0x20];
};
enum mlx5_flow_destination_type {
MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0,
};
enum {
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4,
};
struct mlx5_ifc_set_fte_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
u8 syndrome[0x20];
u8 reserved_at_40[0x40];
};
struct mlx5_ifc_dest_format_bits {
u8 destination_type[0x8];
u8 destination_id[0x18];
u8 destination_eswitch_owner_vhca_id_valid[0x1];
u8 packet_reformat[0x1];
u8 reserved_at_22[0xe];
u8 destination_eswitch_owner_vhca_id[0x10];
};
struct mlx5_ifc_flow_counter_list_bits {
u8 flow_counter_id[0x20];
u8 reserved_at_20[0x20];
};
union mlx5_ifc_dest_format_flow_counter_list_auto_bits {
struct mlx5_ifc_dest_format_bits dest_format;
struct mlx5_ifc_flow_counter_list_bits flow_counter_list;
u8 reserved_at_0[0x40];
};
struct mlx5_ifc_flow_context_bits {
u8 reserved_at_00[0x20];
u8 group_id[0x20];
u8 reserved_at_40[0x8];
u8 flow_tag[0x18];
u8 reserved_at_60[0x10];
u8 action[0x10];
u8 extended_destination[0x1];
u8 reserved_at_81[0x7];
u8 destination_list_size[0x18];
u8 reserved_at_a0[0x8];
u8 flow_counter_list_size[0x18];
u8 reserved_at_c0[0x1740];
/* Currently only one destination */
union mlx5_ifc_dest_format_flow_counter_list_auto_bits destination[1];
};
struct mlx5_ifc_set_fte_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 other_vport[0x1];
u8 reserved_at_41[0xf];
u8 vport_number[0x10];
u8 reserved_at_60[0x20];
u8 table_type[0x8];
u8 reserved_at_88[0x18];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
u8 ignore_flow_level[0x1];
u8 reserved_at_c1[0x17];
u8 modify_enable_mask[0x8];
u8 reserved_at_e0[0x20];
u8 flow_index[0x20];
u8 reserved_at_120[0xe0];
struct mlx5_ifc_flow_context_bits flow_context;
};
struct mlx5_ifc_create_flow_group_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
u8 reserved_at_20[0x20];
u8 other_vport[0x1];
u8 reserved_at_41[0xf];
u8 vport_number[0x10];
u8 reserved_at_60[0x20];
u8 table_type[0x8];
u8 reserved_at_88[0x18];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
u8 reserved_at_c0[0x1f40];
};
struct mlx5_ifc_create_flow_group_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
u8 syndrome[0x20];
u8 reserved_at_40[0x8];
u8 group_id[0x18];
u8 reserved_at_60[0x20];
};
enum {
MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION = 1 << 0,
MLX5_IFC_MODIFY_FLOW_TABLE_RTC_ID = 1 << 1,
};
enum {
MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_DEFAULT = 0,
MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL = 1,
};
struct mlx5_ifc_modify_flow_table_in_bits {
u8 opcode[0x10];
u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 reserved_at_40[0x10];
u8 vport_number[0x10];
u8 reserved_at_60[0x10];
u8 modify_field_select[0x10];
u8 table_type[0x8];
u8 reserved_at_88[0x18];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
struct mlx5_ifc_flow_table_context_bits flow_table_context;
};
struct mlx5_ifc_modify_flow_table_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
u8 syndrome[0x20];
u8 reserved_at_40[0x60];
};
/* CQE format mask. */
#define MLX5E_CQE_FORMAT_MASK 0xc


@@ -0,0 +1,948 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2022 NVIDIA Corporation & Affiliates
*/
#include "mlx5dr_internal.h"
int mlx5dr_cmd_destroy_obj(struct mlx5dr_devx_obj *devx_obj)
{
int ret;
ret = mlx5_glue->devx_obj_destroy(devx_obj->obj);
simple_free(devx_obj);
return ret;
}
struct mlx5dr_devx_obj *
mlx5dr_cmd_flow_table_create(struct ibv_context *ctx,
struct mlx5dr_cmd_ft_create_attr *ft_attr)
{
uint32_t out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
uint32_t in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
struct mlx5dr_devx_obj *devx_obj;
void *ft_ctx;
devx_obj = simple_malloc(sizeof(*devx_obj));
if (!devx_obj) {
DR_LOG(ERR, "Failed to allocate memory for flow table object");
rte_errno = ENOMEM;
return NULL;
}
MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
MLX5_SET(create_flow_table_in, in, table_type, ft_attr->type);
ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
MLX5_SET(flow_table_context, ft_ctx, level, ft_attr->level);
MLX5_SET(flow_table_context, ft_ctx, rtc_valid, ft_attr->rtc_valid);
devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
if (!devx_obj->obj) {
DR_LOG(ERR, "Failed to create FT");
simple_free(devx_obj);
rte_errno = errno;
return NULL;
}
devx_obj->id = MLX5_GET(create_flow_table_out, out, table_id);
return devx_obj;
}
int
mlx5dr_cmd_flow_table_modify(struct mlx5dr_devx_obj *devx_obj,
struct mlx5dr_cmd_ft_modify_attr *ft_attr)
{
uint32_t out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
uint32_t in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
void *ft_ctx;
int ret;
MLX5_SET(modify_flow_table_in, in, opcode, MLX5_CMD_OP_MODIFY_FLOW_TABLE);
MLX5_SET(modify_flow_table_in, in, table_type, ft_attr->type);
MLX5_SET(modify_flow_table_in, in, modify_field_select, ft_attr->modify_fs);
MLX5_SET(modify_flow_table_in, in, table_id, devx_obj->id);
ft_ctx = MLX5_ADDR_OF(modify_flow_table_in, in, flow_table_context);
MLX5_SET(flow_table_context, ft_ctx, table_miss_action, ft_attr->table_miss_action);
MLX5_SET(flow_table_context, ft_ctx, table_miss_id, ft_attr->table_miss_id);
MLX5_SET(flow_table_context, ft_ctx, rtc_id_0, ft_attr->rtc_id_0);
MLX5_SET(flow_table_context, ft_ctx, rtc_id_1, ft_attr->rtc_id_1);
ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out));
if (ret) {
DR_LOG(ERR, "Failed to modify FT");
rte_errno = errno;
}
return ret;
}
static struct mlx5dr_devx_obj *
mlx5dr_cmd_flow_group_create(struct ibv_context *ctx,
struct mlx5dr_cmd_fg_attr *fg_attr)
{
uint32_t out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
uint32_t in[MLX5_ST_SZ_DW(create_flow_group_in)] = {0};
struct mlx5dr_devx_obj *devx_obj;
devx_obj = simple_malloc(sizeof(*devx_obj));
if (!devx_obj) {
DR_LOG(ERR, "Failed to allocate memory for flow group object");
rte_errno = ENOMEM;
return NULL;
}
MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
MLX5_SET(create_flow_group_in, in, table_type, fg_attr->table_type);
MLX5_SET(create_flow_group_in, in, table_id, fg_attr->table_id);
devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
if (!devx_obj->obj) {
DR_LOG(ERR, "Failed to create Flow group");
simple_free(devx_obj);
rte_errno = errno;
return NULL;
}
devx_obj->id = MLX5_GET(create_flow_group_out, out, group_id);
return devx_obj;
}
static struct mlx5dr_devx_obj *
mlx5dr_cmd_set_vport_fte(struct ibv_context *ctx,
uint32_t table_type,
uint32_t table_id,
uint32_t group_id,
uint32_t vport_id)
{
uint32_t in[MLX5_ST_SZ_DW(set_fte_in) + MLX5_ST_SZ_DW(dest_format)] = {0};
uint32_t out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
struct mlx5dr_devx_obj *devx_obj;
void *in_flow_context;
void *in_dests;
devx_obj = simple_malloc(sizeof(*devx_obj));
if (!devx_obj) {
DR_LOG(ERR, "Failed to allocate memory for fte object");
rte_errno = ENOMEM;
return NULL;
}
MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
MLX5_SET(set_fte_in, in, table_type, table_type);
MLX5_SET(set_fte_in, in, table_id, table_id);
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
MLX5_SET(flow_context, in_flow_context, destination_list_size, 1);
MLX5_SET(flow_context, in_flow_context, action, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
MLX5_SET(dest_format, in_dests, destination_type,
MLX5_FLOW_DESTINATION_TYPE_VPORT);
MLX5_SET(dest_format, in_dests, destination_id, vport_id);
devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
if (!devx_obj->obj) {
DR_LOG(ERR, "Failed to create FTE");
simple_free(devx_obj);
rte_errno = errno;
return NULL;
}
return devx_obj;
}
void mlx5dr_cmd_miss_ft_destroy(struct mlx5dr_cmd_forward_tbl *tbl)
{
mlx5dr_cmd_destroy_obj(tbl->fte);
mlx5dr_cmd_destroy_obj(tbl->fg);
mlx5dr_cmd_destroy_obj(tbl->ft);
}
struct mlx5dr_cmd_forward_tbl *
mlx5dr_cmd_miss_ft_create(struct ibv_context *ctx,
struct mlx5dr_cmd_ft_create_attr *ft_attr,
uint32_t vport)
{
struct mlx5dr_cmd_fg_attr fg_attr = {0};
struct mlx5dr_cmd_forward_tbl *tbl;
tbl = simple_calloc(1, sizeof(*tbl));
if (!tbl) {
DR_LOG(ERR, "Failed to allocate memory for forward default");
rte_errno = ENOMEM;
return NULL;
}
tbl->ft = mlx5dr_cmd_flow_table_create(ctx, ft_attr);
if (!tbl->ft) {
DR_LOG(ERR, "Failed to create FT for miss-table");
goto free_tbl;
}
fg_attr.table_id = tbl->ft->id;
fg_attr.table_type = ft_attr->type;
tbl->fg = mlx5dr_cmd_flow_group_create(ctx, &fg_attr);
if (!tbl->fg) {
DR_LOG(ERR, "Failed to create FG for miss-table");
goto free_ft;
}
tbl->fte = mlx5dr_cmd_set_vport_fte(ctx, ft_attr->type, tbl->ft->id, tbl->fg->id, vport);
if (!tbl->fte) {
DR_LOG(ERR, "Failed to create FTE for miss-table");
goto free_fg;
}
return tbl;
free_fg:
mlx5dr_cmd_destroy_obj(tbl->fg);
free_ft:
mlx5dr_cmd_destroy_obj(tbl->ft);
free_tbl:
simple_free(tbl);
return NULL;
}
void mlx5dr_cmd_set_attr_connect_miss_tbl(struct mlx5dr_context *ctx,
uint32_t fw_ft_type,
enum mlx5dr_table_type type,
struct mlx5dr_cmd_ft_modify_attr *ft_attr)
{
struct mlx5dr_devx_obj *default_miss_tbl;
if (type != MLX5DR_TABLE_TYPE_FDB)
return;
default_miss_tbl = ctx->common_res[type].default_miss->ft;
if (!default_miss_tbl) {
assert(false);
return;
}
ft_attr->modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION;
ft_attr->type = fw_ft_type;
ft_attr->table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL;
ft_attr->table_miss_id = default_miss_tbl->id;
}
struct mlx5dr_devx_obj *
mlx5dr_cmd_rtc_create(struct ibv_context *ctx,
struct mlx5dr_cmd_rtc_create_attr *rtc_attr)
{
uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
uint32_t in[MLX5_ST_SZ_DW(create_rtc_in)] = {0};
struct mlx5dr_devx_obj *devx_obj;
void *attr;
devx_obj = simple_malloc(sizeof(*devx_obj));
if (!devx_obj) {
DR_LOG(ERR, "Failed to allocate memory for RTC object");
rte_errno = ENOMEM;
return NULL;
}
attr = MLX5_ADDR_OF(create_rtc_in, in, hdr);
MLX5_SET(general_obj_in_cmd_hdr,
attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr,
attr, obj_type, MLX5_GENERAL_OBJ_TYPE_RTC);
attr = MLX5_ADDR_OF(create_rtc_in, in, rtc);
MLX5_SET(rtc, attr, ste_format, rtc_attr->is_jumbo ?
MLX5_IFC_RTC_STE_FORMAT_11DW :
MLX5_IFC_RTC_STE_FORMAT_8DW);
MLX5_SET(rtc, attr, pd, rtc_attr->pd);
MLX5_SET(rtc, attr, update_index_mode, rtc_attr->update_index_mode);
MLX5_SET(rtc, attr, log_depth, rtc_attr->log_depth);
MLX5_SET(rtc, attr, log_hash_size, rtc_attr->log_size);
MLX5_SET(rtc, attr, table_type, rtc_attr->table_type);
MLX5_SET(rtc, attr, match_definer_id, rtc_attr->definer_id);
MLX5_SET(rtc, attr, stc_id, rtc_attr->stc_base);
MLX5_SET(rtc, attr, ste_table_base_id, rtc_attr->ste_base);
MLX5_SET(rtc, attr, ste_table_offset, rtc_attr->ste_offset);
MLX5_SET(rtc, attr, miss_flow_table_id, rtc_attr->miss_ft_id);
MLX5_SET(rtc, attr, reparse_mode, MLX5_IFC_RTC_REPARSE_ALWAYS);
devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
if (!devx_obj->obj) {
DR_LOG(ERR, "Failed to create RTC");
simple_free(devx_obj);
rte_errno = errno;
return NULL;
}
devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
return devx_obj;
}
struct mlx5dr_devx_obj *
mlx5dr_cmd_stc_create(struct ibv_context *ctx,
struct mlx5dr_cmd_stc_create_attr *stc_attr)
{
uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
uint32_t in[MLX5_ST_SZ_DW(create_stc_in)] = {0};
struct mlx5dr_devx_obj *devx_obj;
void *attr;
devx_obj = simple_malloc(sizeof(*devx_obj));
if (!devx_obj) {
DR_LOG(ERR, "Failed to allocate memory for STC object");
rte_errno = ENOMEM;
return NULL;
}
attr = MLX5_ADDR_OF(create_stc_in, in, hdr);
MLX5_SET(general_obj_in_cmd_hdr,
attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr,
attr, obj_type, MLX5_GENERAL_OBJ_TYPE_STC);
MLX5_SET(general_obj_in_cmd_hdr,
attr, log_obj_range, stc_attr->log_obj_range);
attr = MLX5_ADDR_OF(create_stc_in, in, stc);
MLX5_SET(stc, attr, table_type, stc_attr->table_type);
devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
if (!devx_obj->obj) {
DR_LOG(ERR, "Failed to create STC");
simple_free(devx_obj);
rte_errno = errno;
return NULL;
}
devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
return devx_obj;
}
static int
mlx5dr_cmd_stc_modify_set_stc_param(struct mlx5dr_cmd_stc_modify_attr *stc_attr,
void *stc_parm)
{
switch (stc_attr->action_type) {
case MLX5_IFC_STC_ACTION_TYPE_COUNTER:
MLX5_SET(stc_ste_param_flow_counter, stc_parm, flow_counter_id, stc_attr->id);
break;
case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR:
MLX5_SET(stc_ste_param_tir, stc_parm, tirn, stc_attr->dest_tir_num);
break;
case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT:
MLX5_SET(stc_ste_param_table, stc_parm, table_id, stc_attr->dest_table_id);
break;
case MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST:
MLX5_SET(stc_ste_param_header_modify_list, stc_parm,
header_modify_pattern_id, stc_attr->modify_header.pattern_id);
MLX5_SET(stc_ste_param_header_modify_list, stc_parm,
header_modify_argument_id, stc_attr->modify_header.arg_id);
break;
case MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE:
MLX5_SET(stc_ste_param_remove, stc_parm, action_type,
MLX5_MODIFICATION_TYPE_REMOVE);
MLX5_SET(stc_ste_param_remove, stc_parm, decap,
stc_attr->remove_header.decap);
MLX5_SET(stc_ste_param_remove, stc_parm, remove_start_anchor,
stc_attr->remove_header.start_anchor);
MLX5_SET(stc_ste_param_remove, stc_parm, remove_end_anchor,
stc_attr->remove_header.end_anchor);
break;
case MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT:
MLX5_SET(stc_ste_param_insert, stc_parm, action_type,
MLX5_MODIFICATION_TYPE_INSERT);
MLX5_SET(stc_ste_param_insert, stc_parm, encap,
stc_attr->insert_header.encap);
MLX5_SET(stc_ste_param_insert, stc_parm, inline_data,
stc_attr->insert_header.is_inline);
MLX5_SET(stc_ste_param_insert, stc_parm, insert_anchor,
stc_attr->insert_header.insert_anchor);
/* HW expects the next two sizes in words (2 bytes each) */
MLX5_SET(stc_ste_param_insert, stc_parm, insert_size,
stc_attr->insert_header.header_size / 2);
MLX5_SET(stc_ste_param_insert, stc_parm, insert_offset,
stc_attr->insert_header.insert_offset / 2);
MLX5_SET(stc_ste_param_insert, stc_parm, insert_argument,
stc_attr->insert_header.arg_id);
break;
case MLX5_IFC_STC_ACTION_TYPE_COPY:
case MLX5_IFC_STC_ACTION_TYPE_SET:
case MLX5_IFC_STC_ACTION_TYPE_ADD:
*(__be64 *)stc_parm = stc_attr->modify_action.data;
break;
case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT:
case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK:
MLX5_SET(stc_ste_param_vport, stc_parm, vport_number,
stc_attr->vport.vport_num);
MLX5_SET(stc_ste_param_vport, stc_parm, eswitch_owner_vhca_id,
stc_attr->vport.esw_owner_vhca_id);
MLX5_SET(stc_ste_param_vport, stc_parm, eswitch_owner_vhca_id_valid, 1);
break;
case MLX5_IFC_STC_ACTION_TYPE_DROP:
case MLX5_IFC_STC_ACTION_TYPE_NOP:
case MLX5_IFC_STC_ACTION_TYPE_TAG:
case MLX5_IFC_STC_ACTION_TYPE_ALLOW:
break;
case MLX5_IFC_STC_ACTION_TYPE_ASO:
MLX5_SET(stc_ste_param_execute_aso, stc_parm, aso_object_id,
stc_attr->aso.devx_obj_id);
MLX5_SET(stc_ste_param_execute_aso, stc_parm, return_reg_id,
stc_attr->aso.return_reg_id);
MLX5_SET(stc_ste_param_execute_aso, stc_parm, aso_type,
stc_attr->aso.aso_type);
break;
case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE:
MLX5_SET(stc_ste_param_ste_table, stc_parm, ste_obj_id,
stc_attr->ste_table.ste_obj_id);
MLX5_SET(stc_ste_param_ste_table, stc_parm, match_definer_id,
stc_attr->ste_table.match_definer_id);
MLX5_SET(stc_ste_param_ste_table, stc_parm, log_hash_size,
stc_attr->ste_table.log_hash_size);
break;
case MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS:
MLX5_SET(stc_ste_param_remove_words, stc_parm, action_type,
MLX5_MODIFICATION_TYPE_REMOVE_WORDS);
MLX5_SET(stc_ste_param_remove_words, stc_parm, remove_start_anchor,
stc_attr->remove_words.start_anchor);
MLX5_SET(stc_ste_param_remove_words, stc_parm,
remove_size, stc_attr->remove_words.num_of_words);
break;
default:
DR_LOG(ERR, "Not supported type %d", stc_attr->action_type);
rte_errno = EINVAL;
return rte_errno;
}
return 0;
}
int
mlx5dr_cmd_stc_modify(struct mlx5dr_devx_obj *devx_obj,
struct mlx5dr_cmd_stc_modify_attr *stc_attr)
{
uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
uint32_t in[MLX5_ST_SZ_DW(create_stc_in)] = {0};
void *stc_parm;
void *attr;
int ret;
attr = MLX5_ADDR_OF(create_stc_in, in, hdr);
MLX5_SET(general_obj_in_cmd_hdr,
attr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr,
attr, obj_type, MLX5_GENERAL_OBJ_TYPE_STC);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, devx_obj->id);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_offset, stc_attr->stc_offset);
attr = MLX5_ADDR_OF(create_stc_in, in, stc);
MLX5_SET(stc, attr, ste_action_offset, stc_attr->action_offset);
MLX5_SET(stc, attr, action_type, stc_attr->action_type);
MLX5_SET64(stc, attr, modify_field_select,
MLX5_IFC_MODIFY_STC_FIELD_SELECT_NEW_STC);
/* Set destination TIRN, TAG, FT ID, STE ID */
stc_parm = MLX5_ADDR_OF(stc, attr, stc_param);
ret = mlx5dr_cmd_stc_modify_set_stc_param(stc_attr, stc_parm);
if (ret)
return ret;
ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out));
if (ret) {
DR_LOG(ERR, "Failed to modify STC FW action_type %d", stc_attr->action_type);
rte_errno = errno;
}
return ret;
}
struct mlx5dr_devx_obj *
mlx5dr_cmd_arg_create(struct ibv_context *ctx,
uint16_t log_obj_range,
uint32_t pd)
{
uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
uint32_t in[MLX5_ST_SZ_DW(create_arg_in)] = {0};
struct mlx5dr_devx_obj *devx_obj;
void *attr;
devx_obj = simple_malloc(sizeof(*devx_obj));
if (!devx_obj) {
DR_LOG(ERR, "Failed to allocate memory for ARG object");
rte_errno = ENOMEM;
return NULL;
}
attr = MLX5_ADDR_OF(create_arg_in, in, hdr);
MLX5_SET(general_obj_in_cmd_hdr,
attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr,
attr, obj_type, MLX5_GENERAL_OBJ_TYPE_ARG);
MLX5_SET(general_obj_in_cmd_hdr,
attr, log_obj_range, log_obj_range);
attr = MLX5_ADDR_OF(create_arg_in, in, arg);
MLX5_SET(arg, attr, access_pd, pd);
devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
if (!devx_obj->obj) {
DR_LOG(ERR, "Failed to create ARG");
simple_free(devx_obj);
rte_errno = errno;
return NULL;
}
devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
return devx_obj;
}
struct mlx5dr_devx_obj *
mlx5dr_cmd_header_modify_pattern_create(struct ibv_context *ctx,
uint32_t pattern_length,
uint8_t *actions)
{
uint32_t in[MLX5_ST_SZ_DW(create_header_modify_pattern_in)] = {0};
uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
struct mlx5dr_devx_obj *devx_obj;
void *pattern_data;
void *pattern;
void *attr;
if (pattern_length > MAX_ACTIONS_DATA_IN_HEADER_MODIFY) {
DR_LOG(ERR, "Pattern length %d exceeds limit %d",
pattern_length, MAX_ACTIONS_DATA_IN_HEADER_MODIFY);
rte_errno = EINVAL;
return NULL;
}
devx_obj = simple_malloc(sizeof(*devx_obj));
if (!devx_obj) {
DR_LOG(ERR, "Failed to allocate memory for header_modify_pattern object");
rte_errno = ENOMEM;
return NULL;
}
attr = MLX5_ADDR_OF(create_header_modify_pattern_in, in, hdr);
MLX5_SET(general_obj_in_cmd_hdr,
attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr,
attr, obj_type, MLX5_GENERAL_OBJ_TYPE_MODIFY_HEADER_PATTERN);
pattern = MLX5_ADDR_OF(create_header_modify_pattern_in, in, pattern);
/* The pattern_length field is in double-dwords (DDWORDs) */
MLX5_SET(header_modify_pattern_in, pattern, pattern_length, pattern_length / (2 * DW_SIZE));
pattern_data = MLX5_ADDR_OF(header_modify_pattern_in, pattern, pattern_data);
memcpy(pattern_data, actions, pattern_length);
devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
if (!devx_obj->obj) {
DR_LOG(ERR, "Failed to create header_modify_pattern");
rte_errno = errno;
goto free_obj;
}
devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
return devx_obj;
free_obj:
simple_free(devx_obj);
return NULL;
}
struct mlx5dr_devx_obj *
mlx5dr_cmd_ste_create(struct ibv_context *ctx,
struct mlx5dr_cmd_ste_create_attr *ste_attr)
{
uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
uint32_t in[MLX5_ST_SZ_DW(create_ste_in)] = {0};
struct mlx5dr_devx_obj *devx_obj;
void *attr;
devx_obj = simple_malloc(sizeof(*devx_obj));
if (!devx_obj) {
DR_LOG(ERR, "Failed to allocate memory for STE object");
rte_errno = ENOMEM;
return NULL;
}
attr = MLX5_ADDR_OF(create_ste_in, in, hdr);
MLX5_SET(general_obj_in_cmd_hdr,
attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr,
attr, obj_type, MLX5_GENERAL_OBJ_TYPE_STE);
MLX5_SET(general_obj_in_cmd_hdr,
attr, log_obj_range, ste_attr->log_obj_range);
attr = MLX5_ADDR_OF(create_ste_in, in, ste);
MLX5_SET(ste, attr, table_type, ste_attr->table_type);
devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
if (!devx_obj->obj) {
DR_LOG(ERR, "Failed to create STE");
simple_free(devx_obj);
rte_errno = errno;
return NULL;
}
devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
return devx_obj;
}
struct mlx5dr_devx_obj *
mlx5dr_cmd_definer_create(struct ibv_context *ctx,
struct mlx5dr_cmd_definer_create_attr *def_attr)
{
uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
uint32_t in[MLX5_ST_SZ_DW(create_definer_in)] = {0};
struct mlx5dr_devx_obj *devx_obj;
void *ptr;
devx_obj = simple_malloc(sizeof(*devx_obj));
if (!devx_obj) {
DR_LOG(ERR, "Failed to allocate memory for definer object");
rte_errno = ENOMEM;
return NULL;
}
MLX5_SET(general_obj_in_cmd_hdr,
in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr,
in, obj_type, MLX5_GENERAL_OBJ_TYPE_DEFINER);
ptr = MLX5_ADDR_OF(create_definer_in, in, definer);
MLX5_SET(definer, ptr, format_id, MLX5_IFC_DEFINER_FORMAT_ID_SELECT);
MLX5_SET(definer, ptr, format_select_dw0, def_attr->dw_selector[0]);
MLX5_SET(definer, ptr, format_select_dw1, def_attr->dw_selector[1]);
MLX5_SET(definer, ptr, format_select_dw2, def_attr->dw_selector[2]);
MLX5_SET(definer, ptr, format_select_dw3, def_attr->dw_selector[3]);
MLX5_SET(definer, ptr, format_select_dw4, def_attr->dw_selector[4]);
MLX5_SET(definer, ptr, format_select_dw5, def_attr->dw_selector[5]);
MLX5_SET(definer, ptr, format_select_dw6, def_attr->dw_selector[6]);
MLX5_SET(definer, ptr, format_select_dw7, def_attr->dw_selector[7]);
MLX5_SET(definer, ptr, format_select_dw8, def_attr->dw_selector[8]);
MLX5_SET(definer, ptr, format_select_byte0, def_attr->byte_selector[0]);
MLX5_SET(definer, ptr, format_select_byte1, def_attr->byte_selector[1]);
MLX5_SET(definer, ptr, format_select_byte2, def_attr->byte_selector[2]);
MLX5_SET(definer, ptr, format_select_byte3, def_attr->byte_selector[3]);
MLX5_SET(definer, ptr, format_select_byte4, def_attr->byte_selector[4]);
MLX5_SET(definer, ptr, format_select_byte5, def_attr->byte_selector[5]);
MLX5_SET(definer, ptr, format_select_byte6, def_attr->byte_selector[6]);
MLX5_SET(definer, ptr, format_select_byte7, def_attr->byte_selector[7]);
ptr = MLX5_ADDR_OF(definer, ptr, match_mask);
memcpy(ptr, def_attr->match_mask, MLX5_FLD_SZ_BYTES(definer, match_mask));
devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
if (!devx_obj->obj) {
DR_LOG(ERR, "Failed to create Definer");
simple_free(devx_obj);
rte_errno = errno;
return NULL;
}
devx_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
return devx_obj;
}
struct mlx5dr_devx_obj *
mlx5dr_cmd_sq_create(struct ibv_context *ctx,
struct mlx5dr_cmd_sq_create_attr *attr)
{
uint32_t out[MLX5_ST_SZ_DW(create_sq_out)] = {0};
uint32_t in[MLX5_ST_SZ_DW(create_sq_in)] = {0};
void *sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
void *wqc = MLX5_ADDR_OF(sqc, sqc, wq);
struct mlx5dr_devx_obj *devx_obj;
devx_obj = simple_malloc(sizeof(*devx_obj));
if (!devx_obj) {
DR_LOG(ERR, "Failed to create SQ");
rte_errno = ENOMEM;
return NULL;
}
MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
MLX5_SET(sqc, sqc, cqn, attr->cqn);
MLX5_SET(sqc, sqc, flush_in_error_en, 1);
MLX5_SET(sqc, sqc, non_wire, 1);
MLX5_SET(wq, wqc, wq_type, MLX5_WQ_TYPE_CYCLIC);
MLX5_SET(wq, wqc, pd, attr->pdn);
MLX5_SET(wq, wqc, uar_page, attr->page_id);
MLX5_SET(wq, wqc, log_wq_stride, log2above(MLX5_SEND_WQE_BB));
MLX5_SET(wq, wqc, log_wq_sz, attr->log_wq_sz);
MLX5_SET(wq, wqc, dbr_umem_id, attr->dbr_id);
MLX5_SET(wq, wqc, wq_umem_id, attr->wq_id);
devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
if (!devx_obj->obj) {
simple_free(devx_obj);
rte_errno = errno;
return NULL;
}
devx_obj->id = MLX5_GET(create_sq_out, out, sqn);
return devx_obj;
}
int mlx5dr_cmd_sq_modify_rdy(struct mlx5dr_devx_obj *devx_obj)
{
uint32_t out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
uint32_t in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
void *sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
int ret;
MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
MLX5_SET(modify_sq_in, in, sqn, devx_obj->id);
MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);
ret = mlx5_glue->devx_obj_modify(devx_obj->obj, in, sizeof(in), out, sizeof(out));
if (ret) {
DR_LOG(ERR, "Failed to modify SQ");
rte_errno = errno;
}
return ret;
}
int mlx5dr_cmd_query_caps(struct ibv_context *ctx,
struct mlx5dr_cmd_query_caps *caps)
{
uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
const struct flow_hw_port_info *port_info;
struct ibv_device_attr_ex attr_ex;
int ret;
MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
MLX5_SET(query_hca_cap_in, in, op_mod,
MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
MLX5_HCA_CAP_OPMOD_GET_CUR);
ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
if (ret) {
DR_LOG(ERR, "Failed to query device caps");
rte_errno = errno;
return rte_errno;
}
caps->wqe_based_update =
MLX5_GET(query_hca_cap_out, out,
capability.cmd_hca_cap.wqe_based_flow_table_update_cap);
caps->eswitch_manager = MLX5_GET(query_hca_cap_out, out,
capability.cmd_hca_cap.eswitch_manager);
caps->flex_protocols = MLX5_GET(query_hca_cap_out, out,
capability.cmd_hca_cap.flex_parser_protocols);
caps->log_header_modify_argument_granularity =
MLX5_GET(query_hca_cap_out, out,
capability.cmd_hca_cap.log_header_modify_argument_granularity);
caps->log_header_modify_argument_granularity -=
MLX5_GET(query_hca_cap_out, out,
capability.cmd_hca_cap.
log_header_modify_argument_granularity_offset);
caps->log_header_modify_argument_max_alloc =
MLX5_GET(query_hca_cap_out, out,
capability.cmd_hca_cap.log_header_modify_argument_max_alloc);
caps->definer_format_sup =
MLX5_GET64(query_hca_cap_out, out,
capability.cmd_hca_cap.match_definer_format_supported);
MLX5_SET(query_hca_cap_in, in, op_mod,
MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 |
MLX5_HCA_CAP_OPMOD_GET_CUR);
ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
if (ret) {
DR_LOG(ERR, "Failed to query device caps");
rte_errno = errno;
return rte_errno;
}
caps->full_dw_jumbo_support = MLX5_GET(query_hca_cap_out, out,
capability.cmd_hca_cap_2.
format_select_dw_8_6_ext);
caps->format_select_gtpu_dw_0 = MLX5_GET(query_hca_cap_out, out,
capability.cmd_hca_cap_2.
format_select_dw_gtpu_dw_0);
caps->format_select_gtpu_dw_1 = MLX5_GET(query_hca_cap_out, out,
capability.cmd_hca_cap_2.
format_select_dw_gtpu_dw_1);
caps->format_select_gtpu_dw_2 = MLX5_GET(query_hca_cap_out, out,
capability.cmd_hca_cap_2.
format_select_dw_gtpu_dw_2);
caps->format_select_gtpu_ext_dw_0 = MLX5_GET(query_hca_cap_out, out,
capability.cmd_hca_cap_2.
format_select_dw_gtpu_first_ext_dw_0);
MLX5_SET(query_hca_cap_in, in, op_mod,
MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE |
MLX5_HCA_CAP_OPMOD_GET_CUR);
ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
if (ret) {
DR_LOG(ERR, "Failed to query flow table caps");
rte_errno = errno;
return rte_errno;
}
caps->nic_ft.max_level = MLX5_GET(query_hca_cap_out, out,
capability.flow_table_nic_cap.
flow_table_properties_nic_receive.max_ft_level);
caps->nic_ft.reparse = MLX5_GET(query_hca_cap_out, out,
capability.flow_table_nic_cap.
flow_table_properties_nic_receive.reparse);
if (caps->wqe_based_update) {
MLX5_SET(query_hca_cap_in, in, op_mod,
MLX5_GET_HCA_CAP_OP_MOD_WQE_BASED_FLOW_TABLE |
MLX5_HCA_CAP_OPMOD_GET_CUR);
ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
if (ret) {
DR_LOG(ERR, "Failed to query WQE based FT caps");
rte_errno = errno;
return rte_errno;
}
caps->rtc_reparse_mode = MLX5_GET(query_hca_cap_out, out,
capability.wqe_based_flow_table_cap.
rtc_reparse_mode);
caps->ste_format = MLX5_GET(query_hca_cap_out, out,
capability.wqe_based_flow_table_cap.
ste_format);
caps->rtc_index_mode = MLX5_GET(query_hca_cap_out, out,
capability.wqe_based_flow_table_cap.
rtc_index_mode);
caps->rtc_log_depth_max = MLX5_GET(query_hca_cap_out, out,
capability.wqe_based_flow_table_cap.
rtc_log_depth_max);
caps->ste_alloc_log_max = MLX5_GET(query_hca_cap_out, out,
capability.wqe_based_flow_table_cap.
ste_alloc_log_max);
caps->ste_alloc_log_gran = MLX5_GET(query_hca_cap_out, out,
capability.wqe_based_flow_table_cap.
ste_alloc_log_granularity);
caps->trivial_match_definer = MLX5_GET(query_hca_cap_out, out,
capability.wqe_based_flow_table_cap.
trivial_match_definer);
caps->stc_alloc_log_max = MLX5_GET(query_hca_cap_out, out,
capability.wqe_based_flow_table_cap.
stc_alloc_log_max);
caps->stc_alloc_log_gran = MLX5_GET(query_hca_cap_out, out,
capability.wqe_based_flow_table_cap.
stc_alloc_log_granularity);
}
if (caps->eswitch_manager) {
MLX5_SET(query_hca_cap_in, in, op_mod,
MLX5_GET_HCA_CAP_OP_MOD_ESW_FLOW_TABLE |
MLX5_HCA_CAP_OPMOD_GET_CUR);
ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
if (ret) {
DR_LOG(ERR, "Failed to query flow table esw caps");
rte_errno = errno;
return rte_errno;
}
caps->fdb_ft.max_level = MLX5_GET(query_hca_cap_out, out,
capability.flow_table_nic_cap.
flow_table_properties_nic_receive.max_ft_level);
caps->fdb_ft.reparse = MLX5_GET(query_hca_cap_out, out,
capability.flow_table_nic_cap.
flow_table_properties_nic_receive.reparse);
MLX5_SET(query_hca_cap_in, in, op_mod,
MLX5_SET_HCA_CAP_OP_MOD_ESW | MLX5_HCA_CAP_OPMOD_GET_CUR);
ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
if (ret) {
DR_LOG(ERR, "Query eswitch capabilities failed %d\n", ret);
rte_errno = errno;
return rte_errno;
}
if (MLX5_GET(query_hca_cap_out, out,
capability.esw_cap.esw_manager_vport_number_valid))
caps->eswitch_manager_vport_number =
MLX5_GET(query_hca_cap_out, out,
capability.esw_cap.esw_manager_vport_number);
}
ret = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
if (ret) {
DR_LOG(ERR, "Failed to query device attributes");
rte_errno = ret;
return rte_errno;
}
strlcpy(caps->fw_ver, attr_ex.orig_attr.fw_ver, sizeof(caps->fw_ver));
port_info = flow_hw_get_wire_port(ctx);
if (port_info) {
caps->wire_regc = port_info->regc_value;
caps->wire_regc_mask = port_info->regc_mask;
} else {
DR_LOG(INFO, "Failed to query wire port regc value");
}
return ret;
}
int mlx5dr_cmd_query_ib_port(struct ibv_context *ctx,
struct mlx5dr_cmd_query_vport_caps *vport_caps,
uint32_t port_num)
{
struct mlx5_port_info port_info = {0};
uint32_t flags;
int ret;
flags = MLX5_PORT_QUERY_VPORT | MLX5_PORT_QUERY_ESW_OWNER_VHCA_ID;
ret = mlx5_glue->devx_port_query(ctx, port_num, &port_info);
/* Check whether the query succeeded and the vport is enabled */
if (ret || (port_info.query_flags & flags) != flags) {
rte_errno = ENOTSUP;
return rte_errno;
}
vport_caps->vport_num = port_info.vport_id;
vport_caps->esw_owner_vhca_id = port_info.esw_owner_vhca_id;
if (port_info.query_flags & MLX5_PORT_QUERY_REG_C0) {
vport_caps->metadata_c = port_info.vport_meta_tag;
vport_caps->metadata_c_mask = port_info.vport_meta_mask;
}
return 0;
}


@@ -0,0 +1,230 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2022 NVIDIA Corporation & Affiliates
*/
#ifndef MLX5DR_CMD_H_
#define MLX5DR_CMD_H_
struct mlx5dr_cmd_ft_create_attr {
uint8_t type;
uint8_t level;
bool rtc_valid;
};
struct mlx5dr_cmd_ft_modify_attr {
uint8_t type;
uint32_t rtc_id_0;
uint32_t rtc_id_1;
uint32_t table_miss_id;
uint8_t table_miss_action;
uint64_t modify_fs;
};
struct mlx5dr_cmd_fg_attr {
uint32_t table_id;
uint32_t table_type;
};
struct mlx5dr_cmd_forward_tbl {
struct mlx5dr_devx_obj *ft;
struct mlx5dr_devx_obj *fg;
struct mlx5dr_devx_obj *fte;
uint32_t refcount;
};
struct mlx5dr_cmd_rtc_create_attr {
uint32_t pd;
uint32_t stc_base;
uint32_t ste_base;
uint32_t ste_offset;
uint32_t miss_ft_id;
uint8_t update_index_mode;
uint8_t log_depth;
uint8_t log_size;
uint8_t table_type;
uint8_t definer_id;
bool is_jumbo;
};
struct mlx5dr_cmd_stc_create_attr {
uint8_t log_obj_range;
uint8_t table_type;
};
struct mlx5dr_cmd_stc_modify_attr {
uint32_t stc_offset;
uint8_t action_offset;
enum mlx5_ifc_stc_action_type action_type;
union {
uint32_t id; /* TIRN, TAG, FT ID, STE ID */
struct {
uint8_t decap;
uint16_t start_anchor;
uint16_t end_anchor;
} remove_header;
struct {
uint32_t arg_id;
uint32_t pattern_id;
} modify_header;
struct {
__be64 data;
} modify_action;
struct {
uint32_t arg_id;
uint32_t header_size;
uint8_t is_inline;
uint8_t encap;
uint16_t insert_anchor;
uint16_t insert_offset;
} insert_header;
struct {
uint8_t aso_type;
uint32_t devx_obj_id;
uint8_t return_reg_id;
} aso;
struct {
uint16_t vport_num;
uint16_t esw_owner_vhca_id;
} vport;
struct {
struct mlx5dr_pool_chunk ste;
struct mlx5dr_pool *ste_pool;
uint32_t ste_obj_id; /* Internal */
uint32_t match_definer_id;
uint8_t log_hash_size;
} ste_table;
struct {
uint16_t start_anchor;
uint16_t num_of_words;
} remove_words;
uint32_t dest_table_id;
uint32_t dest_tir_num;
};
};
struct mlx5dr_cmd_ste_create_attr {
uint8_t log_obj_range;
uint8_t table_type;
};
struct mlx5dr_cmd_definer_create_attr {
uint8_t *dw_selector;
uint8_t *byte_selector;
uint8_t *match_mask;
};
struct mlx5dr_cmd_sq_create_attr {
uint32_t cqn;
uint32_t pdn;
uint32_t page_id;
uint32_t dbr_id;
uint32_t wq_id;
uint32_t log_wq_sz;
};
struct mlx5dr_cmd_query_ft_caps {
uint8_t max_level;
uint8_t reparse;
};
struct mlx5dr_cmd_query_vport_caps {
uint16_t vport_num;
uint16_t esw_owner_vhca_id;
uint32_t metadata_c;
uint32_t metadata_c_mask;
};
struct mlx5dr_cmd_query_caps {
uint32_t wire_regc;
uint32_t wire_regc_mask;
uint32_t flex_protocols;
uint8_t wqe_based_update;
uint8_t rtc_reparse_mode;
uint16_t ste_format;
uint8_t rtc_index_mode;
uint8_t ste_alloc_log_max;
uint8_t ste_alloc_log_gran;
uint8_t stc_alloc_log_max;
uint8_t stc_alloc_log_gran;
uint8_t rtc_log_depth_max;
uint8_t format_select_gtpu_dw_0;
uint8_t format_select_gtpu_dw_1;
uint8_t format_select_gtpu_dw_2;
uint8_t format_select_gtpu_ext_dw_0;
bool full_dw_jumbo_support;
struct mlx5dr_cmd_query_ft_caps nic_ft;
struct mlx5dr_cmd_query_ft_caps fdb_ft;
bool eswitch_manager;
uint32_t eswitch_manager_vport_number;
uint8_t log_header_modify_argument_granularity;
uint8_t log_header_modify_argument_max_alloc;
uint64_t definer_format_sup;
uint32_t trivial_match_definer;
char fw_ver[64];
};
int mlx5dr_cmd_destroy_obj(struct mlx5dr_devx_obj *devx_obj);
struct mlx5dr_devx_obj *
mlx5dr_cmd_flow_table_create(struct ibv_context *ctx,
struct mlx5dr_cmd_ft_create_attr *ft_attr);
int
mlx5dr_cmd_flow_table_modify(struct mlx5dr_devx_obj *devx_obj,
struct mlx5dr_cmd_ft_modify_attr *ft_attr);
struct mlx5dr_devx_obj *
mlx5dr_cmd_rtc_create(struct ibv_context *ctx,
struct mlx5dr_cmd_rtc_create_attr *rtc_attr);
struct mlx5dr_devx_obj *
mlx5dr_cmd_stc_create(struct ibv_context *ctx,
struct mlx5dr_cmd_stc_create_attr *stc_attr);
int
mlx5dr_cmd_stc_modify(struct mlx5dr_devx_obj *devx_obj,
struct mlx5dr_cmd_stc_modify_attr *stc_attr);
struct mlx5dr_devx_obj *
mlx5dr_cmd_ste_create(struct ibv_context *ctx,
struct mlx5dr_cmd_ste_create_attr *ste_attr);
struct mlx5dr_devx_obj *
mlx5dr_cmd_definer_create(struct ibv_context *ctx,
struct mlx5dr_cmd_definer_create_attr *def_attr);
struct mlx5dr_devx_obj *
mlx5dr_cmd_sq_create(struct ibv_context *ctx,
struct mlx5dr_cmd_sq_create_attr *attr);
struct mlx5dr_devx_obj *
mlx5dr_cmd_arg_create(struct ibv_context *ctx,
uint16_t log_obj_range,
uint32_t pd);
struct mlx5dr_devx_obj *
mlx5dr_cmd_header_modify_pattern_create(struct ibv_context *ctx,
uint32_t pattern_length,
uint8_t *actions);
int mlx5dr_cmd_sq_modify_rdy(struct mlx5dr_devx_obj *devx_obj);
int mlx5dr_cmd_query_ib_port(struct ibv_context *ctx,
struct mlx5dr_cmd_query_vport_caps *vport_caps,
uint32_t port_num);
int mlx5dr_cmd_query_caps(struct ibv_context *ctx,
struct mlx5dr_cmd_query_caps *caps);
void mlx5dr_cmd_miss_ft_destroy(struct mlx5dr_cmd_forward_tbl *tbl);
struct mlx5dr_cmd_forward_tbl *
mlx5dr_cmd_miss_ft_create(struct ibv_context *ctx,
struct mlx5dr_cmd_ft_create_attr *ft_attr,
uint32_t vport);
void mlx5dr_cmd_set_attr_connect_miss_tbl(struct mlx5dr_context *ctx,
uint32_t fw_ft_type,
enum mlx5dr_table_type type,
struct mlx5dr_cmd_ft_modify_attr *ft_attr);
#endif /* MLX5DR_CMD_H_ */