Add new mlx5 core firmware structure definitions and bits, and update existing ones.

This change is part of the upcoming ibcore and mlx5ib updates.

Sponsored by:	Mellanox Technologies
MFC after:	1 week
hselasky 2017-11-10 14:39:03 +00:00
parent 16ff114873
commit 75528d7f53
5 changed files with 186 additions and 42 deletions


@@ -100,6 +100,35 @@ __mlx5_mask(typ, fld))
#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))
#define MLX5_GET64_BE(typ, p, fld) (*((__be64 *)(p) +\
__mlx5_64_off(typ, fld)))
#define MLX5_GET_BE(type_t, typ, p, fld) ({ \
type_t tmp; \
switch (sizeof(tmp)) { \
case sizeof(u8): \
tmp = (__force type_t)MLX5_GET(typ, p, fld); \
break; \
case sizeof(u16): \
tmp = (__force type_t)cpu_to_be16(MLX5_GET(typ, p, fld)); \
break; \
case sizeof(u32): \
tmp = (__force type_t)cpu_to_be32(MLX5_GET(typ, p, fld)); \
break; \
case sizeof(u64): \
tmp = (__force type_t)MLX5_GET64_BE(typ, p, fld); \
break; \
} \
tmp; \
})
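As a usage sketch (illustrative only, not part of this change), the accessors above can be combined as follows; the qpc layout and its state/q_key fields are assumed from mlx5_ifc.h, and the helper name is hypothetical:
/* Read one field in CPU endianness and one directly in big-endian form. */
static inline void
mlx5_example_read_qpc(void *qpc_ptr)
{
        u8 state;
        __be32 qkey_be;

        state = MLX5_GET(qpc, qpc_ptr, state);
        qkey_be = MLX5_GET_BE(__be32, qpc, qpc_ptr, q_key);
        (void)state;
        (void)qkey_be;
}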
#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 8
#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 8
#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
#define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\
MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\
MLX5_BY_PASS_NUM_MULTICAST_PRIOS)
enum {
MLX5_MAX_COMMANDS = 32,
MLX5_CMD_DATA_BLOCK_SIZE = 512,
@@ -326,6 +355,17 @@ enum {
MLX5_CAP_OFF_CMDIF_CSUM = 46,
};
enum {
/*
* Max wqe size for rdma read is 512 bytes, so this
* limits our max_sge_rd as the wqe needs to fit:
* - ctrl segment (16 bytes)
* - rdma segment (16 bytes)
* - scatter elements (16 bytes each)
*/
MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16
};
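Spelled out, the limit above evaluates to (512 - 16 - 16) / 16 = 480 / 16 = 30 scatter elements per RDMA READ WQE.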
struct mlx5_inbox_hdr {
__be16 opcode;
u8 rsvd[4];
@@ -648,9 +688,9 @@ enum {
};
enum {
CQE_ROCE_L3_HEADER_TYPE_GRH = 0x0,
CQE_ROCE_L3_HEADER_TYPE_IPV6 = 0x1,
CQE_ROCE_L3_HEADER_TYPE_IPV4 = 0x2,
MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH = 0x0,
MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6 = 0x1,
MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4 = 0x2,
};
enum {
@@ -1288,6 +1328,7 @@ enum {
MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10,
MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12,
MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16,
MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
};


@@ -306,6 +306,11 @@ struct cmd_msg_cache {
};
struct mlx5_traffic_counter {
u64 packets;
u64 octets;
};
struct mlx5_cmd_stats {
u64 sum;
u64 n;
@@ -743,6 +748,13 @@ struct mlx5_pas {
u8 log_sz;
};
enum port_state_policy {
MLX5_POLICY_DOWN = 0,
MLX5_POLICY_UP = 1,
MLX5_POLICY_FOLLOW = 2,
MLX5_POLICY_INVALID = 0xffffffff
};
static inline void *
mlx5_buf_offset(struct mlx5_buf *buf, int offset)
{
@@ -811,6 +823,11 @@ static inline void *mlx5_vmalloc(unsigned long size)
return rtn;
}
static inline u32 mlx5_base_mkey(const u32 key)
{
return key & 0xffffff00u;
}
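A brief illustration (hypothetical helper, not part of this change): memory keys carry an 8-bit variant in their low byte, so comparisons that should ignore key cycling can be done on the base part only:
static inline bool
mlx5_example_same_base_mkey(u32 key_a, u32 key_b)
{
        /* Ignore the 8 variant bits when comparing two memory keys. */
        return (mlx5_base_mkey(key_a) == mlx5_base_mkey(key_b));
}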
void mlx5_enter_error_state(struct mlx5_core_dev *dev);
int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);


@@ -59,13 +59,6 @@ enum {
MLX5_IB_CACHE_LINE_SIZE = 64,
};
enum {
MLX5_RQ_NUM_STATE = MLX5_RQC_STATE_ERR + 1,
MLX5_SQ_NUM_STATE = MLX5_SQC_STATE_ERR + 1,
MLX5_QP_STATE = MLX5_QP_NUM_STATE + 1,
MLX5_QP_STATE_BAD = MLX5_QP_STATE + 1,
};
static const u32 mlx5_ib_opcode[] = {
[IB_WR_SEND] = MLX5_OPCODE_SEND,
[IB_WR_SEND_WITH_IMM] = MLX5_OPCODE_SEND_IMM,


@@ -761,7 +761,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 multi_pkt_send_wqe[0x2];
u8 wqe_inline_mode[0x2];
u8 rss_ind_tbl_cap[0x4];
u8 reserved_1[0x3];
u8 scatter_fcs[0x1];
u8 reserved_1[0x2];
u8 tunnel_lso_const_out_ip_id[0x1];
u8 tunnel_lro_gre[0x1];
u8 tunnel_lro_vxlan[0x1];
@@ -1050,10 +1051,12 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 cd[0x1];
u8 atm[0x1];
u8 apm[0x1];
u8 reserved_32[0x7];
u8 imaicl[0x1];
u8 reserved_32[0x6];
u8 qkv[0x1];
u8 pkv[0x1];
u8 reserved_33[0x4];
u8 set_deth_sqpn[0x1];
u8 reserved_33[0x3];
u8 xrc[0x1];
u8 ud[0x1];
u8 uc[0x1];
@@ -1805,7 +1808,7 @@ enum {
struct mlx5_ifc_qpc_bits {
u8 state[0x4];
u8 reserved_0[0x4];
u8 lag_tx_port_affinity[0x4];
u8 st[0x8];
u8 reserved_1[0x3];
u8 pm_state[0x2];
@@ -1867,7 +1870,10 @@ struct mlx5_ifc_qpc_bits {
u8 reserved_16[0x8];
u8 cqn_snd[0x18];
u8 reserved_17[0x40];
u8 reserved_at_400[0x8];
u8 deth_sqpn[0x18];
u8 reserved_17[0x20];
u8 reserved_18[0x8];
u8 last_acked_psn[0x18];
@@ -2065,7 +2071,11 @@ struct mlx5_ifc_traffic_counter_bits {
};
struct mlx5_ifc_tisc_bits {
u8 reserved_0[0xc];
u8 strict_lag_tx_port_affinity[0x1];
u8 reserved_at_1[0x3];
u8 lag_tx_port_affinity[0x04];
u8 reserved_at_8[0x4];
u8 prio[0x4];
u8 reserved_1[0x10];
@@ -4662,30 +4672,29 @@ struct mlx5_ifc_query_flow_group_in_bits {
struct mlx5_ifc_query_flow_counter_out_bits {
u8 status[0x8];
u8 reserved_0[0x18];
u8 reserved_at_8[0x18];
u8 syndrome[0x20];
u8 reserved_1[0x40];
u8 reserved_at_40[0x40];
struct mlx5_ifc_traffic_counter_bits flow_statistics;
u8 reserved_2[0x700];
struct mlx5_ifc_traffic_counter_bits flow_statistics[0];
};
struct mlx5_ifc_query_flow_counter_in_bits {
u8 opcode[0x10];
u8 reserved_0[0x10];
u8 reserved_at_10[0x10];
u8 reserved_1[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 reserved_2[0x80];
u8 reserved_at_40[0x80];
u8 clear[0x1];
u8 reserved_3[0x1f];
u8 reserved_at_c1[0xf];
u8 num_of_counters[0x10];
u8 reserved_4[0x10];
u8 reserved_at_e0[0x10];
u8 flow_counter_id[0x10];
};
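A hedged usage sketch (helper name hypothetical): a QUERY_FLOW_COUNTER mailbox for a single counter would be filled roughly as follows, using the MLX5_SET accessor and the command opcode defined elsewhere in this header:
static inline void
mlx5_example_build_query_flow_counter(void *in, u16 counter_id)
{
        MLX5_SET(query_flow_counter_in, in, opcode,
            MLX5_CMD_OP_QUERY_FLOW_COUNTER);
        MLX5_SET(query_flow_counter_in, in, num_of_counters, 1);
        MLX5_SET(query_flow_counter_in, in, flow_counter_id, counter_id);
}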
@@ -5111,6 +5120,15 @@ struct mlx5_ifc_modify_tis_out_bits {
u8 reserved_1[0x40];
};
struct mlx5_ifc_modify_tis_bitmask_bits {
u8 reserved_at_0[0x20];
u8 reserved_at_20[0x1d];
u8 lag_tx_port_affinity[0x1];
u8 strict_lag_tx_port_affinity[0x1];
u8 prio[0x1];
};
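For illustration (a sketch assuming the TIS context member of modify_tis_in is named ctx, as in the Linux layout), steering a TIS to a LAG port sets both the bitmask bit and the context field:
static inline void
mlx5_example_set_tis_affinity(void *in, u8 tx_affinity)
{
        MLX5_SET(modify_tis_in, in, bitmask.lag_tx_port_affinity, 1);
        MLX5_SET(modify_tis_in, in, ctx.lag_tx_port_affinity, tx_affinity);
}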
struct mlx5_ifc_modify_tis_in_bits {
u8 opcode[0x10];
u8 reserved_0[0x10];
@@ -5123,7 +5141,7 @@ struct mlx5_ifc_modify_tis_in_bits {
u8 reserved_3[0x20];
u8 modify_bitmask[0x40];
struct mlx5_ifc_modify_tis_bitmask_bits bitmask;
u8 reserved_4[0x40];
@@ -5271,12 +5289,9 @@ struct mlx5_ifc_modify_rq_out_bits {
u8 reserved_1[0x40];
};
struct mlx5_ifc_rq_bitmask_bits {
u8 reserved[0x20];
u8 reserved1[0x1e];
u8 vlan_strip_disable[0x1];
u8 reserved2[0x1];
enum {
MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD = 1ULL << 1,
MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_MODIFY_RQ_COUNTER_SET_ID = 1ULL << 3,
};
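Similarly, a hedged sketch of a vlan-strip-disable change through MODIFY_RQ (assuming the RQ context member is named ctx and carries a vsd bit, as in the Linux layout): the 64-bit modify_bitmask selects the field and the context carries the new value:
static inline void
mlx5_example_set_rq_vsd(void *in, bool vsd)
{
        MLX5_SET64(modify_rq_in, in, modify_bitmask,
            MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
        MLX5_SET(modify_rq_in, in, ctx.vsd, vsd ? 1 : 0);
}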
struct mlx5_ifc_modify_rq_in_bits {
@@ -5292,7 +5307,7 @@ struct mlx5_ifc_modify_rq_in_bits {
u8 reserved_3[0x20];
struct mlx5_ifc_rq_bitmask_bits bitmask;
u8 modify_bitmask[0x40];
u8 reserved_4[0x40];
@@ -8133,6 +8148,36 @@ struct mlx5_ifc_phys_layer_cntrs_bits {
u8 reserved_0[0x180];
};
struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits {
u8 symbol_error_counter[0x10];
u8 link_error_recovery_counter[0x8];
u8 link_downed_counter[0x8];
u8 port_rcv_errors[0x10];
u8 port_rcv_remote_physical_errors[0x10];
u8 port_rcv_switch_relay_errors[0x10];
u8 port_xmit_discards[0x10];
u8 port_xmit_constraint_errors[0x8];
u8 port_rcv_constraint_errors[0x8];
u8 reserved_at_70[0x8];
u8 link_overrun_errors[0x8];
u8 reserved_at_80[0x10];
u8 vl_15_dropped[0x10];
u8 reserved_at_a0[0xa0];
};
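As context (an illustrative sketch, not part of this change), this layout is what the PPCNT access register returns when the InfiniBand port counters group from device.h is selected:
static inline void
mlx5_example_fill_ppcnt_query(void *in, u8 local_port)
{
        MLX5_SET(ppcnt_reg, in, local_port, local_port);
        MLX5_SET(ppcnt_reg, in, grp, MLX5_INFINIBAND_PORT_COUNTERS_GROUP);
}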
struct mlx5_ifc_phys_layer_statistical_cntrs_bits {
u8 time_since_last_clear_high[0x20];


@@ -28,9 +28,7 @@
#ifndef MLX5_QP_H
#define MLX5_QP_H
#include <dev/mlx5/device.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/mlx5_ifc.h>
#define MLX5_INVALID_LKEY 0x100
#define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 5)
@@ -45,6 +43,7 @@
#define MLX5_BSF_REPEAT_BLOCK (1 << 7)
#define MLX5_BSF_APPTAG_ESCAPE 0x1
#define MLX5_BSF_APPREF_ESCAPE 0x2
#define MLX5_WQE_DS_UNITS 16
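Worked example of the unit above: a send WQE consisting of a 16-byte ctrl segment, a 16-byte remote address segment and one 16-byte data pointer spans 48 / MLX5_WQE_DS_UNITS = 3 data segments.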
enum mlx5_qp_optpar {
MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0,
@@ -78,7 +77,16 @@ enum mlx5_qp_state {
MLX5_QP_STATE_ERR = 6,
MLX5_QP_STATE_SQ_DRAINING = 7,
MLX5_QP_STATE_SUSPENDED = 9,
MLX5_QP_NUM_STATE
MLX5_QP_NUM_STATE,
MLX5_QP_STATE,
MLX5_QP_STATE_BAD,
};
enum {
MLX5_SQ_STATE_NA = MLX5_SQC_STATE_ERR + 1,
MLX5_SQ_NUM_STATE = MLX5_SQ_STATE_NA + 1,
MLX5_RQ_STATE_NA = MLX5_RQC_STATE_ERR + 1,
MLX5_RQ_NUM_STATE = MLX5_RQ_STATE_NA + 1,
};
enum {
@@ -157,6 +165,7 @@ enum {
enum {
MLX5_FENCE_MODE_NONE = 0 << 5,
MLX5_FENCE_MODE_INITIATOR_SMALL = 1 << 5,
MLX5_FENCE_MODE_FENCE = 2 << 5,
MLX5_FENCE_MODE_STRONG_ORDERING = 3 << 5,
MLX5_FENCE_MODE_SMALL_AND_FENCE = 4 << 5,
};
@@ -198,6 +207,8 @@ struct mlx5_wqe_ctrl_seg {
__be32 imm;
};
#define MLX5_WQE_CTRL_DS_MASK 0x3f
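A small sketch of how the two defines above fit together (illustrative helper, not part of this change): the low bits of the ctrl segment's qpn_ds word give the WQE size in 16-byte units:
static inline u32
mlx5_example_wqe_size_bytes(const struct mlx5_wqe_ctrl_seg *ctrl)
{
        u8 ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;

        return ((u32)ds * MLX5_WQE_DS_UNITS);
}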
enum {
MLX5_MLX_FLAG_MASK_VL15 = 0x40,
MLX5_MLX_FLAG_MASK_SLR = 0x20,
@@ -221,10 +232,10 @@ enum {
};
enum {
MLX5_ETH_WQE_SWP_OUTER_L3_TYPE = 1 << 0,
MLX5_ETH_WQE_SWP_OUTER_L4_TYPE = 1 << 1,
MLX5_ETH_WQE_SWP_INNER_L3_TYPE = 1 << 4,
MLX5_ETH_WQE_SWP_INNER_L4_TYPE = 1 << 5,
MLX5_ETH_WQE_SWP_INNER_L3_TYPE = 1 << 0,
MLX5_ETH_WQE_SWP_INNER_L4_TYPE = 1 << 1,
MLX5_ETH_WQE_SWP_OUTER_L3_TYPE = 1 << 4,
MLX5_ETH_WQE_SWP_OUTER_L4_TYPE = 1 << 5,
};
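A hedged example of the corrected bit layout (assuming the Ethernet segment below exposes a swp_flags byte, and that a set type bit selects IPv6/UDP as in the corresponding Linux definitions):
static inline void
mlx5_example_swp_inner_ipv6_udp(struct mlx5_wqe_eth_seg *eseg)
{
        /* Inner L3 header is IPv6 and inner L4 header is UDP. */
        eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_TYPE |
            MLX5_ETH_WQE_SWP_INNER_L4_TYPE;
}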
struct mlx5_wqe_eth_seg {
@@ -415,6 +426,42 @@ struct mlx5_stride_block_ctrl_seg {
__be16 num_entries;
};
enum mlx5_pagefault_flags {
MLX5_PFAULT_REQUESTOR = 1 << 0,
MLX5_PFAULT_WRITE = 1 << 1,
MLX5_PFAULT_RDMA = 1 << 2,
};
/* Contains the details of a pagefault. */
struct mlx5_pagefault {
u32 bytes_committed;
u8 event_subtype;
enum mlx5_pagefault_flags flags;
union {
/* Initiator or send message responder pagefault details. */
struct {
/* Received packet size, only valid for responders. */
u32 packet_size;
/*
* WQE index. Refers to either the send queue or
* receive queue, according to event_subtype.
*/
u16 wqe_index;
} wqe;
/* RDMA responder pagefault details */
struct {
u32 r_key;
/*
* Received packet size, minimal size page fault
* resolution required for forward progress.
*/
u32 packet_size;
u32 rdma_op_len;
u64 rdma_va;
} rdma;
};
};
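The union above is interpreted according to flags; a minimal dispatch sketch (handler bodies omitted, names hypothetical):
static inline void
mlx5_example_pfault_dispatch(const struct mlx5_pagefault *pfault)
{
        if (pfault->flags & MLX5_PFAULT_RDMA) {
                /* Resolve pfault->rdma.rdma_va .. rdma_va + rdma_op_len. */
        } else {
                /* Re-read the WQE at pfault->wqe.wqe_index and resolve it. */
        }
}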
struct mlx5_core_qp {
struct mlx5_core_rsc_common common; /* must be first */
void (*event) (struct mlx5_core_qp *, int);
@@ -462,7 +509,8 @@ struct mlx5_qp_context {
u8 reserved2[4];
__be32 next_send_psn;
__be32 cqn_send;
u8 reserved3[8];
__be32 deth_sqpn;
u8 reserved3[4];
__be32 last_acked_psn;
__be32 ssn;
__be32 params2;