Use the autogenerated interface file for all commands in mlx5core.

This patch accumulates the following Linux commits:
- 90b3e38d048f09b22fb50bcd460cea65fd00b2d7
  mlx5_core: Modify CQ moderation parameters
- 09a7d9eca1a6cf5eb4f9abfdf8914db9dbd96f08
  mlx5_core: QP/XRCD commands via mlx5 ifc
- 1a412fb1caa2c1b77719ccb5ed8b0c3c2bc65da7
  mlx5_core: Modify QP commands via mlx5 ifc
- ec22eb53106be1472ba6573dc900943f52f8fd1e
  mlx5_core: MKey/PSV commands via mlx5 ifc
- 73b626c182dff06867ceba996a819e8372c9b2ce
  mlx5_core: EQ commands via mlx5 ifc
- 20ed51c643b6296789a48adc3bc2cc875a1612cf
  mlx5_core: Access register and MAD IFC commands via mlx5 ifc
- a533ed5e179cd15512d40282617909d3482a771c
  mlx5_core: Pages management commands via mlx5 ifc
- b8a4ddb2e8f44f872fb93bbda2d541b27079fd2b
  mlx5_core: Add MLX5_ARRAY_SET64 to fix BUILD_BUG_ON
- af1ba291c5e498973cc325c501dd8da80b234571
  mlx5_core: Refactor internal SRQ API
- b06e7de8a9d8d1d540ec122bbdf2face2a211634
  mlx5_core: Refactor device capability function
- c4f287c4a6ac489c18afc4acc4353141a8c53070
  mlx5_core: Unify and improve command interface

Submitted by:	Matthew Finlay <matt@mellanox.com>
MFC after:	1 week
Sponsored by:	Mellanox Technologies
Author:	Hans Petter Selasky
Date:	2018-03-08 10:43:42 +00:00
parent ca55159467
commit 788333d9a6
29 changed files with 1093 additions and 2076 deletions
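The conversion follows a single pattern: the hand-written mailbox structures (struct mlx5_*_mbox_in/out) are replaced by plain u32 arrays sized with MLX5_ST_SZ_DW() and filled through the MLX5_SET()/MLX5_GET() accessors from the autogenerated interface file, and mlx5_cmd_exec() now checks the firmware status and syndrome itself. A minimal sketch of the new calling convention follows; the wrapper function is hypothetical, while the macros and the DESTROY_CQ command are taken from the diff below.

#include <dev/mlx5/device.h>	/* MLX5_SET()/MLX5_GET()/MLX5_ST_SZ_DW() */
#include <dev/mlx5/driver.h>	/* mlx5_cmd_exec() */

/*
 * Hypothetical wrapper showing the mlx5_ifc-based command style.
 * Only the helper name is invented; the macros and opcode appear
 * in the diff below.
 */
static int
example_destroy_cq(struct mlx5_core_dev *dev, u32 cqn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};

	/* Fields are written by name into the mlx5_ifc layout instead
	 * of into a hand-written mailbox struct. */
	MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
	MLX5_SET(destroy_cq_in, in, cqn, cqn);

	/* mlx5_cmd_exec() now converts a bad firmware status into an
	 * error code, so no separate mlx5_cmd_status_to_err() call. */
	return (mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)));
}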

@ -157,12 +157,12 @@ static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
int mlx5_init_cq_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev);
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
struct mlx5_create_cq_mbox_in *in, int inlen);
u32 *in, int inlen);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
struct mlx5_query_cq_mbox_out *out);
u32 *out, int outlen);
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
struct mlx5_modify_cq_mbox_in *in, int in_sz);
u32 *in, int inlen);
int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
struct mlx5_core_cq *cq, u16 cq_period,
u16 cq_max_count);

@ -92,12 +92,21 @@ __mlx5_mask(typ, fld))
___t; \
})
#define MLX5_SET64(typ, p, fld, v) do { \
#define __MLX5_SET64(typ, p, fld, v) do { \
BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
*((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
} while (0)
#define MLX5_SET64(typ, p, fld, v) do { \
BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
__MLX5_SET64(typ, p, fld, v); \
} while (0)
#define MLX5_ARRAY_SET64(typ, p, fld, idx, v) do { \
BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
__MLX5_SET64(typ, p, fld[idx], v); \
} while (0)
#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))
#define MLX5_GET64_BE(typ, p, fld) (*((__be64 *)(p) +\
@ -366,30 +375,6 @@ enum {
MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16
};
struct mlx5_inbox_hdr {
__be16 opcode;
u8 rsvd[4];
__be16 opmod;
};
struct mlx5_outbox_hdr {
u8 status;
u8 rsvd[3];
__be32 syndrome;
};
struct mlx5_cmd_set_dc_cnak_mbox_in {
struct mlx5_inbox_hdr hdr;
u8 enable;
u8 reserved[47];
__be64 pa;
};
struct mlx5_cmd_set_dc_cnak_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd[8];
};
struct mlx5_cmd_layout {
u8 type;
u8 rsvd0[3];
@ -405,7 +390,6 @@ struct mlx5_cmd_layout {
u8 status_own;
};
struct mlx5_health_buffer {
__be32 assert_var[5];
__be32 rsvd0[3];
@ -736,211 +720,6 @@ struct mlx5_cqe128 {
struct mlx5_cqe64 cqe64;
};
struct mlx5_srq_ctx {
u8 state_log_sz;
u8 rsvd0[3];
__be32 flags_xrcd;
__be32 pgoff_cqn;
u8 rsvd1[4];
u8 log_pg_sz;
u8 rsvd2[7];
__be32 pd;
__be16 lwm;
__be16 wqe_cnt;
u8 rsvd3[8];
__be64 db_record;
};
struct mlx5_create_srq_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 input_srqn;
u8 rsvd0[4];
struct mlx5_srq_ctx ctx;
u8 rsvd1[208];
__be64 pas[0];
};
struct mlx5_create_srq_mbox_out {
struct mlx5_outbox_hdr hdr;
__be32 srqn;
u8 rsvd[4];
};
struct mlx5_destroy_srq_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 srqn;
u8 rsvd[4];
};
struct mlx5_destroy_srq_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd[8];
};
struct mlx5_query_srq_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 srqn;
u8 rsvd0[4];
};
struct mlx5_query_srq_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd0[8];
struct mlx5_srq_ctx ctx;
u8 rsvd1[32];
__be64 pas[0];
};
struct mlx5_arm_srq_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 srqn;
__be16 rsvd;
__be16 lwm;
};
struct mlx5_arm_srq_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd[8];
};
struct mlx5_cq_context {
u8 status;
u8 cqe_sz_flags;
u8 st;
u8 rsvd3;
u8 rsvd4[6];
__be16 page_offset;
__be32 log_sz_usr_page;
__be16 cq_period;
__be16 cq_max_count;
__be16 rsvd20;
__be16 c_eqn;
u8 log_pg_sz;
u8 rsvd25[7];
__be32 last_notified_index;
__be32 solicit_producer_index;
__be32 consumer_counter;
__be32 producer_counter;
u8 rsvd48[8];
__be64 db_record_addr;
};
struct mlx5_create_cq_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 input_cqn;
u8 rsvdx[4];
struct mlx5_cq_context ctx;
u8 rsvd6[192];
__be64 pas[0];
};
struct mlx5_create_cq_mbox_out {
struct mlx5_outbox_hdr hdr;
__be32 cqn;
u8 rsvd0[4];
};
struct mlx5_destroy_cq_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 cqn;
u8 rsvd0[4];
};
struct mlx5_destroy_cq_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd0[8];
};
struct mlx5_query_cq_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 cqn;
u8 rsvd0[4];
};
struct mlx5_query_cq_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd0[8];
struct mlx5_cq_context ctx;
u8 rsvd6[16];
__be64 pas[0];
};
struct mlx5_modify_cq_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 cqn;
__be32 field_select;
struct mlx5_cq_context ctx;
u8 rsvd[192];
__be64 pas[0];
};
struct mlx5_modify_cq_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd[8];
};
struct mlx5_eq_context {
u8 status;
u8 ec_oi;
u8 st;
u8 rsvd2[7];
__be16 page_pffset;
__be32 log_sz_usr_page;
u8 rsvd3[7];
u8 intr;
u8 log_page_size;
u8 rsvd4[15];
__be32 consumer_counter;
__be32 produser_counter;
u8 rsvd5[16];
};
struct mlx5_create_eq_mbox_in {
struct mlx5_inbox_hdr hdr;
u8 rsvd0[3];
u8 input_eqn;
u8 rsvd1[4];
struct mlx5_eq_context ctx;
u8 rsvd2[8];
__be64 events_mask;
u8 rsvd3[176];
__be64 pas[0];
};
struct mlx5_create_eq_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd0[3];
u8 eq_number;
u8 rsvd1[4];
};
struct mlx5_map_eq_mbox_in {
struct mlx5_inbox_hdr hdr;
__be64 mask;
u8 mu;
u8 rsvd0[2];
u8 eqn;
u8 rsvd1[24];
};
struct mlx5_map_eq_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd[8];
};
struct mlx5_query_eq_mbox_in {
struct mlx5_inbox_hdr hdr;
u8 rsvd0[3];
u8 eqn;
u8 rsvd1[4];
};
struct mlx5_query_eq_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd[8];
struct mlx5_eq_context ctx;
};
enum {
MLX5_MKEY_STATUS_FREE = 1 << 6,
};
@ -967,123 +746,12 @@ struct mlx5_mkey_seg {
u8 rsvd4[4];
};
struct mlx5_query_special_ctxs_mbox_in {
struct mlx5_inbox_hdr hdr;
u8 rsvd[8];
};
struct mlx5_query_special_ctxs_mbox_out {
struct mlx5_outbox_hdr hdr;
__be32 dump_fill_mkey;
__be32 reserved_lkey;
};
struct mlx5_create_mkey_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 input_mkey_index;
__be32 flags;
struct mlx5_mkey_seg seg;
u8 rsvd1[16];
__be32 xlat_oct_act_size;
__be32 rsvd2;
u8 rsvd3[168];
__be64 pas[0];
};
struct mlx5_create_mkey_mbox_out {
struct mlx5_outbox_hdr hdr;
__be32 mkey;
u8 rsvd[4];
};
struct mlx5_query_mkey_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 mkey;
};
struct mlx5_query_mkey_mbox_out {
struct mlx5_outbox_hdr hdr;
__be64 pas[0];
};
struct mlx5_modify_mkey_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 mkey;
__be64 pas[0];
};
struct mlx5_modify_mkey_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd[8];
};
struct mlx5_dump_mkey_mbox_in {
struct mlx5_inbox_hdr hdr;
};
struct mlx5_dump_mkey_mbox_out {
struct mlx5_outbox_hdr hdr;
__be32 mkey;
};
struct mlx5_mad_ifc_mbox_in {
struct mlx5_inbox_hdr hdr;
__be16 remote_lid;
u8 rsvd0;
u8 port;
u8 rsvd1[4];
u8 data[256];
};
struct mlx5_mad_ifc_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd[8];
u8 data[256];
};
struct mlx5_access_reg_mbox_in {
struct mlx5_inbox_hdr hdr;
u8 rsvd0[2];
__be16 register_id;
__be32 arg;
__be32 data[0];
};
struct mlx5_access_reg_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd[8];
__be32 data[0];
};
#define MLX5_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90)
enum {
MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0
};
struct mlx5_allocate_psv_in {
struct mlx5_inbox_hdr hdr;
__be32 npsv_pd;
__be32 rsvd_psv0;
};
struct mlx5_allocate_psv_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd[8];
__be32 psv_idx[4];
};
struct mlx5_destroy_psv_in {
struct mlx5_inbox_hdr hdr;
__be32 psv_number;
u8 rsvd[4];
};
struct mlx5_destroy_psv_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd[8];
};
static inline int mlx5_host_is_le(void)
{
#if defined(__LITTLE_ENDIAN)

@ -41,6 +41,7 @@
#include <dev/mlx5/device.h>
#include <dev/mlx5/doorbell.h>
#include <dev/mlx5/srq.h>
#define MLX5_QCOUNTER_SETS_NETDEV 64
#define MLX5_MAX_NUMBER_OF_VFS 128
@ -856,10 +857,8 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
int mlx5_cmd_status_to_err_v2(void *ptr);
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
enum mlx5_cap_mode cap_mode);
void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
@ -883,23 +882,26 @@ int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_create_srq_mbox_in *in, int inlen,
int is_xrc);
struct mlx5_srq_attr *in);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_query_srq_mbox_out *out);
struct mlx5_srq_attr *out);
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq);
void mlx5_init_mr_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mr_table(struct mlx5_core_dev *dev);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
struct mlx5_create_mkey_mbox_in *in, int inlen,
mlx5_cmd_cbk_t callback, void *context,
struct mlx5_create_mkey_mbox_out *out);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
struct mlx5_query_mkey_mbox_out *out, int outlen);
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
struct mlx5_core_mr *mkey,
u32 *in, int inlen,
u32 *out, int outlen,
mlx5_cmd_cbk_t callback, void *context);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
struct mlx5_core_mr *mr,
u32 *in, int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mkey,
u32 *out, int outlen);
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
u32 *mkey);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
@ -954,7 +956,7 @@ void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
struct mlx5_query_eq_mbox_out *out, int outlen);
u32 *out, int outlen);
int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);

@ -75,6 +75,26 @@ enum {
MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10,
};
struct mlx5_ifc_mbox_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
u8 syndrome[0x20];
u8 reserved_at_40[0x40];
};
struct mlx5_ifc_mbox_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 reserved_at_40[0x40];
};
static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
struct mlx5_cmd_msg *in,
int uin_size,
@ -588,11 +608,105 @@ const char *mlx5_command_str(int command)
}
}
static const char *cmd_status_str(u8 status)
{
switch (status) {
case MLX5_CMD_STAT_OK:
return "OK";
case MLX5_CMD_STAT_INT_ERR:
return "internal error";
case MLX5_CMD_STAT_BAD_OP_ERR:
return "bad operation";
case MLX5_CMD_STAT_BAD_PARAM_ERR:
return "bad parameter";
case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
return "bad system state";
case MLX5_CMD_STAT_BAD_RES_ERR:
return "bad resource";
case MLX5_CMD_STAT_RES_BUSY:
return "resource busy";
case MLX5_CMD_STAT_LIM_ERR:
return "limits exceeded";
case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
return "bad resource state";
case MLX5_CMD_STAT_IX_ERR:
return "bad index";
case MLX5_CMD_STAT_NO_RES_ERR:
return "no resources";
case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
return "bad input length";
case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
return "bad output length";
case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
return "bad QP state";
case MLX5_CMD_STAT_BAD_PKT_ERR:
return "bad packet (discarded)";
case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
return "bad size too many outstanding CQEs";
default:
return "unknown status";
}
}
static int cmd_status_to_err_helper(u8 status)
{
switch (status) {
case MLX5_CMD_STAT_OK: return 0;
case MLX5_CMD_STAT_INT_ERR: return -EIO;
case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL;
case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL;
case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO;
case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL;
case MLX5_CMD_STAT_RES_BUSY: return -EBUSY;
case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM;
case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL;
case MLX5_CMD_STAT_IX_ERR: return -EINVAL;
case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN;
case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO;
case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO;
case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL;
case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL;
case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL;
default: return -EIO;
}
}
void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome)
{
*status = MLX5_GET(mbox_out, out, status);
*syndrome = MLX5_GET(mbox_out, out, syndrome);
}
static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out)
{
u32 syndrome;
u8 status;
u16 opcode;
u16 op_mod;
mlx5_cmd_mbox_status(out, &status, &syndrome);
if (!status)
return 0;
opcode = MLX5_GET(mbox_in, in, opcode);
op_mod = MLX5_GET(mbox_in, in, op_mod);
mlx5_core_err(dev,
"%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
mlx5_command_str(opcode),
opcode, op_mod,
cmd_status_str(status),
status,
syndrome);
return cmd_status_to_err_helper(status);
}
static void dump_command(struct mlx5_core_dev *dev,
struct mlx5_cmd_work_ent *ent, int input)
{
u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode);
struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode);
size_t i;
int data_only;
int offset = 0;
@ -654,9 +768,7 @@ static void dump_command(struct mlx5_core_dev *dev,
static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
return be16_to_cpu(hdr->opcode);
return MLX5_GET(mbox_in, in->first.data, opcode);
}
static void cb_timeout_handler(struct work_struct *work)
@ -676,173 +788,6 @@ static void cb_timeout_handler(struct work_struct *work)
mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
}
static int set_internal_err_outbox(struct mlx5_core_dev *dev, u16 opcode,
struct mlx5_outbox_hdr *hdr)
{
hdr->status = 0;
hdr->syndrome = 0;
switch (opcode) {
case MLX5_CMD_OP_TEARDOWN_HCA:
case MLX5_CMD_OP_DISABLE_HCA:
case MLX5_CMD_OP_MANAGE_PAGES:
case MLX5_CMD_OP_DESTROY_MKEY:
case MLX5_CMD_OP_DESTROY_EQ:
case MLX5_CMD_OP_DESTROY_CQ:
case MLX5_CMD_OP_DESTROY_QP:
case MLX5_CMD_OP_DESTROY_PSV:
case MLX5_CMD_OP_DESTROY_SRQ:
case MLX5_CMD_OP_DESTROY_XRC_SRQ:
case MLX5_CMD_OP_DESTROY_DCT:
case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
case MLX5_CMD_OP_DEALLOC_PD:
case MLX5_CMD_OP_DEALLOC_UAR:
case MLX5_CMD_OP_DETACH_FROM_MCG:
case MLX5_CMD_OP_DEALLOC_XRCD:
case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
case MLX5_CMD_OP_DESTROY_LAG:
case MLX5_CMD_OP_DESTROY_VPORT_LAG:
case MLX5_CMD_OP_DESTROY_TIR:
case MLX5_CMD_OP_DESTROY_SQ:
case MLX5_CMD_OP_DESTROY_RQ:
case MLX5_CMD_OP_DESTROY_RMP:
case MLX5_CMD_OP_DESTROY_TIS:
case MLX5_CMD_OP_DESTROY_RQT:
case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
case MLX5_CMD_OP_2ERR_QP:
case MLX5_CMD_OP_2RST_QP:
case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
case MLX5_CMD_OP_MODIFY_VPORT_STATE:
case MLX5_CMD_OP_MODIFY_SQ:
case MLX5_CMD_OP_MODIFY_RQ:
case MLX5_CMD_OP_MODIFY_TIS:
case MLX5_CMD_OP_MODIFY_LAG:
case MLX5_CMD_OP_MODIFY_TIR:
case MLX5_CMD_OP_MODIFY_RMP:
case MLX5_CMD_OP_MODIFY_RQT:
case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
case MLX5_CMD_OP_MODIFY_CONG_STATUS:
case MLX5_CMD_OP_MODIFY_CQ:
case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
case MLX5_CMD_OP_MODIFY_OTHER_HCA_CAP:
case MLX5_CMD_OP_ACCESS_REG:
case MLX5_CMD_OP_DRAIN_DCT:
return 0;
case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
case MLX5_CMD_OP_ALLOC_PD:
case MLX5_CMD_OP_ALLOC_Q_COUNTER:
case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
case MLX5_CMD_OP_ALLOC_UAR:
case MLX5_CMD_OP_ALLOC_XRCD:
case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
case MLX5_CMD_OP_ARM_RQ:
case MLX5_CMD_OP_ARM_XRC_SRQ:
case MLX5_CMD_OP_ATTACH_TO_MCG:
case MLX5_CMD_OP_CONFIG_INT_MODERATION:
case MLX5_CMD_OP_CREATE_CQ:
case MLX5_CMD_OP_CREATE_DCT:
case MLX5_CMD_OP_CREATE_EQ:
case MLX5_CMD_OP_CREATE_FLOW_GROUP:
case MLX5_CMD_OP_CREATE_FLOW_TABLE:
case MLX5_CMD_OP_CREATE_LAG:
case MLX5_CMD_OP_CREATE_MKEY:
case MLX5_CMD_OP_CREATE_PSV:
case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
case MLX5_CMD_OP_CREATE_QP:
case MLX5_CMD_OP_CREATE_RMP:
case MLX5_CMD_OP_CREATE_RQ:
case MLX5_CMD_OP_CREATE_RQT:
case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
case MLX5_CMD_OP_CREATE_SQ:
case MLX5_CMD_OP_CREATE_SRQ:
case MLX5_CMD_OP_CREATE_TIR:
case MLX5_CMD_OP_CREATE_TIS:
case MLX5_CMD_OP_CREATE_VPORT_LAG:
case MLX5_CMD_OP_CREATE_XRC_SRQ:
case MLX5_CMD_OP_ENABLE_HCA:
case MLX5_CMD_OP_GEN_EQE:
case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
case MLX5_CMD_OP_INIT2INIT_QP:
case MLX5_CMD_OP_INIT2RTR_QP:
case MLX5_CMD_OP_INIT_HCA:
case MLX5_CMD_OP_MAD_IFC:
case MLX5_CMD_OP_NOP:
case MLX5_CMD_OP_PAGE_FAULT_RESUME:
case MLX5_CMD_OP_QUERY_ADAPTER:
case MLX5_CMD_OP_QUERY_CONG_PARAMS:
case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
case MLX5_CMD_OP_QUERY_CONG_STATUS:
case MLX5_CMD_OP_QUERY_CQ:
case MLX5_CMD_OP_QUERY_DCT:
case MLX5_CMD_OP_QUERY_EQ:
case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
case MLX5_CMD_OP_QUERY_FLOW_GROUP:
case MLX5_CMD_OP_QUERY_FLOW_TABLE:
case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
case MLX5_CMD_OP_QUERY_HCA_CAP:
case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
case MLX5_CMD_OP_QUERY_ISSI:
case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
case MLX5_CMD_OP_QUERY_LAG:
case MLX5_CMD_OP_QUERY_MAD_DEMUX:
case MLX5_CMD_OP_QUERY_MKEY:
case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
case MLX5_CMD_OP_QUERY_OTHER_HCA_CAP:
case MLX5_CMD_OP_QUERY_PAGES:
case MLX5_CMD_OP_QUERY_QP:
case MLX5_CMD_OP_QUERY_Q_COUNTER:
case MLX5_CMD_OP_QUERY_RMP:
case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
case MLX5_CMD_OP_QUERY_RQ:
case MLX5_CMD_OP_QUERY_RQT:
case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
case MLX5_CMD_OP_QUERY_SQ:
case MLX5_CMD_OP_QUERY_SRQ:
case MLX5_CMD_OP_QUERY_TIR:
case MLX5_CMD_OP_QUERY_TIS:
case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
case MLX5_CMD_OP_QUERY_VPORT_STATE:
case MLX5_CMD_OP_QUERY_XRC_SRQ:
case MLX5_CMD_OP_RST2INIT_QP:
case MLX5_CMD_OP_RTR2RTS_QP:
case MLX5_CMD_OP_RTS2RTS_QP:
case MLX5_CMD_OP_SET_DC_CNAK_TRACE:
case MLX5_CMD_OP_SET_HCA_CAP:
case MLX5_CMD_OP_SET_ISSI:
case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
case MLX5_CMD_OP_SET_MAD_DEMUX:
case MLX5_CMD_OP_SET_ROCE_ADDRESS:
case MLX5_CMD_OP_SQD_RTS_QP:
case MLX5_CMD_OP_SQERR2RTS_QP:
hdr->status = MLX5_CMD_STAT_INT_ERR;
hdr->syndrome = 0xFFFFFFFF;
return -ECANCELED;
default:
mlx5_core_err(dev, "Unknown FW command (%d)\n", opcode);
return -EINVAL;
}
}
static void complete_command(struct mlx5_cmd_work_ent *ent)
{
struct mlx5_cmd *cmd = ent->cmd;
@ -863,15 +808,12 @@ static void complete_command(struct mlx5_cmd_work_ent *ent)
sem = &cmd->sem;
if (dev->state != MLX5_DEVICE_STATE_UP) {
struct mlx5_outbox_hdr *out_hdr =
(struct mlx5_outbox_hdr *)ent->out;
struct mlx5_inbox_hdr *in_hdr =
(struct mlx5_inbox_hdr *)(ent->in->first.data);
u16 opcode = be16_to_cpu(in_hdr->opcode);
u8 status = 0;
u32 drv_synd;
ent->ret = set_internal_err_outbox(dev,
opcode,
out_hdr);
ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status);
MLX5_SET(mbox_out, ent->out, status, status);
MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
}
if (ent->callback) {
@ -887,10 +829,14 @@ static void complete_command(struct mlx5_cmd_work_ent *ent)
callback = ent->callback;
context = ent->context;
err = ent->ret;
if (!err)
if (!err) {
err = mlx5_copy_from_msg(ent->uout,
ent->out,
ent->uout_size);
err = err ? err : mlx5_cmd_check(dev,
ent->in->first.data,
ent->uout);
}
mlx5_free_cmd_msg(dev, ent->out);
free_msg(dev, ent->in);
@ -1014,16 +960,6 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
return err;
}
static __be32 *get_synd_ptr(struct mlx5_outbox_hdr *out)
{
return &out->syndrome;
}
static u8 *get_status_ptr(struct mlx5_outbox_hdr *out)
{
return &out->status;
}
/* Notes:
* 1. Callback functions may not sleep
* 2. page queue commands do not support asynchrous completion
@ -1070,7 +1006,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
goto out;
ds = ent->ts2 - ent->ts1;
op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
op = MLX5_GET(mbox_in, in->first.data, opcode);
if (op < ARRAY_SIZE(cmd->stats)) {
stats = &cmd->stats[op];
spin_lock_irq(&stats->lock);
@ -1315,14 +1251,9 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
return msg;
}
static u16 opcode_from_in(struct mlx5_inbox_hdr *in)
static int is_manage_pages(void *in)
{
return be16_to_cpu(in->opcode);
}
static int is_manage_pages(struct mlx5_inbox_hdr *in)
{
return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}
static int cmd_exec_helper(struct mlx5_core_dev *dev,
@ -1340,9 +1271,10 @@ static int cmd_exec_helper(struct mlx5_core_dev *dev,
if (pci_channel_offline(dev->pdev) ||
dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
err = mlx5_internal_err_ret_value(dev, opcode_from_in(in), &drv_synd, &status);
*get_synd_ptr(out) = cpu_to_be32(drv_synd);
*get_status_ptr(out) = status;
u16 opcode = MLX5_GET(mbox_in, in, opcode);
err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
MLX5_SET(mbox_out, out, status, status);
MLX5_SET(mbox_out, out, syndrome, drv_synd);
return err;
}
@ -1396,7 +1328,10 @@ static int cmd_exec_helper(struct mlx5_core_dev *dev,
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size)
{
return cmd_exec_helper(dev, in, in_size, out, out_size, NULL, NULL);
int err;
err = cmd_exec_helper(dev, in, in_size, out, out_size, NULL, NULL);
return err ? : mlx5_cmd_check(dev, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec);
@ -1631,94 +1566,3 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
free_cmd_page(dev, cmd);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);
static const char *cmd_status_str(u8 status)
{
switch (status) {
case MLX5_CMD_STAT_OK:
return "OK";
case MLX5_CMD_STAT_INT_ERR:
return "internal error";
case MLX5_CMD_STAT_BAD_OP_ERR:
return "bad operation";
case MLX5_CMD_STAT_BAD_PARAM_ERR:
return "bad parameter";
case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
return "bad system state";
case MLX5_CMD_STAT_BAD_RES_ERR:
return "bad resource";
case MLX5_CMD_STAT_RES_BUSY:
return "resource busy";
case MLX5_CMD_STAT_LIM_ERR:
return "limits exceeded";
case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
return "bad resource state";
case MLX5_CMD_STAT_IX_ERR:
return "bad index";
case MLX5_CMD_STAT_NO_RES_ERR:
return "no resources";
case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
return "bad input length";
case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
return "bad output length";
case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
return "bad QP state";
case MLX5_CMD_STAT_BAD_PKT_ERR:
return "bad packet (discarded)";
case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
return "bad size too many outstanding CQEs";
default:
return "unknown status";
}
}
static int cmd_status_to_err_helper(u8 status)
{
switch (status) {
case MLX5_CMD_STAT_OK: return 0;
case MLX5_CMD_STAT_INT_ERR: return -EIO;
case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL;
case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL;
case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO;
case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL;
case MLX5_CMD_STAT_RES_BUSY: return -EBUSY;
case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM;
case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL;
case MLX5_CMD_STAT_IX_ERR: return -EINVAL;
case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN;
case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO;
case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO;
case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL;
case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL;
case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL;
default: return -EIO;
}
}
/* this will be available till all the commands use set/get macros */
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
{
if (!hdr->status)
return 0;
printf("mlx5_core: WARN: ""command failed, status %s(0x%x), syndrome 0x%x\n", cmd_status_str(hdr->status), hdr->status, be32_to_cpu(hdr->syndrome));
return cmd_status_to_err_helper(hdr->status);
}
int mlx5_cmd_status_to_err_v2(void *ptr)
{
u32 syndrome;
u8 status;
status = be32_to_cpu(*(__be32 *)ptr) >> 24;
if (!status)
return 0;
syndrome = be32_to_cpu(*(__be32 *)(ptr + 4));
printf("mlx5_core: WARN: ""command failed, status %s(0x%x), syndrome 0x%x\n", cmd_status_str(status), status, syndrome);
return cmd_status_to_err_helper(status);
}

@ -78,21 +78,6 @@ void mlx5_disable_device(struct mlx5_core_dev *dev);
void mlx5e_init(void);
void mlx5e_cleanup(void);
static inline int mlx5_cmd_exec_check_status(struct mlx5_core_dev *dev, u32 *in,
int in_size, u32 *out,
int out_size)
{
int err;
err = mlx5_cmd_exec(dev, in, in_size, out, out_size);
if (err) {
return err;
}
err = mlx5_cmd_status_to_err((struct mlx5_outbox_hdr *)out);
return err;
}
int mlx5_rename_eq(struct mlx5_core_dev *dev, int eq_ix, char *name);
#endif /* __MLX5_CORE_H__ */

@ -100,24 +100,20 @@ void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
struct mlx5_create_cq_mbox_in *in, int inlen)
u32 *in, int inlen)
{
int err;
struct mlx5_cq_table *table = &dev->priv.cq_table;
struct mlx5_create_cq_mbox_out out;
struct mlx5_destroy_cq_mbox_in din;
struct mlx5_destroy_cq_mbox_out dout;
u32 out[MLX5_ST_SZ_DW(create_cq_out)] = {0};
u32 din[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};
int err;
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_CQ);
memset(&out, 0, sizeof(out));
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
cq->cqn = be32_to_cpu(out.cqn) & 0xffffff;
cq->cqn = MLX5_GET(create_cq_out, out, cqn);
cq->cons_index = 0;
cq->arm_sn = 0;
atomic_set(&cq->refcount, 1);
@ -143,11 +139,9 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
return 0;
err_cmd:
memset(&din, 0, sizeof(din));
memset(&dout, 0, sizeof(dout));
din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ);
din.cqn = cpu_to_be32(cq->cqn);
mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));
MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
MLX5_SET(destroy_cq_in, din, cqn, cq->cqn);
mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
return err;
}
EXPORT_SYMBOL(mlx5_core_create_cq);
@ -155,8 +149,8 @@ EXPORT_SYMBOL(mlx5_core_create_cq);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
struct mlx5_cq_table *table = &dev->priv.cq_table;
struct mlx5_destroy_cq_mbox_in in;
struct mlx5_destroy_cq_mbox_out out;
u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};
u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
struct mlx5_core_cq *tmp;
int err;
@ -181,19 +175,12 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
return -EINVAL;
}
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ);
in.cqn = cpu_to_be32(cq->cqn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
goto out;
if (out.hdr.status) {
err = mlx5_cmd_status_to_err(&out.hdr);
goto out;
}
synchronize_irq(cq->irqn);
if (atomic_dec_and_test(&cq->refcount))
@ -207,44 +194,25 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
EXPORT_SYMBOL(mlx5_core_destroy_cq);
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
struct mlx5_query_cq_mbox_out *out)
u32 *out, int outlen)
{
struct mlx5_query_cq_mbox_in in;
int err;
u32 in[MLX5_ST_SZ_DW(query_cq_in)] = {0};
memset(&in, 0, sizeof(in));
memset(out, 0, sizeof(*out));
MLX5_SET(query_cq_in, in, opcode, MLX5_CMD_OP_QUERY_CQ);
MLX5_SET(query_cq_in, in, cqn, cq->cqn);
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_CQ);
in.cqn = cpu_to_be32(cq->cqn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
if (err)
return err;
if (out->hdr.status)
return mlx5_cmd_status_to_err(&out->hdr);
return err;
return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL(mlx5_core_query_cq);
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
struct mlx5_modify_cq_mbox_in *in, int in_sz)
u32 *in, int inlen)
{
struct mlx5_modify_cq_mbox_out out;
int err;
u32 out[MLX5_ST_SZ_DW(modify_cq_out)] = {0};
memset(&out, 0, sizeof(out));
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MODIFY_CQ);
err = mlx5_cmd_exec(dev, in, in_sz, &out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
return 0;
MLX5_SET(modify_cq_in, in, opcode, MLX5_CMD_OP_MODIFY_CQ);
return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_modify_cq);
@ -253,17 +221,18 @@ int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
u16 cq_period,
u16 cq_max_count)
{
struct mlx5_modify_cq_mbox_in in;
u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {0};
void *cqc;
memset(&in, 0, sizeof(in));
MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
MLX5_SET(cqc, cqc, cq_period, cq_period);
MLX5_SET(cqc, cqc, cq_max_count, cq_max_count);
MLX5_SET(modify_cq_in, in,
modify_field_select_resize_field_select.modify_field_select.modify_field_select,
MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);
in.cqn = cpu_to_be32(cq->cqn);
in.ctx.cq_period = cpu_to_be16(cq_period);
in.ctx.cq_max_count = cpu_to_be16(cq_max_count);
in.field_select = cpu_to_be32(MLX5_CQ_MODIFY_PERIOD |
MLX5_CQ_MODIFY_COUNT);
return mlx5_core_modify_cq(dev, cq, &in, sizeof(in));
return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
}
int mlx5_core_modify_cq_moderation_mode(struct mlx5_core_dev *dev,
@ -272,20 +241,19 @@ int mlx5_core_modify_cq_moderation_mode(struct mlx5_core_dev *dev,
u16 cq_max_count,
u8 cq_mode)
{
struct mlx5_modify_cq_mbox_in in;
u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {0};
void *cqc;
memset(&in, 0, sizeof(in));
MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
MLX5_SET(cqc, cqc, cq_period, cq_period);
MLX5_SET(cqc, cqc, cq_max_count, cq_max_count);
MLX5_SET(cqc, cqc, cq_period_mode, cq_mode);
MLX5_SET(modify_cq_in, in,
modify_field_select_resize_field_select.modify_field_select.modify_field_select,
MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT | MLX5_CQ_MODIFY_PERIOD_MODE);
in.cqn = cpu_to_be32(cq->cqn);
in.ctx.cq_period = cpu_to_be16(cq_period);
in.ctx.cq_max_count = cpu_to_be16(cq_max_count);
in.ctx.cqe_sz_flags = (cq_mode & 2) >> 1;
in.ctx.st = (cq_mode & 1) << 7;
in.field_select = cpu_to_be32(MLX5_CQ_MODIFY_PERIOD |
MLX5_CQ_MODIFY_COUNT |
MLX5_CQ_MODIFY_PERIOD_MODE);
return mlx5_core_modify_cq(dev, cq, &in, sizeof(in));
return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
}
int mlx5_init_cq_table(struct mlx5_core_dev *dev)

@ -85,17 +85,13 @@ static void mlx5_port_general_notification_event(struct mlx5_core_dev *dev,
static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
u32 in[MLX5_ST_SZ_DW(destroy_eq_in)];
u32 out[MLX5_ST_SZ_DW(destroy_eq_out)];
memset(in, 0, sizeof(in));
u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {0};
u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};
MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
MLX5_SET(destroy_eq_in, in, eq_number, eqn);
memset(out, 0, sizeof(out));
return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
out, sizeof(out));
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
@ -391,11 +387,13 @@ static void init_eq_buf(struct mlx5_eq *eq)
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
int nent, u64 mask, const char *name, struct mlx5_uar *uar)
{
u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
struct mlx5_priv *priv = &dev->priv;
struct mlx5_create_eq_mbox_in *in;
struct mlx5_create_eq_mbox_out out;
int err;
__be64 *pas;
void *eqc;
int inlen;
u32 *in;
int err;
eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
eq->cons_index = 0;
@ -406,32 +404,32 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
init_eq_buf(eq);
inlen = sizeof(*in) + sizeof(in->pas[0]) * eq->buf.npages;
inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;
in = mlx5_vzalloc(inlen);
if (!in) {
err = -ENOMEM;
goto err_buf;
}
memset(&out, 0, sizeof(out));
mlx5_fill_page_array(&eq->buf, in->pas);
pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
mlx5_fill_page_array(&eq->buf, pas);
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_EQ);
in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(eq->nent) << 24 | uar->index);
in->ctx.intr = vecidx;
in->ctx.log_page_size = eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
in->events_mask = cpu_to_be64(mask);
MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
MLX5_SET64(create_eq_in, in, event_bitmask, mask);
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
MLX5_SET(eqc, eqc, uar_page, uar->index);
MLX5_SET(eqc, eqc, intr, vecidx);
MLX5_SET(eqc, eqc, log_page_size,
eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (err)
goto err_in;
if (out.hdr.status) {
err = mlx5_cmd_status_to_err(&out.hdr);
goto err_in;
}
eq->eqn = out.eq_number;
eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
eq->irqn = vecidx;
eq->dev = dev;
eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
@ -585,25 +583,16 @@ int mlx5_stop_eqs(struct mlx5_core_dev *dev)
}
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
struct mlx5_query_eq_mbox_out *out, int outlen)
u32 *out, int outlen)
{
struct mlx5_query_eq_mbox_in in;
int err;
u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {0};
memset(&in, 0, sizeof(in));
memset(out, 0, outlen);
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_EQ);
in.eqn = eq->eqn;
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
if (err)
return err;
MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
if (out->hdr.status)
err = mlx5_cmd_status_to_err(&out->hdr);
return err;
return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL_GPL(mlx5_core_eq_query);
static const char *mlx5_port_module_event_error_type_to_string(u8 error_type)

@ -38,21 +38,18 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
enum fs_ft_type type,
unsigned int id)
{
u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)];
u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)];
u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0};
u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
if (!dev)
return -EINVAL;
memset(in, 0, sizeof(in));
MLX5_SET(set_flow_table_root_in, in, opcode,
MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
MLX5_SET(set_flow_table_root_in, in, table_type, type);
MLX5_SET(set_flow_table_root_in, in, table_id, id);
memset(out, 0, sizeof(out));
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
sizeof(out));
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_cmd_fs_create_ft(struct mlx5_core_dev *dev,
@ -60,13 +57,12 @@ int mlx5_cmd_fs_create_ft(struct mlx5_core_dev *dev,
enum fs_ft_type type, unsigned int level,
unsigned int log_size, unsigned int *table_id)
{
u32 in[MLX5_ST_SZ_DW(create_flow_table_in)];
u32 out[MLX5_ST_SZ_DW(create_flow_table_out)];
u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
int err;
if (!dev)
return -EINVAL;
memset(in, 0, sizeof(in));
MLX5_SET(create_flow_table_in, in, opcode,
MLX5_CMD_OP_CREATE_FLOW_TABLE);
@ -80,28 +76,22 @@ int mlx5_cmd_fs_create_ft(struct mlx5_core_dev *dev,
MLX5_SET(create_flow_table_in, in, other_vport, 1);
}
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
sizeof(out));
if (err)
return err;
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (!err)
*table_id = MLX5_GET(create_flow_table_out, out, table_id);
*table_id = MLX5_GET(create_flow_table_out, out, table_id);
return 0;
return err;
}
int mlx5_cmd_fs_destroy_ft(struct mlx5_core_dev *dev,
u16 vport,
enum fs_ft_type type, unsigned int table_id)
{
u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)];
u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)];
u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0};
u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0};
if (!dev)
return -EINVAL;
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
MLX5_SET(destroy_flow_table_in, in, opcode,
MLX5_CMD_OP_DESTROY_FLOW_TABLE);
@ -112,7 +102,7 @@ int mlx5_cmd_fs_destroy_ft(struct mlx5_core_dev *dev,
MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
}
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_cmd_fs_create_fg(struct mlx5_core_dev *dev,
@ -121,12 +111,11 @@ int mlx5_cmd_fs_create_fg(struct mlx5_core_dev *dev,
enum fs_ft_type type, unsigned int table_id,
unsigned int *group_id)
{
u32 out[MLX5_ST_SZ_DW(create_flow_group_out)];
u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
int err;
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
if (!dev)
return -EINVAL;
memset(out, 0, sizeof(out));
MLX5_SET(create_flow_group_in, in, opcode,
MLX5_CMD_OP_CREATE_FLOW_GROUP);
@ -137,9 +126,7 @@ int mlx5_cmd_fs_create_fg(struct mlx5_core_dev *dev,
MLX5_SET(create_flow_group_in, in, other_vport, 1);
}
err = mlx5_cmd_exec_check_status(dev, in,
inlen, out,
sizeof(out));
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*group_id = MLX5_GET(create_flow_group_out, out, group_id);
@ -151,13 +138,11 @@ int mlx5_cmd_fs_destroy_fg(struct mlx5_core_dev *dev,
enum fs_ft_type type, unsigned int table_id,
unsigned int group_id)
{
u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)];
u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)];
u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {0};
u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0};
if (!dev)
return -EINVAL;
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
MLX5_SET(destroy_flow_group_in, in, opcode,
MLX5_CMD_OP_DESTROY_FLOW_GROUP);
@ -169,7 +154,7 @@ int mlx5_cmd_fs_destroy_fg(struct mlx5_core_dev *dev,
MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
}
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_cmd_fs_set_fte(struct mlx5_core_dev *dev,
@ -182,7 +167,7 @@ int mlx5_cmd_fs_set_fte(struct mlx5_core_dev *dev,
unsigned short action, int dest_size,
struct list_head *dests) /* mlx5_flow_desination */
{
u32 out[MLX5_ST_SZ_DW(set_fte_out)];
u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
u32 *in;
unsigned int inlen;
struct mlx5_flow_rule *dst;
@ -256,9 +241,8 @@ int mlx5_cmd_fs_set_fte(struct mlx5_core_dev *dev,
in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
}
}
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(dev, in, inlen, out,
sizeof(out));
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*fte_status |= FS_FTE_STATUS_EXISTING;
@ -273,8 +257,8 @@ int mlx5_cmd_fs_delete_fte(struct mlx5_core_dev *dev,
enum fs_ft_type type, unsigned int table_id,
unsigned int index)
{
u32 in[MLX5_ST_SZ_DW(delete_fte_in)];
u32 out[MLX5_ST_SZ_DW(delete_fte_out)];
u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0};
u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0};
int err;
if (!(*fte_status & FS_FTE_STATUS_EXISTING))
@ -282,8 +266,6 @@ int mlx5_cmd_fs_delete_fte(struct mlx5_core_dev *dev,
if (!dev)
return -EINVAL;
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
MLX5_SET(delete_fte_in, in, table_type, type);
@ -294,7 +276,7 @@ int mlx5_cmd_fs_delete_fte(struct mlx5_core_dev *dev,
MLX5_SET(delete_fte_in, in, other_vport, 1);
}
err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (!err)
*fte_status = 0;

@ -39,7 +39,7 @@ static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
return err;
}
@ -100,8 +100,7 @@ static int mlx5_core_query_special_contexts(struct mlx5_core_dev *dev)
MLX5_SET(query_special_contexts_in, in, opcode,
MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
sizeof(out));
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
@ -115,54 +114,30 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
{
int err;
err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
if (err)
return err;
if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS,
HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS,
HCA_CAP_OPMOD_GET_MAX);
err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, pg)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_ODP,
HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
err = mlx5_core_get_caps(dev, MLX5_CAP_ODP,
HCA_CAP_OPMOD_GET_MAX);
err = mlx5_core_get_caps(dev, MLX5_CAP_ODP);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, atomic)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
HCA_CAP_OPMOD_GET_MAX);
err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, roce)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE,
HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE,
HCA_CAP_OPMOD_GET_MAX);
err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE);
if (err)
return err;
}
@ -172,79 +147,44 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
MLX5_CAP_GEN(dev, nic_flow_table)) ||
(MLX5_CAP_GEN(dev, port_type) == MLX5_CMD_HCA_CAP_PORT_TYPE_IB &&
MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) {
err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE,
HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE,
HCA_CAP_OPMOD_GET_MAX);
err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE);
if (err)
return err;
}
if (
MLX5_CAP_GEN(dev, eswitch_flow_table)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE,
HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE,
HCA_CAP_OPMOD_GET_MAX);
err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, vport_group_manager)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH,
HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH,
HCA_CAP_OPMOD_GET_MAX);
err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, snapshot)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_SNAPSHOT,
HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
err = mlx5_core_get_caps(dev, MLX5_CAP_SNAPSHOT,
HCA_CAP_OPMOD_GET_MAX);
err = mlx5_core_get_caps(dev, MLX5_CAP_SNAPSHOT);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_EOIB_OFFLOADS,
HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
err = mlx5_core_get_caps(dev, MLX5_CAP_EOIB_OFFLOADS,
HCA_CAP_OPMOD_GET_MAX);
err = mlx5_core_get_caps(dev, MLX5_CAP_EOIB_OFFLOADS);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, debug)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_DEBUG,
HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
err = mlx5_core_get_caps(dev, MLX5_CAP_DEBUG,
HCA_CAP_OPMOD_GET_MAX);
err = mlx5_core_get_caps(dev, MLX5_CAP_DEBUG);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, qos)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_QOS,
HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
err = mlx5_core_get_caps(dev, MLX5_CAP_QOS,
HCA_CAP_OPMOD_GET_MAX);
err = mlx5_core_get_caps(dev, MLX5_CAP_QOS);
if (err)
return err;
}
@ -266,48 +206,31 @@ int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
MLX5_SET(init_hca_in, in, opcode, MLX5_CMD_OP_INIT_HCA);
memset(out, 0, sizeof(out));
return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
out, sizeof(out));
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
{
u32 in[MLX5_ST_SZ_DW(teardown_hca_in)];
u32 out[MLX5_ST_SZ_DW(teardown_hca_out)];
memset(in, 0, sizeof(in));
u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
memset(out, 0, sizeof(out));
return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
out, sizeof(out));
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_core_set_dc_cnak_trace(struct mlx5_core_dev *dev, int enable,
u64 addr)
{
struct mlx5_cmd_set_dc_cnak_mbox_in *in;
struct mlx5_cmd_set_dc_cnak_mbox_out out;
int err;
u32 in[MLX5_ST_SZ_DW(set_dc_cnak_trace_in)] = {0};
u32 out[MLX5_ST_SZ_DW(set_dc_cnak_trace_out)] = {0};
__be64 be_addr;
void *pas;
in = kzalloc(sizeof(*in), GFP_KERNEL);
if (!in)
return -ENOMEM;
MLX5_SET(set_dc_cnak_trace_in, in, opcode, MLX5_CMD_OP_SET_DC_CNAK_TRACE);
MLX5_SET(set_dc_cnak_trace_in, in, enable, enable);
pas = MLX5_ADDR_OF(set_dc_cnak_trace_in, in, pas);
be_addr = cpu_to_be64(addr);
memcpy(MLX5_ADDR_OF(cmd_pas, pas, pa_h), &be_addr, sizeof(be_addr));
memset(&out, 0, sizeof(out));
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_DC_CNAK_TRACE);
in->enable = !!enable << 7;
in->pa = cpu_to_be64(addr);
err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
if (err)
goto out;
if (out.hdr.status)
err = mlx5_cmd_status_to_err(&out.hdr);
out:
kfree(in);
return err;
return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
}

@ -33,30 +33,33 @@
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
u16 opmod, u8 port)
{
struct mlx5_mad_ifc_mbox_in *in = NULL;
struct mlx5_mad_ifc_mbox_out *out = NULL;
int err;
int outlen = MLX5_ST_SZ_BYTES(mad_ifc_out);
int inlen = MLX5_ST_SZ_BYTES(mad_ifc_in);
int err = -ENOMEM;
void *data;
void *resp;
u32 *out;
u32 *in;
in = kzalloc(sizeof(*in), GFP_KERNEL);
in = kzalloc(inlen, GFP_KERNEL);
out = kzalloc(outlen, GFP_KERNEL);
if (!in || !out)
goto out;
out = kzalloc(sizeof(*out), GFP_KERNEL);
MLX5_SET(mad_ifc_in, in, opcode, MLX5_CMD_OP_MAD_IFC);
MLX5_SET(mad_ifc_in, in, op_mod, opmod);
MLX5_SET(mad_ifc_in, in, port, port);
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MAD_IFC);
in->hdr.opmod = cpu_to_be16(opmod);
in->port = port;
data = MLX5_ADDR_OF(mad_ifc_in, in, mad);
memcpy(data, inb, MLX5_FLD_SZ_BYTES(mad_ifc_in, mad));
memcpy(in->data, inb, sizeof(in->data));
err = mlx5_cmd_exec(dev, in, sizeof(*in), out, sizeof(*out));
err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
if (err)
goto out;
if (out->hdr.status) {
err = mlx5_cmd_status_to_err(&out->hdr);
goto out;
}
memcpy(outb, out->data, sizeof(out->data));
resp = MLX5_ADDR_OF(mad_ifc_out, out, response_mad_packet);
memcpy(outb, resp,
MLX5_FLD_SZ_BYTES(mad_ifc_out, response_mad_packet));
out:
kfree(out);

@ -310,8 +310,9 @@ static u16 to_fw_pkey_sz(u32 size)
}
}
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
enum mlx5_cap_mode cap_mode)
static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
enum mlx5_cap_type cap_type,
enum mlx5_cap_mode cap_mode)
{
u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
@ -325,10 +326,6 @@ int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
if (err)
goto query_ex;
err = mlx5_cmd_status_to_err_v2(out);
if (err) {
mlx5_core_warn(dev,
"QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
@ -359,21 +356,24 @@ int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
return err;
}
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
{
int ret;
ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR);
if (ret)
return ret;
return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX);
}
static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz)
{
u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)];
int err;
memset(out, 0, sizeof(out));
u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)] = {0};
MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
if (err)
return err;
err = mlx5_cmd_status_to_err_v2(out);
return err;
return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
}
static int handle_hca_cap(struct mlx5_core_dev *dev)
@ -386,11 +386,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
set_ctx = kzalloc(set_sz, GFP_KERNEL);
err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
if (err)
goto query_ex;
err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
if (err)
goto query_ex;
@ -434,13 +430,7 @@ static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
int err;
if (MLX5_CAP_GEN(dev, atomic)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
HCA_CAP_OPMOD_GET_MAX);
if (err)
return err;
err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
HCA_CAP_OPMOD_GET_CUR);
err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
if (err)
return err;
} else {
@ -492,48 +482,38 @@ static int set_hca_ctrl(struct mlx5_core_dev *dev)
static int mlx5_core_enable_hca(struct mlx5_core_dev *dev)
{
u32 in[MLX5_ST_SZ_DW(enable_hca_in)];
u32 out[MLX5_ST_SZ_DW(enable_hca_out)];
u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0};
memset(in, 0, sizeof(in));
MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
memset(out, 0, sizeof(out));
return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
out, sizeof(out));
return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
}
static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
{
u32 in[MLX5_ST_SZ_DW(disable_hca_in)];
u32 out[MLX5_ST_SZ_DW(disable_hca_out)];
memset(in, 0, sizeof(in));
u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0};
u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {0};
MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
memset(out, 0, sizeof(out));
return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
out, sizeof(out));
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
u32 query_in[MLX5_ST_SZ_DW(query_issi_in)];
u32 query_out[MLX5_ST_SZ_DW(query_issi_out)];
u32 set_in[MLX5_ST_SZ_DW(set_issi_in)];
u32 set_out[MLX5_ST_SZ_DW(set_issi_out)];
int err;
u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {0};
u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0};
u32 sup_issi;
memset(query_in, 0, sizeof(query_in));
memset(query_out, 0, sizeof(query_out));
int err;
MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
err = mlx5_cmd_exec_check_status(dev, query_in, sizeof(query_in),
query_out, sizeof(query_out));
err = mlx5_cmd_exec(dev, query_in, sizeof(query_in), query_out, sizeof(query_out));
if (err) {
if (((struct mlx5_outbox_hdr *)query_out)->status ==
MLX5_CMD_STAT_BAD_OP_ERR) {
u32 syndrome;
u8 status;
mlx5_cmd_mbox_status(query_out, &status, &syndrome);
if (status == MLX5_CMD_STAT_BAD_OP_ERR) {
pr_debug("Only ISSI 0 is supported\n");
return 0;
}
@ -545,16 +525,15 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);
if (sup_issi & (1 << 1)) {
memset(set_in, 0, sizeof(set_in));
memset(set_out, 0, sizeof(set_out));
u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {0};
u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0};
MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
MLX5_SET(set_issi_in, set_in, current_issi, 1);
err = mlx5_cmd_exec_check_status(dev, set_in, sizeof(set_in),
set_out, sizeof(set_out));
err = mlx5_cmd_exec(dev, set_in, sizeof(set_in), set_out, sizeof(set_out));
if (err) {
printf("mlx5_core: ERR: ""failed to set ISSI=1\n");
printf("mlx5_core: ERR: ""failed to set ISSI=1 err(%d)\n", err);
return err;
}

@ -33,36 +33,28 @@
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
{
u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)];
u32 out[MLX5_ST_SZ_DW(attach_to_mcg_out)];
memset(in, 0, sizeof(in));
u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {0};
u32 out[MLX5_ST_SZ_DW(attach_to_mcg_out)] = {0};
MLX5_SET(attach_to_mcg_in, in, opcode, MLX5_CMD_OP_ATTACH_TO_MCG);
MLX5_SET(attach_to_mcg_in, in, qpn, qpn);
memcpy(MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid), mgid,
sizeof(*mgid));
memset(out, 0, sizeof(out));
return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
out, sizeof(out));
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_attach_mcg);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
{
u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)];
u32 out[MLX5_ST_SZ_DW(detach_from_mcg_out)];
memset(in, 0, sizeof(in));
u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {0};
u32 out[MLX5_ST_SZ_DW(detach_from_mcg_out)] = {0};
MLX5_SET(detach_from_mcg_in, in, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
MLX5_SET(detach_from_mcg_in, in, qpn, qpn);
memcpy(MLX5_ADDR_OF(detach_from_mcg_in, in, multicast_gid), mgid,
sizeof(*mgid));
memset(out, 0, sizeof(out));
return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
out, sizeof(out));
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_detach_mcg);

View File

@ -43,165 +43,152 @@ void mlx5_cleanup_mr_table(struct mlx5_core_dev *dev)
{
}
int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
struct mlx5_create_mkey_mbox_in *in, int inlen,
mlx5_cmd_cbk_t callback, void *context,
struct mlx5_create_mkey_mbox_out *out)
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
struct mlx5_core_mr *mkey,
u32 *in, int inlen,
u32 *out, int outlen,
mlx5_cmd_cbk_t callback, void *context)
{
struct mlx5_mr_table *table = &dev->priv.mr_table;
struct mlx5_create_mkey_mbox_out lout;
u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
u32 mkey_index;
void *mkc;
unsigned long flags;
int err;
u8 key;
memset(&lout, 0, sizeof(lout));
spin_lock_irq(&dev->priv.mkey_lock);
key = dev->priv.mkey_key++;
spin_unlock_irq(&dev->priv.mkey_lock);
in->seg.qpn_mkey7_0 |= cpu_to_be32(key);
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_MKEY);
if (callback) {
err = mlx5_cmd_exec_cb(dev, in, inlen, out, sizeof(*out),
callback, context);
return err;
} else {
err = mlx5_cmd_exec(dev, in, inlen, &lout, sizeof(lout));
}
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
MLX5_SET(mkc, mkc, mkey_7_0, key);
if (callback)
return mlx5_cmd_exec_cb(dev, in, inlen, out, outlen,
callback, context);
err = mlx5_cmd_exec(dev, in, inlen, lout, sizeof(lout));
if (err) {
mlx5_core_dbg(dev, "cmd exec failed %d\n", err);
return err;
}
if (lout.hdr.status) {
mlx5_core_dbg(dev, "status %d\n", lout.hdr.status);
return mlx5_cmd_status_to_err(&lout.hdr);
}
mr->iova = be64_to_cpu(in->seg.start_addr);
mr->size = be64_to_cpu(in->seg.len);
mr->key = mlx5_idx_to_mkey(be32_to_cpu(lout.mkey) & 0xffffff) | key;
mr->pd = be32_to_cpu(in->seg.flags_pd) & 0xffffff;
mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
mkey->size = MLX5_GET64(mkc, mkc, len);
mkey->key = mlx5_idx_to_mkey(mkey_index) | key;
mkey->pd = MLX5_GET(mkc, mkc, pd);
mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n",
be32_to_cpu(lout.mkey), key, mr->key);
mkey_index, key, mkey->key);
/* connect to MR tree */
spin_lock_irqsave(&table->lock, flags);
err = radix_tree_insert(&table->tree, mlx5_mkey_to_idx(mr->key), mr);
err = radix_tree_insert(&table->tree, mlx5_mkey_to_idx(mkey->key), mkey);
spin_unlock_irqrestore(&table->lock, flags);
if (err) {
mlx5_core_warn(dev, "failed radix tree insert of mr 0x%x, %d\n",
mr->key, err);
mlx5_core_destroy_mkey(dev, mr);
mkey->key, err);
mlx5_core_destroy_mkey(dev, mkey);
}
return err;
}
EXPORT_SYMBOL(mlx5_core_create_mkey_cb);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
struct mlx5_core_mr *mkey,
u32 *in, int inlen)
{
return mlx5_core_create_mkey_cb(dev, mkey, in, inlen,
NULL, 0, NULL, NULL);
}
EXPORT_SYMBOL(mlx5_core_create_mkey);
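
Callers now hand mlx5_core_create_mkey() a create_mkey_in layout and reach the embedded MKey context through MLX5_ADDR_OF(). A hedged caller-side sketch; the mkc field names are the ones read back above, everything else (access rights, translation entries, and so on) is omitted:

/* Illustrative only: register an MKey over an existing address range. */
static int example_create_mkey(struct mlx5_core_dev *dev,
                               struct mlx5_core_mr *mkey,
                               u32 pdn, u64 start, u64 length)
{
    int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
    void *mkc;
    u32 *in;
    int err;

    in = mlx5_vzalloc(inlen);
    if (!in)
        return -ENOMEM;

    mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
    MLX5_SET(mkc, mkc, pd, pdn);
    MLX5_SET64(mkc, mkc, start_addr, start);
    MLX5_SET64(mkc, mkc, len, length);

    err = mlx5_core_create_mkey(dev, mkey, in, inlen);
    kvfree(in);
    return err;
}
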
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mkey)
{
struct mlx5_mr_table *table = &dev->priv.mr_table;
u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)];
u32 out[MLX5_ST_SZ_DW(destroy_mkey_out)];
u32 out[MLX5_ST_SZ_DW(destroy_mkey_out)] = {0};
u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {0};
struct mlx5_core_mr *deleted_mr;
unsigned long flags;
memset(in, 0, sizeof(in));
spin_lock_irqsave(&table->lock, flags);
deleted_mr = radix_tree_delete(&table->tree, mlx5_mkey_to_idx(mr->key));
deleted_mr = radix_tree_delete(&table->tree, mlx5_mkey_to_idx(mkey->key));
spin_unlock_irqrestore(&table->lock, flags);
if (!deleted_mr) {
mlx5_core_warn(dev, "failed radix tree delete of mr 0x%x\n", mr->key);
mlx5_core_warn(dev, "failed radix tree delete of mr 0x%x\n", mkey->key);
return -ENOENT;
}
MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mr->key));
MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
memset(out, 0, sizeof(out));
return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
out, sizeof(out));
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_destroy_mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
struct mlx5_query_mkey_mbox_out *out, int outlen)
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mkey,
u32 *out, int outlen)
{
struct mlx5_query_mkey_mbox_in in;
int err;
u32 in[MLX5_ST_SZ_DW(query_mkey_in)] = {0};
memset(&in, 0, sizeof(in));
memset(out, 0, outlen);
MLX5_SET(query_mkey_in, in, opcode, MLX5_CMD_OP_QUERY_MKEY);
MLX5_SET(query_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_MKEY);
in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key));
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
if (err)
return err;
if (out->hdr.status)
return mlx5_cmd_status_to_err(&out->hdr);
return err;
return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL(mlx5_core_query_mkey);
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *_mkey,
u32 *mkey)
{
struct mlx5_query_special_ctxs_mbox_in in;
struct mlx5_query_special_ctxs_mbox_out out;
u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {0};
u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {0};
int err;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
*mkey = be32_to_cpu(out.dump_fill_mkey);
MLX5_SET(query_special_contexts_in, in, opcode,
MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (!err)
*mkey = MLX5_GET(query_special_contexts_out, out, dump_fill_mkey);
return err;
}
EXPORT_SYMBOL(mlx5_core_dump_fill_mkey);
static inline u32 mlx5_get_psv(u32 *out, int psv_index)
{
switch (psv_index) {
case 1: return MLX5_GET(create_psv_out, out, psv1_index);
case 2: return MLX5_GET(create_psv_out, out, psv2_index);
case 3: return MLX5_GET(create_psv_out, out, psv3_index);
default: return MLX5_GET(create_psv_out, out, psv0_index);
}
}
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
int npsvs, u32 *sig_index)
{
struct mlx5_allocate_psv_in in;
struct mlx5_allocate_psv_out out;
u32 out[MLX5_ST_SZ_DW(create_psv_out)] = {0};
u32 in[MLX5_ST_SZ_DW(create_psv_in)] = {0};
int i, err;
if (npsvs > MLX5_MAX_PSVS)
return -EINVAL;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_PSV);
in.npsv_pd = cpu_to_be32((npsvs << 28) | pdn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
MLX5_SET(create_psv_in, in, opcode, MLX5_CMD_OP_CREATE_PSV);
MLX5_SET(create_psv_in, in, pd, pdn);
MLX5_SET(create_psv_in, in, num_psv, npsvs);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err) {
mlx5_core_err(dev, "cmd exec failed %d\n", err);
mlx5_core_err(dev, "create_psv cmd exec failed %d\n", err);
return err;
}
if (out.hdr.status) {
mlx5_core_err(dev, "create_psv bad status %d\n",
out.hdr.status);
return mlx5_cmd_status_to_err(&out.hdr);
}
for (i = 0; i < npsvs; i++)
sig_index[i] = be32_to_cpu(out.psv_idx[i]) & 0xffffff;
sig_index[i] = mlx5_get_psv(out, i);
return err;
}
@ -209,29 +196,11 @@ EXPORT_SYMBOL(mlx5_core_create_psv);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num)
{
struct mlx5_destroy_psv_in in;
struct mlx5_destroy_psv_out out;
int err;
u32 out[MLX5_ST_SZ_DW(destroy_psv_out)] = {0};
u32 in[MLX5_ST_SZ_DW(destroy_psv_in)] = {0};
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.psv_number = cpu_to_be32(psv_num);
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_PSV);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err) {
mlx5_core_err(dev, "destroy_psv cmd exec failed %d\n", err);
goto out;
}
if (out.hdr.status) {
mlx5_core_err(dev, "destroy_psv bad status %d\n",
out.hdr.status);
err = mlx5_cmd_status_to_err(&out.hdr);
goto out;
}
out:
return err;
MLX5_SET(destroy_psv_in, in, opcode, MLX5_CMD_OP_DESTROY_PSV);
MLX5_SET(destroy_psv_in, in, psvn, psv_num);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_destroy_psv);
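
A short caller-side sketch of the reworked PSV helpers; two PSVs, as used for signature MRs, are assumed for illustration:

/* Hypothetical user: allocate two PSVs on a PD and release them again. */
static int example_psv_cycle(struct mlx5_core_dev *dev, u32 pdn)
{
    u32 psv_index[2];
    int err;

    err = mlx5_core_create_psv(dev, pdn, 2, psv_index);
    if (err)
        return err;

    /* ... psv_index[0]/[1] feed the signature offload configuration ... */

    mlx5_core_destroy_psv(dev, psv_index[0]);
    mlx5_core_destroy_psv(dev, psv_index[1]);
    return 0;
}
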

View File

@ -41,21 +41,6 @@ struct mlx5_pages_req {
};
struct mlx5_manage_pages_inbox {
struct mlx5_inbox_hdr hdr;
__be16 rsvd;
__be16 func_id;
__be32 num_entries;
__be64 pas[0];
};
struct mlx5_manage_pages_outbox {
struct mlx5_outbox_hdr hdr;
__be32 num_entries;
u8 rsvd[4];
__be64 pas[0];
};
enum {
MAX_RECLAIM_TIME_MSECS = 5000,
};
@ -310,18 +295,16 @@ free_4k(struct mlx5_core_dev *dev, u64 addr)
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
s32 *npages, int boot)
{
u32 in[MLX5_ST_SZ_DW(query_pages_in)];
u32 out[MLX5_ST_SZ_DW(query_pages_out)];
u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {0};
u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {0};
int err;
memset(in, 0, sizeof(in));
MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
MLX5_SET(query_pages_in, in, op_mod,
boot ? MLX5_BOOT_PAGES : MLX5_INIT_PAGES);
MLX5_SET(query_pages_in, in, op_mod, boot ?
MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
@ -334,35 +317,34 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int notify_fail)
{
struct mlx5_manage_pages_inbox *in;
struct mlx5_manage_pages_outbox out;
struct mlx5_manage_pages_inbox *nin;
int inlen;
u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
u64 addr;
int err;
u32 *in, *nin;
int i = 0;
inlen = sizeof(*in) + npages * sizeof(in->pas[0]);
inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
in = mlx5_vzalloc(inlen);
if (!in) {
mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
err = -ENOMEM;
goto out_alloc;
}
memset(&out, 0, sizeof(out));
for (i = 0; i < npages; i++) {
err = alloc_4k(dev, &addr, func_id);
if (err)
goto out_alloc;
in->pas[i] = cpu_to_be64(addr);
MLX5_ARRAY_SET64(manage_pages_in, in, pas, i, addr);
}
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
in->func_id = cpu_to_be16(func_id);
in->num_entries = cpu_to_be32(npages);
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
MLX5_SET(manage_pages_in, in, function_id, func_id);
MLX5_SET(manage_pages_in, in, input_num_entries, npages);
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (err) {
mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
func_id, npages, err);
@ -371,37 +353,28 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
dev->priv.fw_pages += npages;
dev->priv.pages_per_func[func_id] += npages;
if (out.hdr.status) {
err = mlx5_cmd_status_to_err(&out.hdr);
if (err) {
mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
func_id, npages, out.hdr.status);
goto out_alloc;
}
}
mlx5_core_dbg(dev, "err %d\n", err);
goto out_free;
out_alloc:
if (notify_fail) {
nin = kzalloc(sizeof(*nin), GFP_KERNEL);
nin = mlx5_vzalloc(inlen);
if (!nin)
goto out_4k;
memset(&out, 0, sizeof(out));
nin->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
nin->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
nin->func_id = cpu_to_be16(func_id);
if (mlx5_cmd_exec(dev, nin, sizeof(*nin), &out, sizeof(out)))
MLX5_SET(manage_pages_in, nin, opcode, MLX5_CMD_OP_MANAGE_PAGES);
MLX5_SET(manage_pages_in, nin, op_mod, MLX5_PAGES_CANT_GIVE);
MLX5_SET(manage_pages_in, nin, function_id, func_id);
if (mlx5_cmd_exec(dev, nin, inlen, out, sizeof(out)))
mlx5_core_warn(dev, "page notify failed\n");
kfree(nin);
kvfree(nin);
}
out_4k:
for (i--; i >= 0; i--)
free_4k(dev, be64_to_cpu(in->pas[i]));
free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]));
out_free:
kvfree(in);
return err;
@ -410,49 +383,41 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
int *nclaimed)
{
struct mlx5_manage_pages_inbox in;
struct mlx5_manage_pages_outbox *out;
int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
int num_claimed;
int outlen;
u64 addr;
u32 *out;
int err;
int i;
if (nclaimed)
*nclaimed = 0;
memset(&in, 0, sizeof(in));
outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
out = mlx5_vzalloc(outlen);
if (!out)
return -ENOMEM;
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
in.func_id = cpu_to_be16(func_id);
in.num_entries = cpu_to_be32(npages);
MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
MLX5_SET(manage_pages_in, in, function_id, func_id);
MLX5_SET(manage_pages_in, in, input_num_entries, npages);
mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
if (err) {
mlx5_core_err(dev, "failed reclaiming pages\n");
goto out_free;
}
if (out->hdr.status) {
err = mlx5_cmd_status_to_err(&out->hdr);
goto out_free;
}
num_claimed = be32_to_cpu(out->num_entries);
num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries);
if (nclaimed)
*nclaimed = num_claimed;
dev->priv.fw_pages -= num_claimed;
dev->priv.pages_per_func[func_id] -= num_claimed;
for (i = 0; i < num_claimed; i++) {
addr = be64_to_cpu(out->pas[i]);
free_4k(dev, addr);
}
for (i = 0; i < num_claimed; i++)
free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]));
out_free:
kvfree(out);
@ -548,8 +513,8 @@ static int optimal_reclaimed_pages(void)
int ret;
ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
sizeof(struct mlx5_manage_pages_outbox)) /
FIELD_SIZEOF(struct mlx5_manage_pages_outbox, pas[0]);
MLX5_ST_SZ_BYTES(manage_pages_out)) /
MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
return ret;
}
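
MLX5_ARRAY_SET64() is what lets give_pages() above write 64-bit elements of the flexible pas[] array at a runtime index, which the fixed-offset check in plain MLX5_SET64() cannot express. A sketch of the allocation-plus-fill pattern, using only names that appear in this file:

/* Sketch: build a MANAGE_PAGES(GIVE) request carrying npages addresses.
 * The caller passes (in, *inlen) to mlx5_cmd_exec() and kvfree()s it. */
static u32 *example_build_give_pages(u16 func_id, int npages,
                                     const u64 *addrs, int *inlen)
{
    u32 *in;
    int i;

    *inlen = MLX5_ST_SZ_BYTES(manage_pages_in) +
             npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
    in = mlx5_vzalloc(*inlen);
    if (!in)
        return NULL;

    MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
    MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
    MLX5_SET(manage_pages_in, in, function_id, func_id);
    MLX5_SET(manage_pages_in, in, input_num_entries, npages);
    for (i = 0; i < npages; i++)
        MLX5_ARRAY_SET64(manage_pages_in, in, pas, i, addrs[i]);

    return in;
}
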

View File

@ -32,16 +32,13 @@
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn)
{
u32 in[MLX5_ST_SZ_DW(alloc_pd_in)];
u32 out[MLX5_ST_SZ_DW(alloc_pd_out)];
u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {0};
u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {0};
int err;
memset(in, 0, sizeof(in));
MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
@ -52,16 +49,12 @@ EXPORT_SYMBOL(mlx5_core_alloc_pd);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn)
{
u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)];
u32 out[MLX5_ST_SZ_DW(dealloc_pd_out)];
memset(in, 0, sizeof(in));
u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {0};
u32 out[MLX5_ST_SZ_DW(dealloc_pd_out)] = {0};
MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
MLX5_SET(dealloc_pd_in, in, pd, pdn);
memset(out, 0, sizeof(out));
return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
out, sizeof(out));
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_dealloc_pd);
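
The PD helpers are the simplest instance of the new scheme and their external contract is unchanged; a minimal usage sketch:

/* Hypothetical caller: allocate a protection domain and free it again. */
static int example_pd_cycle(struct mlx5_core_dev *dev)
{
    u32 pdn;
    int err;

    err = mlx5_core_alloc_pd(dev, &pdn);
    if (err)
        return err;

    /* ... create MKeys/QPs/SRQs against pdn ... */

    return mlx5_core_dealloc_pd(dev, pdn);
}
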

View File

@ -33,43 +33,39 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
int size_in, void *data_out, int size_out,
u16 reg_num, int arg, int write)
{
struct mlx5_access_reg_mbox_in *in = NULL;
struct mlx5_access_reg_mbox_out *out = NULL;
int outlen = MLX5_ST_SZ_BYTES(access_register_out) + size_out;
int inlen = MLX5_ST_SZ_BYTES(access_register_in) + size_in;
int err = -ENOMEM;
u32 *out = NULL;
u32 *in = NULL;
void *data;
in = mlx5_vzalloc(sizeof(*in) + size_in);
if (!in)
return -ENOMEM;
in = mlx5_vzalloc(inlen);
out = mlx5_vzalloc(outlen);
if (!in || !out)
goto out;
out = mlx5_vzalloc(sizeof(*out) + size_out);
if (!out)
goto ex1;
data = MLX5_ADDR_OF(access_register_in, in, register_data);
memcpy(data, data_in, size_in);
memcpy(in->data, data_in, size_in);
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ACCESS_REG);
in->hdr.opmod = cpu_to_be16(!write);
in->arg = cpu_to_be32(arg);
in->register_id = cpu_to_be16(reg_num);
err = mlx5_cmd_exec(dev, in, sizeof(*in) + size_in, out,
sizeof(*out) + size_out);
MLX5_SET(access_register_in, in, opcode, MLX5_CMD_OP_ACCESS_REG);
MLX5_SET(access_register_in, in, op_mod, !write);
MLX5_SET(access_register_in, in, argument, arg);
MLX5_SET(access_register_in, in, register_id, reg_num);
err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
if (err)
goto ex2;
goto out;
data = MLX5_ADDR_OF(access_register_out, out, register_data);
memcpy(data_out, data, size_out);
if (out->hdr.status)
err = mlx5_cmd_status_to_err(&out->hdr);
if (!err)
memcpy(data_out, out->data, size_out);
ex2:
out:
kvfree(out);
ex1:
kvfree(in);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_access_reg);
struct mlx5_reg_pcap {
u8 rsvd0;
u8 port_num;
@ -165,8 +161,8 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_autoneg);
int mlx5_set_port_autoneg(struct mlx5_core_dev *dev, bool disable,
u32 eth_proto_admin, int proto_mask)
{
u32 in[MLX5_ST_SZ_DW(ptys_reg)];
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
u32 in[MLX5_ST_SZ_DW(ptys_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
u8 an_disable_cap;
u8 an_disable_status;
int err;
@ -178,8 +174,6 @@ int mlx5_set_port_autoneg(struct mlx5_core_dev *dev, bool disable,
if (!an_disable_cap)
return -EPERM;
memset(in, 0, sizeof(in));
MLX5_SET(ptys_reg, in, local_port, 1);
MLX5_SET(ptys_reg, in, an_disable_admin, disable);
MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
@ -231,12 +225,10 @@ EXPORT_SYMBOL(mlx5_query_port_eth_proto_oper);
int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
int proto_mask)
{
u32 in[MLX5_ST_SZ_DW(ptys_reg)];
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
u32 in[MLX5_ST_SZ_DW(ptys_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
int err;
memset(in, 0, sizeof(in));
MLX5_SET(ptys_reg, in, local_port, 1);
MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
if (proto_mask == MLX5_PTYS_EN)
@ -253,12 +245,10 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_proto);
int mlx5_set_port_status(struct mlx5_core_dev *dev,
enum mlx5_port_status status)
{
u32 in[MLX5_ST_SZ_DW(paos_reg)];
u32 out[MLX5_ST_SZ_DW(paos_reg)];
u32 in[MLX5_ST_SZ_DW(paos_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(paos_reg)] = {0};
int err;
memset(in, 0, sizeof(in));
MLX5_SET(paos_reg, in, local_port, 1);
MLX5_SET(paos_reg, in, admin_status, status);
@ -271,12 +261,10 @@ int mlx5_set_port_status(struct mlx5_core_dev *dev,
int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status)
{
u32 in[MLX5_ST_SZ_DW(paos_reg)];
u32 out[MLX5_ST_SZ_DW(paos_reg)];
u32 in[MLX5_ST_SZ_DW(paos_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(paos_reg)] = {0};
int err;
memset(in, 0, sizeof(in));
MLX5_SET(paos_reg, in, local_port, 1);
err = mlx5_core_access_reg(dev, in, sizeof(in), out,
@ -308,12 +296,10 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
static int mlx5_query_port_mtu(struct mlx5_core_dev *dev,
int *admin_mtu, int *max_mtu, int *oper_mtu)
{
u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
u32 in[MLX5_ST_SZ_DW(pmtu_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(pmtu_reg)] = {0};
int err;
memset(in, 0, sizeof(in));
MLX5_SET(pmtu_reg, in, local_port, 1);
err = mlx5_core_access_reg(dev, in, sizeof(in), out,
@ -333,10 +319,8 @@ static int mlx5_query_port_mtu(struct mlx5_core_dev *dev,
int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu)
{
u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
memset(in, 0, sizeof(in));
u32 in[MLX5_ST_SZ_DW(pmtu_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(pmtu_reg)] = {0};
MLX5_SET(pmtu_reg, in, admin_mtu, mtu);
MLX5_SET(pmtu_reg, in, local_port, 1);
@ -355,11 +339,8 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 port,
u32 rx_pause, u32 tx_pause)
{
u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
MLX5_SET(pfcc_reg, in, local_port, port);
MLX5_SET(pfcc_reg, in, pptx, tx_pause);
@ -372,13 +353,10 @@ int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 port,
int mlx5_query_port_pause(struct mlx5_core_dev *dev, u32 port,
u32 *rx_pause, u32 *tx_pause)
{
u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
int err;
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
MLX5_SET(pfcc_reg, in, local_port, port);
err = mlx5_core_access_reg(dev, in, sizeof(in), out,
@ -461,34 +439,27 @@ EXPORT_SYMBOL_GPL(mlx5_is_wol_supported);
int mlx5_set_wol(struct mlx5_core_dev *dev, u8 wol_mode)
{
u32 in[MLX5_ST_SZ_DW(set_wol_rol_in)];
u32 out[MLX5_ST_SZ_DW(set_wol_rol_out)];
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
u32 in[MLX5_ST_SZ_DW(set_wol_rol_in)] = {0};
u32 out[MLX5_ST_SZ_DW(set_wol_rol_out)] = {0};
MLX5_SET(set_wol_rol_in, in, opcode, MLX5_CMD_OP_SET_WOL_ROL);
MLX5_SET(set_wol_rol_in, in, wol_mode_valid, 1);
MLX5_SET(set_wol_rol_in, in, wol_mode, wol_mode);
return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
out, sizeof(out));
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_set_wol);
int mlx5_query_dropless_mode(struct mlx5_core_dev *dev, u16 *timeout)
{
u32 in[MLX5_ST_SZ_DW(query_delay_drop_params_in)];
u32 out[MLX5_ST_SZ_DW(query_delay_drop_params_out)];
u32 in[MLX5_ST_SZ_DW(query_delay_drop_params_in)] = {0};
u32 out[MLX5_ST_SZ_DW(query_delay_drop_params_out)] = {0};
int err = 0;
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
MLX5_SET(query_delay_drop_params_in, in, opcode,
MLX5_CMD_OP_QUERY_DELAY_DROP_PARAMS);
err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
@ -501,18 +472,14 @@ EXPORT_SYMBOL_GPL(mlx5_query_dropless_mode);
int mlx5_set_dropless_mode(struct mlx5_core_dev *dev, u16 timeout)
{
u32 in[MLX5_ST_SZ_DW(set_delay_drop_params_in)];
u32 out[MLX5_ST_SZ_DW(set_delay_drop_params_out)];
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
u32 in[MLX5_ST_SZ_DW(set_delay_drop_params_in)] = {0};
u32 out[MLX5_ST_SZ_DW(set_delay_drop_params_out)] = {0};
MLX5_SET(set_delay_drop_params_in, in, opcode,
MLX5_CMD_OP_SET_DELAY_DROP_PARAMS);
MLX5_SET(set_delay_drop_params_in, in, delay_drop_timeout, timeout);
return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
out, sizeof(out));
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_set_dropless_mode);
@ -520,13 +487,10 @@ int mlx5_core_access_pvlc(struct mlx5_core_dev *dev,
struct mlx5_pvlc_reg *pvlc, int write)
{
int sz = MLX5_ST_SZ_BYTES(pvlc_reg);
u8 in[MLX5_ST_SZ_BYTES(pvlc_reg)];
u8 out[MLX5_ST_SZ_BYTES(pvlc_reg)];
u8 in[MLX5_ST_SZ_BYTES(pvlc_reg)] = {0};
u8 out[MLX5_ST_SZ_BYTES(pvlc_reg)] = {0};
int err;
memset(out, 0, sizeof(out));
memset(in, 0, sizeof(in));
MLX5_SET(pvlc_reg, in, local_port, pvlc->local_port);
if (write)
MLX5_SET(pvlc_reg, in, vl_admin, pvlc->vl_admin);
@ -680,13 +644,11 @@ EXPORT_SYMBOL_GPL(mlx5_core_access_pmtu);
int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
{
u32 in[MLX5_ST_SZ_DW(pmlp_reg)];
u32 out[MLX5_ST_SZ_DW(pmlp_reg)];
u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(pmlp_reg)] = {0};
int lane = 0;
int err;
memset(in, 0, sizeof(in));
MLX5_SET(pmlp_reg, in, local_port, 1);
err = mlx5_core_access_reg(dev, in, sizeof(in), out,
@ -705,13 +667,12 @@ int mlx5_query_eeprom(struct mlx5_core_dev *dev,
int i2c_addr, int page_num, int device_addr,
int size, int module_num, u32 *data, int *size_read)
{
u32 in[MLX5_ST_SZ_DW(mcia_reg)];
u32 out[MLX5_ST_SZ_DW(mcia_reg)];
u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(mcia_reg)] = {0};
u32 *ptr = (u32 *)MLX5_ADDR_OF(mcia_reg, out, dword_0);
int status;
int err;
memset(in, 0, sizeof(in));
size = min_t(int, size, MLX5_EEPROM_MAX_BYTES);
MLX5_SET(mcia_reg, in, l, 0);
@ -738,18 +699,15 @@ EXPORT_SYMBOL_GPL(mlx5_query_eeprom);
int mlx5_vxlan_udp_port_add(struct mlx5_core_dev *dev, u16 port)
{
u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)];
u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)];
u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {0};
u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)] = {0};
int err;
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
MLX5_SET(add_vxlan_udp_dport_in, in, opcode,
MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT);
MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, port);
err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err) {
mlx5_core_err(dev, "Failed %s, port %u, err - %d",
mlx5_command_str(MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT),
@ -761,18 +719,15 @@ int mlx5_vxlan_udp_port_add(struct mlx5_core_dev *dev, u16 port)
int mlx5_vxlan_udp_port_delete(struct mlx5_core_dev *dev, u16 port)
{
u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)];
u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)];
u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)] = {0};
u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)] = {0};
int err;
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
MLX5_SET(delete_vxlan_udp_dport_in, in, vxlan_udp_port, port);
err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err) {
mlx5_core_err(dev, "Failed %s, port %u, err - %d",
mlx5_command_str(MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT),
@ -784,16 +739,13 @@ int mlx5_vxlan_udp_port_delete(struct mlx5_core_dev *dev, u16 port)
int mlx5_query_wol(struct mlx5_core_dev *dev, u8 *wol_mode)
{
u32 in[MLX5_ST_SZ_DW(query_wol_rol_in)];
u32 out[MLX5_ST_SZ_DW(query_wol_rol_out)];
u32 in[MLX5_ST_SZ_DW(query_wol_rol_in)] = {0};
u32 out[MLX5_ST_SZ_DW(query_wol_rol_out)] = {0};
int err;
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
MLX5_SET(query_wol_rol_in, in, opcode, MLX5_CMD_OP_QUERY_WOL_ROL);
err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (!err)
*wol_mode = MLX5_GET(query_wol_rol_out, out, wol_mode);
@ -805,13 +757,10 @@ EXPORT_SYMBOL_GPL(mlx5_query_wol);
int mlx5_query_port_cong_status(struct mlx5_core_dev *mdev, int protocol,
int priority, int *is_enable)
{
u32 in[MLX5_ST_SZ_DW(query_cong_status_in)];
u32 out[MLX5_ST_SZ_DW(query_cong_status_out)];
u32 in[MLX5_ST_SZ_DW(query_cong_status_in)] = {0};
u32 out[MLX5_ST_SZ_DW(query_cong_status_out)] = {0};
int err;
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
*is_enable = 0;
MLX5_SET(query_cong_status_in, in, opcode,
@ -819,8 +768,7 @@ int mlx5_query_port_cong_status(struct mlx5_core_dev *mdev, int protocol,
MLX5_SET(query_cong_status_in, in, cong_protocol, protocol);
MLX5_SET(query_cong_status_in, in, priority, priority);
err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
out, sizeof(out));
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (!err)
*is_enable = MLX5_GET(query_cong_status_out, out, enable);
return err;
@ -829,11 +777,8 @@ int mlx5_query_port_cong_status(struct mlx5_core_dev *mdev, int protocol,
int mlx5_modify_port_cong_status(struct mlx5_core_dev *mdev, int protocol,
int priority, int enable)
{
u32 in[MLX5_ST_SZ_DW(modify_cong_status_in)];
u32 out[MLX5_ST_SZ_DW(modify_cong_status_out)];
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
u32 in[MLX5_ST_SZ_DW(modify_cong_status_in)] = {0};
u32 out[MLX5_ST_SZ_DW(modify_cong_status_out)] = {0};
MLX5_SET(modify_cong_status_in, in, opcode,
MLX5_CMD_OP_MODIFY_CONG_STATUS);
@ -841,23 +786,19 @@ int mlx5_modify_port_cong_status(struct mlx5_core_dev *mdev, int protocol,
MLX5_SET(modify_cong_status_in, in, priority, priority);
MLX5_SET(modify_cong_status_in, in, enable, enable);
return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
out, sizeof(out));
return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
int mlx5_query_port_cong_params(struct mlx5_core_dev *mdev, int protocol,
void *out, int out_size)
{
u32 in[MLX5_ST_SZ_DW(query_cong_params_in)];
memset(in, 0, sizeof(in));
u32 in[MLX5_ST_SZ_DW(query_cong_params_in)] = {0};
MLX5_SET(query_cong_params_in, in, opcode,
MLX5_CMD_OP_QUERY_CONG_PARAMS);
MLX5_SET(query_cong_params_in, in, cong_protocol, protocol);
return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
out, out_size);
return mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
}
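
The congestion helpers leave ownership of the output layout with the caller. A hedged sketch of the expected usage; the output is sized here from the query_cong_params_out layout, which is assumed to be the matching autogenerated struct:

/* Sketch: the caller supplies and later parses the output buffer. */
static int example_query_cong_params(struct mlx5_core_dev *mdev, int protocol)
{
    int outlen = MLX5_ST_SZ_BYTES(query_cong_params_out);
    u32 *out;
    int err;

    out = mlx5_vzalloc(outlen);
    if (!out)
        return -ENOMEM;

    err = mlx5_query_port_cong_params(mdev, protocol, out, outlen);
    /* on success, the protocol-specific parameters are read out of 'out' */

    kvfree(out);
    return err;
}
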
static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
@ -988,51 +929,42 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_prio_tc);
int mlx5_modify_port_cong_params(struct mlx5_core_dev *mdev,
void *in, int in_size)
{
u32 out[MLX5_ST_SZ_DW(modify_cong_params_out)];
memset(out, 0, sizeof(out));
u32 out[MLX5_ST_SZ_DW(modify_cong_params_out)] = {0};
MLX5_SET(modify_cong_params_in, in, opcode,
MLX5_CMD_OP_MODIFY_CONG_PARAMS);
return mlx5_cmd_exec_check_status(mdev, in, in_size, out, sizeof(out));
return mlx5_cmd_exec(mdev, in, in_size, out, sizeof(out));
}
int mlx5_query_port_cong_statistics(struct mlx5_core_dev *mdev, int clear,
void *out, int out_size)
{
u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)];
memset(in, 0, sizeof(in));
u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = {0};
MLX5_SET(query_cong_statistics_in, in, opcode,
MLX5_CMD_OP_QUERY_CONG_STATISTICS);
MLX5_SET(query_cong_statistics_in, in, clear, clear);
return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
out, out_size);
return mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
}
int mlx5_set_diagnostic_params(struct mlx5_core_dev *mdev, void *in,
int in_size)
{
u32 out[MLX5_ST_SZ_DW(set_diagnostic_params_out)];
memset(out, 0, sizeof(out));
u32 out[MLX5_ST_SZ_DW(set_diagnostic_params_out)] = {0};
MLX5_SET(set_diagnostic_params_in, in, opcode,
MLX5_CMD_OP_SET_DIAGNOSTICS);
return mlx5_cmd_exec_check_status(mdev, in, in_size, out, sizeof(out));
return mlx5_cmd_exec(mdev, in, in_size, out, sizeof(out));
}
int mlx5_query_diagnostic_counters(struct mlx5_core_dev *mdev,
u8 num_of_samples, u16 sample_index,
void *out, int out_size)
{
u32 in[MLX5_ST_SZ_DW(query_diagnostic_counters_in)];
memset(in, 0, sizeof(in));
u32 in[MLX5_ST_SZ_DW(query_diagnostic_counters_in)] = {0};
MLX5_SET(query_diagnostic_counters_in, in, opcode,
MLX5_CMD_OP_QUERY_DIAGNOSTICS);
@ -1040,5 +972,5 @@ int mlx5_query_diagnostic_counters(struct mlx5_core_dev *mdev,
num_of_samples);
MLX5_SET(query_diagnostic_counters_in, in, sample_index, sample_index);
return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, out_size);
return mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
}

View File

@ -120,30 +120,20 @@ static void destroy_qprqsq_common(struct mlx5_core_dev *dev,
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp,
struct mlx5_create_qp_mbox_in *in,
int inlen)
u32 *in, int inlen)
{
struct mlx5_create_qp_mbox_out out;
struct mlx5_destroy_qp_mbox_in din;
struct mlx5_destroy_qp_mbox_out dout;
u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
u32 dout[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
u32 din[MLX5_ST_SZ_DW(destroy_qp_in)] = {0};
int err;
memset(&out, 0, sizeof(out));
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);
MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
if (err) {
mlx5_core_warn(dev, "ret %d\n", err);
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (err)
return err;
}
if (out.hdr.status) {
mlx5_core_warn(dev, "current num of QPs 0x%x\n",
atomic_read(&dev->num_qps));
return mlx5_cmd_status_to_err(&out.hdr);
}
qp->qpn = be32_to_cpu(out.qpn) & 0xffffff;
qp->qpn = MLX5_GET(create_qp_out, out, qpn);
mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
err = create_qprqsq_common(dev, qp, MLX5_RES_QP);
@ -155,12 +145,9 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
return 0;
err_cmd:
memset(&din, 0, sizeof(din));
memset(&dout, 0, sizeof(dout));
din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
din.qpn = cpu_to_be32(qp->qpn);
mlx5_cmd_exec(dev, &din, sizeof(din), &out, sizeof(dout));
MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
@ -168,44 +155,145 @@ EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp)
{
struct mlx5_destroy_qp_mbox_in in;
struct mlx5_destroy_qp_mbox_out out;
u32 out[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {0};
int err;
destroy_qprqsq_common(dev, qp, MLX5_RES_QP);
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
in.qpn = cpu_to_be32(qp->qpn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
atomic_dec(&dev->num_qps);
return 0;
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation,
struct mlx5_modify_qp_mbox_in *in, int sqd_event,
struct mbox_info {
u32 *in;
u32 *out;
int inlen;
int outlen;
};
static int mbox_alloc(struct mbox_info *mbox, int inlen, int outlen)
{
mbox->inlen = inlen;
mbox->outlen = outlen;
mbox->in = kzalloc(mbox->inlen, GFP_KERNEL);
mbox->out = kzalloc(mbox->outlen, GFP_KERNEL);
if (!mbox->in || !mbox->out) {
kfree(mbox->in);
kfree(mbox->out);
return -ENOMEM;
}
return 0;
}
static void mbox_free(struct mbox_info *mbox)
{
kfree(mbox->in);
kfree(mbox->out);
}
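
These two helpers exist because every QP transition now has its own input/output layout, so the mailbox sizes are only known per opcode. modify_qp_mbox_alloc() below picks the right pair; as a standalone illustration of the helper pattern, using the qp_2rst layouts that appear there:

/* Illustration only: drive a single 2RST_QP command through mbox_info. */
static int example_2rst_qp(struct mlx5_core_dev *dev, int qpn)
{
    struct mbox_info mbox;
    int err;

    err = mbox_alloc(&mbox, MLX5_ST_SZ_BYTES(qp_2rst_in),
                     MLX5_ST_SZ_BYTES(qp_2rst_out));
    if (err)
        return err;

    MLX5_SET(qp_2rst_in, mbox.in, opcode, MLX5_CMD_OP_2RST_QP);
    MLX5_SET(qp_2rst_in, mbox.in, qpn, qpn);

    err = mlx5_cmd_exec(dev, mbox.in, mbox.inlen, mbox.out, mbox.outlen);
    mbox_free(&mbox);
    return err;
}
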
static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
u32 opt_param_mask, void *qpc,
struct mbox_info *mbox)
{
mbox->out = NULL;
mbox->in = NULL;
#define MBOX_ALLOC(mbox, typ) \
mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))
#define MOD_QP_IN_SET(typ, in, _opcode, _qpn) \
MLX5_SET(typ##_in, in, opcode, _opcode); \
MLX5_SET(typ##_in, in, qpn, _qpn)
#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc) \
MOD_QP_IN_SET(typ, in, _opcode, _qpn); \
MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \
memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, MLX5_ST_SZ_BYTES(qpc))
switch (opcode) {
/* 2RST & 2ERR */
case MLX5_CMD_OP_2RST_QP:
if (MBOX_ALLOC(mbox, qp_2rst))
return -ENOMEM;
MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn);
break;
case MLX5_CMD_OP_2ERR_QP:
if (MBOX_ALLOC(mbox, qp_2err))
return -ENOMEM;
MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn);
break;
/* MODIFY with QPC */
case MLX5_CMD_OP_RST2INIT_QP:
if (MBOX_ALLOC(mbox, rst2init_qp))
return -ENOMEM;
MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
opt_param_mask, qpc);
break;
case MLX5_CMD_OP_INIT2RTR_QP:
if (MBOX_ALLOC(mbox, init2rtr_qp))
return -ENOMEM;
MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
opt_param_mask, qpc);
break;
case MLX5_CMD_OP_RTR2RTS_QP:
if (MBOX_ALLOC(mbox, rtr2rts_qp))
return -ENOMEM;
MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
opt_param_mask, qpc);
break;
case MLX5_CMD_OP_RTS2RTS_QP:
if (MBOX_ALLOC(mbox, rts2rts_qp))
return -ENOMEM;
MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
opt_param_mask, qpc);
break;
case MLX5_CMD_OP_SQERR2RTS_QP:
if (MBOX_ALLOC(mbox, sqerr2rts_qp))
return -ENOMEM;
MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
opt_param_mask, qpc);
break;
case MLX5_CMD_OP_INIT2INIT_QP:
if (MBOX_ALLOC(mbox, init2init_qp))
return -ENOMEM;
MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
opt_param_mask, qpc);
break;
default:
mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n",
opcode, qpn);
return -EINVAL;
}
return 0;
}
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
u32 opt_param_mask, void *qpc,
struct mlx5_core_qp *qp)
{
struct mlx5_modify_qp_mbox_out out;
int err = 0;
struct mbox_info mbox;
int err;
memset(&out, 0, sizeof(out));
in->hdr.opcode = cpu_to_be16(operation);
in->qpn = cpu_to_be32(qp->qpn);
err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
err = modify_qp_mbox_alloc(dev, opcode, qp->qpn,
opt_param_mask, qpc, &mbox);
if (err)
return err;
return mlx5_cmd_status_to_err(&out.hdr);
err = mlx5_cmd_exec(dev, mbox.in, mbox.inlen, mbox.out, mbox.outlen);
mbox_free(&mbox);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);
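
The exported signature changes here: callers pass the transition opcode, the optional-parameter mask and a bare qpc layout, and the opcode-specific mailbox is built internally. A hedged caller-side sketch; the qpc fields a real INIT-to-RTR transition must set (path, MTU, destination QPN, ...) are left out:

/* Illustrative transition request; real callers fill many qpc fields. */
static int example_init2rtr(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
{
    u32 qpc[MLX5_ST_SZ_DW(qpc)] = {0};

    /* ... MLX5_SET(qpc, qpc, ...) for the RTR attributes goes here ... */

    return mlx5_core_qp_modify(dev, MLX5_CMD_OP_INIT2RTR_QP,
                               0 /* opt_param_mask */, qpc, qp);
}
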
@ -223,86 +311,63 @@ void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
}
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
struct mlx5_query_qp_mbox_out *out, int outlen)
u32 *out, int outlen)
{
struct mlx5_query_qp_mbox_in in;
int err;
u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {0};
memset(&in, 0, sizeof(in));
memset(out, 0, outlen);
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_QP);
in.qpn = cpu_to_be32(qp->qpn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
if (err)
return err;
MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
MLX5_SET(query_qp_in, in, qpn, qp->qpn);
if (out->hdr.status)
return mlx5_cmd_status_to_err(&out->hdr);
return err;
return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
{
u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)];
u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)];
u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {0};
u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0};
int err;
memset(in, 0, sizeof(in));
MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
*xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
return 0;
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (!err)
*xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);
int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
{
u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)];
u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)];
memset(in, 0, sizeof(in));
u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {0};
u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0};
MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
memset(out, 0, sizeof(out));
return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
out, sizeof(out));
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
int mlx5_core_create_dct(struct mlx5_core_dev *dev,
struct mlx5_core_dct *dct,
struct mlx5_create_dct_mbox_in *in)
u32 *in)
{
struct mlx5_qp_table *table = &dev->priv.qp_table;
struct mlx5_create_dct_mbox_out out;
struct mlx5_destroy_dct_mbox_in din;
struct mlx5_destroy_dct_mbox_out dout;
u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0};
u32 dout[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
u32 din[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
int inlen = MLX5_ST_SZ_BYTES(create_dct_in);
int err;
init_completion(&dct->drained);
memset(&out, 0, sizeof(out));
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_DCT);
MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);
err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
if (err) {
mlx5_core_warn(dev, "create DCT failed, ret %d", err);
return err;
}
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
dct->dctn = be32_to_cpu(out.dctn) & 0xffffff;
dct->dctn = MLX5_GET(create_dct_out, out, dctn);
dct->common.res = MLX5_RES_DCT;
spin_lock_irq(&table->lock);
@ -320,10 +385,8 @@ int mlx5_core_create_dct(struct mlx5_core_dev *dev,
return 0;
err_cmd:
memset(&din, 0, sizeof(din));
memset(&dout, 0, sizeof(dout));
din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_DCT);
din.dctn = cpu_to_be32(dct->dctn);
MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
MLX5_SET(destroy_dct_in, din, dctn, dct->dctn);
mlx5_cmd_exec(dev, &din, sizeof(din), &out, sizeof(dout));
return err;
@ -333,41 +396,37 @@ EXPORT_SYMBOL_GPL(mlx5_core_create_dct);
static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
struct mlx5_core_dct *dct)
{
struct mlx5_drain_dct_mbox_out out;
struct mlx5_drain_dct_mbox_in in;
int err;
u32 out[MLX5_ST_SZ_DW(drain_dct_out)] = {0};
u32 in[MLX5_ST_SZ_DW(drain_dct_in)] = {0};
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DRAIN_DCT);
in.dctn = cpu_to_be32(dct->dctn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
return 0;
MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT);
MLX5_SET(drain_dct_in, in, dctn, dct->dctn);
return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
(void *)&out, sizeof(out));
}
int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
struct mlx5_core_dct *dct)
{
struct mlx5_qp_table *table = &dev->priv.qp_table;
struct mlx5_destroy_dct_mbox_out out;
struct mlx5_destroy_dct_mbox_in in;
u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
unsigned long flags;
int err;
err = mlx5_core_drain_dct(dev, dct);
if (err) {
mlx5_core_warn(dev, "failed drain DCT 0x%x\n", dct->dctn);
return err;
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
goto free_dct;
} else {
mlx5_core_warn(dev, "failed drain DCT 0x%x\n", dct->dctn);
return err;
}
}
wait_for_completion(&dct->drained);
free_dct:
spin_lock_irqsave(&table->lock, flags);
if (radix_tree_delete(&table->tree, dct->dctn) != dct)
mlx5_core_warn(dev, "dct delete differs\n");
@ -377,61 +436,37 @@ int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
complete(&dct->common.free);
wait_for_completion(&dct->common.free);
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_DCT);
in.dctn = cpu_to_be32(dct->dctn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
return err;
MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
MLX5_SET(destroy_dct_in, in, dctn, dct->dctn);
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
return 0;
return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
(void *)&out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);
int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
struct mlx5_query_dct_mbox_out *out)
u32 *out, int outlen)
{
struct mlx5_query_dct_mbox_in in;
int err;
u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {0};
memset(&in, 0, sizeof(in));
memset(out, 0, sizeof(*out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_DCT);
in.dctn = cpu_to_be32(dct->dctn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
if (err)
return err;
MLX5_SET(query_dct_in, in, opcode, MLX5_CMD_OP_QUERY_DCT);
MLX5_SET(query_dct_in, in, dctn, dct->dctn);
if (out->hdr.status)
return mlx5_cmd_status_to_err(&out->hdr);
return err;
return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
(void *)out, outlen);
}
EXPORT_SYMBOL_GPL(mlx5_core_dct_query);
int mlx5_core_arm_dct(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct)
{
struct mlx5_arm_dct_mbox_out out;
struct mlx5_arm_dct_mbox_in in;
int err;
u32 out[MLX5_ST_SZ_DW(arm_dct_out)] = {0};
u32 in[MLX5_ST_SZ_DW(arm_dct_in)] = {0};
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
MLX5_SET(arm_dct_in, in, opcode, MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION);
MLX5_SET(arm_dct_in, in, dctn, dct->dctn);
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION);
in.dctn = cpu_to_be32(dct->dctn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
return err;
return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
(void *)&out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_arm_dct);

View File

@ -57,54 +57,56 @@ void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
complete(&srq->free);
}
static void rmpc_srqc_reformat(void *srqc, void *rmpc, bool srqc_to_rmpc)
static void set_wq(void *wq, struct mlx5_srq_attr *in)
{
void *wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
MLX5_SET(wq, wq, wq_signature, !!(in->flags & MLX5_SRQ_FLAG_WQ_SIG));
MLX5_SET(wq, wq, log_wq_pg_sz, in->log_page_size);
MLX5_SET(wq, wq, log_wq_stride, in->wqe_shift + 4);
MLX5_SET(wq, wq, log_wq_sz, in->log_size);
MLX5_SET(wq, wq, page_offset, in->page_offset);
MLX5_SET(wq, wq, lwm, in->lwm);
MLX5_SET(wq, wq, pd, in->pd);
MLX5_SET64(wq, wq, dbr_addr, in->db_record);
}
if (srqc_to_rmpc) {
switch (MLX5_GET(srqc, srqc, state)) {
case MLX5_SRQC_STATE_GOOD:
MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
break;
case MLX5_SRQC_STATE_ERROR:
MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_ERR);
break;
default:
printf("mlx5_core: WARN: ""%s: %d: Unknown srq state = 0x%x\n", __func__, __LINE__, MLX5_GET(srqc, srqc, state));
}
static void set_srqc(void *srqc, struct mlx5_srq_attr *in)
{
MLX5_SET(srqc, srqc, wq_signature, !!(in->flags & MLX5_SRQ_FLAG_WQ_SIG));
MLX5_SET(srqc, srqc, log_page_size, in->log_page_size);
MLX5_SET(srqc, srqc, log_rq_stride, in->wqe_shift);
MLX5_SET(srqc, srqc, log_srq_size, in->log_size);
MLX5_SET(srqc, srqc, page_offset, in->page_offset);
MLX5_SET(srqc, srqc, lwm, in->lwm);
MLX5_SET(srqc, srqc, pd, in->pd);
MLX5_SET64(srqc, srqc, dbr_addr, in->db_record);
MLX5_SET(srqc, srqc, xrcd, in->xrcd);
MLX5_SET(srqc, srqc, cqn, in->cqn);
}
MLX5_SET(wq, wq, wq_signature, MLX5_GET(srqc, srqc, wq_signature));
MLX5_SET(wq, wq, log_wq_pg_sz, MLX5_GET(srqc, srqc, log_page_size));
MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(srqc, srqc, log_rq_stride) + 4);
MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(srqc, srqc, log_srq_size));
MLX5_SET(wq, wq, page_offset, MLX5_GET(srqc, srqc, page_offset));
MLX5_SET(wq, wq, lwm, MLX5_GET(srqc, srqc, lwm));
MLX5_SET(wq, wq, pd, MLX5_GET(srqc, srqc, pd));
MLX5_SET64(wq, wq, dbr_addr,
((u64)MLX5_GET(srqc, srqc, db_record_addr_h)) << 32 |
((u64)MLX5_GET(srqc, srqc, db_record_addr_l)) << 2);
} else {
switch (MLX5_GET(rmpc, rmpc, state)) {
case MLX5_RMPC_STATE_RDY:
MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_GOOD);
break;
case MLX5_RMPC_STATE_ERR:
MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_ERROR);
break;
default:
printf("mlx5_core: WARN: ""%s: %d: Unknown rmp state = 0x%x\n", __func__, __LINE__, MLX5_GET(rmpc, rmpc, state));
}
static void get_wq(void *wq, struct mlx5_srq_attr *in)
{
if (MLX5_GET(wq, wq, wq_signature))
in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
in->log_page_size = MLX5_GET(wq, wq, log_wq_pg_sz);
in->wqe_shift = MLX5_GET(wq, wq, log_wq_stride) - 4;
in->log_size = MLX5_GET(wq, wq, log_wq_sz);
in->page_offset = MLX5_GET(wq, wq, page_offset);
in->lwm = MLX5_GET(wq, wq, lwm);
in->pd = MLX5_GET(wq, wq, pd);
in->db_record = MLX5_GET64(wq, wq, dbr_addr);
}
MLX5_SET(srqc, srqc, wq_signature, MLX5_GET(wq, wq, wq_signature));
MLX5_SET(srqc, srqc, log_page_size, MLX5_GET(wq, wq, log_wq_pg_sz));
MLX5_SET(srqc, srqc, log_rq_stride, MLX5_GET(wq, wq, log_wq_stride) - 4);
MLX5_SET(srqc, srqc, log_srq_size, MLX5_GET(wq, wq, log_wq_sz));
MLX5_SET(srqc, srqc, page_offset, MLX5_GET(wq, wq, page_offset));
MLX5_SET(srqc, srqc, lwm, MLX5_GET(wq, wq, lwm));
MLX5_SET(srqc, srqc, pd, MLX5_GET(wq, wq, pd));
MLX5_SET(srqc, srqc, db_record_addr_h, MLX5_GET64(wq, wq, dbr_addr) >> 32);
MLX5_SET(srqc, srqc, db_record_addr_l, (MLX5_GET64(wq, wq, dbr_addr) >> 2) & 0x3fffffff);
}
static void get_srqc(void *srqc, struct mlx5_srq_attr *in)
{
if (MLX5_GET(srqc, srqc, wq_signature))
in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
in->log_page_size = MLX5_GET(srqc, srqc, log_page_size);
in->wqe_shift = MLX5_GET(srqc, srqc, log_rq_stride);
in->log_size = MLX5_GET(srqc, srqc, log_srq_size);
in->page_offset = MLX5_GET(srqc, srqc, page_offset);
in->lwm = MLX5_GET(srqc, srqc, lwm);
in->pd = MLX5_GET(srqc, srqc, pd);
in->db_record = MLX5_GET64(srqc, srqc, dbr_addr);
}
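
After the refactor, SRQ consumers describe the queue once through struct mlx5_srq_attr, and the set_*/get_* translators above map that onto whichever firmware object (SRQ, XRC SRQ or RMP) ends up backing it. A hedged sketch of the consumer side, using the exported wrappers that appear further down in this file; filling the attribute fields and the pas list is the caller's job and is only hinted at:

/* Illustration: create an SRQ from generic attributes, then read them back. */
static int example_srq_roundtrip(struct mlx5_core_dev *dev,
                                 struct mlx5_core_srq *srq,
                                 struct mlx5_srq_attr *attr)
{
    int err;

    /* attr->type, log_size, wqe_shift, pd, cqn, db_record, pas, ... are
     * assumed to be filled in exactly as the helpers above expect. */
    err = mlx5_core_create_srq(dev, srq, attr);
    if (err)
        return err;

    /* The query path refills the same attribute struct and raises
     * MLX5_SRQ_FLAG_ERR in attr->flags if the object left the good state. */
    return mlx5_core_query_srq(dev, srq, attr);
}
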
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
@ -124,12 +126,12 @@ struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
}
EXPORT_SYMBOL(mlx5_core_get_srq);
static int get_pas_size(void *srqc)
static int get_pas_size(struct mlx5_srq_attr *in)
{
u32 log_page_size = MLX5_GET(srqc, srqc, log_page_size) + 12;
u32 log_srq_size = MLX5_GET(srqc, srqc, log_srq_size);
u32 log_rq_stride = MLX5_GET(srqc, srqc, log_rq_stride);
u32 page_offset = MLX5_GET(srqc, srqc, page_offset);
u32 log_page_size = in->log_page_size + 12;
u32 log_srq_size = in->log_size;
u32 log_rq_stride = in->wqe_shift;
u32 page_offset = in->page_offset;
u32 po_quanta = 1 << (log_page_size - 6);
u32 rq_sz = 1 << (log_srq_size + 4 + log_rq_stride);
u32 page_size = 1 << log_page_size;
@ -141,26 +143,27 @@ static int get_pas_size(void *srqc)
}
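
A worked example of the sizing arithmetic above, with arbitrary but plausible inputs. The lines of the helper cut off by the hunk boundary are assumed to round the byte count up to whole pages and return that page count times sizeof(u64), which is how the create_*_cmd() callers below size and fill their PAS lists:

/* 4 KB pages (log_page_size 0 + 12), 256 entries, 64-byte WQE stride. */
static int example_pas_size(void)
{
    struct mlx5_srq_attr attr = {
        .log_page_size = 0,   /* page_size = 1 << 12 = 4096 */
        .log_size      = 8,   /* 256 receive WQEs */
        .wqe_shift     = 2,   /* stride = 1 << (2 + 4) = 64 bytes */
        .page_offset   = 0,
    };

    /* rq_sz = 1 << (8 + 4 + 2) = 16384 bytes = four 4 KB pages,
     * so the expected result is 4 * sizeof(u64) = 32 bytes of PAS. */
    return get_pas_size(&attr);
}
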
static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_create_srq_mbox_in *in, int srq_inlen)
struct mlx5_srq_attr *in)
{
void *create_in;
void *rmpc;
void *srqc;
void *wq;
int pas_size;
int inlen;
int err;
srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
pas_size = get_pas_size(srqc);
pas_size = get_pas_size(in);
inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
create_in = mlx5_vzalloc(inlen);
if (!create_in)
return -ENOMEM;
rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
set_wq(wq, in);
memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
rmpc_srqc_reformat(srqc, rmpc, true);
err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);
@ -175,11 +178,10 @@ static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
}
static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_query_srq_mbox_out *out)
struct mlx5_srq_attr *out)
{
u32 *rmp_out;
void *rmpc;
void *srqc;
int err;
rmp_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out));
@ -190,9 +192,10 @@ static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
if (err)
goto out;
srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
rmpc_srqc_reformat(srqc, rmpc, false);
get_wq(MLX5_ADDR_OF(rmpc, rmpc, wq), out);
if (MLX5_GET(rmpc, rmpc, state) != MLX5_RMPC_STATE_RDY)
out->flags |= MLX5_SRQ_FLAG_ERR;
out:
kvfree(rmp_out);
@ -206,19 +209,16 @@ static int arm_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, u16
static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq,
struct mlx5_create_srq_mbox_in *in,
int srq_inlen)
struct mlx5_srq_attr *in)
{
void *create_in;
void *srqc;
void *xrc_srqc;
void *pas;
int pas_size;
int inlen;
int err;
srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
pas_size = get_pas_size(srqc);
pas_size = get_pas_size(in);
inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
create_in = mlx5_vzalloc(inlen);
if (!create_in)
@ -227,7 +227,8 @@ static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in, xrc_srq_context_entry);
pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);
memcpy(xrc_srqc, srqc, MLX5_ST_SZ_BYTES(srqc));
set_srqc(xrc_srqc, in);
MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index);
memcpy(pas, in->pas, pas_size);
err = mlx5_core_create_xsrq(dev, create_in, inlen, &srq->srqn);
@ -247,9 +248,10 @@ static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq,
struct mlx5_query_srq_mbox_out *out)
struct mlx5_srq_attr *out)
{
u32 *xrcsrq_out;
void *xrc_srqc;
int err;
xrcsrq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out));
@ -260,6 +262,12 @@ static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
if (err)
goto out;
xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
xrc_srq_context_entry);
get_srqc(xrc_srqc, out);
if (MLX5_GET(xrc_srqc, xrc_srqc, state) != MLX5_XRC_SRQC_STATE_GOOD)
out->flags |= MLX5_SRQ_FLAG_ERR;
out:
kvfree(xrcsrq_out);
return err;
@ -272,18 +280,33 @@ static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
}
static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_create_srq_mbox_in *in, int inlen)
struct mlx5_srq_attr *in)
{
struct mlx5_create_srq_mbox_out out;
u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0};
void *create_in;
void *srqc;
void *pas;
int pas_size;
int inlen;
int err;
memset(&out, 0, sizeof(out));
pas_size = get_pas_size(in);
inlen = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size;
create_in = mlx5_vzalloc(inlen);
if (!create_in)
return -ENOMEM;
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ);
srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry);
pas = MLX5_ADDR_OF(create_srq_in, create_in, pas);
err = mlx5_cmd_exec_check_status(dev, (u32 *)in, inlen, (u32 *)(&out), sizeof(out));
set_srqc(srqc, in);
memcpy(pas, in->pas, pas_size);
srq->srqn = be32_to_cpu(out.srqn) & 0xffffff;
MLX5_SET(create_srq_in, create_in, opcode, MLX5_CMD_OP_CREATE_SRQ);
err = mlx5_cmd_exec(dev, create_in, inlen, create_out, sizeof(create_out));
kvfree(create_in);
if (!err)
srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);
return err;
}
@ -291,57 +314,66 @@ static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
static int destroy_srq_cmd(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq)
{
struct mlx5_destroy_srq_mbox_in in;
struct mlx5_destroy_srq_mbox_out out;
u32 srq_out[MLX5_ST_SZ_DW(destroy_srq_out)] = {0};
u32 srq_in[MLX5_ST_SZ_DW(destroy_srq_in)] = {0};
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
in.srqn = cpu_to_be32(srq->srqn);
MLX5_SET(destroy_srq_in, srq_in, opcode, MLX5_CMD_OP_DESTROY_SRQ);
MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn);
return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in), (u32 *)(&out), sizeof(out));
return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in), srq_out, sizeof(srq_out));
}
static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_query_srq_mbox_out *out)
struct mlx5_srq_attr *out)
{
struct mlx5_query_srq_mbox_in in;
u32 srq_in[MLX5_ST_SZ_DW(query_srq_in)] = {0};
u32 *srq_out;
void *srqc;
int outlen = MLX5_ST_SZ_BYTES(query_srq_out);
int err;
memset(&in, 0, sizeof(in));
srq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_srq_out));
if (!srq_out)
return -ENOMEM;
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ);
in.srqn = cpu_to_be32(srq->srqn);
MLX5_SET(query_srq_in, srq_in, opcode, MLX5_CMD_OP_QUERY_SRQ);
MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn);
err = mlx5_cmd_exec(dev, srq_in, sizeof(srq_in), srq_out, outlen);
if (err)
goto out;
return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in), (u32 *)out, sizeof(*out));
srqc = MLX5_ADDR_OF(query_srq_out, srq_out, srq_context_entry);
get_srqc(srqc, out);
if (MLX5_GET(srqc, srqc, state) != MLX5_SRQC_STATE_GOOD)
out->flags |= MLX5_SRQ_FLAG_ERR;
out:
kvfree(srq_out);
return err;
}
static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq)
{
struct mlx5_arm_srq_mbox_in in;
struct mlx5_arm_srq_mbox_out out;
/* arm_srq mailbox layouts are missing, so the identical xrc_srq ones are used */
u32 srq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
u32 srq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
MLX5_SET(arm_xrc_srq_in, srq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
MLX5_SET(arm_xrc_srq_in, srq_in, xrc_srqn, srq->srqn);
MLX5_SET(arm_xrc_srq_in, srq_in, lwm, lwm);
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ);
in.hdr.opmod = cpu_to_be16(!!is_srq);
in.srqn = cpu_to_be32(srq->srqn);
in.lwm = cpu_to_be16(lwm);
return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in), (u32 *)(&out), sizeof(out));
return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in), srq_out, sizeof(srq_out));
}
static int create_srq_split(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_create_srq_mbox_in *in, int inlen,
int is_xrc)
struct mlx5_srq_attr *in)
{
if (!dev->issi)
return create_srq_cmd(dev, srq, in, inlen);
return create_srq_cmd(dev, srq, in);
else if (srq->common.res == MLX5_RES_XSRQ)
return create_xrc_srq_cmd(dev, srq, in, inlen);
return create_xrc_srq_cmd(dev, srq, in);
else
return create_rmp_cmd(dev, srq, in, inlen);
return create_rmp_cmd(dev, srq, in);
}
static int destroy_srq_split(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
@ -355,15 +387,17 @@ static int destroy_srq_split(struct mlx5_core_dev *dev, struct mlx5_core_srq *sr
}
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_create_srq_mbox_in *in, int inlen,
int is_xrc)
struct mlx5_srq_attr *in)
{
int err;
struct mlx5_srq_table *table = &dev->priv.srq_table;
srq->common.res = is_xrc ? MLX5_RES_XSRQ : MLX5_RES_SRQ;
if (in->type == IB_SRQT_XRC)
srq->common.res = MLX5_RES_XSRQ;
else
srq->common.res = MLX5_RES_SRQ;
err = create_srq_split(dev, srq, in, inlen, is_xrc);
err = create_srq_split(dev, srq, in);
if (err)
return err;
@ -418,7 +452,7 @@ int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
EXPORT_SYMBOL(mlx5_core_destroy_srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_query_srq_mbox_out *out)
struct mlx5_srq_attr *out)
{
if (!dev->issi)
return query_srq_cmd(dev, srq, out);

View File

@ -32,17 +32,14 @@
int mlx5_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn)
{
u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)];
u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)];
u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {0};
u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};
int err;
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
MLX5_SET(alloc_transport_domain_in, in, opcode,
MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (!err)
*tdn = MLX5_GET(alloc_transport_domain_out, out,
transport_domain);
@ -52,28 +49,24 @@ int mlx5_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn)
void mlx5_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn)
{
u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)];
u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)];
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {0};
u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)] = {0};
MLX5_SET(dealloc_transport_domain_in, in, opcode,
MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
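
Callers of the transport-domain helpers are unaffected by the conversion; only the command plumbing underneath changed. A minimal usage sketch, assuming an mdev pointer is in scope:

u32 tdn;
int err;

err = mlx5_alloc_transport_domain(mdev, &tdn);
if (err)
	return err;

/* ... create TIRs/TISs that reference tdn ... */

mlx5_dealloc_transport_domain(mdev, tdn);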
int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
{
u32 out[MLX5_ST_SZ_DW(create_rq_out)];
u32 out[MLX5_ST_SZ_DW(create_rq_out)] = {0};
int err;
MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*rqn = MLX5_GET(create_rq_out, out, rqn);
@ -82,48 +75,43 @@ int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 *in, int inlen)
{
u32 out[MLX5_ST_SZ_DW(modify_rq_out)];
u32 out[MLX5_ST_SZ_DW(modify_rq_out)] = {0};
MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ);
memset(out, 0, sizeof(out));
return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn)
{
u32 in[MLX5_ST_SZ_DW(destroy_rq_in)];
u32 out[MLX5_ST_SZ_DW(destroy_rq_out)];
memset(in, 0, sizeof(in));
u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {0};
u32 out[MLX5_ST_SZ_DW(destroy_rq_out)] = {0};
MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
MLX5_SET(destroy_rq_in, in, rqn, rqn);
mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out)
{
u32 in[MLX5_ST_SZ_DW(query_rq_in)];
u32 in[MLX5_ST_SZ_DW(query_rq_in)] = {0};
int outlen = MLX5_ST_SZ_BYTES(query_rq_out);
memset(in, 0, sizeof(in));
MLX5_SET(query_rq_in, in, opcode, MLX5_CMD_OP_QUERY_RQ);
MLX5_SET(query_rq_in, in, rqn, rqn);
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
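
Query helpers now expect the caller to provide a buffer sized for the autogenerated *_out layout and to read fields back with MLX5_ADDR_OF()/MLX5_GET(). A sketch for the RQ state, assuming the rq_context and rqc state field names from the interface file:

static int query_rq_state_sketch(struct mlx5_core_dev *dev, u32 rqn, u8 *state)
{
	void *rqc;
	u32 *out;
	int err;

	out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rq_out));
	if (!out)
		return -ENOMEM;

	err = mlx5_core_query_rq(dev, rqn, out);
	if (!err) {
		rqc = MLX5_ADDR_OF(query_rq_out, out, rq_context);
		*state = MLX5_GET(rqc, rqc, state);
	}
	kvfree(out);
	return err;
}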
int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
{
u32 out[MLX5_ST_SZ_DW(create_sq_out)];
u32 out[MLX5_ST_SZ_DW(create_sq_out)] = {0};
int err;
MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*sqn = MLX5_GET(create_sq_out, out, sqn);
@ -132,49 +120,44 @@ int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 *in, int inlen)
{
u32 out[MLX5_ST_SZ_DW(modify_sq_out)];
u32 out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
memset(out, 0, sizeof(out));
return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn)
{
u32 in[MLX5_ST_SZ_DW(destroy_sq_in)];
u32 out[MLX5_ST_SZ_DW(destroy_sq_out)];
memset(in, 0, sizeof(in));
u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {0};
u32 out[MLX5_ST_SZ_DW(destroy_sq_out)] = {0};
MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
MLX5_SET(destroy_sq_in, in, sqn, sqn);
mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out)
{
u32 in[MLX5_ST_SZ_DW(query_sq_in)];
u32 in[MLX5_ST_SZ_DW(query_sq_in)] = {0};
int outlen = MLX5_ST_SZ_BYTES(query_sq_out);
memset(in, 0, sizeof(in));
MLX5_SET(query_sq_in, in, opcode, MLX5_CMD_OP_QUERY_SQ);
MLX5_SET(query_sq_in, in, sqn, sqn);
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tirn)
{
u32 out[MLX5_ST_SZ_DW(create_tir_out)];
u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {0};
int err;
MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*tirn = MLX5_GET(create_tir_out, out, tirn);
@ -183,27 +166,24 @@ int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
{
u32 in[MLX5_ST_SZ_DW(destroy_tir_in)];
u32 out[MLX5_ST_SZ_DW(destroy_tir_out)];
memset(in, 0, sizeof(in));
u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {0};
u32 out[MLX5_ST_SZ_DW(destroy_tir_out)] = {0};
MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
MLX5_SET(destroy_tir_in, in, tirn, tirn);
mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tisn)
{
u32 out[MLX5_ST_SZ_DW(create_tis_out)];
u32 out[MLX5_ST_SZ_DW(create_tis_out)] = {0};
int err;
MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*tisn = MLX5_GET(create_tis_out, out, tisn);
@ -223,26 +203,23 @@ int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in,
void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
{
u32 in[MLX5_ST_SZ_DW(destroy_tis_in)];
u32 out[MLX5_ST_SZ_DW(destroy_tis_out)];
memset(in, 0, sizeof(in));
u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {0};
u32 out[MLX5_ST_SZ_DW(destroy_tis_out)] = {0};
MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
MLX5_SET(destroy_tis_in, in, tisn, tisn);
mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rmpn)
{
u32 out[MLX5_ST_SZ_DW(create_rmp_out)];
u32 out[MLX5_ST_SZ_DW(create_rmp_out)] = {0};
int err;
MLX5_SET(create_rmp_in, in, opcode, MLX5_CMD_OP_CREATE_RMP);
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*rmpn = MLX5_GET(create_rmp_out, out, rmpn);
@ -251,37 +228,33 @@ int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rmp
int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen)
{
u32 out[MLX5_ST_SZ_DW(modify_rmp_out)];
u32 out[MLX5_ST_SZ_DW(modify_rmp_out)] = {0};
MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);
memset(out, 0, sizeof(out));
return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn)
{
u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)];
u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)];
memset(in, 0, sizeof(in));
u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {0};
u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)] = {0};
MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
MLX5_SET(destroy_rmp_in, in, rmpn, rmpn);
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out)
{
u32 in[MLX5_ST_SZ_DW(query_rmp_in)];
u32 in[MLX5_ST_SZ_DW(query_rmp_in)] = {0};
int outlen = MLX5_ST_SZ_BYTES(query_rmp_out);
memset(in, 0, sizeof(in));
MLX5_SET(query_rmp_in, in, opcode, MLX5_CMD_OP_QUERY_RMP);
MLX5_SET(query_rmp_in, in, rmpn, rmpn);
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm)
@ -315,13 +288,12 @@ int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm)
int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *xsrqn)
{
u32 out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
u32 out[MLX5_ST_SZ_DW(create_xrc_srq_out)] = {0};
int err;
MLX5_SET(create_xrc_srq_in, in, opcode, MLX5_CMD_OP_CREATE_XRC_SRQ);
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*xsrqn = MLX5_GET(create_xrc_srq_out, out, xrc_srqn);
@ -330,33 +302,27 @@ int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *xs
int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn)
{
u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)];
u32 out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)];
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {0};
u32 out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0};
MLX5_SET(destroy_xrc_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ);
MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, xsrqn);
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
sizeof(out));
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out)
{
u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
void *srqc;
int outlen = MLX5_ST_SZ_BYTES(query_xrc_srq_out);
u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)] = {0};
void *xrc_srqc;
void *srqc;
int err;
memset(in, 0, sizeof(in));
MLX5_SET(query_xrc_srq_in, in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ);
MLX5_SET(query_xrc_srq_in, in, xrc_srqn, xsrqn);
err = mlx5_cmd_exec_check_status(dev, in, sizeof(in),
out,
MLX5_ST_SZ_BYTES(query_xrc_srq_out));
err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
if (!err) {
xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, out,
xrc_srq_context_entry);
@ -369,11 +335,8 @@ int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out)
int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm)
{
u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)];
u32 out[MLX5_ST_SZ_DW(arm_xrc_srq_out)];
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
u32 out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, xsrqn);
@ -381,21 +344,19 @@ int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm)
MLX5_SET(arm_xrc_srq_in, in, op_mod,
MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
sizeof(out));
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *rqtn)
{
u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};
int err;
MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*rqtn = MLX5_GET(create_rqt_out, out, rqtn);
@ -405,24 +366,21 @@ int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
int inlen)
{
u32 out[MLX5_ST_SZ_DW(modify_rqt_out)];
u32 out[MLX5_ST_SZ_DW(modify_rqt_out)] = {0};
MLX5_SET(modify_rqt_in, in, rqtn, rqtn);
MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
memset(out, 0, sizeof(out));
return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
{
u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
memset(in, 0, sizeof(in));
u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {0};
u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {0};
MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
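
Variable-length create commands, such as CREATE_RQT here and mlx5e_open_rqt() later in this patch, allocate the input with mlx5_vzalloc() and leave the opcode to the helper. A sketch for an RQT containing a single RQ, assuming the rqt_context/rqtc field names from the interface file; each rq_num entry is one dword:

static int create_single_rqt_sketch(struct mlx5_core_dev *dev, u32 rqn, u32 *rqtn)
{
	int inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * 1;
	void *rqtc;
	u32 *in;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
	MLX5_SET(rqtc, rqtc, rqt_max_size, 1);
	MLX5_SET(rqtc, rqtc, rqt_actual_size, 1);
	MLX5_SET(rqtc, rqtc, rq_num[0], rqn);

	err = mlx5_core_create_rqt(dev, in, inlen, rqtn);
	kvfree(in);
	return err;
}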

View File

@ -33,16 +33,13 @@
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
{
u32 in[MLX5_ST_SZ_DW(alloc_uar_in)];
u32 out[MLX5_ST_SZ_DW(alloc_uar_out)];
u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {0};
u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {0};
int err;
memset(in, 0, sizeof(in));
MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR);
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
@ -54,17 +51,13 @@ EXPORT_SYMBOL(mlx5_cmd_alloc_uar);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
{
u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)];
u32 out[MLX5_ST_SZ_DW(dealloc_uar_out)];
memset(in, 0, sizeof(in));
u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {0};
u32 out[MLX5_ST_SZ_DW(dealloc_uar_out)] = {0};
MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR);
MLX5_SET(dealloc_uar_in, in, uar, uarn);
memset(out, 0, sizeof(out));
return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
out, sizeof(out));
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_cmd_free_uar);

View File

@ -37,9 +37,7 @@ static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
u16 vport, u32 *out, int outlen)
{
int err;
u32 in[MLX5_ST_SZ_DW(query_vport_state_in)];
memset(in, 0, sizeof(in));
u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {0};
MLX5_SET(query_vport_state_in, in, opcode,
MLX5_CMD_OP_QUERY_VPORT_STATE);
@ -48,7 +46,7 @@ static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
if (vport)
MLX5_SET(query_vport_state_in, in, other_vport, 1);
err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
if (err)
mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");
@ -78,12 +76,10 @@ EXPORT_SYMBOL(mlx5_query_vport_admin_state);
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
u16 vport, u8 state)
{
u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)];
u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)];
u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {0};
u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)] = {0};
int err;
memset(in, 0, sizeof(in));
MLX5_SET(modify_vport_state_in, in, opcode,
MLX5_CMD_OP_MODIFY_VPORT_STATE);
MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
@ -94,8 +90,7 @@ int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
MLX5_SET(modify_vport_state_in, in, admin_state, state);
err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
sizeof(out));
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (err)
mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_VPORT_STATE failed\n");
@ -106,9 +101,7 @@ EXPORT_SYMBOL(mlx5_modify_vport_admin_state);
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
u32 *out, int outlen)
{
u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
memset(in, 0, sizeof(in));
u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
MLX5_SET(query_nic_vport_context_in, in, opcode,
MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
@ -117,7 +110,7 @@ static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
if (vport)
MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}
static u32 mlx5_vport_max_q_counter_allocator(struct mlx5_core_dev *mdev,
@ -138,22 +131,18 @@ static u32 mlx5_vport_max_q_counter_allocator(struct mlx5_core_dev *mdev,
int mlx5_vport_alloc_q_counter(struct mlx5_core_dev *mdev,
int client_id, u16 *counter_set_id)
{
u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)];
u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)];
u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0};
u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
int err;
if (mdev->num_q_counter_allocated[client_id] >
mlx5_vport_max_q_counter_allocator(mdev, client_id))
return -EINVAL;
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
MLX5_SET(alloc_q_counter_in, in, opcode,
MLX5_CMD_OP_ALLOC_Q_COUNTER);
err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
out, sizeof(out));
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (!err)
*counter_set_id = MLX5_GET(alloc_q_counter_out, out,
@ -167,23 +156,19 @@ int mlx5_vport_alloc_q_counter(struct mlx5_core_dev *mdev,
int mlx5_vport_dealloc_q_counter(struct mlx5_core_dev *mdev,
int client_id, u16 counter_set_id)
{
u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)];
u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)];
u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {0};
u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0};
int err;
if (mdev->num_q_counter_allocated[client_id] <= 0)
return -EINVAL;
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
MLX5_SET(dealloc_q_counter_in, in, opcode,
MLX5_CMD_OP_DEALLOC_Q_COUNTER);
MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
counter_set_id);
err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
out, sizeof(out));
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
mdev->num_q_counter_allocated[client_id]--;
@ -196,27 +181,22 @@ int mlx5_vport_query_q_counter(struct mlx5_core_dev *mdev,
void *out,
int out_size)
{
u32 in[MLX5_ST_SZ_DW(query_q_counter_in)];
memset(in, 0, sizeof(in));
u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};
MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
MLX5_SET(query_q_counter_in, in, clear, reset);
MLX5_SET(query_q_counter_in, in, counter_set_id, counter_set_id);
return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
out, out_size);
return mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
}
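
The q-counter helpers above combine into a short lifecycle: allocate a counter set, query it (optionally clearing it), and release it. A sketch, assuming the caller already owns a valid client_id:

static int sample_q_counter_sketch(struct mlx5_core_dev *mdev, int client_id)
{
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {0};
	u16 set_id;
	int err;

	err = mlx5_vport_alloc_q_counter(mdev, client_id, &set_id);
	if (err)
		return err;

	/* query with reset=1 so the next read starts from zero */
	err = mlx5_vport_query_q_counter(mdev, set_id, 1, out, sizeof(out));
	if (!err) {
		/* pull counters of interest with
		 * MLX5_GET(query_q_counter_out, out, <field>) */
	}

	mlx5_vport_dealloc_q_counter(mdev, client_id, set_id);
	return err;
}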
int mlx5_vport_query_out_of_rx_buffer(struct mlx5_core_dev *mdev,
u16 counter_set_id,
u32 *out_of_rx_buffer)
{
u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {0};
int err;
memset(out, 0, sizeof(out));
err = mlx5_vport_query_q_counter(mdev, counter_set_id, 0, out,
sizeof(out));
@ -388,13 +368,12 @@ EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
int inlen)
{
u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
MLX5_SET(modify_nic_vport_context_in, in, opcode,
MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
memset(out, 0, sizeof(out));
return mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
return mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
}
static int mlx5_nic_vport_enable_disable_roce(struct mlx5_core_dev *mdev,
@ -674,7 +653,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
u8 addr_list[][ETH_ALEN],
int *list_size)
{
u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
void *nic_vport_ctx;
int max_list_size;
int req_list_size;
@ -698,7 +677,6 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
memset(in, 0, sizeof(in));
out = kzalloc(out_sz, GFP_KERNEL);
if (!out)
return -ENOMEM;
@ -711,7 +689,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
if (vport)
MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
if (err)
goto out;
@ -738,7 +716,7 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
u8 addr_list[][ETH_ALEN],
int list_size)
{
u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
void *nic_vport_ctx;
int max_list_size;
int in_sz;
@ -756,7 +734,6 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
memset(out, 0, sizeof(out));
in = kzalloc(in_sz, GFP_KERNEL);
if (!in)
return -ENOMEM;
@ -781,7 +758,7 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
ether_addr_copy(curr_mac, addr_list[i]);
}
err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
kfree(in);
return err;
}
@ -792,7 +769,7 @@ int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
u16 vlans[],
int *size)
{
u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
void *nic_vport_ctx;
int req_list_size;
int max_list_size;
@ -812,7 +789,6 @@ int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);
memset(in, 0, sizeof(in));
out = kzalloc(out_sz, GFP_KERNEL);
if (!out)
return -ENOMEM;
@ -826,7 +802,7 @@ int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
if (vport)
MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
if (err)
goto out;
@ -852,7 +828,7 @@ int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
u16 vlans[],
int list_size)
{
u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
void *nic_vport_ctx;
int max_list_size;
int in_sz;
@ -868,7 +844,6 @@ int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
list_size * MLX5_ST_SZ_BYTES(vlan_layout);
memset(out, 0, sizeof(out));
in = kzalloc(in_sz, GFP_KERNEL);
if (!in)
return -ENOMEM;
@ -893,7 +868,7 @@ int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
}
err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
kfree(in);
return err;
}
@ -1007,13 +982,11 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *mdev,
u8 port_num, u8 vport_num, u32 *out,
int outlen)
{
u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)];
u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {0};
int is_group_manager;
is_group_manager = MLX5_CAP_GEN(mdev, vport_group_manager);
memset(in, 0, sizeof(in));
MLX5_SET(query_hca_vport_context_in, in, opcode,
MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);
@ -1031,7 +1004,7 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *mdev,
if (MLX5_CAP_GEN(mdev, num_ports) == 2)
MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);
return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}
int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *mdev,
@ -1159,10 +1132,6 @@ int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 port_num,
if (err)
goto out;
err = mlx5_cmd_status_to_err_v2(out);
if (err)
goto out;
tmp = (union ib_gid *)MLX5_ADDR_OF(query_hca_vport_gid_out, out, gid);
gid->global.subnet_prefix = tmp->global.subnet_prefix;
gid->global.interface_id = tmp->global.interface_id;
@ -1226,10 +1195,6 @@ int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
if (err)
goto out;
err = mlx5_cmd_status_to_err_v2(out);
if (err)
goto out;
pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
for (i = 0; i < nout; i++, pkey++,
pkarr += MLX5_ST_SZ_BYTES(pkey))
@ -1268,11 +1233,9 @@ static int mlx5_query_hca_min_wqe_header(struct mlx5_core_dev *mdev,
static int mlx5_modify_eswitch_vport_context(struct mlx5_core_dev *mdev,
u16 vport, void *in, int inlen)
{
u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)];
u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};
int err;
memset(out, 0, sizeof(out));
MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
if (vport)
MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
@ -1280,8 +1243,7 @@ static int mlx5_modify_eswitch_vport_context(struct mlx5_core_dev *mdev,
MLX5_SET(modify_esw_vport_context_in, in, opcode,
MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
err = mlx5_cmd_exec_check_status(mdev, in, inlen,
out, sizeof(out));
err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
if (err)
mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT failed\n");
@ -1582,14 +1544,9 @@ int mlx5_query_vport_counter(struct mlx5_core_dev *dev,
MLX5_SET(query_vport_counter_in, in, port_num, port_num);
err = mlx5_cmd_exec(dev, in, in_sz, out, out_size);
if (err)
goto ex;
err = mlx5_cmd_status_to_err_v2(out);
if (err)
goto ex;
ex:
kvfree(in);
ex:
return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_counter);
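
The dropped mlx5_cmd_status_to_err_v2() calls in the hunks above are a direct consequence of the unified command interface: mlx5_cmd_exec() now converts a bad firmware status/syndrome into a negative errno itself, so a single return-value check is enough. Sketch of the resulting call site, with in/in_sz/out/out_size and the ex label taken from the surrounding function:

err = mlx5_cmd_exec(dev, in, in_sz, out, out_size);
/* err covers both transport failures and firmware status errors */
if (err)
	goto ex;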

View File

@ -2086,7 +2086,7 @@ mlx5e_open_rqt(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
u32 *in;
u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};
void *rqtc;
int inlen;
int err;
@ -2118,8 +2118,7 @@ mlx5e_open_rqt(struct mlx5e_priv *priv)
MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
if (!err)
priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn);
@ -2131,16 +2130,13 @@ mlx5e_open_rqt(struct mlx5e_priv *priv)
static void
mlx5e_close_rqt(struct mlx5e_priv *priv)
{
u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
memset(in, 0, sizeof(in));
u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {0};
u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {0};
MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn);
mlx5_cmd_exec_check_status(priv->mdev, in, sizeof(in), out,
sizeof(out));
mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out));
}
static void
@ -2975,32 +2971,36 @@ mlx5e_build_ifp_priv(struct mlx5_core_dev *mdev,
static int
mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
struct mlx5_core_mr *mr)
struct mlx5_core_mr *mkey)
{
struct ifnet *ifp = priv->ifp;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_create_mkey_mbox_in *in;
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
void *mkc;
u32 *in;
int err;
in = mlx5_vzalloc(sizeof(*in));
in = mlx5_vzalloc(inlen);
if (in == NULL) {
if_printf(ifp, "%s: failed to allocate inbox\n", __func__);
return (-ENOMEM);
}
in->seg.flags = MLX5_PERM_LOCAL_WRITE |
MLX5_PERM_LOCAL_READ |
MLX5_ACCESS_MODE_PA;
in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
err = mlx5_core_create_mkey(mdev, mr, in, sizeof(*in), NULL, NULL,
NULL);
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
MLX5_SET(mkc, mkc, access_mode, MLX5_ACCESS_MODE_PA);
MLX5_SET(mkc, mkc, lw, 1);
MLX5_SET(mkc, mkc, lr, 1);
MLX5_SET(mkc, mkc, pd, pdn);
MLX5_SET(mkc, mkc, length64, 1);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
if (err)
if_printf(ifp, "%s: mlx5_core_create_mkey failed, %d\n",
__func__, err);
kvfree(in);
return (err);
}
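
The conversion above replaces the old create_mkey mailbox fields with the autogenerated mkc layout. The mapping, reconstructed from the hunk for reference (sketch only):

/* in->seg.flags |= MLX5_ACCESS_MODE_PA      ->  MLX5_SET(mkc, mkc, access_mode, MLX5_ACCESS_MODE_PA) */
/* in->seg.flags |= MLX5_PERM_LOCAL_WRITE    ->  MLX5_SET(mkc, mkc, lw, 1) */
/* in->seg.flags |= MLX5_PERM_LOCAL_READ     ->  MLX5_SET(mkc, mkc, lr, 1) */
/* in->seg.flags_pd = pdn | MLX5_MKEY_LEN64  ->  MLX5_SET(mkc, mkc, pd, pdn) + MLX5_SET(mkc, mkc, length64, 1) */
/* in->seg.qpn_mkey7_0 = 0xffffff << 8       ->  MLX5_SET(mkc, mkc, qpn, 0xffffff) */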

View File

@ -953,8 +953,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
if (cq->create_flags & IB_CQ_FLAGS_IGNORE_OVERRUN)
MLX5_SET(cqc, cqc, oi, 1);
err = mlx5_core_create_cq(dev->mdev, &cq->mcq,
(struct mlx5_create_cq_mbox_in *)cqb, inlen);
err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
if (err)
goto err_cqb;
@ -1299,8 +1298,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
MLX5_SET(modify_cq_in, in, op_mod, MLX5_CQ_OPMOD_RESIZE);
MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);
err = mlx5_core_modify_cq(dev->mdev, &cq->mcq,
(struct mlx5_modify_cq_mbox_in *)in, inlen);
err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
if (err)
goto ex_alloc;

View File

@ -191,10 +191,10 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
spin_lock_irq(&ent->lock);
ent->pending++;
spin_unlock_irq(&ent->lock);
err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey,
(struct mlx5_create_mkey_mbox_in *)in,
inlen, reg_mr_callback, mr,
(struct mlx5_create_mkey_mbox_out *)mr->out);
err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
in, inlen,
mr->out, sizeof(mr->out),
reg_mr_callback, mr);
if (err) {
spin_lock_irq(&ent->lock);
ent->pending--;
@ -504,9 +504,7 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
MLX5_SET(mkc, mkc, qpn, 0xffffff);
MLX5_SET64(mkc, mkc, start_addr, 0);
err = mlx5_core_create_mkey(mdev, &mr->mmkey,
(struct mlx5_create_mkey_mbox_in *)in,
inlen, NULL, NULL, NULL);
err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
if (err)
goto err_in;
@ -922,9 +920,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
get_octo_len(virt_addr, length, 1 << page_shift));
err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey,
(struct mlx5_create_mkey_mbox_in *)in,
inlen, NULL, NULL, NULL);
err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
if (err) {
mlx5_ib_warn(dev, "create mkey failed\n");
goto err_2;
@ -1429,9 +1425,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
MLX5_SET(mkc, mkc, access_mode, mr->access_mode);
MLX5_SET(mkc, mkc, umr_en, 1);
err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey,
(struct mlx5_create_mkey_mbox_in *)in,
inlen, NULL, NULL, NULL);
err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
if (err)
goto err_destroy_psv;
@ -1511,9 +1505,7 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
MLX5_SET(mkc, mkc, qpn, 0xffffff);
err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey,
(struct mlx5_create_mkey_mbox_in *)in,
inlen, NULL, NULL, NULL);
err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen);
if (err)
goto free;

View File

@ -1727,7 +1727,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
err = create_raw_packet_qp(dev, qp, in, pd);
} else {
err = mlx5_core_create_qp(dev->mdev, &base->mqp, (struct mlx5_create_qp_mbox_in *)in, inlen);
err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen);
}
if (err) {
@ -1889,19 +1889,10 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
if (qp->state != IB_QPS_RESET) {
if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET) {
struct mlx5_modify_qp_mbox_in *in;
mlx5_ib_qp_disable_pagefaults(qp);
in = kzalloc(sizeof(*in), GFP_KERNEL);
if (in != NULL) {
err = mlx5_core_qp_modify(dev->mdev,
MLX5_CMD_OP_2RST_QP,
in, 0, &base->mqp);
kfree(in);
} else {
err = -ENOMEM;
}
err = mlx5_core_qp_modify(dev->mdev,
MLX5_CMD_OP_2RST_QP, 0,
NULL, &base->mqp);
} else {
struct mlx5_modify_raw_qp_param raw_qp_param = {
.operation = MLX5_CMD_OP_2RST_QP
@ -2580,7 +2571,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
struct mlx5_ib_cq *send_cq, *recv_cq;
struct mlx5_qp_context *context;
struct mlx5_modify_qp_mbox_in *in;
struct mlx5_ib_pd *pd;
struct mlx5_ib_port *mibport = NULL;
enum mlx5_qp_state mlx5_cur, mlx5_new;
@ -2590,11 +2580,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
int err;
u16 op;
in = kzalloc(sizeof(*in), GFP_KERNEL);
if (!in)
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
return -ENOMEM;
context = &in->ctx;
err = to_mlx5_st(ibqp->qp_type);
if (err < 0) {
mlx5_ib_dbg(dev, "unsupported qp type %d\n", ibqp->qp_type);
@ -2758,7 +2747,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
op = optab[mlx5_cur][mlx5_new];
optpar = ib_mask_to_mlx5_opt(attr_mask);
optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
in->optparam = cpu_to_be32(optpar);
if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
struct mlx5_modify_raw_qp_param raw_qp_param = {};
@ -2770,7 +2758,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
}
err = modify_raw_packet_qp(dev, qp, &raw_qp_param, 0);
} else {
err = mlx5_core_qp_modify(dev->mdev, op, in, 0, &base->mqp);
err = mlx5_core_qp_modify(dev->mdev, op, optpar, context,
&base->mqp);
}
if (err)
@ -2812,7 +2801,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
}
out:
kfree(in);
kfree(context);
return err;
}
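
mlx5_core_qp_modify() now takes the opcode, the optional-parameter mask and a bare context instead of a full modify mailbox, as the hunks above show. A condensed sketch of the two call styles, with dev, base, op and optpar assumed from the surrounding function:

/* reset transition: no context needed, mask is zero */
err = mlx5_core_qp_modify(dev->mdev, MLX5_CMD_OP_2RST_QP, 0, NULL, &base->mqp);

/* full transition: pass the context and the mask directly */
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
	return -ENOMEM;
/* ... fill *context from the verbs attributes ... */
err = mlx5_core_qp_modify(dev->mdev, op, optpar, context, &base->mqp);
kfree(context);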
@ -4403,8 +4392,7 @@ static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (!outb)
return -ENOMEM;
err = mlx5_core_qp_query(dev->mdev, &qp->trans_qp.base.mqp,
(struct mlx5_query_qp_mbox_out *)outb,
err = mlx5_core_qp_query(dev->mdev, &qp->trans_qp.base.mqp, outb,
outlen);
if (err)
goto out;

View File

@ -68,9 +68,8 @@ static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, int type)
}
static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
struct mlx5_create_srq_mbox_in **in,
struct ib_udata *udata, int buf_size, int *inlen,
int type)
struct mlx5_srq_attr *in,
struct ib_udata *udata, int buf_size)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_create_srq ucmd = {};
@ -97,7 +96,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
udata->inlen - sizeof(ucmd)))
return -EINVAL;
if (type == IB_SRQT_XRC) {
if (in->type == IB_SRQT_XRC) {
err = get_srq_user_index(to_mucontext(pd->uobject->context),
&ucmd, udata->inlen, &uidx);
if (err)
@ -123,14 +122,13 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
goto err_umem;
}
*inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
*in = mlx5_vzalloc(*inlen);
if (!(*in)) {
in->pas = mlx5_vzalloc(sizeof(*in->pas) * ncont);
if (!in->pas) {
err = -ENOMEM;
goto err_umem;
}
mlx5_ib_populate_pas(dev, srq->umem, page_shift, (*in)->pas, 0);
mlx5_ib_populate_pas(dev, srq->umem, page_shift, in->pas, 0);
err = mlx5_ib_db_map_user(to_mucontext(pd->uobject->context),
ucmd.db_addr, &srq->db);
@ -139,20 +137,15 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
goto err_in;
}
(*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
(*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26);
in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
in->page_offset = offset;
if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
type == IB_SRQT_XRC) {
void *xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
xrc_srq_context_entry);
MLX5_SET(xrc_srqc, xsrqc, user_index, uidx);
}
in->type == IB_SRQT_XRC)
in->user_index = uidx;
return 0;
err_in:
kvfree(*in);
kvfree(in->pas);
err_umem:
ib_umem_release(srq->umem);
@ -161,8 +154,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
}
static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
struct mlx5_create_srq_mbox_in **in, int buf_size,
int *inlen, int type)
struct mlx5_srq_attr *in, int buf_size)
{
int err;
int i;
@ -196,13 +188,12 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
npages = DIV_ROUND_UP(srq->buf.npages, 1 << (page_shift - PAGE_SHIFT));
mlx5_ib_dbg(dev, "buf_size %d, page_shift %d, npages %d, calc npages %d\n",
buf_size, page_shift, srq->buf.npages, npages);
*inlen = sizeof(**in) + sizeof(*(*in)->pas) * npages;
*in = mlx5_vzalloc(*inlen);
if (!*in) {
in->pas = mlx5_vzalloc(sizeof(*in->pas) * npages);
if (!in->pas) {
err = -ENOMEM;
goto err_buf;
}
mlx5_fill_page_array(&srq->buf, (*in)->pas);
mlx5_fill_page_array(&srq->buf, in->pas);
srq->wrid = kmalloc(srq->msrq.max * sizeof(u64), GFP_KERNEL);
if (!srq->wrid) {
@ -213,19 +204,15 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
}
srq->wq_sig = !!srq_signature;
(*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
type == IB_SRQT_XRC) {
void *xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
xrc_srq_context_entry);
MLX5_SET(xrc_srqc, xsrqc, user_index, MLX5_IB_DEFAULT_UIDX);
}
in->type == IB_SRQT_XRC)
in->user_index = MLX5_IB_DEFAULT_UIDX;
return 0;
err_in:
kvfree(*in);
kvfree(in->pas);
err_buf:
mlx5_buf_free(dev->mdev, &srq->buf);
@ -258,10 +245,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
int desc_size;
int buf_size;
int err;
struct mlx5_create_srq_mbox_in *uninitialized_var(in);
int uninitialized_var(inlen);
int is_xrc;
u32 flgs, xrcdn;
struct mlx5_srq_attr in = {0};
__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
/* Sanity check SRQ size before proceeding */
@ -294,9 +278,9 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
srq->msrq.max_avail_gather);
if (pd->uobject)
err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen, init_attr->srq_type);
err = create_srq_user(pd, srq, &in, udata, buf_size);
else
err = create_srq_kernel(dev, srq, &in, buf_size, &inlen, init_attr->srq_type);
err = create_srq_kernel(dev, srq, &in, buf_size);
if (err) {
mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
@ -304,24 +288,23 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
goto err_srq;
}
is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
in->ctx.state_log_sz = ilog2(srq->msrq.max);
flgs = ((srq->msrq.wqe_shift - 4) | (is_xrc << 5) | (srq->wq_sig << 7)) << 24;
xrcdn = 0;
if (is_xrc) {
xrcdn = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn;
in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(init_attr->ext.xrc.cq)->mcq.cqn);
in.type = init_attr->srq_type;
in.log_size = ilog2(srq->msrq.max);
in.wqe_shift = srq->msrq.wqe_shift - 4;
if (srq->wq_sig)
in.flags |= MLX5_SRQ_FLAG_WQ_SIG;
if (init_attr->srq_type == IB_SRQT_XRC) {
in.xrcd = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn;
in.cqn = to_mcq(init_attr->ext.xrc.cq)->mcq.cqn;
} else if (init_attr->srq_type == IB_SRQT_BASIC) {
xrcdn = to_mxrcd(dev->devr.x0)->xrcdn;
in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(dev->devr.c0)->mcq.cqn);
in.xrcd = to_mxrcd(dev->devr.x0)->xrcdn;
in.cqn = to_mcq(dev->devr.c0)->mcq.cqn;
}
in->ctx.flags_xrcd = cpu_to_be32((flgs & 0xFF000000) | (xrcdn & 0xFFFFFF));
in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn);
in->ctx.db_record = cpu_to_be64(srq->db.dma);
err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen, is_xrc);
kvfree(in);
in.pd = to_mpd(pd)->pdn;
in.db_record = srq->db.dma;
err = mlx5_core_create_srq(dev->mdev, &srq->msrq, &in);
kvfree(in.pas);
if (err) {
mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
goto err_usr_kern_srq;
@ -389,7 +372,7 @@ int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
struct mlx5_ib_srq *srq = to_msrq(ibsrq);
int ret;
struct mlx5_query_srq_mbox_out *out;
struct mlx5_srq_attr *out;
out = kzalloc(sizeof(*out), GFP_KERNEL);
if (!out)
@ -399,7 +382,7 @@ int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
if (ret)
goto out_box;
srq_attr->srq_limit = be16_to_cpu(out->ctx.lwm);
srq_attr->srq_limit = out->lwm;
srq_attr->max_wr = srq->msrq.max - 1;
srq_attr->max_sge = srq->msrq.max_gs;

View File

@ -2187,12 +2187,9 @@ struct mlx5_ifc_srqc_bits {
u8 reserved_9[0x40];
u8 db_record_addr_h[0x20];
u8 dbr_addr[0x40];
u8 db_record_addr_l[0x1e];
u8 reserved_10[0x2];
u8 reserved_11[0x80];
u8 reserved_10[0x80];
};
enum {
@ -3996,7 +3993,7 @@ struct mlx5_ifc_query_special_contexts_out_bits {
u8 syndrome[0x20];
u8 reserved_1[0x20];
u8 dump_fill_mkey[0x20];
u8 resd_lkey[0x20];
};
@ -4288,9 +4285,9 @@ struct mlx5_ifc_query_pages_out_bits {
};
enum {
MLX5_BOOT_PAGES = 0x1,
MLX5_INIT_PAGES = 0x2,
MLX5_POST_INIT_PAGES = 0x3,
MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES = 0x1,
MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES = 0x2,
MLX5_QUERY_PAGES_IN_OP_MOD_REGULAR_PAGES = 0x3,
};
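
The page-pool op_mod values are renamed to match the interface-file style. A sketch of a QUERY_PAGES request using the new names; the num_pages output field name is taken from the interface file and dev is assumed in scope:

u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {0};
u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {0};
int npages = 0;
int err;

MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
MLX5_SET(query_pages_in, in, op_mod, MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);

err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (!err)
	npages = MLX5_GET(query_pages_out, out, num_pages);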
struct mlx5_ifc_query_pages_in_bits {

View File

@ -170,13 +170,6 @@ enum {
MLX5_FENCE_MODE_SMALL_AND_FENCE = 4 << 5,
};
enum {
MLX5_QP_DRAIN_SIGERR = 1 << 26,
MLX5_QP_LAT_SENSITIVE = 1 << 28,
MLX5_QP_BLOCK_MCAST = 1 << 30,
MLX5_QP_ENABLE_SIG = 1U << 31,
};
enum {
MLX5_RCV_DBR = 0,
MLX5_SND_DBR = 1,
@ -535,17 +528,6 @@ struct mlx5_qp_context {
u8 rsvd1[24];
};
struct mlx5_create_qp_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 input_qpn;
u8 rsvd0[4];
__be32 opt_param_mask;
u8 rsvd1[4];
struct mlx5_qp_context ctx;
u8 rsvd3[16];
__be64 pas[0];
};
struct mlx5_dct_context {
u8 state;
u8 rsvd0[7];
@ -570,125 +552,6 @@ struct mlx5_dct_context {
u8 rsvd[12];
};
struct mlx5_create_dct_mbox_in {
struct mlx5_inbox_hdr hdr;
u8 rsvd0[8];
struct mlx5_dct_context context;
u8 rsvd[48];
};
struct mlx5_create_dct_mbox_out {
struct mlx5_outbox_hdr hdr;
__be32 dctn;
u8 rsvd0[4];
};
struct mlx5_destroy_dct_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 dctn;
u8 rsvd0[4];
};
struct mlx5_destroy_dct_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd0[8];
};
struct mlx5_drain_dct_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 dctn;
u8 rsvd0[4];
};
struct mlx5_drain_dct_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd0[8];
};
struct mlx5_create_qp_mbox_out {
struct mlx5_outbox_hdr hdr;
__be32 qpn;
u8 rsvd0[4];
};
struct mlx5_destroy_qp_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 qpn;
u8 rsvd0[4];
};
struct mlx5_destroy_qp_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd0[8];
};
struct mlx5_modify_qp_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 qpn;
u8 rsvd1[4];
__be32 optparam;
u8 rsvd0[4];
struct mlx5_qp_context ctx;
u8 rsvd2[16];
};
struct mlx5_modify_qp_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd0[8];
};
struct mlx5_query_qp_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 qpn;
u8 rsvd[4];
};
struct mlx5_query_qp_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd1[8];
__be32 optparam;
u8 rsvd0[4];
struct mlx5_qp_context ctx;
u8 rsvd2[16];
__be64 pas[0];
};
struct mlx5_query_dct_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 dctn;
u8 rsvd[4];
};
struct mlx5_query_dct_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd0[8];
struct mlx5_dct_context ctx;
u8 rsvd1[48];
};
struct mlx5_arm_dct_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 dctn;
u8 rsvd[4];
};
struct mlx5_arm_dct_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd0[8];
};
struct mlx5_conf_sqp_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 qpn;
u8 rsvd[3];
u8 type;
};
struct mlx5_conf_sqp_mbox_out {
struct mlx5_outbox_hdr hdr;
u8 rsvd[8];
};
static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn)
{
return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
@ -701,24 +564,24 @@ static inline struct mlx5_core_mr *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp,
struct mlx5_create_qp_mbox_in *in,
u32 *in,
int inlen);
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation,
struct mlx5_modify_qp_mbox_in *in, int sqd_event,
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
u32 opt_param_mask, void *qpc,
struct mlx5_core_qp *qp);
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp);
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
struct mlx5_query_qp_mbox_out *out, int outlen);
u32 *out, int outlen);
int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
struct mlx5_query_dct_mbox_out *out);
u32 *out, int outlen);
int mlx5_core_arm_dct(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct);
int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
int mlx5_core_create_dct(struct mlx5_core_dev *dev,
struct mlx5_core_dct *dct,
struct mlx5_create_dct_mbox_in *in);
u32 *in);
int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
struct mlx5_core_dct *dct);
int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,

View File

@ -30,6 +30,31 @@
#include <dev/mlx5/driver.h>
enum {
MLX5_SRQ_FLAG_ERR = (1 << 0),
MLX5_SRQ_FLAG_WQ_SIG = (1 << 1),
};
struct mlx5_srq_attr {
u32 type;
u32 flags;
u32 log_size;
u32 wqe_shift;
u32 log_page_size;
u32 wqe_cnt;
u32 srqn;
u32 xrcd;
u32 page_offset;
u32 cqn;
u32 pd;
u32 lwm;
u32 user_index;
u64 db_record;
u64 *pas;
};
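
struct mlx5_srq_attr is the new software-visible description of an SRQ: producers fill it, and mlx5_core_create_srq() translates it into whichever firmware object (SRQ, XRC SRQ or RMP) the device requires. A minimal sketch of filling it, mirroring mlx5_ib_create_srq() earlier in this patch; num_wqes, wqe_shift, page_shift, pdn, cqn, db_dma, pas and core_srq are placeholders:

struct mlx5_srq_attr attr = {0};
int err;

attr.type = IB_SRQT_BASIC;	/* or IB_SRQT_XRC */
attr.log_size = ilog2(num_wqes);
attr.wqe_shift = wqe_shift - 4;
attr.log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
attr.pd = pdn;
attr.cqn = cqn;
attr.db_record = db_dma;
attr.pas = pas;			/* physical address list allocated by the caller */

err = mlx5_core_create_srq(dev, &core_srq, &attr);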
struct mlx5_core_dev;
void mlx5_init_srq_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev);