Update the mlx4 core and mlx4en(4) modules towards Linux v4.9.
Background: The coming ibcore update forces an update of mlx4ib(4), which in turn requires an updated mlx4 core module. This also affects the mlx4en(4) module because commonly used APIs are updated. This commit is a middle step updating the mlx4 modules towards the new ibcore.

This change contains no major new features.

Changes in mlx4:
a) Improved error handling when mlx4 PCI devices are detached inside VMs.
b) Major update of codebase towards Linux 4.9.

Changes in mlx4ib(4):
a) Minimal changes needed in order to compile using the updated mlx4 core APIs.

Changes in mlx4en(4):
a) Update flow steering code in mlx4en to use new APIs for registering MAC addresses and IP addresses.
b) Update all statistics counters to be 64-bit.
c) Minimal changes needed in order to compile using the updated mlx4 core APIs.

Sponsored by:	Mellanox Technologies
MFC after:	1 week
This commit is contained in:
parent ec7f8d58b9
commit c3191c2e2b
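The API churn called out in the commit message is visible in the cmd.h hunks below: mlx4_set_vf_mac() now takes the MAC address as a u64 and mlx4_set_vf_vlan() gains an explicit VLAN protocol argument. The following is a minimal caller-side sketch of the adaptation; the wrapper function, its name, and the choice of 802.1ad are illustrative only and are not part of this commit.

```c
#include <dev/mlx4/driver.h>	/* mlx4_mac_to_u64(), ETH_ALEN (see the driver.h hunk below) */
#include <dev/mlx4/cmd.h>	/* mlx4_set_vf_mac(), mlx4_set_vf_vlan() */

#ifndef ETH_P_8021AD
#define ETH_P_8021AD 0x88A8	/* 802.1ad S-VLAN ethertype, same value as in mlx4.h below */
#endif

/*
 * Hypothetical helper: program a VF MAC and an 802.1ad service VLAN using
 * the updated prototypes.  The old API took "u8 *mac" and had no VLAN
 * protocol argument.
 */
static int
example_configure_vf(struct mlx4_dev *dev, int port, int vf,
    const u8 *addr, u16 vlan, u8 qos)
{
	int err;

	err = mlx4_set_vf_mac(dev, port, vf, mlx4_mac_to_u64(addr));
	if (err != 0)
		return (err);

	return (mlx4_set_vf_vlan(dev, port, vf, vlan, qos,
	    cpu_to_be16(ETH_P_8021AD)));
}
```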
@ -4569,6 +4569,8 @@ dev/mlx4/mlx4_core/mlx4_eq.c optional mlx4 pci \
	compile-with	"${OFED_C}"
dev/mlx4/mlx4_core/mlx4_fw.c	optional mlx4 pci \
	compile-with	"${OFED_C}"
dev/mlx4/mlx4_core/mlx4_fw_qos.c	optional mlx4 pci \
	compile-with	"${OFED_C}"
dev/mlx4/mlx4_core/mlx4_icm.c	optional mlx4 pci \
	compile-with	"${OFED_C}"
dev/mlx4/mlx4_core/mlx4_intf.c	optional mlx4 pci \
@ -4595,8 +4597,6 @@ dev/mlx4/mlx4_core/mlx4_srq.c optional mlx4 pci \
	compile-with	"${OFED_C}"
dev/mlx4/mlx4_core/mlx4_resource_tracker.c	optional mlx4 pci \
	compile-with	"${OFED_C}"
dev/mlx4/mlx4_core/mlx4_sys_tune.c	optional mlx4 pci \
	compile-with	"${OFED_C}"

dev/mlx4/mlx4_en/mlx4_en_cq.c	optional mlx4en pci inet inet6 \
	compile-with	"${OFED_C}"
@ -36,6 +36,8 @@
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
struct mlx4_counter;
|
||||
|
||||
enum {
|
||||
/* initialization and general commands */
|
||||
MLX4_CMD_SYS_EN = 0x1,
|
||||
@ -67,8 +69,13 @@ enum {
|
||||
MLX4_CMD_MAP_ICM_AUX = 0xffc,
|
||||
MLX4_CMD_UNMAP_ICM_AUX = 0xffb,
|
||||
MLX4_CMD_SET_ICM_SIZE = 0xffd,
|
||||
MLX4_CMD_ACCESS_REG = 0x3b,
|
||||
MLX4_CMD_ALLOCATE_VPP = 0x80,
|
||||
MLX4_CMD_SET_VPORT_QOS = 0x81,
|
||||
|
||||
/*master notify fw on finish for slave's flr*/
|
||||
MLX4_CMD_INFORM_FLR_DONE = 0x5b,
|
||||
MLX4_CMD_VIRT_PORT_MAP = 0x5c,
|
||||
MLX4_CMD_GET_OP_REQ = 0x59,
|
||||
|
||||
/* TPT commands */
|
||||
@ -116,6 +123,7 @@ enum {
|
||||
/* special QP and management commands */
|
||||
MLX4_CMD_CONF_SPECIAL_QP = 0x23,
|
||||
MLX4_CMD_MAD_IFC = 0x24,
|
||||
MLX4_CMD_MAD_DEMUX = 0x203,
|
||||
|
||||
/* multicast commands */
|
||||
MLX4_CMD_READ_MCG = 0x25,
|
||||
@ -125,6 +133,7 @@ enum {
|
||||
/* miscellaneous commands */
|
||||
MLX4_CMD_DIAG_RPRT = 0x30,
|
||||
MLX4_CMD_NOP = 0x31,
|
||||
MLX4_CMD_CONFIG_DEV = 0x3a,
|
||||
MLX4_CMD_ACCESS_MEM = 0x2e,
|
||||
MLX4_CMD_SET_VEP = 0x52,
|
||||
|
||||
@ -158,6 +167,9 @@ enum {
|
||||
MLX4_QP_FLOW_STEERING_ATTACH = 0x65,
|
||||
MLX4_QP_FLOW_STEERING_DETACH = 0x66,
|
||||
MLX4_FLOW_STEERING_IB_UC_QP_RANGE = 0x64,
|
||||
|
||||
/* Update and read QCN parameters */
|
||||
MLX4_CMD_CONGESTION_CTRL_OPCODE = 0x68,
|
||||
};
|
||||
|
||||
enum {
|
||||
@ -166,21 +178,42 @@ enum {
|
||||
MLX4_CMD_TIME_CLASS_C = 60000,
|
||||
};
|
||||
|
||||
enum {
|
||||
/* virtual to physical port mapping opcode modifiers */
|
||||
MLX4_GET_PORT_VIRT2PHY = 0x0,
|
||||
MLX4_SET_PORT_VIRT2PHY = 0x1,
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX4_MAILBOX_SIZE = 4096,
|
||||
MLX4_ACCESS_MEM_ALIGN = 256,
|
||||
};
|
||||
|
||||
enum {
|
||||
/* set port opcode modifiers */
|
||||
MLX4_SET_PORT_GENERAL = 0x0,
|
||||
MLX4_SET_PORT_RQP_CALC = 0x1,
|
||||
MLX4_SET_PORT_MAC_TABLE = 0x2,
|
||||
MLX4_SET_PORT_VLAN_TABLE = 0x3,
|
||||
MLX4_SET_PORT_PRIO_MAP = 0x4,
|
||||
MLX4_SET_PORT_GID_TABLE = 0x5,
|
||||
MLX4_SET_PORT_PRIO2TC = 0x8,
|
||||
MLX4_SET_PORT_SCHEDULER = 0x9
|
||||
/* Set port opcode modifiers */
|
||||
MLX4_SET_PORT_IB_OPCODE = 0x0,
|
||||
MLX4_SET_PORT_ETH_OPCODE = 0x1,
|
||||
MLX4_SET_PORT_BEACON_OPCODE = 0x4,
|
||||
};
|
||||
|
||||
enum {
|
||||
/* Set port Ethernet input modifiers */
|
||||
MLX4_SET_PORT_GENERAL = 0x0,
|
||||
MLX4_SET_PORT_RQP_CALC = 0x1,
|
||||
MLX4_SET_PORT_MAC_TABLE = 0x2,
|
||||
MLX4_SET_PORT_VLAN_TABLE = 0x3,
|
||||
MLX4_SET_PORT_PRIO_MAP = 0x4,
|
||||
MLX4_SET_PORT_GID_TABLE = 0x5,
|
||||
MLX4_SET_PORT_PRIO2TC = 0x8,
|
||||
MLX4_SET_PORT_SCHEDULER = 0x9,
|
||||
MLX4_SET_PORT_VXLAN = 0xB,
|
||||
MLX4_SET_PORT_ROCE_ADDR = 0xD
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX4_CMD_MAD_DEMUX_CONFIG = 0,
|
||||
MLX4_CMD_MAD_DEMUX_QUERY_STATE = 1,
|
||||
MLX4_CMD_MAD_DEMUX_QUERY_RESTR = 2, /* Query mad demux restrictions */
|
||||
};
|
||||
|
||||
enum {
|
||||
@ -188,6 +221,43 @@ enum {
|
||||
MLX4_CMD_NATIVE
|
||||
};
|
||||
|
||||
/*
 * MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP -
 * Receive checksum value is reported in CQE also for non TCP/UDP packets.
 *
 * MLX4_RX_CSUM_MODE_L4 -
 * L4_CSUM bit in CQE, which indicates whether or not L4 checksum
 * was validated correctly, is supported.
 *
 * MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP -
 * IP_OK CQE's field is supported also for non TCP/UDP IP packets.
 *
 * MLX4_RX_CSUM_MODE_MULTI_VLAN -
 * Receive Checksum offload is supported for packets with more than 2 vlan headers.
 */
enum mlx4_rx_csum_mode {
	MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP = 1UL << 0,
	MLX4_RX_CSUM_MODE_L4 = 1UL << 1,
	MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP = 1UL << 2,
	MLX4_RX_CSUM_MODE_MULTI_VLAN = 1UL << 3
};

struct mlx4_config_dev_params {
	u16 vxlan_udp_dport;
	u8 rx_csum_flags_port_1;
	u8 rx_csum_flags_port_2;
};
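The mode bits documented above surface through mlx4_config_dev_retrieval(), declared later in this header, which fills in struct mlx4_config_dev_params. A short hedged sketch follows; treating rx_csum_flags_port_1 as a mask of enum mlx4_rx_csum_mode bits is inferred from the comment block rather than stated explicitly in this diff.

```c
/*
 * Sketch: ask the device for its CONFIG_DEV parameters and report whether
 * port 1 advertises the L4 checksum-validation bit in the CQE.
 */
static bool
example_port1_reports_l4_csum(struct mlx4_dev *dev)
{
	struct mlx4_config_dev_params params;

	if (mlx4_config_dev_retrieval(dev, &params) != 0)
		return (false);

	return ((params.rx_csum_flags_port_1 & MLX4_RX_CSUM_MODE_L4) != 0);
}
```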
|
||||
|
||||
enum mlx4_en_congestion_control_algorithm {
|
||||
MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT = 0,
|
||||
};
|
||||
|
||||
enum mlx4_en_congestion_control_opmod {
|
||||
MLX4_CONGESTION_CONTROL_GET_PARAMS,
|
||||
MLX4_CONGESTION_CONTROL_GET_STATISTICS,
|
||||
MLX4_CONGESTION_CONTROL_SET_PARAMS = 4,
|
||||
};
|
||||
|
||||
struct mlx4_dev;
|
||||
|
||||
struct mlx4_cmd_mailbox {
|
||||
@ -233,26 +303,28 @@ static inline int mlx4_cmd_imm(struct mlx4_dev *dev, u64 in_param, u64 *out_para
|
||||
struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev);
|
||||
void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox);
|
||||
|
||||
int mlx4_get_counter_stats(struct mlx4_dev *dev, int counter_index,
|
||||
struct mlx4_counter *counter_stats, int reset);
|
||||
u32 mlx4_comm_get_version(void);
|
||||
int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u8 *mac);
|
||||
int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos);
|
||||
int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac);
|
||||
int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan,
|
||||
u8 qos, __be16 proto);
|
||||
int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate,
|
||||
int max_tx_rate);
|
||||
int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting);
|
||||
int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state);
|
||||
int mlx4_get_vf_link_state(struct mlx4_dev *dev, int port, int vf);
|
||||
int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
|
||||
struct mlx4_config_dev_params *params);
|
||||
void mlx4_cmd_wake_completions(struct mlx4_dev *dev);
|
||||
void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev);
|
||||
/*
|
||||
* mlx4_get_slave_default_vlan -
|
||||
* retrun true if VST ( default vlan)
|
||||
* if VST will fill vlan & qos (if not NULL)
|
||||
* return true if VST ( default vlan)
|
||||
* if VST, will return vlan & qos (if not NULL)
|
||||
*/
|
||||
bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave, u16 *vlan, u8 *qos);
|
||||
|
||||
enum {
|
||||
IFLA_VF_LINK_STATE_AUTO, /* link state of the uplink */
|
||||
IFLA_VF_LINK_STATE_ENABLE, /* link always up */
|
||||
IFLA_VF_LINK_STATE_DISABLE, /* link always down */
|
||||
__IFLA_VF_LINK_STATE_MAX,
|
||||
};
|
||||
bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
|
||||
u16 *vlan, u8 *qos);
|
||||
|
||||
#define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8)
|
||||
#define COMM_CHAN_EVENT_INTERNAL_ERR (1 << 17)
|
||||
|
||||
#endif /* MLX4_CMD_H */
|
||||
|
@ -42,31 +42,22 @@ struct mlx4_cqe {
|
||||
__be32 vlan_my_qpn;
|
||||
__be32 immed_rss_invalid;
|
||||
__be32 g_mlpath_rqpn;
|
||||
__be16 sl_vid;
|
||||
union {
|
||||
struct {
|
||||
union {
|
||||
struct {
|
||||
__be16 sl_vid;
|
||||
__be16 rlid;
|
||||
};
|
||||
__be32 timestamp_16_47;
|
||||
};
|
||||
__be16 rlid;
|
||||
__be16 status;
|
||||
u8 ipv6_ext_mask;
|
||||
u8 badfcs_enc;
|
||||
};
|
||||
struct {
|
||||
__be16 reserved1;
|
||||
u8 smac[6];
|
||||
};
|
||||
u8 smac[ETH_ALEN];
|
||||
};
|
||||
__be32 byte_cnt;
|
||||
__be16 wqe_index;
|
||||
__be16 checksum;
|
||||
u8 reserved2[1];
|
||||
__be16 timestamp_0_15;
|
||||
u8 reserved[3];
|
||||
u8 owner_sr_opcode;
|
||||
} __packed;
|
||||
};
|
||||
|
||||
struct mlx4_err_cqe {
|
||||
__be32 my_qpn;
|
||||
@ -95,7 +86,13 @@ struct mlx4_ts_cqe {
|
||||
} __packed;
|
||||
|
||||
enum {
|
||||
MLX4_CQE_VLAN_PRESENT_MASK = 1 << 29,
|
||||
MLX4_CQE_L2_TUNNEL_IPOK = 1 << 31,
|
||||
MLX4_CQE_CVLAN_PRESENT_MASK = 1 << 29,
|
||||
MLX4_CQE_SVLAN_PRESENT_MASK = 1 << 30,
|
||||
MLX4_CQE_L2_TUNNEL = 1 << 27,
|
||||
MLX4_CQE_L2_TUNNEL_CSUM = 1 << 26,
|
||||
MLX4_CQE_L2_TUNNEL_IPV4 = 1 << 25,
|
||||
|
||||
MLX4_CQE_QPN_MASK = 0xffffff,
|
||||
MLX4_CQE_VID_MASK = 0xfff,
|
||||
};
|
||||
@ -177,5 +174,5 @@ int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
|
||||
u16 count, u16 period);
|
||||
int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
|
||||
int entries, struct mlx4_mtt *mtt);
|
||||
int mlx4_cq_ignore_overrun(struct mlx4_dev *dev, struct mlx4_cq *cq);
|
||||
|
||||
#endif /* MLX4_CQ_H */
|
||||
|
File diff suppressed because it is too large
@ -38,8 +38,6 @@
|
||||
struct mlx4_dev;
|
||||
|
||||
#define MLX4_MAC_MASK 0xffffffffffffULL
|
||||
#define MLX4_BE_SHORT_MASK cpu_to_be16(0xffff)
|
||||
#define MLX4_BE_WORD_MASK cpu_to_be32(0xffffffff)
|
||||
|
||||
enum mlx4_dev_event {
|
||||
MLX4_DEV_EVENT_CATASTROPHIC_ERROR,
|
||||
@ -51,76 +49,46 @@ enum mlx4_dev_event {
|
||||
MLX4_DEV_EVENT_SLAVE_SHUTDOWN,
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX4_INTFF_BONDING = 1 << 0
|
||||
};
|
||||
|
||||
struct mlx4_interface {
|
||||
void * (*add) (struct mlx4_dev *dev);
|
||||
void (*remove)(struct mlx4_dev *dev, void *context);
|
||||
void (*event) (struct mlx4_dev *dev, void *context,
|
||||
enum mlx4_dev_event event, unsigned long param);
|
||||
void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port);
|
||||
void (*activate)(struct mlx4_dev *dev, void *context);
|
||||
struct list_head list;
|
||||
enum mlx4_protocol protocol;
|
||||
int flags;
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX4_MAX_DEVICES = 32,
|
||||
MLX4_DEVS_TBL_SIZE = MLX4_MAX_DEVICES + 1,
|
||||
MLX4_DBDF2VAL_STR_SIZE = 512,
|
||||
MLX4_STR_NAME_SIZE = 64,
|
||||
MLX4_MAX_BDF_VALS = 2,
|
||||
MLX4_ENDOF_TBL = -1LL
|
||||
};
|
||||
|
||||
struct mlx4_dbdf2val {
|
||||
u64 dbdf;
|
||||
int val[MLX4_MAX_BDF_VALS];
|
||||
};
|
||||
|
||||
struct mlx4_range {
|
||||
int min;
|
||||
int max;
|
||||
};
|
||||
|
||||
/*
|
||||
* mlx4_dbdf2val_lst struct holds all the data needed to convert
|
||||
* dbdf-to-value-list string into dbdf-to-value table.
|
||||
* dbdf-to-value-list string is a comma separated list of dbdf-to-value strings.
|
||||
* the format of dbdf-to-value string is: "[mmmm:]bb:dd.f-v1[;v2]"
|
||||
* mmmm - Domain number (optional)
|
||||
* bb - Bus number
|
||||
* dd - device number
|
||||
* f - Function number
|
||||
* v1 - First value related to the domain-bus-device-function.
|
||||
* v2 - Second value related to the domain-bus-device-function (optional).
|
||||
* bb, dd - Two hexadecimal digits without preceding 0x.
|
||||
* mmmm - Four hexadecimal digits without preceding 0x.
|
||||
* f - One hexadecimal without preceding 0x.
|
||||
* v1,v2 - Number with normal convention (e.g 100, 0xd3).
|
||||
* dbdf-to-value-list string format:
|
||||
* "[mmmm:]bb:dd.f-v1[;v2],[mmmm:]bb:dd.f-v1[;v2],..."
|
||||
*
|
||||
*/
|
||||
struct mlx4_dbdf2val_lst {
|
||||
char name[MLX4_STR_NAME_SIZE]; /* String name */
|
||||
char str[MLX4_DBDF2VAL_STR_SIZE]; /* dbdf2val list str */
|
||||
struct mlx4_dbdf2val tbl[MLX4_DEVS_TBL_SIZE];/* dbdf to value table */
|
||||
int num_vals; /* # of vals per dbdf */
|
||||
int def_val[MLX4_MAX_BDF_VALS]; /* Default values */
|
||||
struct mlx4_range range; /* Valid values range */
|
||||
};
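As a concrete illustration of the string format documented above, a hypothetical dbdf-to-value list could look like the following (the PCI addresses and values are invented):

```c
/*
 * Two entries: function 0000:04:00.0 maps to the values 1 and 2, and
 * function 0000:05:00.0 maps to the single value 3.
 */
static const char example_dbdf2val_str[] = "0000:04:00.0-1;2,0000:05:00.0-3";
```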
|
||||
|
||||
int mlx4_fill_dbdf2val_tbl(struct mlx4_dbdf2val_lst *dbdf2val_lst);
|
||||
int mlx4_get_val(struct mlx4_dbdf2val *tbl, struct pci_dev *pdev, int idx,
|
||||
int *val);
|
||||
|
||||
int mlx4_register_interface(struct mlx4_interface *intf);
|
||||
void mlx4_unregister_interface(struct mlx4_interface *intf);
|
||||
|
||||
void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto,
|
||||
int port);
|
||||
int mlx4_bond(struct mlx4_dev *dev);
|
||||
int mlx4_unbond(struct mlx4_dev *dev);
|
||||
static inline int mlx4_is_bonded(struct mlx4_dev *dev)
|
||||
{
|
||||
return !!(dev->flags & MLX4_FLAG_BONDED);
|
||||
}
|
||||
|
||||
static inline int mlx4_is_mf_bonded(struct mlx4_dev *dev)
|
||||
{
|
||||
return (mlx4_is_bonded(dev) && mlx4_is_mfunc(dev));
|
||||
}
|
||||
|
||||
struct mlx4_port_map {
|
||||
u8 port1;
|
||||
u8 port2;
|
||||
};
|
||||
|
||||
int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p);
|
||||
|
||||
void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port);
|
||||
|
||||
#ifndef ETH_ALEN
|
||||
#define ETH_ALEN 6
|
||||
#endif
|
||||
static inline u64 mlx4_mac_to_u64(const u8 *addr)
|
||||
{
|
||||
u64 mac = 0;
|
||||
|
@ -43,6 +43,28 @@ struct mlx4_mod_stat_cfg {
|
||||
u8 log_pg_sz_m;
|
||||
};
|
||||
|
||||
struct mlx4_port_cap {
|
||||
u8 link_state;
|
||||
u8 supported_port_types;
|
||||
u8 suggested_type;
|
||||
u8 default_sense;
|
||||
u8 log_max_macs;
|
||||
u8 log_max_vlans;
|
||||
int ib_mtu;
|
||||
int max_port_width;
|
||||
int max_vl;
|
||||
int max_tc_eth;
|
||||
int max_gids;
|
||||
int max_pkeys;
|
||||
u64 def_mac;
|
||||
u16 eth_mtu;
|
||||
int trans_type;
|
||||
int vendor_oui;
|
||||
u16 wavelength;
|
||||
u64 trans_code;
|
||||
u8 dmfs_optimized_state;
|
||||
};
|
||||
|
||||
struct mlx4_dev_cap {
|
||||
int max_srq_sz;
|
||||
int max_qp_sz;
|
||||
@ -58,26 +80,13 @@ struct mlx4_dev_cap {
|
||||
int max_eqs;
|
||||
int num_sys_eqs;
|
||||
int reserved_mtts;
|
||||
int max_mrw_sz;
|
||||
int reserved_mrws;
|
||||
int max_mtt_seg;
|
||||
int max_requester_per_qp;
|
||||
int max_responder_per_qp;
|
||||
int max_rdma_global;
|
||||
int local_ca_ack_delay;
|
||||
int num_ports;
|
||||
u32 max_msg_sz;
|
||||
int ib_mtu[MLX4_MAX_PORTS + 1];
|
||||
int max_port_width[MLX4_MAX_PORTS + 1];
|
||||
int max_vl[MLX4_MAX_PORTS + 1];
|
||||
int max_gids[MLX4_MAX_PORTS + 1];
|
||||
int max_pkeys[MLX4_MAX_PORTS + 1];
|
||||
u64 def_mac[MLX4_MAX_PORTS + 1];
|
||||
u16 eth_mtu[MLX4_MAX_PORTS + 1];
|
||||
int trans_type[MLX4_MAX_PORTS + 1];
|
||||
int vendor_oui[MLX4_MAX_PORTS + 1];
|
||||
u16 wavelength[MLX4_MAX_PORTS + 1];
|
||||
u64 trans_code[MLX4_MAX_PORTS + 1];
|
||||
u16 stat_rate_support;
|
||||
int fs_log_max_ucast_qp_range_size;
|
||||
int fs_max_num_qp_per_entry;
|
||||
@ -115,15 +124,11 @@ struct mlx4_dev_cap {
|
||||
u64 max_icm_sz;
|
||||
int max_gso_sz;
|
||||
int max_rss_tbl_sz;
|
||||
u8 supported_port_types[MLX4_MAX_PORTS + 1];
|
||||
u8 suggested_type[MLX4_MAX_PORTS + 1];
|
||||
u8 default_sense[MLX4_MAX_PORTS + 1];
|
||||
u8 log_max_macs[MLX4_MAX_PORTS + 1];
|
||||
u8 log_max_vlans[MLX4_MAX_PORTS + 1];
|
||||
u32 max_basic_counters;
|
||||
u32 sync_qp;
|
||||
u8 timestamp_support;
|
||||
u32 max_extended_counters;
|
||||
u32 max_counters;
|
||||
u32 dmfs_high_rate_qpn_base;
|
||||
u32 dmfs_high_rate_qpn_range;
|
||||
struct mlx4_rate_limit_caps rl_caps;
|
||||
struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1];
|
||||
};
|
||||
|
||||
struct mlx4_func_cap {
|
||||
@ -138,14 +143,17 @@ struct mlx4_func_cap {
|
||||
int max_eq;
|
||||
int reserved_eq;
|
||||
int mcg_quota;
|
||||
u32 qp0_qkey;
|
||||
u32 qp0_tunnel_qpn;
|
||||
u32 qp0_proxy_qpn;
|
||||
u32 qp1_tunnel_qpn;
|
||||
u32 qp1_proxy_qpn;
|
||||
u32 reserved_lkey;
|
||||
u8 physical_port;
|
||||
u8 port_flags;
|
||||
u8 def_counter_index;
|
||||
u8 extra_flags;
|
||||
u8 flags0;
|
||||
u8 flags1;
|
||||
u64 phys_port_id;
|
||||
u32 extra_flags;
|
||||
};
|
||||
|
||||
struct mlx4_func {
|
||||
@ -159,9 +167,7 @@ struct mlx4_func {
|
||||
};
|
||||
|
||||
struct mlx4_adapter {
|
||||
u16 vsd_vendor_id;
|
||||
char board_id[MLX4_BOARD_ID_LEN];
|
||||
char vsd[MLX4_VSD_LEN];
|
||||
u8 inta_pin;
|
||||
};
|
||||
|
||||
@ -180,7 +186,7 @@ struct mlx4_init_hca_param {
|
||||
u64 global_caps;
|
||||
u16 log_mc_entry_sz;
|
||||
u16 log_mc_hash_sz;
|
||||
u16 hca_core_clock;
|
||||
u16 hca_core_clock; /* Internal Clock Frequency (in MHz) */
|
||||
u8 log_num_qps;
|
||||
u8 log_num_srqs;
|
||||
u8 log_num_cqs;
|
||||
@ -190,11 +196,15 @@ struct mlx4_init_hca_param {
|
||||
u8 log_mc_table_sz;
|
||||
u8 log_mpt_sz;
|
||||
u8 log_uar_sz;
|
||||
u8 mw_enabled; /* Enable memory windows */
|
||||
u8 uar_page_sz; /* log pg sz in 4k chunks */
|
||||
u8 mw_enable; /* Enable memory windows */
|
||||
u8 fs_hash_enable_bits;
|
||||
u8 steering_mode; /* for QUERY_HCA */
|
||||
u8 dmfs_high_steer_mode; /* for QUERY_HCA */
|
||||
u64 dev_cap_enabled;
|
||||
u16 cqe_size; /* For use only when CQE stride feature enabled */
|
||||
u16 eqe_size; /* For use only when EQE stride feature enabled */
|
||||
u8 rss_ip_frags;
|
||||
u8 phv_check_en; /* for QUERY_HCA */
|
||||
};
|
||||
|
||||
struct mlx4_init_ib_param {
|
||||
@ -218,14 +228,17 @@ struct mlx4_set_ib_param {
|
||||
u32 cap_mask;
|
||||
};
|
||||
|
||||
void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
|
||||
int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
|
||||
int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
|
||||
int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap);
|
||||
int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
|
||||
struct mlx4_func_cap *func_cap);
|
||||
int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
|
||||
struct mlx4_vhcr *vhcr,
|
||||
struct mlx4_cmd_mailbox *inbox,
|
||||
struct mlx4_cmd_mailbox *outbox,
|
||||
struct mlx4_cmd_info *cmd);
|
||||
int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave);
|
||||
int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm);
|
||||
int mlx4_UNMAP_FA(struct mlx4_dev *dev);
|
||||
int mlx4_RUN_FW(struct mlx4_dev *dev);
|
||||
@ -236,9 +249,10 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param);
|
||||
int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic);
|
||||
int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt);
|
||||
int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages);
|
||||
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
|
||||
int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
|
||||
int mlx4_NOP(struct mlx4_dev *dev);
|
||||
int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg);
|
||||
int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave);
|
||||
void mlx4_opreq_action(struct work_struct *work);
|
||||
|
||||
#endif /* MLX4_FW_H */
|
||||
|
145 sys/dev/mlx4/mlx4_core/fw_qos.h (new file)
@ -0,0 +1,145 @@
/*
|
||||
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
|
||||
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef MLX4_FW_QOS_H
|
||||
#define MLX4_FW_QOS_H
|
||||
|
||||
#include <dev/mlx4/cmd.h>
|
||||
#include <dev/mlx4/device.h>
|
||||
|
||||
#define MLX4_NUM_UP 8
|
||||
#define MLX4_NUM_TC 8
|
||||
|
||||
/* Default supported priorities for VPP allocation */
|
||||
#define MLX4_DEFAULT_QOS_PRIO (0)
|
||||
|
||||
/* Derived from FW feature definition, 0 is the default vport fo all QPs */
|
||||
#define MLX4_VPP_DEFAULT_VPORT (0)
|
||||
|
||||
struct mlx4_vport_qos_param {
|
||||
u32 bw_share;
|
||||
u32 max_avg_bw;
|
||||
u8 enable;
|
||||
};
|
||||
|
||||
/**
|
||||
* mlx4_SET_PORT_PRIO2TC - This routine maps user priorities to traffic
|
||||
* classes of a given port and device.
|
||||
*
|
||||
* @dev: mlx4_dev.
|
||||
* @port: Physical port number.
|
||||
* @prio2tc: Array of TC associated with each priorities.
|
||||
*
|
||||
* Returns 0 on success or a negative mlx4_core errno code.
|
||||
**/
|
||||
int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc);
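For example, a prio2tc array for the eight user priorities might pair them off onto four traffic classes; the mapping below is purely illustrative:

```c
/* Hypothetical user-priority to traffic-class mapping, two priorities per TC. */
static const u8 example_prio2tc[MLX4_NUM_UP] = { 0, 0, 1, 1, 2, 2, 3, 3 };
```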
|
||||
|
||||
/**
|
||||
* mlx4_SET_PORT_SCHEDULER - This routine configures the arbitration between
|
||||
* traffic classes (ETS) and configured rate limit for traffic classes.
|
||||
* tc_tx_bw, pg and ratelimit are arrays where each index represents a TC.
|
||||
* The description for those parameters below refers to a single TC.
|
||||
*
|
||||
* @dev: mlx4_dev.
|
||||
* @port: Physical port number.
|
||||
* @tc_tx_bw: The percentage of the bandwidth allocated for traffic class
|
||||
* within a TC group. The sum of the bw_percentage of all the traffic
|
||||
* classes within a TC group must equal 100% for correct operation.
|
||||
* @pg: The TC group the traffic class is associated with.
|
||||
* @ratelimit: The maximal bandwidth allowed for the use by this traffic class.
|
||||
*
|
||||
* Returns 0 on success or a negative mlx4_core errno code.
|
||||
**/
|
||||
int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
|
||||
u8 *pg, u16 *ratelimit);
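A hedged caller-side sketch of the ETS interface above: traffic classes 0-3 are placed in priority group 0 and classes 4-7 in group 1, so the bandwidth shares inside each group add up to 100%. The group split and the rate-limit values are invented, and the unit of ratelimit[] is not specified by this header.

```c
static int
example_set_port_ets(struct mlx4_dev *dev, u8 port)
{
	u8 tc_tx_bw[MLX4_NUM_TC] = { 10, 20, 30, 40, 25, 25, 25, 25 };
	u8 pg[MLX4_NUM_TC] = { 0, 0, 0, 0, 1, 1, 1, 1 };
	u16 ratelimit[MLX4_NUM_TC] = { 0 };	/* assumption: 0 meaning "no explicit limit" */

	return (mlx4_SET_PORT_SCHEDULER(dev, port, tc_tx_bw, pg, ratelimit));
}
```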
|
||||
/**
|
||||
* mlx4_ALLOCATE_VPP_get - Query port VPP availible resources and allocation.
|
||||
* Before distribution of VPPs to priorities, only availible_vpp is returned.
|
||||
* After initialization it returns the distribution of VPPs among priorities.
|
||||
*
|
||||
* @dev: mlx4_dev.
|
||||
* @port: Physical port number.
|
||||
* @availible_vpp: Pointer to variable where number of availible VPPs is stored
|
||||
* @vpp_p_up: Distribution of VPPs to priorities is stored in this array
|
||||
*
|
||||
* Returns 0 on success or a negative mlx4_core errno code.
|
||||
**/
|
||||
int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port,
|
||||
u16 *availible_vpp, u8 *vpp_p_up);
|
||||
/**
|
||||
* mlx4_ALLOCATE_VPP_set - Distribution of VPPs among differnt priorities.
|
||||
* The total number of VPPs assigned to all for a port must not exceed
|
||||
* the value reported by availible_vpp in mlx4_ALLOCATE_VPP_get.
|
||||
* VPP allocation is allowed only after the port type has been set,
|
||||
* and while no QPs are open for this port.
|
||||
*
|
||||
* @dev: mlx4_dev.
|
||||
* @port: Physical port number.
|
||||
* @vpp_p_up: Allocation of VPPs to different priorities.
|
||||
*
|
||||
* Returns 0 on success or a negative mlx4_core errno code.
|
||||
**/
|
||||
int mlx4_ALLOCATE_VPP_set(struct mlx4_dev *dev, u8 port, u8 *vpp_p_up);
|
||||
|
||||
/**
|
||||
* mlx4_SET_VPORT_QOS_get - Query QoS proporties of a Vport.
|
||||
* Each priority allowed for the Vport is assigned with a share of the BW,
|
||||
* and a BW limitation. This commands query the current QoS values.
|
||||
*
|
||||
* @dev: mlx4_dev.
|
||||
* @port: Physical port number.
|
||||
* @vport: Vport id.
|
||||
* @out_param: Array of mlx4_vport_qos_param that will contain the values.
|
||||
*
|
||||
* Returns 0 on success or a negative mlx4_core errno code.
|
||||
**/
|
||||
int mlx4_SET_VPORT_QOS_get(struct mlx4_dev *dev, u8 port, u8 vport,
|
||||
struct mlx4_vport_qos_param *out_param);
|
||||
|
||||
/**
|
||||
* mlx4_SET_VPORT_QOS_set - Set QoS proporties of a Vport.
|
||||
* QoS parameters can be modified at any time, but must be initialized
|
||||
* before any QP is associated with the VPort.
|
||||
*
|
||||
* @dev: mlx4_dev.
|
||||
* @port: Physical port number.
|
||||
* @vport: Vport id.
|
||||
* @out_param: Array of mlx4_vport_qos_param which holds the requested values.
|
||||
*
|
||||
* Returns 0 on success or a negative mlx4_core errno code.
|
||||
**/
|
||||
int mlx4_SET_VPORT_QOS_set(struct mlx4_dev *dev, u8 port, u8 vport,
|
||||
struct mlx4_vport_qos_param *in_param);
|
||||
|
||||
#endif /* MLX4_FW_QOS_H */
|
@ -72,14 +72,15 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
|
||||
gfp_t gfp_mask, int coherent);
|
||||
void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent);
|
||||
|
||||
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj);
|
||||
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj,
|
||||
gfp_t gfp);
|
||||
void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj);
|
||||
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
|
||||
u32 start, u32 end);
|
||||
void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
|
||||
u32 start, u32 end);
|
||||
int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
|
||||
u64 virt, int obj_size, u64 nobj, int reserved,
|
||||
u64 virt, int obj_size, u32 nobj, int reserved,
|
||||
int use_lowmem, int use_coherent);
|
||||
void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table);
|
||||
void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj, dma_addr_t *dma_handle);
|
||||
|
@ -44,18 +44,17 @@
|
||||
#include <linux/semaphore.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/rwsem.h>
|
||||
#include <dev/mlx4/device.h>
|
||||
#include <dev/mlx4/driver.h>
|
||||
#include <dev/mlx4/doorbell.h>
|
||||
#include <dev/mlx4/cmd.h>
|
||||
#include <dev/mlx4/mlx4_core/fw_qos.h>
|
||||
|
||||
#define DRV_NAME "mlx4_core"
|
||||
#define PFX DRV_NAME ": "
|
||||
#define DRV_VERSION "2.1.6"
|
||||
|
||||
#define DRV_STACK_NAME "Linux-MLNX_OFED"
|
||||
#define DRV_STACK_VERSION "2.1"
|
||||
#define DRV_NAME_FOR_FW DRV_STACK_NAME","DRV_STACK_VERSION
|
||||
#define DRV_VERSION "3.4.1"
|
||||
#define DRV_RELDATE "October 2017"
|
||||
|
||||
#define MLX4_FS_UDP_UC_EN (1 << 1)
|
||||
#define MLX4_FS_TCP_UC_EN (1 << 2)
|
||||
@ -63,20 +62,11 @@
|
||||
#define MLX4_FS_MGM_LOG_ENTRY_SIZE 7
|
||||
#define MLX4_FS_NUM_MCG (1 << 17)
|
||||
|
||||
struct mlx4_set_port_prio2tc_context {
|
||||
u8 prio2tc[4];
|
||||
};
|
||||
#define INIT_HCA_TPT_MW_ENABLE (1 << 7)
|
||||
|
||||
struct mlx4_port_scheduler_tc_cfg_be {
|
||||
__be16 pg;
|
||||
__be16 bw_precentage;
|
||||
__be16 max_bw_units; /* 3-100Mbps, 4-1Gbps, other values - reserved */
|
||||
__be16 max_bw_value;
|
||||
};
|
||||
#define MLX4_QUERY_IF_STAT_RESET BIT(31)
|
||||
|
||||
struct mlx4_set_port_scheduler_context {
|
||||
struct mlx4_port_scheduler_tc_cfg_be tc[MLX4_NUM_TC];
|
||||
};
|
||||
#define ETH_P_8021AD 0x88A8
|
||||
|
||||
enum {
|
||||
MLX4_HCR_BASE = 0x80680,
|
||||
@ -84,14 +74,17 @@ enum {
|
||||
MLX4_CLR_INT_SIZE = 0x00008,
|
||||
MLX4_SLAVE_COMM_BASE = 0x0,
|
||||
MLX4_COMM_PAGESIZE = 0x1000,
|
||||
MLX4_CLOCK_SIZE = 0x00008
|
||||
MLX4_CLOCK_SIZE = 0x00008,
|
||||
MLX4_COMM_CHAN_CAPS = 0x8,
|
||||
MLX4_COMM_CHAN_FLAGS = 0xc
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE = 10,
|
||||
MLX4_MIN_MGM_LOG_ENTRY_SIZE = 7,
|
||||
MLX4_MAX_MGM_LOG_ENTRY_SIZE = 12,
|
||||
MLX4_MAX_QP_PER_MGM = 4 * ((1 << MLX4_MAX_MGM_LOG_ENTRY_SIZE)/16 - 2),
|
||||
MLX4_MAX_QP_PER_MGM = 4 * ((1 << MLX4_MAX_MGM_LOG_ENTRY_SIZE) / 16 - 2),
|
||||
MLX4_MTT_ENTRY_PER_SEG = 8,
|
||||
};
|
||||
|
||||
enum {
|
||||
@ -118,6 +111,10 @@ enum mlx4_mpt_state {
|
||||
};
|
||||
|
||||
#define MLX4_COMM_TIME 10000
|
||||
#define MLX4_COMM_OFFLINE_TIME_OUT 30000
|
||||
#define MLX4_COMM_CMD_NA_OP 0x0
|
||||
|
||||
|
||||
enum {
|
||||
MLX4_COMM_CMD_RESET,
|
||||
MLX4_COMM_CMD_VHCR0,
|
||||
@ -128,6 +125,11 @@ enum {
|
||||
MLX4_COMM_CMD_FLR = 254
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX4_VF_SMI_DISABLED,
|
||||
MLX4_VF_SMI_ENABLED
|
||||
};
|
||||
|
||||
/*The flag indicates that the slave should delay the RESET cmd*/
|
||||
#define MLX4_DELAY_RESET_SLAVE 0xbbbbbbb
|
||||
/*indicates how many retries will be done if we are in the middle of FLR*/
|
||||
@ -191,7 +193,7 @@ struct mlx4_vhcr_cmd {
|
||||
u8 status;
|
||||
u8 flags;
|
||||
__be16 opcode;
|
||||
} __packed;
|
||||
};
|
||||
|
||||
struct mlx4_cmd_info {
|
||||
u16 opcode;
|
||||
@ -199,7 +201,6 @@ struct mlx4_cmd_info {
|
||||
bool has_outbox;
|
||||
bool out_is_imm;
|
||||
bool encode_slave_id;
|
||||
bool skip_err_print;
|
||||
int (*verify)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
|
||||
struct mlx4_cmd_mailbox *inbox);
|
||||
int (*wrapper)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
|
||||
@ -208,35 +209,33 @@ struct mlx4_cmd_info {
|
||||
struct mlx4_cmd_info *cmd);
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX4_DEBUG_MASK_CMD_TIME = 0x100,
|
||||
};
|
||||
|
||||
#ifdef CONFIG_MLX4_DEBUG
|
||||
extern int mlx4_debug_level;
|
||||
#else /* CONFIG_MLX4_DEBUG */
|
||||
#define mlx4_debug_level (0)
|
||||
#endif /* CONFIG_MLX4_DEBUG */
|
||||
|
||||
#define mlx4_dbg(mdev, format, arg...) \
|
||||
#define mlx4_dbg(mdev, format, ...) \
|
||||
do { \
|
||||
if (mlx4_debug_level) \
|
||||
dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ##arg); \
|
||||
dev_printk(KERN_DEBUG, \
|
||||
&(mdev)->persist->pdev->dev, format, \
|
||||
##__VA_ARGS__); \
|
||||
} while (0)
|
||||
|
||||
#define mlx4_err(mdev, format, arg...) \
|
||||
dev_err(&mdev->pdev->dev, format, ##arg)
|
||||
#define mlx4_info(mdev, format, arg...) \
|
||||
dev_info(&mdev->pdev->dev, format, ##arg)
|
||||
#define mlx4_warn(mdev, format, arg...) \
|
||||
dev_warn(&mdev->pdev->dev, format, ##arg)
|
||||
#define mlx4_err(mdev, format, ...) \
|
||||
dev_err(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__)
|
||||
#define mlx4_info(mdev, format, ...) \
|
||||
dev_info(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__)
|
||||
#define mlx4_warn(mdev, format, ...) \
|
||||
dev_warn(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__)
|
||||
|
||||
extern int mlx4_log_num_mgm_entry_size;
|
||||
extern int log_mtts_per_seg;
|
||||
extern int mlx4_blck_lb;
|
||||
extern int mlx4_set_4k_mtu;
|
||||
extern int mlx4_internal_err_reset;
|
||||
|
||||
#define MLX4_MAX_NUM_SLAVES (MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF)
|
||||
#define MLX4_MAX_NUM_SLAVES (min(MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF, \
|
||||
MLX4_MFUNC_MAX))
|
||||
#define ALL_SLAVES 0xff
|
||||
|
||||
struct mlx4_bitmap {
|
||||
@ -246,6 +245,7 @@ struct mlx4_bitmap {
|
||||
u32 reserved_top;
|
||||
u32 mask;
|
||||
u32 avail;
|
||||
u32 effective_len;
|
||||
spinlock_t lock;
|
||||
unsigned long *table;
|
||||
};
|
||||
@ -277,6 +277,8 @@ struct mlx4_icm_table {
|
||||
#define MLX4_MPT_FLAG_PHYSICAL (1 << 9)
|
||||
#define MLX4_MPT_FLAG_REGION (1 << 8)
|
||||
|
||||
#define MLX4_MPT_PD_MASK (0x1FFFFUL)
|
||||
#define MLX4_MPT_PD_VF_MASK (0xFE0000UL)
|
||||
#define MLX4_MPT_PD_FLAG_FAST_REG (1 << 27)
|
||||
#define MLX4_MPT_PD_FLAG_RAE (1 << 28)
|
||||
#define MLX4_MPT_PD_FLAG_EN_INV (3 << 24)
|
||||
@ -286,6 +288,15 @@ struct mlx4_icm_table {
|
||||
#define MLX4_MPT_STATUS_SW 0xF0
|
||||
#define MLX4_MPT_STATUS_HW 0x00
|
||||
|
||||
#define MLX4_CQE_SIZE_MASK_STRIDE 0x3
|
||||
#define MLX4_EQE_SIZE_MASK_STRIDE 0x30
|
||||
|
||||
#define MLX4_EQ_ASYNC 0
|
||||
#define MLX4_EQ_TO_CQ_VECTOR(vector) ((vector) - \
|
||||
!!((int)(vector) >= MLX4_EQ_ASYNC))
|
||||
#define MLX4_CQ_TO_EQ_VECTOR(vector) ((vector) + \
|
||||
!!((int)(vector) >= MLX4_EQ_ASYNC))
|
||||
|
||||
/*
|
||||
* Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
|
||||
*/
|
||||
@ -381,6 +392,10 @@ struct mlx4_eq {
|
||||
int nent;
|
||||
struct mlx4_buf_list *page_list;
|
||||
struct mlx4_mtt mtt;
|
||||
u32 ncqs;
|
||||
struct mlx4_active_ports actv_ports;
|
||||
u32 ref_count;
|
||||
int affinity_cpu_id;
|
||||
};
|
||||
|
||||
struct mlx4_slave_eqe {
|
||||
@ -401,7 +416,7 @@ struct mlx4_profile {
|
||||
int num_cq;
|
||||
int num_mcg;
|
||||
int num_mpt;
|
||||
unsigned num_mtt_segs;
|
||||
unsigned num_mtt;
|
||||
};
|
||||
|
||||
struct mlx4_fw {
|
||||
@ -460,6 +475,7 @@ struct mlx4_slave_state {
|
||||
u8 init_port_mask;
|
||||
bool active;
|
||||
bool old_vlan_api;
|
||||
bool vst_qinq_supported;
|
||||
u8 function;
|
||||
dma_addr_t vhcr_dma;
|
||||
u16 mtu[MLX4_MAX_PORTS + 1];
|
||||
@ -481,18 +497,20 @@ struct mlx4_slave_state {
|
||||
#define MLX4_VGT 4095
|
||||
#define NO_INDX (-1)
|
||||
|
||||
|
||||
struct mlx4_vport_state {
|
||||
u64 mac;
|
||||
u16 default_vlan;
|
||||
u8 default_qos;
|
||||
__be16 vlan_proto;
|
||||
u32 tx_rate;
|
||||
bool spoofchk;
|
||||
u32 link_state;
|
||||
u8 qos_vport;
|
||||
__be64 guid;
|
||||
};
|
||||
|
||||
struct mlx4_vf_admin_state {
|
||||
struct mlx4_vport_state vport[MLX4_MAX_PORTS + 1];
|
||||
u8 enable_smi[MLX4_MAX_PORTS + 1];
|
||||
};
|
||||
|
||||
struct mlx4_vport_oper_state {
|
||||
@ -500,8 +518,10 @@ struct mlx4_vport_oper_state {
|
||||
int mac_idx;
|
||||
int vlan_idx;
|
||||
};
|
||||
|
||||
struct mlx4_vf_oper_state {
|
||||
struct mlx4_vport_oper_state vport[MLX4_MAX_PORTS + 1];
|
||||
u8 smi_enabled[MLX4_MAX_PORTS + 1];
|
||||
};
|
||||
|
||||
struct slave_list {
|
||||
@ -510,7 +530,7 @@ struct slave_list {
|
||||
};
|
||||
|
||||
struct resource_allocator {
|
||||
spinlock_t alloc_lock;
|
||||
spinlock_t alloc_lock; /* protect quotas */
|
||||
union {
|
||||
int res_reserved;
|
||||
int res_port_rsvd[MLX4_MAX_PORTS];
|
||||
@ -542,6 +562,11 @@ struct mlx4_slave_event_eq {
|
||||
struct mlx4_eqe event_eqe[SLAVE_EVENT_EQ_SIZE];
|
||||
};
|
||||
|
||||
struct mlx4_qos_manager {
|
||||
int num_of_qos_vfs;
|
||||
DECLARE_BITMAP(priority_bm, MLX4_NUM_UP);
|
||||
};
|
||||
|
||||
struct mlx4_master_qp0_state {
|
||||
int proxy_qp0_active;
|
||||
int qp0_active;
|
||||
@ -555,11 +580,12 @@ struct mlx4_mfunc_master_ctx {
|
||||
struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
|
||||
int init_port_ref[MLX4_MAX_PORTS + 1];
|
||||
u16 max_mtu[MLX4_MAX_PORTS + 1];
|
||||
u8 pptx;
|
||||
u8 pprx;
|
||||
int disable_mcast_ref[MLX4_MAX_PORTS + 1];
|
||||
struct mlx4_resource_tracker res_tracker;
|
||||
struct workqueue_struct *comm_wq;
|
||||
struct work_struct comm_work;
|
||||
struct work_struct arm_comm_work;
|
||||
struct work_struct slave_event_work;
|
||||
struct work_struct slave_flr_event_work;
|
||||
spinlock_t slave_state_lock;
|
||||
@ -567,6 +593,7 @@ struct mlx4_mfunc_master_ctx {
|
||||
struct mlx4_eqe cmd_eqe;
|
||||
struct mlx4_slave_event_eq slave_eq;
|
||||
struct mutex gen_eqe_mutex[MLX4_MFUNC_MAX];
|
||||
struct mlx4_qos_manager qos_ctl[MLX4_MAX_PORTS + 1];
|
||||
};
|
||||
|
||||
struct mlx4_mfunc {
|
||||
@ -591,10 +618,10 @@ struct mlx4_mgm {
|
||||
struct mlx4_cmd {
|
||||
struct pci_pool *pool;
|
||||
void __iomem *hcr;
|
||||
struct mutex hcr_mutex;
|
||||
struct mutex slave_cmd_mutex;
|
||||
struct semaphore poll_sem;
|
||||
struct semaphore event_sem;
|
||||
struct rw_semaphore switch_sem;
|
||||
int max_cmds;
|
||||
spinlock_t context_lock;
|
||||
int free_head;
|
||||
@ -603,11 +630,13 @@ struct mlx4_cmd {
|
||||
u8 use_events;
|
||||
u8 toggle;
|
||||
u8 comm_toggle;
|
||||
u8 initialized;
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX4_VF_IMMED_VLAN_FLAG_VLAN = 1 << 0,
|
||||
MLX4_VF_IMMED_VLAN_FLAG_QOS = 1 << 1,
|
||||
MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE = 1 << 2,
|
||||
};
|
||||
struct mlx4_vf_immed_vlan_work {
|
||||
struct work_struct work;
|
||||
@ -618,8 +647,10 @@ struct mlx4_vf_immed_vlan_work {
|
||||
int orig_vlan_ix;
|
||||
u8 port;
|
||||
u8 qos;
|
||||
u8 qos_vport;
|
||||
u16 vlan_id;
|
||||
u16 orig_vlan_id;
|
||||
__be16 vlan_proto;
|
||||
};
|
||||
|
||||
|
||||
@ -639,7 +670,6 @@ struct mlx4_mr_table {
|
||||
struct mlx4_cq_table {
|
||||
struct mlx4_bitmap bitmap;
|
||||
spinlock_t lock;
|
||||
rwlock_t cq_table_lock;
|
||||
struct radix_tree_root tree;
|
||||
struct mlx4_icm_table table;
|
||||
struct mlx4_icm_table cmpt_table;
|
||||
@ -666,8 +696,17 @@ struct mlx4_srq_table {
|
||||
struct mlx4_icm_table cmpt_table;
|
||||
};
|
||||
|
||||
enum mlx4_qp_table_zones {
|
||||
MLX4_QP_TABLE_ZONE_GENERAL,
|
||||
MLX4_QP_TABLE_ZONE_RSS,
|
||||
MLX4_QP_TABLE_ZONE_RAW_ETH,
|
||||
MLX4_QP_TABLE_ZONE_NUM
|
||||
};
|
||||
|
||||
struct mlx4_qp_table {
|
||||
struct mlx4_bitmap bitmap;
|
||||
struct mlx4_bitmap *bitmap_gen;
|
||||
struct mlx4_zone_allocator *zones;
|
||||
u32 zones_uids[MLX4_QP_TABLE_ZONE_NUM];
|
||||
u32 rdmarc_base;
|
||||
int rdmarc_shift;
|
||||
spinlock_t lock;
|
||||
@ -696,17 +735,30 @@ struct mlx4_catas_err {
|
||||
struct mlx4_mac_table {
|
||||
__be64 entries[MLX4_MAX_MAC_NUM];
|
||||
int refs[MLX4_MAX_MAC_NUM];
|
||||
bool is_dup[MLX4_MAX_MAC_NUM];
|
||||
struct mutex mutex;
|
||||
int total;
|
||||
int max;
|
||||
};
|
||||
|
||||
#define MLX4_ROCE_GID_ENTRY_SIZE 16
|
||||
|
||||
struct mlx4_roce_gid_entry {
|
||||
u8 raw[MLX4_ROCE_GID_ENTRY_SIZE];
|
||||
};
|
||||
|
||||
struct mlx4_roce_gid_table {
|
||||
struct mlx4_roce_gid_entry roce_gids[MLX4_ROCE_MAX_GIDS];
|
||||
struct mutex mutex;
|
||||
};
|
||||
|
||||
#define MLX4_MAX_VLAN_NUM 128
|
||||
#define MLX4_VLAN_TABLE_SIZE (MLX4_MAX_VLAN_NUM << 2)
|
||||
|
||||
struct mlx4_vlan_table {
|
||||
__be32 entries[MLX4_MAX_VLAN_NUM];
|
||||
int refs[MLX4_MAX_VLAN_NUM];
|
||||
int is_dup[MLX4_MAX_VLAN_NUM];
|
||||
struct mutex mutex;
|
||||
int total;
|
||||
int max;
|
||||
@ -722,11 +774,15 @@ enum {
|
||||
MCAST_DEFAULT = 2
|
||||
};
|
||||
|
||||
|
||||
struct mlx4_set_port_general_context {
|
||||
u8 reserved[3];
|
||||
u16 reserved1;
|
||||
u8 v_ignore_fcs;
|
||||
u8 flags;
|
||||
u16 reserved2;
|
||||
union {
|
||||
u8 ignore_fcs;
|
||||
u8 roce_mode;
|
||||
};
|
||||
u8 reserved2;
|
||||
__be16 mtu;
|
||||
u8 pptx;
|
||||
u8 pfctx;
|
||||
@ -734,6 +790,9 @@ struct mlx4_set_port_general_context {
|
||||
u8 pprx;
|
||||
u8 pfcrx;
|
||||
u16 reserved4;
|
||||
u32 reserved5;
|
||||
u8 phv_en;
|
||||
u8 reserved6[3];
|
||||
};
|
||||
|
||||
struct mlx4_set_port_rqp_calc_context {
|
||||
@ -754,13 +813,6 @@ struct mlx4_set_port_rqp_calc_context {
|
||||
__be32 mcast;
|
||||
};
|
||||
|
||||
struct mlx4_hca_info {
|
||||
struct mlx4_dev *dev;
|
||||
struct device_attribute firmware_attr;
|
||||
struct device_attribute hca_attr;
|
||||
struct device_attribute board_attr;
|
||||
};
|
||||
|
||||
struct mlx4_port_info {
|
||||
struct mlx4_dev *dev;
|
||||
int port;
|
||||
@ -771,6 +823,7 @@ struct mlx4_port_info {
|
||||
struct device_attribute port_mtu_attr;
|
||||
struct mlx4_mac_table mac_table;
|
||||
struct mlx4_vlan_table vlan_table;
|
||||
struct mlx4_roce_gid_table gid_table;
|
||||
int base_qpn;
|
||||
};
|
||||
|
||||
@ -779,10 +832,11 @@ struct mlx4_sense {
|
||||
u8 do_sense_port[MLX4_MAX_PORTS + 1];
|
||||
u8 sense_allowed[MLX4_MAX_PORTS + 1];
|
||||
struct delayed_work sense_poll;
|
||||
int gone;
|
||||
};
|
||||
|
||||
struct mlx4_msix_ctl {
|
||||
u64 pool_bm;
|
||||
DECLARE_BITMAP(pool_bm, MAX_MSIX);
|
||||
struct mutex pool_lock;
|
||||
};
|
||||
|
||||
@ -796,22 +850,6 @@ enum {
|
||||
MLX4_PCI_DEV_FORCE_SENSE_PORT = 1 << 1,
|
||||
};
|
||||
|
||||
struct mlx4_roce_gid_entry {
|
||||
u8 raw[16];
|
||||
};
|
||||
|
||||
struct counter_index {
|
||||
struct list_head list;
|
||||
u32 index;
|
||||
};
|
||||
|
||||
struct mlx4_counters {
|
||||
struct mlx4_bitmap bitmap;
|
||||
struct list_head global_port_list[MLX4_MAX_PORTS];
|
||||
struct list_head vf_list[MLX4_MAX_NUM_VF][MLX4_MAX_PORTS];
|
||||
struct mutex mutex;
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX4_NO_RR = 0,
|
||||
MLX4_USE_RR = 1,
|
||||
@ -825,6 +863,7 @@ struct mlx4_priv {
|
||||
spinlock_t ctx_lock;
|
||||
|
||||
int pci_dev_data;
|
||||
int removed;
|
||||
|
||||
struct list_head pgdir_list;
|
||||
struct mutex pgdir_mutex;
|
||||
@ -842,7 +881,8 @@ struct mlx4_priv {
|
||||
struct mlx4_srq_table srq_table;
|
||||
struct mlx4_qp_table qp_table;
|
||||
struct mlx4_mcg_table mcg_table;
|
||||
struct mlx4_counters counters_table;
|
||||
struct mlx4_bitmap counters_bitmap;
|
||||
int def_counter[MLX4_MAX_PORTS];
|
||||
|
||||
struct mlx4_catas_err catas_err;
|
||||
|
||||
@ -851,7 +891,6 @@ struct mlx4_priv {
|
||||
struct mlx4_uar driver_uar;
|
||||
void __iomem *kar;
|
||||
struct mlx4_port_info port[MLX4_MAX_PORTS + 1];
|
||||
struct mlx4_hca_info hca_info;
|
||||
struct mlx4_sense sense;
|
||||
struct mutex port_mutex;
|
||||
struct mlx4_msix_ctl msix_ctl;
|
||||
@ -863,8 +902,10 @@ struct mlx4_priv {
|
||||
int reserved_mtts;
|
||||
int fs_hash_mode;
|
||||
u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
|
||||
struct mlx4_port_map v2p; /* cached port mapping configuration */
|
||||
struct mutex bond_mutex; /* for bond mode */
|
||||
__be64 slave_node_guids[MLX4_MFUNC_MAX];
|
||||
struct mlx4_roce_gid_entry roce_gids[MLX4_MAX_PORTS][128];
|
||||
|
||||
atomic_t opreq_count;
|
||||
struct work_struct opreq_task;
|
||||
};
|
||||
@ -913,7 +954,7 @@ void mlx4_cleanup_cq_table(struct mlx4_dev *dev);
|
||||
void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
|
||||
void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
|
||||
void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
|
||||
int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn);
|
||||
int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp);
|
||||
void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn);
|
||||
int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn);
|
||||
void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn);
|
||||
@ -921,7 +962,7 @@ int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn);
|
||||
void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn);
|
||||
int __mlx4_mpt_reserve(struct mlx4_dev *dev);
|
||||
void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index);
|
||||
int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index);
|
||||
int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp);
|
||||
void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index);
|
||||
u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order);
|
||||
void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order);
|
||||
@ -956,6 +997,11 @@ int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
|
||||
struct mlx4_cmd_mailbox *inbox,
|
||||
struct mlx4_cmd_mailbox *outbox,
|
||||
struct mlx4_cmd_info *cmd);
|
||||
int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
|
||||
struct mlx4_vhcr *vhcr,
|
||||
struct mlx4_cmd_mailbox *inbox,
|
||||
struct mlx4_cmd_mailbox *outbox,
|
||||
struct mlx4_cmd_info *cmd);
|
||||
int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
|
||||
struct mlx4_vhcr *vhcr,
|
||||
struct mlx4_cmd_mailbox *inbox,
|
||||
@ -968,20 +1014,17 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
|
||||
void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
|
||||
int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
|
||||
int start_index, int npages, u64 *page_list);
|
||||
int __mlx4_counter_alloc(struct mlx4_dev *dev, int slave, int port, u32 *idx);
|
||||
void __mlx4_counter_free(struct mlx4_dev *dev, int slave, int port, u32 idx);
|
||||
|
||||
int __mlx4_slave_counters_free(struct mlx4_dev *dev, int slave);
|
||||
int __mlx4_clear_if_stat(struct mlx4_dev *dev,
|
||||
u8 counter_index);
|
||||
u8 mlx4_get_default_counter_index(struct mlx4_dev *dev, int slave, int port);
|
||||
|
||||
int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
|
||||
void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
|
||||
int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
|
||||
struct mlx4_counter *data);
|
||||
int __mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn);
|
||||
void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn);
|
||||
|
||||
void mlx4_start_catas_poll(struct mlx4_dev *dev);
|
||||
void mlx4_stop_catas_poll(struct mlx4_dev *dev);
|
||||
void mlx4_catas_init(void);
|
||||
int mlx4_catas_init(struct mlx4_dev *dev);
|
||||
void mlx4_catas_end(struct mlx4_dev *dev);
|
||||
int mlx4_restart_one(struct pci_dev *pdev);
|
||||
int mlx4_register_device(struct mlx4_dev *dev);
|
||||
void mlx4_unregister_device(struct mlx4_dev *dev);
|
||||
@ -996,7 +1039,6 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
|
||||
struct mlx4_dev_cap *dev_cap,
|
||||
struct mlx4_init_hca_param *init_hca);
|
||||
void mlx4_master_comm_channel(struct work_struct *work);
|
||||
void mlx4_master_arm_comm_channel(struct work_struct *work);
|
||||
void mlx4_gen_slave_eqe(struct work_struct *work);
|
||||
void mlx4_master_handle_slave_flr(struct work_struct *work);
|
||||
|
||||
@ -1137,17 +1179,27 @@ int mlx4_QUERY_QP_wrapper(struct mlx4_dev *dev, int slave,
|
||||
|
||||
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe);
|
||||
|
||||
enum {
|
||||
MLX4_CMD_CLEANUP_STRUCT = 1UL << 0,
|
||||
MLX4_CMD_CLEANUP_POOL = 1UL << 1,
|
||||
MLX4_CMD_CLEANUP_HCR = 1UL << 2,
|
||||
MLX4_CMD_CLEANUP_VHCR = 1UL << 3,
|
||||
MLX4_CMD_CLEANUP_ALL = (MLX4_CMD_CLEANUP_VHCR << 1) - 1
|
||||
};
|
||||
|
||||
int mlx4_cmd_init(struct mlx4_dev *dev);
|
||||
void mlx4_cmd_cleanup(struct mlx4_dev *dev);
|
||||
void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask);
|
||||
int mlx4_multi_func_init(struct mlx4_dev *dev);
|
||||
int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev);
|
||||
void mlx4_multi_func_cleanup(struct mlx4_dev *dev);
|
||||
void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
|
||||
int mlx4_cmd_use_events(struct mlx4_dev *dev);
|
||||
void mlx4_cmd_use_polling(struct mlx4_dev *dev);
|
||||
|
||||
int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
|
||||
unsigned long timeout);
|
||||
u16 op, unsigned long timeout);
|
||||
|
||||
void mlx4_cq_tasklet_cb(unsigned long data);
|
||||
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn);
|
||||
void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type);
|
||||
|
||||
@ -1155,7 +1207,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);
|
||||
|
||||
void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
|
||||
|
||||
void mlx4_handle_catas_err(struct mlx4_dev *dev);
|
||||
void mlx4_enter_error_state(struct mlx4_dev_persistent *persist);
|
||||
|
||||
int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
|
||||
enum mlx4_port_type *type);
|
||||
@ -1172,8 +1224,14 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
|
||||
|
||||
void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
|
||||
void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
|
||||
void mlx4_init_roce_gid_table(struct mlx4_dev *dev,
|
||||
struct mlx4_roce_gid_table *table);
|
||||
void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
|
||||
int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
|
||||
int mlx4_bond_vlan_table(struct mlx4_dev *dev);
|
||||
int mlx4_unbond_vlan_table(struct mlx4_dev *dev);
|
||||
int mlx4_bond_mac_table(struct mlx4_dev *dev);
|
||||
int mlx4_unbond_mac_table(struct mlx4_dev *dev);
|
||||
|
||||
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz);
|
||||
/* resource tracker functions*/
|
||||
@ -1181,6 +1239,7 @@ int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
|
||||
enum mlx4_resource resource_type,
|
||||
u64 resource_id, int *slave);
|
||||
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id);
|
||||
void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave);
|
||||
int mlx4_init_resource_tracker(struct mlx4_dev *dev);
|
||||
|
||||
void mlx4_free_resource_tracker(struct mlx4_dev *dev,
|
||||
@ -1227,6 +1286,12 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
|
||||
struct mlx4_cmd_mailbox *outbox,
|
||||
struct mlx4_cmd_info *cmd);
|
||||
|
||||
int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
|
||||
struct mlx4_vhcr *vhcr,
|
||||
struct mlx4_cmd_mailbox *inbox,
|
||||
struct mlx4_cmd_mailbox *outbox,
|
||||
struct mlx4_cmd_info *cmd);
|
||||
|
||||
int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
|
||||
struct mlx4_vhcr *vhcr,
|
||||
struct mlx4_cmd_mailbox *inbox,
|
||||
@ -1241,7 +1306,6 @@ int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp,
|
||||
u8 gid[16], u8 port,
|
||||
int block_mcast_loopback,
|
||||
enum mlx4_protocol prot, u64 *reg_id);
|
||||
int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
|
||||
int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
|
||||
struct mlx4_vhcr *vhcr,
|
||||
struct mlx4_cmd_mailbox *inbox,
|
||||
@ -1279,11 +1343,11 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
|
||||
struct mlx4_cmd_mailbox *inbox,
|
||||
struct mlx4_cmd_mailbox *outbox,
|
||||
struct mlx4_cmd_info *cmd);
|
||||
int mlx4_MOD_STAT_CFG_wrapper(struct mlx4_dev *dev, int slave,
|
||||
struct mlx4_vhcr *vhcr,
|
||||
struct mlx4_cmd_mailbox *inbox,
|
||||
struct mlx4_cmd_mailbox *outbox,
|
||||
struct mlx4_cmd_info *cmd);
|
||||
int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave,
|
||||
struct mlx4_vhcr *vhcr,
|
||||
struct mlx4_cmd_mailbox *inbox,
|
||||
struct mlx4_cmd_mailbox *outbox,
|
||||
struct mlx4_cmd_info *cmd);
|
||||
|
||||
int mlx4_get_mgm_entry_size(struct mlx4_dev *dev);
|
||||
int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);
|
||||
@ -1315,13 +1379,86 @@ static inline spinlock_t *mlx4_tlock(struct mlx4_dev *dev)
|
||||
|
||||
#define NOT_MASKED_PD_BITS 17
|
||||
|
||||
void sys_tune_init(void);
|
||||
void sys_tune_fini(void);
|
||||
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work);
|
||||
|
||||
void mlx4_init_quotas(struct mlx4_dev *dev);
|
||||
|
||||
int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave);
|
||||
int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave);
|
||||
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work);
|
||||
/* for VFs, replace zero MACs with randomly-generated MACs at driver start */
|
||||
void mlx4_replace_zero_macs(struct mlx4_dev *dev);
|
||||
int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port);
|
||||
/* Returns the VF index of slave */
|
||||
int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave);
|
||||
int mlx4_config_mad_demux(struct mlx4_dev *dev);
|
||||
int mlx4_do_bond(struct mlx4_dev *dev, bool enable);
|
||||
int mlx4_bond_fs_rules(struct mlx4_dev *dev);
|
||||
int mlx4_unbond_fs_rules(struct mlx4_dev *dev);
|
||||
|
||||
enum mlx4_zone_flags {
|
||||
MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO = 1UL << 0,
|
||||
MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO = 1UL << 1,
|
||||
MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO = 1UL << 2,
|
||||
MLX4_ZONE_USE_RR = 1UL << 3,
|
||||
};
|
||||
|
||||
enum mlx4_zone_alloc_flags {
|
||||
/* No two objects could overlap between zones. UID
|
||||
* could be left unused. If this flag is given and
|
||||
* two overlapped zones are used, an object will be free'd
|
||||
* from the smallest possible matching zone.
|
||||
*/
|
||||
MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP = 1UL << 0,
|
||||
};
|
||||
|
||||
struct mlx4_zone_allocator;
|
||||
|
||||
/* Create a new zone allocator */
|
||||
struct mlx4_zone_allocator *mlx4_zone_allocator_create(enum mlx4_zone_alloc_flags flags);
|
||||
|
||||
/* Attach a mlx4_bitmap <bitmap> of priority <priority> to the zone allocator
|
||||
* <zone_alloc>. Allocating an object from this zone adds an offset <offset>.
|
||||
* Similarly, when searching for an object to free, this offset it taken into
|
||||
* account. The use_rr mlx4_ib parameter for allocating objects from this <bitmap>
|
||||
* is given through the MLX4_ZONE_USE_RR flag in <flags>.
|
||||
* When an allocation fails, <zone_alloc> tries to allocate from other zones
|
||||
* according to the policy set by <flags>. <puid> is the unique identifier
|
||||
* received to this zone.
|
||||
*/
|
||||
int mlx4_zone_add_one(struct mlx4_zone_allocator *zone_alloc,
|
||||
struct mlx4_bitmap *bitmap,
|
||||
u32 flags,
|
||||
int priority,
|
||||
int offset,
|
||||
u32 *puid);
|
||||
|
||||
/* Remove bitmap indicated by <uid> from <zone_alloc> */
|
||||
int mlx4_zone_remove_one(struct mlx4_zone_allocator *zone_alloc, u32 uid);
|
||||
|
||||
/* Delete the zone allocator <zone_alloc. This function doesn't destroy
|
||||
* the attached bitmaps.
|
||||
*/
|
||||
void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc);
|
||||
|
||||
/* Allocate <count> objects with align <align> and skip_mask <skip_mask>
|
||||
* from the mlx4_bitmap whose uid is <uid>. The bitmap which we actually
|
||||
* allocated from is returned in <puid>. If the allocation fails, a negative
|
||||
* number is returned. Otherwise, the offset of the first object is returned.
|
||||
*/
|
||||
u32 mlx4_zone_alloc_entries(struct mlx4_zone_allocator *zones, u32 uid, int count,
|
||||
int align, u32 skip_mask, u32 *puid);
|
||||
|
||||
/* Free <count> objects, start from <obj> of the uid <uid> from zone_allocator
|
||||
* <zones>.
|
||||
*/
|
||||
u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones,
|
||||
u32 uid, u32 obj, u32 count);
|
||||
|
||||
/* If <zones> was allocated with MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP, instead of
|
||||
* specifying the uid when freeing an object, zone allocator could figure it by
|
||||
* itself. Other parameters are similar to mlx4_zone_free.
|
||||
*/
|
||||
u32 mlx4_zone_free_entries_unique(struct mlx4_zone_allocator *zones, u32 obj, u32 count);
|
||||
|
||||
/* Returns a pointer to mlx4_bitmap that was attached to <zones> with <uid> */
|
||||
struct mlx4_bitmap *mlx4_zone_get_bitmap(struct mlx4_zone_allocator *zones, u32 uid);
|
||||
|
||||
#endif /* MLX4_H */
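The zone-allocator API declared above can be tied together in a short usage sketch. Everything here is hypothetical: the caller is assumed to already own an initialized struct mlx4_bitmap, the priority/offset/alignment values are placeholders, and the failure check on mlx4_zone_alloc_entries() is inferred from the comment ("a negative number is returned") since the exact sentinel is not shown in this diff.

```c
static int
example_zone_alloc_roundtrip(struct mlx4_bitmap *bitmap)
{
	struct mlx4_zone_allocator *zones;
	u32 uid, alloc_uid, obj;
	int err;

	zones = mlx4_zone_allocator_create(MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP);
	if (zones == NULL)
		return (-ENOMEM);

	/* Attach the caller's bitmap as a single round-robin zone. */
	err = mlx4_zone_add_one(zones, bitmap, MLX4_ZONE_USE_RR,
	    0 /* priority */, 0 /* offset */, &uid);
	if (err != 0) {
		mlx4_zone_allocator_destroy(zones);
		return (err);
	}

	/* Grab four objects aligned to four, then give them back. */
	obj = mlx4_zone_alloc_entries(zones, uid, 4, 4, 0, &alloc_uid);
	if ((int)obj >= 0)
		mlx4_zone_free_entries(zones, alloc_uid, obj, 4);

	/* Does not free the attached bitmap; the caller still owns it. */
	mlx4_zone_allocator_destroy(zones);
	return (0);
}
```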
|
||||
|
@ -116,12 +116,12 @@ u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt,
|
||||
spin_lock(&bitmap->lock);
|
||||
|
||||
obj = find_aligned_range(bitmap->table, bitmap->last,
|
||||
bitmap->max, cnt, align, skip_mask);
|
||||
bitmap->max, cnt, align, skip_mask);
|
||||
if (obj >= bitmap->max) {
|
||||
bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
|
||||
& bitmap->mask;
|
||||
obj = find_aligned_range(bitmap->table, 0, bitmap->max,
|
||||
cnt, align, skip_mask);
|
||||
cnt, align, skip_mask);
|
||||
}
|
||||
|
||||
if (obj < bitmap->max) {
|
||||
@ -148,6 +148,11 @@ u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap)
|
||||
return bitmap->avail;
|
||||
}
|
||||
|
||||
static u32 mlx4_bitmap_masked_value(struct mlx4_bitmap *bitmap, u32 obj)
|
||||
{
|
||||
return obj & (bitmap->max + bitmap->reserved_top - 1);
|
||||
}
|
||||
|
||||
void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt,
|
||||
int use_rr)
|
||||
{
|
||||
@ -167,23 +172,17 @@ void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt,
|
||||
int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
|
||||
u32 reserved_bot, u32 reserved_top)
|
||||
{
|
||||
/* sanity check */
|
||||
if (num <= (u64)reserved_top + reserved_bot)
|
||||
return -EINVAL;
|
||||
|
||||
/* num must be a power of 2 */
|
||||
if (num != roundup_pow_of_two(num))
|
||||
return -EINVAL;
|
||||
|
||||
if (reserved_bot + reserved_top >= num)
|
||||
return -EINVAL;
|
||||
|
||||
bitmap->last = 0;
|
||||
bitmap->top = 0;
|
||||
bitmap->max = num - reserved_top;
|
||||
bitmap->mask = mask;
|
||||
bitmap->reserved_top = reserved_top;
|
||||
bitmap->avail = num - reserved_top - reserved_bot;
|
||||
bitmap->effective_len = bitmap->avail;
|
||||
spin_lock_init(&bitmap->lock);
|
||||
bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
|
||||
sizeof (long), GFP_KERNEL);
|
||||
@ -200,6 +199,382 @@ void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
|
||||
kfree(bitmap->table);
|
||||
}
|
||||
|
||||
struct mlx4_zone_allocator {
|
||||
struct list_head entries;
|
||||
struct list_head prios;
|
||||
u32 last_uid;
|
||||
u32 mask;
|
||||
/* protect the zone_allocator from concurrent accesses */
|
||||
spinlock_t lock;
|
||||
enum mlx4_zone_alloc_flags flags;
|
||||
};
|
||||
|
||||
struct mlx4_zone_entry {
|
||||
struct list_head list;
|
||||
struct list_head prio_list;
|
||||
u32 uid;
|
||||
struct mlx4_zone_allocator *allocator;
|
||||
struct mlx4_bitmap *bitmap;
|
||||
int use_rr;
|
||||
int priority;
|
||||
int offset;
|
||||
enum mlx4_zone_flags flags;
|
||||
};
|
||||
|
||||
struct mlx4_zone_allocator *mlx4_zone_allocator_create(enum mlx4_zone_alloc_flags flags)
|
||||
{
|
||||
struct mlx4_zone_allocator *zones = kmalloc(sizeof(*zones), GFP_KERNEL);
|
||||
|
||||
if (NULL == zones)
|
||||
return NULL;
|
||||
|
||||
INIT_LIST_HEAD(&zones->entries);
|
||||
INIT_LIST_HEAD(&zones->prios);
|
||||
spin_lock_init(&zones->lock);
|
||||
zones->last_uid = 0;
|
||||
zones->mask = 0;
|
||||
zones->flags = flags;
|
||||
|
||||
return zones;
|
||||
}
|
||||
|
||||
int mlx4_zone_add_one(struct mlx4_zone_allocator *zone_alloc,
|
||||
struct mlx4_bitmap *bitmap,
|
||||
u32 flags,
|
||||
int priority,
|
||||
int offset,
|
||||
u32 *puid)
|
||||
{
|
||||
u32 mask = mlx4_bitmap_masked_value(bitmap, (u32)-1);
|
||||
struct mlx4_zone_entry *it;
|
||||
struct mlx4_zone_entry *zone = kmalloc(sizeof(*zone), GFP_KERNEL);
|
||||
|
||||
if (NULL == zone)
|
||||
return -ENOMEM;
|
||||
|
||||
zone->flags = flags;
|
||||
zone->bitmap = bitmap;
|
||||
zone->use_rr = (flags & MLX4_ZONE_USE_RR) ? MLX4_USE_RR : 0;
|
||||
zone->priority = priority;
|
||||
zone->offset = offset;
|
||||
|
||||
spin_lock(&zone_alloc->lock);
|
||||
|
||||
zone->uid = zone_alloc->last_uid++;
|
||||
zone->allocator = zone_alloc;
|
||||
|
||||
if (zone_alloc->mask < mask)
|
||||
zone_alloc->mask = mask;
|
||||
|
||||
list_for_each_entry(it, &zone_alloc->prios, prio_list)
|
||||
if (it->priority >= priority)
|
||||
break;
|
||||
|
||||
if (&it->prio_list == &zone_alloc->prios || it->priority > priority)
|
||||
list_add_tail(&zone->prio_list, &it->prio_list);
|
||||
list_add_tail(&zone->list, &it->list);
|
||||
|
||||
spin_unlock(&zone_alloc->lock);
|
||||
|
||||
*puid = zone->uid;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Should be called under a lock */
|
||||
static int __mlx4_zone_remove_one_entry(struct mlx4_zone_entry *entry)
|
||||
{
|
||||
struct mlx4_zone_allocator *zone_alloc = entry->allocator;
|
||||
|
||||
if (!list_empty(&entry->prio_list)) {
|
||||
/* Check if we need to add an alternative node to the prio list */
|
||||
if (!list_is_last(&entry->list, &zone_alloc->entries)) {
|
||||
struct mlx4_zone_entry *next = list_first_entry(&entry->list,
|
||||
typeof(*next),
|
||||
list);
|
||||
|
||||
if (next->priority == entry->priority)
|
||||
list_add_tail(&next->prio_list, &entry->prio_list);
|
||||
}
|
||||
|
||||
list_del(&entry->prio_list);
|
||||
}
|
||||
|
||||
list_del(&entry->list);
|
||||
|
||||
if (zone_alloc->flags & MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP) {
|
||||
u32 mask = 0;
|
||||
struct mlx4_zone_entry *it;
|
||||
|
||||
list_for_each_entry(it, &zone_alloc->prios, prio_list) {
|
||||
u32 cur_mask = mlx4_bitmap_masked_value(it->bitmap, (u32)-1);
|
||||
|
||||
if (mask < cur_mask)
|
||||
mask = cur_mask;
|
||||
}
|
||||
zone_alloc->mask = mask;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc)
|
||||
{
|
||||
struct mlx4_zone_entry *zone, *tmp;
|
||||
|
||||
spin_lock(&zone_alloc->lock);
|
||||
|
||||
list_for_each_entry_safe(zone, tmp, &zone_alloc->entries, list) {
|
||||
list_del(&zone->list);
|
||||
list_del(&zone->prio_list);
|
||||
kfree(zone);
|
||||
}
|
||||
|
||||
spin_unlock(&zone_alloc->lock);
|
||||
kfree(zone_alloc);
|
||||
}
|
||||
|
||||
/* Should be called under a lock */
|
||||
static u32 __mlx4_alloc_from_zone(struct mlx4_zone_entry *zone, int count,
|
||||
int align, u32 skip_mask, u32 *puid)
|
||||
{
|
||||
u32 uid = 0;
|
||||
u32 res;
|
||||
struct mlx4_zone_allocator *zone_alloc = zone->allocator;
|
||||
struct mlx4_zone_entry *curr_node;
|
||||
|
||||
res = mlx4_bitmap_alloc_range(zone->bitmap, count,
|
||||
align, skip_mask);
|
||||
|
||||
if (res != (u32)-1) {
|
||||
res += zone->offset;
|
||||
uid = zone->uid;
|
||||
goto out;
|
||||
}
|
||||
|
||||
list_for_each_entry(curr_node, &zone_alloc->prios, prio_list) {
|
||||
if (unlikely(curr_node->priority == zone->priority))
|
||||
break;
|
||||
}
|
||||
|
||||
if (zone->flags & MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO) {
|
||||
struct mlx4_zone_entry *it = curr_node;
|
||||
|
||||
list_for_each_entry_continue_reverse(it, &zone_alloc->entries, list) {
|
||||
res = mlx4_bitmap_alloc_range(it->bitmap, count,
|
||||
align, skip_mask);
|
||||
if (res != (u32)-1) {
|
||||
res += it->offset;
|
||||
uid = it->uid;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (zone->flags & MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO) {
|
||||
struct mlx4_zone_entry *it = curr_node;
|
||||
|
||||
list_for_each_entry_from(it, &zone_alloc->entries, list) {
|
||||
if (unlikely(it == zone))
|
||||
continue;
|
||||
|
||||
if (unlikely(it->priority != curr_node->priority))
|
||||
break;
|
||||
|
||||
res = mlx4_bitmap_alloc_range(it->bitmap, count,
|
||||
align, skip_mask);
|
||||
if (res != (u32)-1) {
|
||||
res += it->offset;
|
||||
uid = it->uid;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (zone->flags & MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO) {
|
||||
if (list_is_last(&curr_node->prio_list, &zone_alloc->prios))
|
||||
goto out;
|
||||
|
||||
curr_node = list_first_entry(&curr_node->prio_list,
|
||||
typeof(*curr_node),
|
||||
prio_list);
|
||||
|
||||
list_for_each_entry_from(curr_node, &zone_alloc->entries, list) {
|
||||
res = mlx4_bitmap_alloc_range(curr_node->bitmap, count,
|
||||
align, skip_mask);
|
||||
if (res != (u32)-1) {
|
||||
res += curr_node->offset;
|
||||
uid = curr_node->uid;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
if (NULL != puid && res != (u32)-1)
|
||||
*puid = uid;
|
||||
return res;
|
||||
}
|
||||
|
||||
/* Should be called under a lock */
|
||||
static void __mlx4_free_from_zone(struct mlx4_zone_entry *zone, u32 obj,
|
||||
u32 count)
|
||||
{
|
||||
mlx4_bitmap_free_range(zone->bitmap, obj - zone->offset, count, zone->use_rr);
|
||||
}
|
||||
|
||||
/* Should be called under a lock */
|
||||
static struct mlx4_zone_entry *__mlx4_find_zone_by_uid(
|
||||
struct mlx4_zone_allocator *zones, u32 uid)
|
||||
{
|
||||
struct mlx4_zone_entry *zone;
|
||||
|
||||
list_for_each_entry(zone, &zones->entries, list) {
|
||||
if (zone->uid == uid)
|
||||
return zone;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
struct mlx4_bitmap *mlx4_zone_get_bitmap(struct mlx4_zone_allocator *zones, u32 uid)
|
||||
{
|
||||
struct mlx4_zone_entry *zone;
|
||||
struct mlx4_bitmap *bitmap;
|
||||
|
||||
spin_lock(&zones->lock);
|
||||
|
||||
zone = __mlx4_find_zone_by_uid(zones, uid);
|
||||
|
||||
bitmap = zone == NULL ? NULL : zone->bitmap;
|
||||
|
||||
spin_unlock(&zones->lock);
|
||||
|
||||
return bitmap;
|
||||
}
|
||||
|
||||
int mlx4_zone_remove_one(struct mlx4_zone_allocator *zones, u32 uid)
|
||||
{
|
||||
struct mlx4_zone_entry *zone;
|
||||
int res;
|
||||
|
||||
spin_lock(&zones->lock);
|
||||
|
||||
zone = __mlx4_find_zone_by_uid(zones, uid);
|
||||
|
||||
if (NULL == zone) {
|
||||
res = -1;
|
||||
goto out;
|
||||
}
|
||||
|
||||
res = __mlx4_zone_remove_one_entry(zone);
|
||||
|
||||
out:
|
||||
spin_unlock(&zones->lock);
|
||||
kfree(zone);
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
/* Should be called under a lock */
|
||||
static struct mlx4_zone_entry *__mlx4_find_zone_by_uid_unique(
|
||||
struct mlx4_zone_allocator *zones, u32 obj)
|
||||
{
|
||||
struct mlx4_zone_entry *zone, *zone_candidate = NULL;
|
||||
u32 dist = (u32)-1;
|
||||
|
||||
	/* Search for the smallest zone that this obj could be
	 * allocated from. This is done in order to handle
	 * situations when small bitmaps are allocated from bigger
	 * bitmaps (and the allocated space is marked as reserved in
	 * the bigger bitmap).
	 */
|
||||
list_for_each_entry(zone, &zones->entries, list) {
|
||||
if (obj >= zone->offset) {
|
||||
u32 mobj = (obj - zone->offset) & zones->mask;
|
||||
|
||||
if (mobj < zone->bitmap->max) {
|
||||
u32 curr_dist = zone->bitmap->effective_len;
|
||||
|
||||
if (curr_dist < dist) {
|
||||
dist = curr_dist;
|
||||
zone_candidate = zone;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return zone_candidate;
|
||||
}
|
||||
|
||||
u32 mlx4_zone_alloc_entries(struct mlx4_zone_allocator *zones, u32 uid, int count,
|
||||
int align, u32 skip_mask, u32 *puid)
|
||||
{
|
||||
struct mlx4_zone_entry *zone;
|
||||
int res = -1;
|
||||
|
||||
spin_lock(&zones->lock);
|
||||
|
||||
zone = __mlx4_find_zone_by_uid(zones, uid);
|
||||
|
||||
if (NULL == zone)
|
||||
goto out;
|
||||
|
||||
res = __mlx4_alloc_from_zone(zone, count, align, skip_mask, puid);
|
||||
|
||||
out:
|
||||
spin_unlock(&zones->lock);
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones, u32 uid, u32 obj, u32 count)
|
||||
{
|
||||
struct mlx4_zone_entry *zone;
|
||||
int res = 0;
|
||||
|
||||
spin_lock(&zones->lock);
|
||||
|
||||
zone = __mlx4_find_zone_by_uid(zones, uid);
|
||||
|
||||
if (NULL == zone) {
|
||||
res = -1;
|
||||
goto out;
|
||||
}
|
||||
|
||||
__mlx4_free_from_zone(zone, obj, count);
|
||||
|
||||
out:
|
||||
spin_unlock(&zones->lock);
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
u32 mlx4_zone_free_entries_unique(struct mlx4_zone_allocator *zones, u32 obj, u32 count)
|
||||
{
|
||||
struct mlx4_zone_entry *zone;
|
||||
int res;
|
||||
|
||||
if (!(zones->flags & MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP))
|
||||
return -EFAULT;
|
||||
|
||||
spin_lock(&zones->lock);
|
||||
|
||||
zone = __mlx4_find_zone_by_uid_unique(zones, obj);
|
||||
|
||||
if (NULL == zone) {
|
||||
res = -1;
|
||||
goto out;
|
||||
}
|
||||
|
||||
__mlx4_free_from_zone(zone, obj, count);
|
||||
res = 0;
|
||||
|
||||
out:
|
||||
spin_unlock(&zones->lock);
|
||||
|
||||
return res;
|
||||
}
|
||||
/*
|
||||
* Handling for queue buffers -- we allocate a bunch of memory and
|
||||
* register it in a memory region at HCA virtual address 0. If the
|
||||
@ -208,7 +583,7 @@ void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
|
||||
*/
|
||||
|
||||
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
|
||||
struct mlx4_buf *buf)
|
||||
struct mlx4_buf *buf, gfp_t gfp)
|
||||
{
|
||||
dma_addr_t t;
|
||||
|
||||
@ -216,8 +591,8 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
|
||||
buf->nbufs = 1;
|
||||
buf->npages = 1;
|
||||
buf->page_shift = get_order(size) + PAGE_SHIFT;
|
||||
buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev,
|
||||
size, &t, GFP_KERNEL);
|
||||
buf->direct.buf = dma_alloc_coherent(&dev->persist->pdev->dev,
|
||||
size, &t, gfp);
|
||||
if (!buf->direct.buf)
|
||||
return -ENOMEM;
|
||||
|
||||
@ -237,14 +612,15 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
|
||||
buf->npages = buf->nbufs;
|
||||
buf->page_shift = PAGE_SHIFT;
|
||||
buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
|
||||
GFP_KERNEL);
|
||||
gfp);
|
||||
if (!buf->page_list)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < buf->nbufs; ++i) {
|
||||
buf->page_list[i].buf =
|
||||
dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
|
||||
&t, GFP_KERNEL);
|
||||
dma_alloc_coherent(&dev->persist->pdev->dev,
|
||||
PAGE_SIZE,
|
||||
&t, gfp);
|
||||
if (!buf->page_list[i].buf)
|
||||
goto err_free;
|
||||
|
||||
@ -255,7 +631,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
|
||||
|
||||
if (BITS_PER_LONG == 64) {
|
||||
struct page **pages;
|
||||
pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
|
||||
pages = kmalloc(sizeof *pages * buf->nbufs, gfp);
|
||||
if (!pages)
|
||||
goto err_free;
|
||||
for (i = 0; i < buf->nbufs; ++i)
|
||||
@ -281,15 +657,17 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
|
||||
int i;
|
||||
|
||||
if (buf->nbufs == 1)
|
||||
dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
|
||||
dma_free_coherent(&dev->persist->pdev->dev, size,
|
||||
buf->direct.buf,
|
||||
buf->direct.map);
|
||||
else {
|
||||
if (BITS_PER_LONG == 64 && buf->direct.buf)
|
||||
if (BITS_PER_LONG == 64)
|
||||
vunmap(buf->direct.buf);
|
||||
|
||||
for (i = 0; i < buf->nbufs; ++i)
|
||||
if (buf->page_list[i].buf)
|
||||
dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
|
||||
dma_free_coherent(&dev->persist->pdev->dev,
|
||||
PAGE_SIZE,
|
||||
buf->page_list[i].buf,
|
||||
buf->page_list[i].map);
|
||||
kfree(buf->page_list);
|
||||
@ -297,11 +675,12 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_buf_free);
|
||||
|
||||
static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
|
||||
static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device,
|
||||
gfp_t gfp)
|
||||
{
|
||||
struct mlx4_db_pgdir *pgdir;
|
||||
|
||||
pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
|
||||
pgdir = kzalloc(sizeof *pgdir, gfp);
|
||||
if (!pgdir)
|
||||
return NULL;
|
||||
|
||||
@ -309,7 +688,7 @@ static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
|
||||
pgdir->bits[0] = pgdir->order0;
|
||||
pgdir->bits[1] = pgdir->order1;
|
||||
pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
|
||||
&pgdir->db_dma, GFP_KERNEL);
|
||||
&pgdir->db_dma, gfp);
|
||||
if (!pgdir->db_page) {
|
||||
kfree(pgdir);
|
||||
return NULL;
|
||||
@ -349,7 +728,7 @@ static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
|
||||
return 0;
|
||||
}
|
||||
|
||||
int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
|
||||
int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, gfp_t gfp)
|
||||
{
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
struct mlx4_db_pgdir *pgdir;
|
||||
@ -361,7 +740,7 @@ int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
|
||||
if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
|
||||
goto out;
|
||||
|
||||
pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev));
|
||||
pgdir = mlx4_alloc_db_pgdir(&dev->persist->pdev->dev, gfp);
|
||||
if (!pgdir) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
@ -398,7 +777,7 @@ void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
|
||||
set_bit(i, db->u.pgdir->bits[o]);
|
||||
|
||||
if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
|
||||
dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
|
||||
dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
|
||||
db->u.pgdir->db_page, db->u.pgdir->db_dma);
|
||||
list_del(&db->u.pgdir->list);
|
||||
kfree(db->u.pgdir);
|
||||
@ -413,13 +792,13 @@ int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
|
||||
{
|
||||
int err;
|
||||
|
||||
err = mlx4_db_alloc(dev, &wqres->db, 1);
|
||||
err = mlx4_db_alloc(dev, &wqres->db, 1, GFP_KERNEL);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
*wqres->db.db = 0;
|
||||
|
||||
err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf);
|
||||
err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf, GFP_KERNEL);
|
||||
if (err)
|
||||
goto err_db;
|
||||
|
||||
@ -428,7 +807,7 @@ int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
|
||||
if (err)
|
||||
goto err_buf;
|
||||
|
||||
err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);
|
||||
err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf, GFP_KERNEL);
|
||||
if (err)
|
||||
goto err_mtt;
|
||||
|
||||
|
@ -42,16 +42,177 @@
|
||||
|
||||
#define MLX4_CATAS_POLL_INTERVAL (5 * HZ)
|
||||
|
||||
static DEFINE_SPINLOCK(catas_lock);
|
||||
|
||||
static LIST_HEAD(catas_list);
|
||||
static struct work_struct catas_work;
|
||||
|
||||
static int internal_err_reset = 1;
|
||||
module_param(internal_err_reset, int, 0644);
|
||||
int mlx4_internal_err_reset = 1;
|
||||
module_param_named(internal_err_reset, mlx4_internal_err_reset, int, 0644);
|
||||
MODULE_PARM_DESC(internal_err_reset,
|
||||
"Reset device on internal errors if non-zero"
|
||||
" (default 1, in SRIOV mode default is 0)");
|
||||
"Reset device on internal errors if non-zero (default 1)");
|
||||
|
||||
static int read_vendor_id(struct mlx4_dev *dev)
|
||||
{
|
||||
u16 vendor_id = 0;
|
||||
int ret;
|
||||
|
||||
ret = pci_read_config_word(dev->persist->pdev, 0, &vendor_id);
|
||||
if (ret) {
|
||||
mlx4_err(dev, "Failed to read vendor ID, ret=%d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (vendor_id == 0xffff) {
|
||||
mlx4_err(dev, "PCI can't be accessed to read vendor id\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mlx4_reset_master(struct mlx4_dev *dev)
|
||||
{
|
||||
int err = 0;
|
||||
|
||||
if (mlx4_is_master(dev))
|
||||
mlx4_report_internal_err_comm_event(dev);
|
||||
|
||||
if (!pci_channel_offline(dev->persist->pdev)) {
|
||||
err = read_vendor_id(dev);
|
||||
/* If PCI can't be accessed to read vendor ID we assume that its
|
||||
* link was disabled and chip was already reset.
|
||||
*/
|
||||
if (err)
|
||||
return 0;
|
||||
|
||||
err = mlx4_reset(dev);
|
||||
if (err)
|
||||
mlx4_err(dev, "Fail to reset HCA\n");
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int mlx4_reset_slave(struct mlx4_dev *dev)
|
||||
{
|
||||
#define COM_CHAN_RST_REQ_OFFSET 0x10
|
||||
#define COM_CHAN_RST_ACK_OFFSET 0x08
|
||||
|
||||
u32 comm_flags;
|
||||
u32 rst_req;
|
||||
u32 rst_ack;
|
||||
unsigned long end;
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
|
||||
if (pci_channel_offline(dev->persist->pdev))
|
||||
return 0;
|
||||
|
||||
comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm +
|
||||
MLX4_COMM_CHAN_FLAGS));
|
||||
if (comm_flags == 0xffffffff) {
|
||||
mlx4_err(dev, "VF reset is not needed\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!(dev->caps.vf_caps & MLX4_VF_CAP_FLAG_RESET)) {
|
||||
mlx4_err(dev, "VF reset is not supported\n");
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
rst_req = (comm_flags & (u32)(1 << COM_CHAN_RST_REQ_OFFSET)) >>
|
||||
COM_CHAN_RST_REQ_OFFSET;
|
||||
rst_ack = (comm_flags & (u32)(1 << COM_CHAN_RST_ACK_OFFSET)) >>
|
||||
COM_CHAN_RST_ACK_OFFSET;
|
||||
if (rst_req != rst_ack) {
|
||||
mlx4_err(dev, "Communication channel isn't sync, fail to send reset\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
rst_req ^= 1;
|
||||
mlx4_warn(dev, "VF is sending reset request to Firmware\n");
|
||||
comm_flags = rst_req << COM_CHAN_RST_REQ_OFFSET;
|
||||
__raw_writel((__force u32)cpu_to_be32(comm_flags),
|
||||
(__iomem char *)priv->mfunc.comm + MLX4_COMM_CHAN_FLAGS);
|
||||
/* Make sure that our comm channel write doesn't
|
||||
* get mixed in with writes from another CPU.
|
||||
*/
|
||||
mmiowb();
|
||||
|
||||
end = msecs_to_jiffies(MLX4_COMM_TIME) + jiffies;
|
||||
while (time_before(jiffies, end)) {
|
||||
comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm +
|
||||
MLX4_COMM_CHAN_FLAGS));
|
||||
rst_ack = (comm_flags & (u32)(1 << COM_CHAN_RST_ACK_OFFSET)) >>
|
||||
COM_CHAN_RST_ACK_OFFSET;
|
||||
|
||||
/* Reading rst_req again since the communication channel can
|
||||
* be reset at any time by the PF and all its bits will be
|
||||
* set to zero.
|
||||
*/
|
||||
rst_req = (comm_flags & (u32)(1 << COM_CHAN_RST_REQ_OFFSET)) >>
|
||||
COM_CHAN_RST_REQ_OFFSET;
|
||||
|
||||
if (rst_ack == rst_req) {
|
||||
mlx4_warn(dev, "VF Reset succeed\n");
|
||||
return 0;
|
||||
}
|
||||
cond_resched();
|
||||
}
|
||||
mlx4_err(dev, "Fail to send reset over the communication channel\n");
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
static int mlx4_comm_internal_err(u32 slave_read)
|
||||
{
|
||||
return (u32)COMM_CHAN_EVENT_INTERNAL_ERR ==
|
||||
(slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0;
|
||||
}
|
||||
|
||||
void mlx4_enter_error_state(struct mlx4_dev_persistent *persist)
|
||||
{
|
||||
int err;
|
||||
struct mlx4_dev *dev;
|
||||
|
||||
if (!mlx4_internal_err_reset)
|
||||
return;
|
||||
|
||||
mutex_lock(&persist->device_state_mutex);
|
||||
if (persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
|
||||
goto out;
|
||||
|
||||
dev = persist->dev;
|
||||
mlx4_err(dev, "device is going to be reset\n");
|
||||
if (mlx4_is_slave(dev))
|
||||
err = mlx4_reset_slave(dev);
|
||||
else
|
||||
err = mlx4_reset_master(dev);
|
||||
BUG_ON(err != 0);
|
||||
|
||||
dev->persist->state |= MLX4_DEVICE_STATE_INTERNAL_ERROR;
|
||||
mlx4_err(dev, "device was reset successfully\n");
|
||||
mutex_unlock(&persist->device_state_mutex);
|
||||
|
||||
/* At that step HW was already reset, now notify clients */
|
||||
mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, 0);
|
||||
mlx4_cmd_wake_completions(dev);
|
||||
return;
|
||||
|
||||
out:
|
||||
mutex_unlock(&persist->device_state_mutex);
|
||||
}
|
||||
|
||||
static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist)
|
||||
{
|
||||
int err = 0;
|
||||
|
||||
mlx4_enter_error_state(persist);
|
||||
mutex_lock(&persist->interface_state_mutex);
|
||||
if (persist->interface_state & MLX4_INTERFACE_STATE_UP &&
|
||||
!(persist->interface_state & MLX4_INTERFACE_STATE_DELETION)) {
|
||||
err = mlx4_restart_one(persist->pdev);
|
||||
mlx4_info(persist->dev, "mlx4_restart_one was ended, ret=%d\n",
|
||||
err);
|
||||
}
|
||||
mutex_unlock(&persist->interface_state_mutex);
|
||||
}
|
||||
|
||||
static void dump_err_buf(struct mlx4_dev *dev)
|
||||
{
|
||||
@ -69,58 +230,40 @@ static void poll_catas(unsigned long dev_ptr)
|
||||
{
|
||||
struct mlx4_dev *dev = (struct mlx4_dev *) dev_ptr;
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
u32 slave_read;
|
||||
|
||||
if (readl(priv->catas_err.map)) {
|
||||
/* If the device is off-line, we cannot try to recover it */
|
||||
if (pci_channel_offline(dev->pdev))
|
||||
mod_timer(&priv->catas_err.timer,
|
||||
round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL));
|
||||
else {
|
||||
dump_err_buf(dev);
|
||||
mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, 0);
|
||||
|
||||
if (internal_err_reset) {
|
||||
spin_lock(&catas_lock);
|
||||
list_add(&priv->catas_err.list, &catas_list);
|
||||
spin_unlock(&catas_lock);
|
||||
|
||||
queue_work(mlx4_wq, &catas_work);
|
||||
}
|
||||
if (mlx4_is_slave(dev)) {
|
||||
slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
|
||||
if (mlx4_comm_internal_err(slave_read)) {
|
||||
mlx4_warn(dev, "Internal error detected on the communication channel\n");
|
||||
goto internal_err;
|
||||
}
|
||||
} else
|
||||
mod_timer(&priv->catas_err.timer,
|
||||
round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL));
|
||||
} else if (readl(priv->catas_err.map)) {
|
||||
dump_err_buf(dev);
|
||||
goto internal_err;
|
||||
}
|
||||
|
||||
if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
|
||||
mlx4_warn(dev, "Internal error mark was detected on device\n");
|
||||
goto internal_err;
|
||||
}
|
||||
|
||||
mod_timer(&priv->catas_err.timer,
|
||||
round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL));
|
||||
return;
|
||||
|
||||
internal_err:
|
||||
if (mlx4_internal_err_reset)
|
||||
queue_work(dev->persist->catas_wq, &dev->persist->catas_work);
|
||||
}
|
||||
|
||||
static void catas_reset(struct work_struct *work)
|
||||
{
|
||||
struct mlx4_priv *priv, *tmppriv;
|
||||
struct mlx4_dev *dev;
|
||||
struct mlx4_dev_persistent *persist =
|
||||
container_of(work, struct mlx4_dev_persistent,
|
||||
catas_work);
|
||||
|
||||
LIST_HEAD(tlist);
|
||||
int ret;
|
||||
|
||||
spin_lock_irq(&catas_lock);
|
||||
list_splice_init(&catas_list, &tlist);
|
||||
spin_unlock_irq(&catas_lock);
|
||||
|
||||
list_for_each_entry_safe(priv, tmppriv, &tlist, catas_err.list) {
|
||||
struct pci_dev *pdev = priv->dev.pdev;
|
||||
|
||||
/* If the device is off-line, we cannot reset it */
|
||||
if (pci_channel_offline(pdev))
|
||||
continue;
|
||||
|
||||
ret = mlx4_restart_one(priv->dev.pdev);
|
||||
/* 'priv' now is not valid */
|
||||
if (ret)
|
||||
pr_err("mlx4 %s: Reset failed (%d)\n",
|
||||
pci_name(pdev), ret);
|
||||
else {
|
||||
dev = pci_get_drvdata(pdev);
|
||||
mlx4_dbg(dev, "Reset succeeded\n");
|
||||
}
|
||||
}
|
||||
mlx4_handle_error_state(persist);
|
||||
}
|
||||
|
||||
void mlx4_start_catas_poll(struct mlx4_dev *dev)
|
||||
@ -128,22 +271,21 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev)
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
phys_addr_t addr;
|
||||
|
||||
/*If we are in SRIOV the default of the module param must be 0*/
|
||||
if (mlx4_is_mfunc(dev))
|
||||
internal_err_reset = 0;
|
||||
|
||||
INIT_LIST_HEAD(&priv->catas_err.list);
|
||||
init_timer(&priv->catas_err.timer);
|
||||
priv->catas_err.map = NULL;
|
||||
|
||||
addr = pci_resource_start(dev->pdev, priv->fw.catas_bar) +
|
||||
priv->fw.catas_offset;
|
||||
if (!mlx4_is_slave(dev)) {
|
||||
addr = pci_resource_start(dev->persist->pdev,
|
||||
priv->fw.catas_bar) +
|
||||
priv->fw.catas_offset;
|
||||
|
||||
priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
|
||||
if (!priv->catas_err.map) {
|
||||
mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n",
|
||||
(unsigned long long) addr);
|
||||
return;
|
||||
priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
|
||||
if (!priv->catas_err.map) {
|
||||
mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n",
|
||||
(unsigned long long)addr);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
priv->catas_err.timer.data = (unsigned long) dev;
|
||||
@ -164,12 +306,24 @@ void mlx4_stop_catas_poll(struct mlx4_dev *dev)
|
||||
priv->catas_err.map = NULL;
|
||||
}
|
||||
|
||||
spin_lock_irq(&catas_lock);
|
||||
list_del_init(&priv->catas_err.list);
|
||||
spin_unlock_irq(&catas_lock);
|
||||
if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION)
|
||||
flush_workqueue(dev->persist->catas_wq);
|
||||
}
|
||||
|
||||
void __init mlx4_catas_init(void)
|
||||
int mlx4_catas_init(struct mlx4_dev *dev)
|
||||
{
|
||||
INIT_WORK(&catas_work, catas_reset);
|
||||
INIT_WORK(&dev->persist->catas_work, catas_reset);
|
||||
dev->persist->catas_wq = create_singlethread_workqueue("mlx4_health");
|
||||
if (!dev->persist->catas_wq)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void mlx4_catas_end(struct mlx4_dev *dev)
|
||||
{
|
||||
if (dev->persist->catas_wq) {
|
||||
destroy_workqueue(dev->persist->catas_wq);
|
||||
dev->persist->catas_wq = NULL;
|
||||
}
|
||||
}
|
||||
|
File diff suppressed because it is too large
@ -53,18 +53,10 @@
|
||||
|
||||
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
|
||||
{
|
||||
struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
|
||||
struct mlx4_cq *cq;
|
||||
|
||||
read_lock(&cq_table->cq_table_lock);
|
||||
|
||||
cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
|
||||
cqn & (dev->caps.num_cqs - 1));
|
||||
if (cq)
|
||||
atomic_inc(&cq->refcount);
|
||||
|
||||
read_unlock(&cq_table->cq_table_lock);
|
||||
|
||||
if (!cq) {
|
||||
mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
|
||||
return;
|
||||
@ -73,9 +65,6 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
|
||||
++cq->arm_sn;
|
||||
|
||||
cq->comp(cq);
|
||||
|
||||
if (atomic_dec_and_test(&cq->refcount))
|
||||
complete(&cq->free);
|
||||
}
|
||||
|
||||
void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
|
||||
@ -83,13 +72,13 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
|
||||
struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
|
||||
struct mlx4_cq *cq;
|
||||
|
||||
read_lock(&cq_table->cq_table_lock);
|
||||
spin_lock(&cq_table->lock);
|
||||
|
||||
cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
|
||||
if (cq)
|
||||
atomic_inc(&cq->refcount);
|
||||
|
||||
read_unlock(&cq_table->cq_table_lock);
|
||||
spin_unlock(&cq_table->lock);
|
||||
|
||||
if (!cq) {
|
||||
mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
|
||||
@ -137,8 +126,6 @@ int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
|
||||
return PTR_ERR(mailbox);
|
||||
|
||||
cq_context = mailbox->buf;
|
||||
memset(cq_context, 0, sizeof *cq_context);
|
||||
|
||||
cq_context->cq_max_count = cpu_to_be16(count);
|
||||
cq_context->cq_period = cpu_to_be16(period);
|
||||
|
||||
@ -162,8 +149,6 @@ int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
|
||||
return PTR_ERR(mailbox);
|
||||
|
||||
cq_context = mailbox->buf;
|
||||
memset(cq_context, 0, sizeof *cq_context);
|
||||
|
||||
cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
|
||||
cq_context->log_page_size = mtt->page_shift - 12;
|
||||
mtt_addr = mlx4_mtt_addr(dev, mtt);
|
||||
@ -177,28 +162,6 @@ int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_cq_resize);
|
||||
|
||||
int mlx4_cq_ignore_overrun(struct mlx4_dev *dev, struct mlx4_cq *cq)
|
||||
{
|
||||
struct mlx4_cmd_mailbox *mailbox;
|
||||
struct mlx4_cq_context *cq_context;
|
||||
int err;
|
||||
|
||||
mailbox = mlx4_alloc_cmd_mailbox(dev);
|
||||
if (IS_ERR(mailbox))
|
||||
return PTR_ERR(mailbox);
|
||||
|
||||
cq_context = mailbox->buf;
|
||||
memset(cq_context, 0, sizeof *cq_context);
|
||||
|
||||
cq_context->flags |= cpu_to_be32(MLX4_CQ_FLAG_OI);
|
||||
|
||||
err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 3);
|
||||
|
||||
mlx4_free_cmd_mailbox(dev, mailbox);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_cq_ignore_overrun);
|
||||
|
||||
int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
|
||||
{
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
@ -209,11 +172,11 @@ int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
|
||||
if (*cqn == -1)
|
||||
return -ENOMEM;
|
||||
|
||||
err = mlx4_table_get(dev, &cq_table->table, *cqn);
|
||||
err = mlx4_table_get(dev, &cq_table->table, *cqn, GFP_KERNEL);
|
||||
if (err)
|
||||
goto err_out;
|
||||
|
||||
err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn);
|
||||
err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn, GFP_KERNEL);
|
||||
if (err)
|
||||
goto err_put;
|
||||
return 0;
|
||||
@ -283,7 +246,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
|
||||
u64 mtt_addr;
|
||||
int err;
|
||||
|
||||
if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool)
|
||||
if (vector >= dev->caps.num_comp_vectors)
|
||||
return -EINVAL;
|
||||
|
||||
cq->vector = vector;
|
||||
@ -305,14 +268,14 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
|
||||
}
|
||||
|
||||
cq_context = mailbox->buf;
|
||||
memset(cq_context, 0, sizeof *cq_context);
|
||||
|
||||
cq_context->flags = cpu_to_be32(!!collapsed << 18);
|
||||
if (timestamp_en)
|
||||
cq_context->flags |= cpu_to_be32(1 << 19);
|
||||
|
||||
cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
|
||||
cq_context->comp_eqn = priv->eq_table.eq[vector].eqn;
|
||||
cq_context->logsize_usrpage =
|
||||
cpu_to_be32((ilog2(nent) << 24) |
|
||||
mlx4_to_hw_uar_index(dev, uar->index));
|
||||
cq_context->comp_eqn = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn;
|
||||
cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
|
||||
|
||||
mtt_addr = mlx4_mtt_addr(dev, mtt);
|
||||
@ -331,9 +294,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
|
||||
atomic_set(&cq->refcount, 1);
|
||||
init_completion(&cq->free);
|
||||
|
||||
cq->eqn = priv->eq_table.eq[cq->vector].eqn;
|
||||
cq->irq = priv->eq_table.eq[cq->vector].irq;
|
||||
|
||||
cq->irq = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].irq;
|
||||
return 0;
|
||||
|
||||
err_radix:
|
||||
@ -358,7 +319,10 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
|
||||
if (err)
|
||||
mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
|
||||
|
||||
synchronize_irq(priv->eq_table.eq[cq->vector].irq);
|
||||
synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
|
||||
if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
|
||||
priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
|
||||
synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
|
||||
|
||||
spin_lock_irq(&cq_table->lock);
|
||||
radix_tree_delete(&cq_table->tree, cq->cqn);
|
||||
@ -378,7 +342,6 @@ int mlx4_init_cq_table(struct mlx4_dev *dev)
|
||||
int err;
|
||||
|
||||
spin_lock_init(&cq_table->lock);
|
||||
rwlock_init(&cq_table->cq_table_lock);
|
||||
INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
|
||||
if (mlx4_is_slave(dev))
|
||||
return 0;
|
||||
|
File diff suppressed because it is too large
File diff suppressed because it is too large
289
sys/dev/mlx4/mlx4_core/mlx4_fw_qos.c
Normal file
@ -0,0 +1,289 @@
|
||||
/*
|
||||
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
|
||||
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include "fw_qos.h"
|
||||
#include "fw.h"
|
||||
|
||||
enum {
|
||||
/* allocate vpp opcode modifiers */
|
||||
MLX4_ALLOCATE_VPP_ALLOCATE = 0x0,
|
||||
MLX4_ALLOCATE_VPP_QUERY = 0x1
|
||||
};
|
||||
|
||||
enum {
|
||||
/* set vport qos opcode modifiers */
|
||||
MLX4_SET_VPORT_QOS_SET = 0x0,
|
||||
MLX4_SET_VPORT_QOS_QUERY = 0x1
|
||||
};
|
||||
|
||||
struct mlx4_set_port_prio2tc_context {
|
||||
u8 prio2tc[4];
|
||||
};
|
||||
|
||||
struct mlx4_port_scheduler_tc_cfg_be {
|
||||
__be16 pg;
|
||||
__be16 bw_precentage;
|
||||
__be16 max_bw_units; /* 3-100Mbps, 4-1Gbps, other values - reserved */
|
||||
__be16 max_bw_value;
|
||||
};
|
||||
|
||||
struct mlx4_set_port_scheduler_context {
|
||||
struct mlx4_port_scheduler_tc_cfg_be tc[MLX4_NUM_TC];
|
||||
};
|
||||
|
||||
/* Granular Qos (per VF) section */
|
||||
struct mlx4_alloc_vpp_param {
|
||||
__be32 availible_vpp;
|
||||
__be32 vpp_p_up[MLX4_NUM_UP];
|
||||
};
|
||||
|
||||
struct mlx4_prio_qos_param {
|
||||
__be32 bw_share;
|
||||
__be32 max_avg_bw;
|
||||
__be32 reserved;
|
||||
__be32 enable;
|
||||
__be32 reserved1[4];
|
||||
};
|
||||
|
||||
struct mlx4_set_vport_context {
|
||||
__be32 reserved[8];
|
||||
struct mlx4_prio_qos_param qos_p_up[MLX4_NUM_UP];
|
||||
};
|
||||
|
||||
int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc)
|
||||
{
|
||||
struct mlx4_cmd_mailbox *mailbox;
|
||||
struct mlx4_set_port_prio2tc_context *context;
|
||||
int err;
|
||||
u32 in_mod;
|
||||
int i;
|
||||
|
||||
mailbox = mlx4_alloc_cmd_mailbox(dev);
|
||||
if (IS_ERR(mailbox))
|
||||
return PTR_ERR(mailbox);
|
||||
|
||||
context = mailbox->buf;
|
||||
|
||||
for (i = 0; i < MLX4_NUM_UP; i += 2)
|
||||
context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];
|
||||
|
||||
in_mod = MLX4_SET_PORT_PRIO2TC << 8 | port;
|
||||
err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
|
||||
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
|
||||
|
||||
mlx4_free_cmd_mailbox(dev, mailbox);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC);
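The prio2tc loop above packs two 4-bit traffic-class values into each byte, high nibble first. A standalone sketch (not driver code; the identity mapping used as input is hypothetical) shows the resulting bytes:

#include <stdio.h>
#include <stdint.h>

/* Mirrors the nibble packing done in mlx4_SET_PORT_PRIO2TC above. */
int main(void)
{
	uint8_t prio2tc[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
	uint8_t packed[4];
	int i;

	for (i = 0; i < 8; i += 2)
		packed[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];

	for (i = 0; i < 4; i++)
		printf("prio2tc[%d] = 0x%02x\n", i, packed[i]); /* 0x01 0x23 0x45 0x67 */
	return 0;
}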
|
||||
|
||||
int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
|
||||
u8 *pg, u16 *ratelimit)
|
||||
{
|
||||
struct mlx4_cmd_mailbox *mailbox;
|
||||
struct mlx4_set_port_scheduler_context *context;
|
||||
int err;
|
||||
u32 in_mod;
|
||||
int i;
|
||||
|
||||
mailbox = mlx4_alloc_cmd_mailbox(dev);
|
||||
if (IS_ERR(mailbox))
|
||||
return PTR_ERR(mailbox);
|
||||
|
||||
context = mailbox->buf;
|
||||
|
||||
for (i = 0; i < MLX4_NUM_TC; i++) {
|
||||
struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
|
||||
u16 r;
|
||||
|
||||
if (ratelimit && ratelimit[i]) {
|
||||
if (ratelimit[i] <= MLX4_MAX_100M_UNITS_VAL) {
|
||||
r = ratelimit[i];
|
||||
tc->max_bw_units =
|
||||
htons(MLX4_RATELIMIT_100M_UNITS);
|
||||
} else {
|
||||
r = ratelimit[i] / 10;
|
||||
tc->max_bw_units =
|
||||
htons(MLX4_RATELIMIT_1G_UNITS);
|
||||
}
|
||||
tc->max_bw_value = htons(r);
|
||||
} else {
|
||||
tc->max_bw_value = htons(MLX4_RATELIMIT_DEFAULT);
|
||||
tc->max_bw_units = htons(MLX4_RATELIMIT_1G_UNITS);
|
||||
}
|
||||
|
||||
tc->pg = htons(pg[i]);
|
||||
tc->bw_precentage = htons(tc_tx_bw[i]);
|
||||
}
|
||||
|
||||
in_mod = MLX4_SET_PORT_SCHEDULER << 8 | port;
|
||||
err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
|
||||
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
|
||||
|
||||
mlx4_free_cmd_mailbox(dev, mailbox);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER);
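The rate-limit branch above keeps values that fit within MLX4_MAX_100M_UNITS_VAL in 100 Mbps units and otherwise divides by 10 and switches to 1 Gbps units, which suggests the ratelimit array is expressed in 100 Mbps steps. Below is a standalone sketch of that selection rule; the cutoff of 255 and the unit codes are assumptions taken from the max_bw_units comment above, not verified constants:

#include <stdio.h>

#define MAX_100M_UNITS_VAL	255	/* assumed value of MLX4_MAX_100M_UNITS_VAL */
#define RATELIMIT_100M_UNITS	3	/* per the comment above: 3 = 100 Mbps units */
#define RATELIMIT_1G_UNITS	4	/* per the comment above: 4 = 1 Gbps units */

static void pick_units(unsigned int ratelimit, unsigned int *units, unsigned int *value)
{
	if (ratelimit <= MAX_100M_UNITS_VAL) {
		*units = RATELIMIT_100M_UNITS;
		*value = ratelimit;
	} else {
		*units = RATELIMIT_1G_UNITS;
		*value = ratelimit / 10;
	}
}

int main(void)
{
	unsigned int units, value;

	pick_units(200, &units, &value);	/* fits: units=3, value=200 */
	printf("units=%u value=%u\n", units, value);

	pick_units(4000, &units, &value);	/* too large: units=4, value=400 */
	printf("units=%u value=%u\n", units, value);
	return 0;
}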
|
||||
|
||||
int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port,
|
||||
u16 *availible_vpp, u8 *vpp_p_up)
|
||||
{
|
||||
int i;
|
||||
int err;
|
||||
struct mlx4_cmd_mailbox *mailbox;
|
||||
struct mlx4_alloc_vpp_param *out_param;
|
||||
|
||||
mailbox = mlx4_alloc_cmd_mailbox(dev);
|
||||
if (IS_ERR(mailbox))
|
||||
return PTR_ERR(mailbox);
|
||||
|
||||
out_param = mailbox->buf;
|
||||
|
||||
err = mlx4_cmd_box(dev, 0, mailbox->dma, port,
|
||||
MLX4_ALLOCATE_VPP_QUERY,
|
||||
MLX4_CMD_ALLOCATE_VPP,
|
||||
MLX4_CMD_TIME_CLASS_A,
|
||||
MLX4_CMD_NATIVE);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
/* Total number of supported VPPs */
|
||||
*availible_vpp = (u16)be32_to_cpu(out_param->availible_vpp);
|
||||
|
||||
for (i = 0; i < MLX4_NUM_UP; i++)
|
||||
vpp_p_up[i] = (u8)be32_to_cpu(out_param->vpp_p_up[i]);
|
||||
|
||||
out:
|
||||
mlx4_free_cmd_mailbox(dev, mailbox);
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL(mlx4_ALLOCATE_VPP_get);
|
||||
|
||||
int mlx4_ALLOCATE_VPP_set(struct mlx4_dev *dev, u8 port, u8 *vpp_p_up)
|
||||
{
|
||||
int i;
|
||||
int err;
|
||||
struct mlx4_cmd_mailbox *mailbox;
|
||||
struct mlx4_alloc_vpp_param *in_param;
|
||||
|
||||
mailbox = mlx4_alloc_cmd_mailbox(dev);
|
||||
if (IS_ERR(mailbox))
|
||||
return PTR_ERR(mailbox);
|
||||
|
||||
in_param = mailbox->buf;
|
||||
|
||||
for (i = 0; i < MLX4_NUM_UP; i++)
|
||||
in_param->vpp_p_up[i] = cpu_to_be32(vpp_p_up[i]);
|
||||
|
||||
err = mlx4_cmd(dev, mailbox->dma, port,
|
||||
MLX4_ALLOCATE_VPP_ALLOCATE,
|
||||
MLX4_CMD_ALLOCATE_VPP,
|
||||
MLX4_CMD_TIME_CLASS_A,
|
||||
MLX4_CMD_NATIVE);
|
||||
|
||||
mlx4_free_cmd_mailbox(dev, mailbox);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL(mlx4_ALLOCATE_VPP_set);
|
||||
|
||||
int mlx4_SET_VPORT_QOS_get(struct mlx4_dev *dev, u8 port, u8 vport,
|
||||
struct mlx4_vport_qos_param *out_param)
|
||||
{
|
||||
int i;
|
||||
int err;
|
||||
struct mlx4_cmd_mailbox *mailbox;
|
||||
struct mlx4_set_vport_context *ctx;
|
||||
|
||||
mailbox = mlx4_alloc_cmd_mailbox(dev);
|
||||
if (IS_ERR(mailbox))
|
||||
return PTR_ERR(mailbox);
|
||||
|
||||
ctx = mailbox->buf;
|
||||
|
||||
err = mlx4_cmd_box(dev, 0, mailbox->dma, (vport << 8) | port,
|
||||
MLX4_SET_VPORT_QOS_QUERY,
|
||||
MLX4_CMD_SET_VPORT_QOS,
|
||||
MLX4_CMD_TIME_CLASS_A,
|
||||
MLX4_CMD_NATIVE);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
for (i = 0; i < MLX4_NUM_UP; i++) {
|
||||
out_param[i].bw_share = be32_to_cpu(ctx->qos_p_up[i].bw_share);
|
||||
out_param[i].max_avg_bw =
|
||||
be32_to_cpu(ctx->qos_p_up[i].max_avg_bw);
|
||||
out_param[i].enable =
|
||||
!!(be32_to_cpu(ctx->qos_p_up[i].enable) & 31);
|
||||
}
|
||||
|
||||
out:
|
||||
mlx4_free_cmd_mailbox(dev, mailbox);
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL(mlx4_SET_VPORT_QOS_get);
|
||||
|
||||
int mlx4_SET_VPORT_QOS_set(struct mlx4_dev *dev, u8 port, u8 vport,
|
||||
struct mlx4_vport_qos_param *in_param)
|
||||
{
|
||||
int i;
|
||||
int err;
|
||||
struct mlx4_cmd_mailbox *mailbox;
|
||||
struct mlx4_set_vport_context *ctx;
|
||||
|
||||
mailbox = mlx4_alloc_cmd_mailbox(dev);
|
||||
if (IS_ERR(mailbox))
|
||||
return PTR_ERR(mailbox);
|
||||
|
||||
ctx = mailbox->buf;
|
||||
|
||||
for (i = 0; i < MLX4_NUM_UP; i++) {
|
||||
ctx->qos_p_up[i].bw_share = cpu_to_be32(in_param[i].bw_share);
|
||||
ctx->qos_p_up[i].max_avg_bw =
|
||||
cpu_to_be32(in_param[i].max_avg_bw);
|
||||
ctx->qos_p_up[i].enable =
|
||||
cpu_to_be32(in_param[i].enable << 31);
|
||||
}
|
||||
|
||||
err = mlx4_cmd(dev, mailbox->dma, (vport << 8) | port,
|
||||
MLX4_SET_VPORT_QOS_SET,
|
||||
MLX4_CMD_SET_VPORT_QOS,
|
||||
MLX4_CMD_TIME_CLASS_A,
|
||||
MLX4_CMD_NATIVE);
|
||||
|
||||
mlx4_free_cmd_mailbox(dev, mailbox);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL(mlx4_SET_VPORT_QOS_set);
|
@ -57,7 +57,7 @@ static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chu
|
||||
int i;
|
||||
|
||||
if (chunk->nsg > 0)
|
||||
pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
|
||||
pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages,
|
||||
PCI_DMA_BIDIRECTIONAL);
|
||||
|
||||
for (i = 0; i < chunk->npages; ++i)
|
||||
@ -70,7 +70,8 @@ static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *
|
||||
int i;
|
||||
|
||||
for (i = 0; i < chunk->npages; ++i)
|
||||
dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
|
||||
dma_free_coherent(&dev->persist->pdev->dev,
|
||||
chunk->mem[i].length,
|
||||
lowmem_page_address(sg_page(&chunk->mem[i])),
|
||||
sg_dma_address(&chunk->mem[i]));
|
||||
}
|
||||
@ -135,10 +136,12 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
|
||||
/* We use sg_set_buf for coherent allocs, which assumes low memory */
|
||||
BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));
|
||||
|
||||
icm = kmalloc_node(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN),
|
||||
icm = kmalloc_node(sizeof(*icm),
|
||||
gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN),
|
||||
dev->numa_node);
|
||||
if (!icm) {
|
||||
icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
|
||||
icm = kmalloc(sizeof(*icm),
|
||||
gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
|
||||
if (!icm)
|
||||
return NULL;
|
||||
}
|
||||
@ -150,12 +153,14 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
|
||||
|
||||
while (npages > 0) {
|
||||
if (!chunk) {
|
||||
chunk = kmalloc_node(sizeof *chunk,
|
||||
gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN),
|
||||
chunk = kmalloc_node(sizeof(*chunk),
|
||||
gfp_mask & ~(__GFP_HIGHMEM |
|
||||
__GFP_NOWARN),
|
||||
dev->numa_node);
|
||||
if (!chunk) {
|
||||
chunk = kmalloc(sizeof *chunk,
|
||||
gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
|
||||
chunk = kmalloc(sizeof(*chunk),
|
||||
gfp_mask & ~(__GFP_HIGHMEM |
|
||||
__GFP_NOWARN));
|
||||
if (!chunk)
|
||||
goto fail;
|
||||
}
|
||||
@ -170,7 +175,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
|
||||
--cur_order;
|
||||
|
||||
if (coherent)
|
||||
ret = mlx4_alloc_icm_coherent(&dev->pdev->dev,
|
||||
ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
|
||||
&chunk->mem[chunk->npages],
|
||||
cur_order, gfp_mask);
|
||||
else
|
||||
@ -190,7 +195,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
|
||||
if (coherent)
|
||||
++chunk->nsg;
|
||||
else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
|
||||
chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
|
||||
chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
|
||||
chunk->npages,
|
||||
PCI_DMA_BIDIRECTIONAL);
|
||||
|
||||
@ -205,7 +210,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
|
||||
}
|
||||
|
||||
if (!coherent && chunk) {
|
||||
chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
|
||||
chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
|
||||
chunk->npages,
|
||||
PCI_DMA_BIDIRECTIONAL);
|
||||
|
||||
@ -242,7 +247,8 @@ int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
|
||||
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
|
||||
}
|
||||
|
||||
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
|
||||
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj,
|
||||
gfp_t gfp)
|
||||
{
|
||||
u32 i = (obj & (table->num_obj - 1)) /
|
||||
(MLX4_TABLE_CHUNK_SIZE / table->obj_size);
|
||||
@ -256,7 +262,7 @@ int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
|
||||
}
|
||||
|
||||
table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
|
||||
(table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
|
||||
(table->lowmem ? gfp : GFP_HIGHUSER) |
|
||||
__GFP_NOWARN, table->coherent);
|
||||
if (!table->icm[i]) {
|
||||
ret = -ENOMEM;
|
||||
@ -289,14 +295,10 @@ void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
|
||||
|
||||
if (--table->icm[i]->refcount == 0) {
|
||||
offset = (u64) i * MLX4_TABLE_CHUNK_SIZE;
|
||||
|
||||
if (!mlx4_UNMAP_ICM(dev, table->virt + offset,
|
||||
MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE)) {
|
||||
mlx4_free_icm(dev, table->icm[i], table->coherent);
|
||||
table->icm[i] = NULL;
|
||||
} else {
|
||||
pr_warn("mlx4_core: mlx4_UNMAP_ICM failed.\n");
|
||||
}
|
||||
mlx4_UNMAP_ICM(dev, table->virt + offset,
|
||||
MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
|
||||
mlx4_free_icm(dev, table->icm[i], table->coherent);
|
||||
table->icm[i] = NULL;
|
||||
}
|
||||
|
||||
mutex_unlock(&table->mutex);
|
||||
@ -357,7 +359,7 @@ int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
|
||||
u32 i;
|
||||
|
||||
for (i = start; i <= end; i += inc) {
|
||||
err = mlx4_table_get(dev, table, i);
|
||||
err = mlx4_table_get(dev, table, i, GFP_KERNEL);
|
||||
if (err)
|
||||
goto fail;
|
||||
}
|
||||
@ -383,7 +385,7 @@ void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
|
||||
}
|
||||
|
||||
int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
|
||||
u64 virt, int obj_size, u64 nobj, int reserved,
|
||||
u64 virt, int obj_size, u32 nobj, int reserved,
|
||||
int use_lowmem, int use_coherent)
|
||||
{
|
||||
int obj_per_chunk;
|
||||
@ -393,7 +395,7 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
|
||||
u64 size;
|
||||
|
||||
obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
|
||||
num_icm = div_u64((nobj + obj_per_chunk - 1), obj_per_chunk);
|
||||
num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;
|
||||
|
||||
table->icm = kcalloc(num_icm, sizeof *table->icm, GFP_KERNEL);
|
||||
if (!table->icm)
|
||||
@ -436,15 +438,11 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
|
||||
err:
|
||||
for (i = 0; i < num_icm; ++i)
|
||||
if (table->icm[i]) {
|
||||
if (!mlx4_UNMAP_ICM(dev,
|
||||
virt + i * MLX4_TABLE_CHUNK_SIZE,
|
||||
MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE)) {
|
||||
mlx4_free_icm(dev, table->icm[i], use_coherent);
|
||||
} else {
|
||||
pr_warn("mlx4_core: mlx4_UNMAP_ICM failed.\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE,
|
||||
MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
|
||||
mlx4_free_icm(dev, table->icm[i], use_coherent);
|
||||
}
|
||||
|
||||
kfree(table->icm);
|
||||
|
||||
return -ENOMEM;
|
||||
@ -452,22 +450,14 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
|
||||
|
||||
void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
|
||||
{
|
||||
int i, err = 0;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < table->num_icm; ++i)
|
||||
if (table->icm[i]) {
|
||||
err = mlx4_UNMAP_ICM(dev,
|
||||
table->virt + i * MLX4_TABLE_CHUNK_SIZE,
|
||||
MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
|
||||
if (!err) {
|
||||
mlx4_free_icm(dev, table->icm[i],
|
||||
table->coherent);
|
||||
} else {
|
||||
pr_warn("mlx4_core: mlx4_UNMAP_ICM failed.\n");
|
||||
break;
|
||||
}
|
||||
mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
|
||||
MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
|
||||
mlx4_free_icm(dev, table->icm[i], table->coherent);
|
||||
}
|
||||
|
||||
if (!err)
|
||||
kfree(table->icm);
|
||||
kfree(table->icm);
|
||||
}
|
||||
|
@ -38,6 +38,7 @@
|
||||
|
||||
struct mlx4_device_context {
|
||||
struct list_head list;
|
||||
struct list_head bond_list;
|
||||
struct mlx4_interface *intf;
|
||||
void *context;
|
||||
};
|
||||
@ -61,6 +62,8 @@ static void mlx4_add_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
|
||||
spin_lock_irq(&priv->ctx_lock);
|
||||
list_add_tail(&dev_ctx->list, &priv->ctx_list);
|
||||
spin_unlock_irq(&priv->ctx_lock);
|
||||
if (intf->activate)
|
||||
intf->activate(&priv->dev, dev_ctx->context);
|
||||
} else
|
||||
kfree(dev_ctx);
|
||||
}
|
||||
@ -91,8 +94,14 @@ int mlx4_register_interface(struct mlx4_interface *intf)
|
||||
mutex_lock(&intf_mutex);
|
||||
|
||||
list_add_tail(&intf->list, &intf_list);
|
||||
list_for_each_entry(priv, &dev_list, dev_list)
|
||||
list_for_each_entry(priv, &dev_list, dev_list) {
|
||||
if (mlx4_is_mfunc(&priv->dev) && (intf->flags & MLX4_INTFF_BONDING)) {
|
||||
mlx4_dbg(&priv->dev,
|
||||
"SRIOV, disabling HA mode for intf proto %d\n", intf->protocol);
|
||||
intf->flags &= ~MLX4_INTFF_BONDING;
|
||||
}
|
||||
mlx4_add_device(intf, priv);
|
||||
}
|
||||
|
||||
mutex_unlock(&intf_mutex);
|
||||
|
||||
@ -115,6 +124,58 @@ void mlx4_unregister_interface(struct mlx4_interface *intf)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_unregister_interface);
|
||||
|
||||
int mlx4_do_bond(struct mlx4_dev *dev, bool enable)
|
||||
{
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
struct mlx4_device_context *dev_ctx = NULL, *temp_dev_ctx;
|
||||
unsigned long flags;
|
||||
int ret;
|
||||
LIST_HEAD(bond_list);
|
||||
|
||||
if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
|
||||
return -ENOTSUPP;
|
||||
|
||||
ret = mlx4_disable_rx_port_check(dev, enable);
|
||||
if (ret) {
|
||||
mlx4_err(dev, "Fail to %s rx port check\n",
|
||||
enable ? "enable" : "disable");
|
||||
return ret;
|
||||
}
|
||||
if (enable) {
|
||||
dev->flags |= MLX4_FLAG_BONDED;
|
||||
} else {
|
||||
ret = mlx4_virt2phy_port_map(dev, 1, 2);
|
||||
if (ret) {
|
||||
mlx4_err(dev, "Fail to reset port map\n");
|
||||
return ret;
|
||||
}
|
||||
dev->flags &= ~MLX4_FLAG_BONDED;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&priv->ctx_lock, flags);
|
||||
list_for_each_entry_safe(dev_ctx, temp_dev_ctx, &priv->ctx_list, list) {
|
||||
if (dev_ctx->intf->flags & MLX4_INTFF_BONDING) {
|
||||
list_add_tail(&dev_ctx->bond_list, &bond_list);
|
||||
list_del(&dev_ctx->list);
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&priv->ctx_lock, flags);
|
||||
|
||||
list_for_each_entry(dev_ctx, &bond_list, bond_list) {
|
||||
dev_ctx->intf->remove(dev, dev_ctx->context);
|
||||
dev_ctx->context = dev_ctx->intf->add(dev);
|
||||
|
||||
spin_lock_irqsave(&priv->ctx_lock, flags);
|
||||
list_add_tail(&dev_ctx->list, &priv->ctx_list);
|
||||
spin_unlock_irqrestore(&priv->ctx_lock, flags);
|
||||
|
||||
mlx4_dbg(dev, "Inrerface for protocol %d restarted with when bonded mode is %s\n",
|
||||
dev_ctx->intf->protocol, enable ?
|
||||
"enabled" : "disabled");
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
|
||||
unsigned long param)
|
||||
{
|
||||
@ -138,13 +199,13 @@ int mlx4_register_device(struct mlx4_dev *dev)
|
||||
|
||||
mutex_lock(&intf_mutex);
|
||||
|
||||
dev->persist->interface_state |= MLX4_INTERFACE_STATE_UP;
|
||||
list_add_tail(&priv->dev_list, &dev_list);
|
||||
list_for_each_entry(intf, &intf_list, list)
|
||||
mlx4_add_device(intf, priv);
|
||||
|
||||
mutex_unlock(&intf_mutex);
|
||||
if (!mlx4_is_slave(dev))
|
||||
mlx4_start_catas_poll(dev);
|
||||
mlx4_start_catas_poll(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -154,14 +215,17 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
struct mlx4_interface *intf;
|
||||
|
||||
if (!mlx4_is_slave(dev))
|
||||
mlx4_stop_catas_poll(dev);
|
||||
if (!(dev->persist->interface_state & MLX4_INTERFACE_STATE_UP))
|
||||
return;
|
||||
|
||||
mlx4_stop_catas_poll(dev);
|
||||
mutex_lock(&intf_mutex);
|
||||
|
||||
list_for_each_entry(intf, &intf_list, list)
|
||||
mlx4_remove_device(intf, priv);
|
||||
|
||||
list_del_init(&priv->dev_list);
|
||||
list_del(&priv->dev_list);
|
||||
dev->persist->interface_state &= ~MLX4_INTERFACE_STATE_UP;
|
||||
|
||||
mutex_unlock(&intf_mutex);
|
||||
}
|
||||
@ -186,3 +250,4 @@ void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int
|
||||
return result;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_get_protocol_dev);
|
||||
|
||||
|
File diff suppressed because it is too large
@ -323,15 +323,14 @@ static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* returns true if all the QPs != tqpn contained in this entry
|
||||
* are Promisc QPs. return false otherwise.
|
||||
/* Returns true if all the QPs != tqpn contained in this entry
|
||||
* are Promisc QPs. Returns false otherwise.
|
||||
*/
|
||||
static bool promisc_steering_entry(struct mlx4_dev *dev, u8 port,
|
||||
enum mlx4_steer_type steer,
|
||||
unsigned int index, u32 tqpn, u32 *members_count)
|
||||
enum mlx4_steer_type steer,
|
||||
unsigned int index, u32 tqpn,
|
||||
u32 *members_count)
|
||||
{
|
||||
struct mlx4_steer *s_steer;
|
||||
struct mlx4_cmd_mailbox *mailbox;
|
||||
struct mlx4_mgm *mgm;
|
||||
u32 m_count;
|
||||
@ -341,8 +340,6 @@ static bool promisc_steering_entry(struct mlx4_dev *dev, u8 port,
|
||||
if (port < 1 || port > dev->caps.num_ports)
|
||||
return false;
|
||||
|
||||
s_steer = &mlx4_priv(dev)->steer[port - 1];
|
||||
|
||||
mailbox = mlx4_alloc_cmd_mailbox(dev);
|
||||
if (IS_ERR(mailbox))
|
||||
return false;
|
||||
@ -382,7 +379,8 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
|
||||
|
||||
s_steer = &mlx4_priv(dev)->steer[port - 1];
|
||||
|
||||
if (!promisc_steering_entry(dev, port, steer, index, tqpn, &members_count))
|
||||
if (!promisc_steering_entry(dev, port, steer, index,
|
||||
tqpn, &members_count))
|
||||
goto out;
|
||||
|
||||
/* All the qps currently registered for this entry are promiscuous,
|
||||
@ -390,10 +388,10 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
|
||||
ret = true;
|
||||
list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
|
||||
if (entry->index == index) {
|
||||
if (list_empty(&entry->duplicates) || members_count == 1) {
|
||||
if (list_empty(&entry->duplicates) ||
|
||||
members_count == 1) {
|
||||
struct mlx4_promisc_qp *pqp, *tmp_pqp;
|
||||
/*
|
||||
* If there is only 1 entry in duplicates than
|
||||
/* If there is only 1 entry in duplicates then
|
||||
* this is the QP we want to delete, going over
|
||||
* the list and deleting the entry.
|
||||
*/
|
||||
@@ -460,40 +458,53 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
|
||||
mgm = mailbox->buf;
|
||||
|
||||
if (!(mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)) {
|
||||
/* the promisc qp needs to be added for each one of the steering
|
||||
* entries, if it already exists, needs to be added as a duplicate
|
||||
* for this entry */
|
||||
list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
|
||||
/* The promisc QP needs to be added for each one of the steering
|
||||
* entries. If it already exists, needs to be added as
|
||||
* a duplicate for this entry.
|
||||
*/
|
||||
list_for_each_entry(entry,
|
||||
&s_steer->steer_entries[steer],
|
||||
list) {
|
||||
err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
|
||||
if (err)
|
||||
goto out_mailbox;
|
||||
|
||||
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
|
||||
members_count = be32_to_cpu(mgm->members_count) &
|
||||
0xffffff;
|
||||
prot = be32_to_cpu(mgm->members_count) >> 30;
|
||||
found = false;
|
||||
for (i = 0; i < members_count; i++) {
|
||||
if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
|
||||
/* Entry already exists, add to duplicates */
|
||||
dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
|
||||
if ((be32_to_cpu(mgm->qp[i]) &
|
||||
MGM_QPN_MASK) == qpn) {
|
||||
/* Entry already exists.
|
||||
* Add to duplicates.
|
||||
*/
|
||||
dqp = kmalloc(sizeof(*dqp), GFP_KERNEL);
|
||||
if (!dqp) {
|
||||
err = -ENOMEM;
|
||||
goto out_mailbox;
|
||||
}
|
||||
dqp->qpn = qpn;
|
||||
list_add_tail(&dqp->list, &entry->duplicates);
|
||||
list_add_tail(&dqp->list,
|
||||
&entry->duplicates);
|
||||
found = true;
|
||||
}
|
||||
}
|
||||
if (!found) {
|
||||
/* Need to add the qpn to mgm */
|
||||
if (members_count == dev->caps.num_qp_per_mgm) {
|
||||
if (members_count ==
|
||||
dev->caps.num_qp_per_mgm) {
|
||||
/* entry is full */
|
||||
err = -ENOMEM;
|
||||
goto out_mailbox;
|
||||
}
|
||||
mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK);
|
||||
mgm->members_count = cpu_to_be32(members_count | (prot << 30));
|
||||
err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
|
||||
mgm->qp[members_count++] =
|
||||
cpu_to_be32(qpn & MGM_QPN_MASK);
|
||||
mgm->members_count =
|
||||
cpu_to_be32(members_count |
|
||||
(prot << 30));
|
||||
err = mlx4_WRITE_ENTRY(dev, entry->index,
|
||||
mailbox);
|
||||
if (err)
|
||||
goto out_mailbox;
|
||||
}
|
||||
@@ -547,7 +558,7 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
|
||||
u32 members_count;
|
||||
bool found;
|
||||
bool back_to_list = false;
|
||||
int i, loc = -1;
|
||||
int i;
|
||||
int err;
|
||||
|
||||
if (port < 1 || port > dev->caps.num_ports)
|
||||
@@ -575,7 +586,6 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
|
||||
goto out_list;
|
||||
}
|
||||
mgm = mailbox->buf;
|
||||
memset(mgm, 0, sizeof *mgm);
|
||||
members_count = 0;
|
||||
list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
|
||||
mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
|
||||
@@ -586,8 +596,10 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
|
||||
goto out_mailbox;
|
||||
|
||||
if (!(mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)) {
|
||||
/* remove the qp from all the steering entries*/
|
||||
list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
|
||||
/* Remove the QP from all the steering entries */
|
||||
list_for_each_entry_safe(entry, tmp_entry,
|
||||
&s_steer->steer_entries[steer],
|
||||
list) {
|
||||
found = false;
|
||||
list_for_each_entry(dqp, &entry->duplicates, list) {
|
||||
if (dqp->qpn == qpn) {
|
||||
@@ -596,25 +608,33 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
|
||||
}
|
||||
}
|
||||
if (found) {
|
||||
/* a duplicate, no need to change the mgm,
|
||||
* only update the duplicates list */
|
||||
/* A duplicate, no need to change the MGM,
|
||||
* only update the duplicates list
|
||||
*/
|
||||
list_del(&dqp->list);
|
||||
kfree(dqp);
|
||||
} else {
|
||||
err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
|
||||
if (err)
|
||||
goto out_mailbox;
|
||||
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
|
||||
int loc = -1;
|
||||
|
||||
err = mlx4_READ_ENTRY(dev,
|
||||
entry->index,
|
||||
mailbox);
|
||||
if (err)
|
||||
goto out_mailbox;
|
||||
members_count =
|
||||
be32_to_cpu(mgm->members_count) &
|
||||
0xffffff;
|
||||
if (!members_count) {
|
||||
mlx4_warn(dev, "QP %06x wasn't found in entry %x mcount=0."
|
||||
" deleting entry...\n", qpn, entry->index);
|
||||
mlx4_warn(dev, "QP %06x wasn't found in entry %x mcount=0. deleting entry...\n",
|
||||
qpn, entry->index);
|
||||
list_del(&entry->list);
|
||||
kfree(entry);
|
||||
continue;
|
||||
}
|
||||
|
||||
for (i = 0; i < members_count; ++i)
|
||||
if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
|
||||
if ((be32_to_cpu(mgm->qp[i]) &
|
||||
MGM_QPN_MASK) == qpn) {
|
||||
loc = i;
|
||||
break;
|
||||
}
|
||||
@@ -626,15 +646,20 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
|
||||
goto out_mailbox;
|
||||
}
|
||||
|
||||
/* copy the last QP in this MGM over removed QP */
|
||||
/* Copy the last QP in this MGM
|
||||
* over removed QP
|
||||
*/
|
||||
mgm->qp[loc] = mgm->qp[members_count - 1];
|
||||
mgm->qp[members_count - 1] = 0;
|
||||
mgm->members_count = cpu_to_be32(--members_count |
|
||||
(MLX4_PROT_ETH << 30));
|
||||
mgm->members_count =
|
||||
cpu_to_be32(--members_count |
|
||||
(MLX4_PROT_ETH << 30));
|
||||
|
||||
err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
|
||||
if (err)
|
||||
goto out_mailbox;
|
||||
err = mlx4_WRITE_ENTRY(dev,
|
||||
entry->index,
|
||||
mailbox);
|
||||
if (err)
|
||||
goto out_mailbox;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -706,7 +731,7 @@ static int find_entry(struct mlx4_dev *dev, u8 port,
|
||||
|
||||
if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
|
||||
if (*index != hash) {
|
||||
mlx4_err(dev, "Found zero MGID in AMGM.\n");
|
||||
mlx4_err(dev, "Found zero MGID in AMGM\n");
|
||||
err = -EINVAL;
|
||||
}
|
||||
return err;
|
||||
@@ -728,20 +753,22 @@ static const u8 __promisc_mode[] = {
|
||||
[MLX4_FS_REGULAR] = 0x0,
|
||||
[MLX4_FS_ALL_DEFAULT] = 0x1,
|
||||
[MLX4_FS_MC_DEFAULT] = 0x3,
|
||||
[MLX4_FS_UC_SNIFFER] = 0x4,
|
||||
[MLX4_FS_MC_SNIFFER] = 0x5,
|
||||
[MLX4_FS_MIRROR_RX_PORT] = 0x4,
|
||||
[MLX4_FS_MIRROR_SX_PORT] = 0x5,
|
||||
[MLX4_FS_UC_SNIFFER] = 0x6,
|
||||
[MLX4_FS_MC_SNIFFER] = 0x7,
|
||||
};
|
||||
|
||||
int map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
|
||||
enum mlx4_net_trans_promisc_mode flow_type)
|
||||
int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
|
||||
enum mlx4_net_trans_promisc_mode flow_type)
|
||||
{
|
||||
if (flow_type >= MLX4_FS_MODE_NUM || flow_type < 0) {
|
||||
if (flow_type >= MLX4_FS_MODE_NUM) {
|
||||
mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type);
|
||||
return -EINVAL;
|
||||
}
|
||||
return __promisc_mode[flow_type];
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(map_sw_to_hw_steering_mode);
|
||||
EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_mode);
|
||||
|
||||
static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
|
||||
struct mlx4_net_trans_rule_hw_ctrl *hw)
|
||||
@@ -765,19 +792,20 @@ const u16 __sw_id_hw[] = {
|
||||
[MLX4_NET_TRANS_RULE_ID_IPV6] = 0xE003,
|
||||
[MLX4_NET_TRANS_RULE_ID_IPV4] = 0xE002,
|
||||
[MLX4_NET_TRANS_RULE_ID_TCP] = 0xE004,
|
||||
[MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006
|
||||
[MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006,
|
||||
[MLX4_NET_TRANS_RULE_ID_VXLAN] = 0xE008
|
||||
};
|
||||
|
||||
int map_sw_to_hw_steering_id(struct mlx4_dev *dev,
|
||||
enum mlx4_net_trans_rule_id id)
|
||||
int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
|
||||
enum mlx4_net_trans_rule_id id)
|
||||
{
|
||||
if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
|
||||
if (id >= MLX4_NET_TRANS_RULE_NUM) {
|
||||
mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
|
||||
return -EINVAL;
|
||||
}
|
||||
return __sw_id_hw[id];
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(map_sw_to_hw_steering_id);
|
||||
EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_id);
|
||||
|
||||
static const int __rule_hw_sz[] = {
|
||||
[MLX4_NET_TRANS_RULE_ID_ETH] =
|
||||
@@ -790,29 +818,31 @@ static const int __rule_hw_sz[] = {
|
||||
[MLX4_NET_TRANS_RULE_ID_TCP] =
|
||||
sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
|
||||
[MLX4_NET_TRANS_RULE_ID_UDP] =
|
||||
sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
|
||||
sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
|
||||
[MLX4_NET_TRANS_RULE_ID_VXLAN] =
|
||||
sizeof(struct mlx4_net_trans_rule_hw_vxlan)
|
||||
};
|
||||
|
||||
int hw_rule_sz(struct mlx4_dev *dev,
|
||||
int mlx4_hw_rule_sz(struct mlx4_dev *dev,
|
||||
enum mlx4_net_trans_rule_id id)
|
||||
{
|
||||
if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
|
||||
if (id >= MLX4_NET_TRANS_RULE_NUM) {
|
||||
mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return __rule_hw_sz[id];
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hw_rule_sz);
|
||||
EXPORT_SYMBOL_GPL(mlx4_hw_rule_sz);
|
||||
|
||||
static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
|
||||
struct _rule_hw *rule_hw)
|
||||
{
|
||||
if (hw_rule_sz(dev, spec->id) < 0)
|
||||
if (mlx4_hw_rule_sz(dev, spec->id) < 0)
|
||||
return -EINVAL;
|
||||
memset(rule_hw, 0, hw_rule_sz(dev, spec->id));
|
||||
memset(rule_hw, 0, mlx4_hw_rule_sz(dev, spec->id));
|
||||
rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]);
|
||||
rule_hw->size = hw_rule_sz(dev, spec->id) >> 2;
|
||||
rule_hw->size = mlx4_hw_rule_sz(dev, spec->id) >> 2;
|
||||
|
||||
switch (spec->id) {
|
||||
case MLX4_NET_TRANS_RULE_ID_ETH:
|
||||
@@ -855,6 +885,13 @@ static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
|
||||
rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk;
|
||||
break;
|
||||
|
||||
case MLX4_NET_TRANS_RULE_ID_VXLAN:
|
||||
rule_hw->vxlan.vni =
|
||||
cpu_to_be32(be32_to_cpu(spec->vxlan.vni) << 8);
|
||||
rule_hw->vxlan.vni_mask =
|
||||
cpu_to_be32(be32_to_cpu(spec->vxlan.vni_mask) << 8);
|
||||
break;
|
||||
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
@@ -879,7 +916,10 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
|
||||
switch (cur->id) {
|
||||
case MLX4_NET_TRANS_RULE_ID_ETH:
|
||||
len += snprintf(buf + len, BUF_SIZE - len,
|
||||
"dmac = %pM ", &cur->eth.dst_mac);
|
||||
"dmac = 0x%02x%02x%02x%02x%02x%02x ",
|
||||
cur->eth.dst_mac[0], cur->eth.dst_mac[1],
|
||||
cur->eth.dst_mac[2], cur->eth.dst_mac[3],
|
||||
cur->eth.dst_mac[4], cur->eth.dst_mac[5]);
|
||||
if (cur->eth.ether_type)
|
||||
len += snprintf(buf + len, BUF_SIZE - len,
|
||||
"ethertype = 0x%x ",
|
||||
@@ -922,6 +962,10 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
|
||||
GID_PRINT_ARGS(cur->ib.dst_gid_msk));
|
||||
break;
|
||||
|
||||
case MLX4_NET_TRANS_RULE_ID_VXLAN:
|
||||
len += snprintf(buf + len, BUF_SIZE - len,
|
||||
"VNID = %d ", be32_to_cpu(cur->vxlan.vni));
|
||||
break;
|
||||
case MLX4_NET_TRANS_RULE_ID_IPV6:
|
||||
break;
|
||||
|
||||
@@ -933,7 +977,7 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
|
||||
mlx4_err(dev, "%s", buf);
|
||||
|
||||
if (len >= BUF_SIZE)
|
||||
mlx4_err(dev, "Network rule error message was truncated, print buffer is too small.\n");
|
||||
mlx4_err(dev, "Network rule error message was truncated, print buffer is too small\n");
|
||||
}
|
||||
|
||||
int mlx4_flow_attach(struct mlx4_dev *dev,
|
||||
@@ -948,7 +992,6 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
|
||||
if (IS_ERR(mailbox))
|
||||
return PTR_ERR(mailbox);
|
||||
|
||||
memset(mailbox->buf, 0, sizeof(struct mlx4_net_trans_rule_hw_ctrl));
|
||||
trans_rule_ctrl_to_hw(rule, mailbox->buf);
|
||||
|
||||
size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
|
||||
@@ -957,18 +1000,33 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
|
||||
ret = parse_trans_rule(dev, cur, mailbox->buf + size);
|
||||
if (ret < 0) {
|
||||
mlx4_free_cmd_mailbox(dev, mailbox);
|
||||
return -EINVAL;
|
||||
return ret;
|
||||
}
|
||||
size += ret;
|
||||
}
|
||||
|
||||
ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
|
||||
if (ret == -ENOMEM)
|
||||
if (ret == -ENOMEM) {
|
||||
mlx4_err_rule(dev,
|
||||
"mcg table is full. Fail to register network rule.\n",
|
||||
"mcg table is full. Fail to register network rule\n",
|
||||
rule);
|
||||
else if (ret)
|
||||
mlx4_err_rule(dev, "Fail to register network rule.\n", rule);
|
||||
} else if (ret) {
|
||||
if (ret == -ENXIO) {
|
||||
if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
|
||||
mlx4_err_rule(dev,
|
||||
"DMFS is not enabled, "
|
||||
"failed to register network rule.\n",
|
||||
rule);
|
||||
else
|
||||
mlx4_err_rule(dev,
|
||||
"Rule exceeds the dmfs_high_rate_mode limitations, "
|
||||
"failed to register network rule.\n",
|
||||
rule);
|
||||
|
||||
} else {
|
||||
mlx4_err_rule(dev, "Fail to register network rule.\n", rule);
|
||||
}
|
||||
}
|
||||
|
||||
mlx4_free_cmd_mailbox(dev, mailbox);
|
||||
|
||||
@@ -988,7 +1046,46 @@ int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_flow_detach);
|
||||
|
||||
int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn, u32 max_range_qpn)
|
||||
int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
|
||||
int port, int qpn, u16 prio, u64 *reg_id)
|
||||
{
|
||||
int err;
|
||||
struct mlx4_spec_list spec_eth_outer = { {NULL} };
|
||||
struct mlx4_spec_list spec_vxlan = { {NULL} };
|
||||
struct mlx4_spec_list spec_eth_inner = { {NULL} };
|
||||
|
||||
struct mlx4_net_trans_rule rule = {
|
||||
.queue_mode = MLX4_NET_TRANS_Q_FIFO,
|
||||
.exclusive = 0,
|
||||
.allow_loopback = 1,
|
||||
.promisc_mode = MLX4_FS_REGULAR,
|
||||
};
|
||||
|
||||
__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
|
||||
|
||||
rule.port = port;
|
||||
rule.qpn = qpn;
|
||||
rule.priority = prio;
|
||||
INIT_LIST_HEAD(&rule.list);
|
||||
|
||||
spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
|
||||
memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
|
||||
memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
|
||||
|
||||
spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN; /* any vxlan header */
|
||||
spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH; /* any inner eth header */
|
||||
|
||||
list_add_tail(&spec_eth_outer.list, &rule.list);
|
||||
list_add_tail(&spec_vxlan.list, &rule.list);
|
||||
list_add_tail(&spec_eth_inner.list, &rule.list);
|
||||
|
||||
err = mlx4_flow_attach(dev, &rule, reg_id);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL(mlx4_tunnel_steer_add);
|
||||
|
||||
int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn,
|
||||
u32 max_range_qpn)
|
||||
{
|
||||
int err;
|
||||
u64 in_param;
|
||||
@@ -1012,7 +1109,7 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
|
||||
struct mlx4_cmd_mailbox *mailbox;
|
||||
struct mlx4_mgm *mgm;
|
||||
u32 members_count;
|
||||
int index, prev;
|
||||
int index = -1, prev;
|
||||
int link = 0;
|
||||
int i;
|
||||
int err;
|
||||
@@ -1053,7 +1150,7 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
|
||||
|
||||
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
|
||||
if (members_count == dev->caps.num_qp_per_mgm) {
|
||||
mlx4_err(dev, "MGM at index %x is full.\n", index);
|
||||
mlx4_err(dev, "MGM at index %x is full\n", index);
|
||||
err = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
@@ -1065,8 +1162,11 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
|
||||
goto out;
|
||||
}
|
||||
|
||||
mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |
|
||||
(!!mlx4_blck_lb << MGM_BLCK_LB_BIT));
|
||||
if (block_mcast_loopback)
|
||||
mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |
|
||||
(1U << MGM_BLCK_LB_BIT));
|
||||
else
|
||||
mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);
|
||||
|
||||
mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30);
|
||||
|
||||
@@ -1074,9 +1174,8 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
/* if !link, still add the new entry. */
|
||||
if (!link)
|
||||
goto skip_link;
|
||||
goto out;
|
||||
|
||||
err = mlx4_READ_ENTRY(dev, prev, mailbox);
|
||||
if (err)
|
||||
@@ -1088,20 +1187,19 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
skip_link:
|
||||
if (prot == MLX4_PROT_ETH) {
|
||||
out:
|
||||
if (prot == MLX4_PROT_ETH && index != -1) {
|
||||
/* manage the steering entry for promisc mode */
|
||||
if (new_entry)
|
||||
new_steering_entry(dev, port, steer, index, qp->qpn);
|
||||
err = new_steering_entry(dev, port, steer,
|
||||
index, qp->qpn);
|
||||
else
|
||||
existing_steering_entry(dev, port, steer,
|
||||
index, qp->qpn);
|
||||
err = existing_steering_entry(dev, port, steer,
|
||||
index, qp->qpn);
|
||||
}
|
||||
|
||||
out:
|
||||
if (err && link && index != -1) {
|
||||
if (index < dev->caps.num_mgms)
|
||||
mlx4_warn(dev, "Got AMGM index %d < %d",
|
||||
mlx4_warn(dev, "Got AMGM index %d < %d\n",
|
||||
index, dev->caps.num_mgms);
|
||||
else
|
||||
mlx4_bitmap_free(&priv->mcg_table.bitmap,
|
||||
@@ -1145,10 +1243,9 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
if this QP is also a promisc QP, it shouldn't be removed only if
|
||||
at least one none promisc QP is also attached to this MCG
|
||||
*/
|
||||
/* If this QP is also a promisc QP, it shouldn't be removed only if
|
||||
* at least one none promisc QP is also attached to this MCG
|
||||
*/
|
||||
if (prot == MLX4_PROT_ETH &&
|
||||
check_duplicate_entry(dev, port, steer, index, qp->qpn) &&
|
||||
!promisc_steering_entry(dev, port, steer, index, qp->qpn, NULL))
|
||||
@@ -1199,7 +1296,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
|
||||
|
||||
if (amgm_index) {
|
||||
if (amgm_index < dev->caps.num_mgms)
|
||||
mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d",
|
||||
mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d\n",
|
||||
index, amgm_index, dev->caps.num_mgms);
|
||||
else
|
||||
mlx4_bitmap_free(&priv->mcg_table.bitmap,
|
||||
@@ -1219,7 +1316,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
|
||||
goto out;
|
||||
|
||||
if (index < dev->caps.num_mgms)
|
||||
mlx4_warn(dev, "entry %d had next AMGM index %d < %d",
|
||||
mlx4_warn(dev, "entry %d had next AMGM index %d < %d\n",
|
||||
prev, index, dev->caps.num_mgms);
|
||||
else
|
||||
mlx4_bitmap_free(&priv->mcg_table.bitmap,
|
||||
@@ -1230,6 +1327,9 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
|
||||
mutex_unlock(&priv->mcg_table.mutex);
|
||||
|
||||
mlx4_free_cmd_mailbox(dev, mailbox);
|
||||
if (err && dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
|
||||
/* In case device is under an error, return success as a closing command */
|
||||
err = 0;
|
||||
return err;
|
||||
}
|
||||
|
||||
@@ -1259,6 +1359,9 @@ static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
|
||||
MLX4_CMD_WRAPPED);
|
||||
|
||||
mlx4_free_cmd_mailbox(dev, mailbox);
|
||||
if (err && !attach &&
|
||||
dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
|
||||
err = 0;
|
||||
return err;
|
||||
}
|
||||
|
||||
@@ -1306,9 +1409,6 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
|
||||
u8 port, int block_mcast_loopback,
|
||||
enum mlx4_protocol prot, u64 *reg_id)
|
||||
{
|
||||
enum mlx4_steer_type steer;
|
||||
steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
|
||||
|
||||
switch (dev->caps.steering_mode) {
|
||||
case MLX4_STEERING_MODE_A0:
|
||||
if (prot == MLX4_PROT_ETH)
|
||||
@@ -1316,7 +1416,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
|
||||
|
||||
case MLX4_STEERING_MODE_B0:
|
||||
if (prot == MLX4_PROT_ETH)
|
||||
gid[7] |= (steer << 1);
|
||||
gid[7] |= (MLX4_MC_STEER << 1);
|
||||
|
||||
if (mlx4_is_mfunc(dev))
|
||||
return mlx4_QP_ATTACH(dev, qp, gid, 1,
|
||||
@@ -1338,9 +1438,6 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
|
||||
int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
|
||||
enum mlx4_protocol prot, u64 reg_id)
|
||||
{
|
||||
enum mlx4_steer_type steer;
|
||||
steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
|
||||
|
||||
switch (dev->caps.steering_mode) {
|
||||
case MLX4_STEERING_MODE_A0:
|
||||
if (prot == MLX4_PROT_ETH)
|
||||
@@ -1348,7 +1445,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
|
||||
|
||||
case MLX4_STEERING_MODE_B0:
|
||||
if (prot == MLX4_PROT_ETH)
|
||||
gid[7] |= (steer << 1);
|
||||
gid[7] |= (MLX4_MC_STEER << 1);
|
||||
|
||||
if (mlx4_is_mfunc(dev))
|
||||
return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
|
||||
@@ -1368,7 +1465,12 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
|
||||
int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port,
|
||||
u32 qpn, enum mlx4_net_trans_promisc_mode mode)
|
||||
{
|
||||
struct mlx4_net_trans_rule rule;
|
||||
struct mlx4_net_trans_rule rule = {
|
||||
.queue_mode = MLX4_NET_TRANS_Q_FIFO,
|
||||
.exclusive = 0,
|
||||
.allow_loopback = 1,
|
||||
};
|
||||
|
||||
u64 *regid_p;
|
||||
|
||||
switch (mode) {
|
||||
@@ -1459,11 +1561,14 @@ int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
|
||||
struct mlx4_cmd_info *cmd)
|
||||
{
|
||||
u32 qpn = (u32) vhcr->in_param & 0xffffffff;
|
||||
u8 port = vhcr->in_param >> 62;
|
||||
int port = mlx4_slave_convert_port(dev, slave, vhcr->in_param >> 62);
|
||||
enum mlx4_steer_type steer = vhcr->in_modifier;
|
||||
|
||||
/* Promiscuous unicast is not allowed in mfunc for VFs */
|
||||
if ((slave != dev->caps.function) && (steer == MLX4_UC_STEER))
|
||||
if (port < 0)
|
||||
return -EINVAL;
|
||||
|
||||
/* Promiscuous unicast is not allowed in mfunc */
|
||||
if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)
|
||||
return 0;
|
||||
|
||||
if (vhcr->op_modifier)
|
||||
|
@@ -119,8 +119,11 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
|
||||
for (i = 0; i <= buddy->max_order; ++i) {
|
||||
s = BITS_TO_LONGS(1 << (buddy->max_order - i));
|
||||
buddy->bits[i] = kcalloc(s, sizeof (long), GFP_KERNEL | __GFP_NOWARN);
|
||||
if (!buddy->bits[i])
|
||||
goto err_out_free;
|
||||
if (!buddy->bits[i]) {
|
||||
buddy->bits[i] = vzalloc(s * sizeof(long));
|
||||
if (!buddy->bits[i])
|
||||
goto err_out_free;
|
||||
}
|
||||
}
|
||||
|
||||
set_bit(0, buddy->bits[buddy->max_order]);
|
||||
@@ -130,7 +133,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
|
||||
|
||||
err_out_free:
|
||||
for (i = 0; i <= buddy->max_order; ++i)
|
||||
kfree(buddy->bits[i]);
|
||||
kvfree(buddy->bits[i]);
|
||||
|
||||
err_out:
|
||||
kfree(buddy->bits);
|
||||
@@ -144,7 +147,7 @@ static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
|
||||
int i;
|
||||
|
||||
for (i = 0; i <= buddy->max_order; ++i)
|
||||
kfree(buddy->bits[i]);
|
||||
kvfree(buddy->bits[i]);
|
||||
|
||||
kfree(buddy->bits);
|
||||
kfree(buddy->num_free);
|
||||
@@ -210,11 +213,8 @@ int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
|
||||
++mtt->order;
|
||||
|
||||
mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);
|
||||
if (mtt->offset == -1) {
|
||||
mlx4_err(dev, "Failed to allocate mtts for %d pages(order %d)\n",
|
||||
npages, mtt->order);
|
||||
if (mtt->offset == -1)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -247,11 +247,11 @@ static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
|
||||
MLX4_CMD_TIME_CLASS_A,
|
||||
MLX4_CMD_WRAPPED);
|
||||
if (err)
|
||||
mlx4_warn(dev, "Failed to free mtt range at:"
|
||||
"%d order:%d\n", offset, order);
|
||||
mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
|
||||
offset, order);
|
||||
return;
|
||||
}
|
||||
__mlx4_free_mtt_range(dev, offset, order);
|
||||
__mlx4_free_mtt_range(dev, offset, order);
|
||||
}
|
||||
|
||||
void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
|
||||
@@ -295,6 +295,130 @@ static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
|
||||
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
|
||||
}
|
||||
|
||||
/* Must protect against concurrent access */
|
||||
int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
|
||||
struct mlx4_mpt_entry ***mpt_entry)
|
||||
{
|
||||
int err;
|
||||
int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);
|
||||
struct mlx4_cmd_mailbox *mailbox = NULL;
|
||||
|
||||
if (mmr->enabled != MLX4_MPT_EN_HW)
|
||||
return -EINVAL;
|
||||
|
||||
err = mlx4_HW2SW_MPT(dev, NULL, key);
|
||||
if (err) {
|
||||
mlx4_warn(dev, "HW2SW_MPT failed (%d).", err);
|
||||
mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
mmr->enabled = MLX4_MPT_EN_SW;
|
||||
|
||||
if (!mlx4_is_mfunc(dev)) {
|
||||
**mpt_entry = mlx4_table_find(
|
||||
&mlx4_priv(dev)->mr_table.dmpt_table,
|
||||
key, NULL);
|
||||
} else {
|
||||
mailbox = mlx4_alloc_cmd_mailbox(dev);
|
||||
if (IS_ERR(mailbox))
|
||||
return PTR_ERR(mailbox);
|
||||
|
||||
err = mlx4_cmd_box(dev, 0, mailbox->dma, key,
|
||||
0, MLX4_CMD_QUERY_MPT,
|
||||
MLX4_CMD_TIME_CLASS_B,
|
||||
MLX4_CMD_WRAPPED);
|
||||
if (err)
|
||||
goto free_mailbox;
|
||||
|
||||
*mpt_entry = (struct mlx4_mpt_entry **)&mailbox->buf;
|
||||
}
|
||||
|
||||
if (!(*mpt_entry) || !(**mpt_entry)) {
|
||||
err = -ENOMEM;
|
||||
goto free_mailbox;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
free_mailbox:
|
||||
mlx4_free_cmd_mailbox(dev, mailbox);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_mr_hw_get_mpt);
|
||||
|
||||
int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
|
||||
struct mlx4_mpt_entry **mpt_entry)
|
||||
{
|
||||
int err;
|
||||
|
||||
if (!mlx4_is_mfunc(dev)) {
|
||||
/* Make sure any changes to this entry are flushed */
|
||||
wmb();
|
||||
|
||||
*(u8 *)(*mpt_entry) = MLX4_MPT_STATUS_HW;
|
||||
|
||||
/* Make sure the new status is written */
|
||||
wmb();
|
||||
|
||||
err = mlx4_SYNC_TPT(dev);
|
||||
} else {
|
||||
int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);
|
||||
|
||||
struct mlx4_cmd_mailbox *mailbox =
|
||||
container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
|
||||
buf);
|
||||
|
||||
err = mlx4_SW2HW_MPT(dev, mailbox, key);
|
||||
}
|
||||
|
||||
if (!err) {
|
||||
mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK;
|
||||
mmr->enabled = MLX4_MPT_EN_HW;
|
||||
}
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_mr_hw_write_mpt);
|
||||
|
||||
void mlx4_mr_hw_put_mpt(struct mlx4_dev *dev,
|
||||
struct mlx4_mpt_entry **mpt_entry)
|
||||
{
|
||||
if (mlx4_is_mfunc(dev)) {
|
||||
struct mlx4_cmd_mailbox *mailbox =
|
||||
container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
|
||||
buf);
|
||||
mlx4_free_cmd_mailbox(dev, mailbox);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_mr_hw_put_mpt);
|
||||
|
||||
int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry,
|
||||
u32 pdn)
|
||||
{
|
||||
u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags) & ~MLX4_MPT_PD_MASK;
|
||||
/* The wrapper function will put the slave's id here */
|
||||
if (mlx4_is_mfunc(dev))
|
||||
pd_flags &= ~MLX4_MPT_PD_VF_MASK;
|
||||
|
||||
mpt_entry->pd_flags = cpu_to_be32(pd_flags |
|
||||
(pdn & MLX4_MPT_PD_MASK)
|
||||
| MLX4_MPT_PD_FLAG_EN_INV);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_mr_hw_change_pd);
|
||||
|
||||
int mlx4_mr_hw_change_access(struct mlx4_dev *dev,
|
||||
struct mlx4_mpt_entry *mpt_entry,
|
||||
u32 access)
|
||||
{
|
||||
u32 flags = (be32_to_cpu(mpt_entry->flags) & ~MLX4_PERM_MASK) |
|
||||
(access & MLX4_PERM_MASK);
|
||||
|
||||
mpt_entry->flags = cpu_to_be32(flags);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_mr_hw_change_access);
|
||||
|
||||
static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
|
||||
u64 iova, u64 size, u32 access, int npages,
|
||||
int page_shift, struct mlx4_mr *mr)
|
||||
@@ -361,14 +485,14 @@ static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
|
||||
__mlx4_mpt_release(dev, index);
|
||||
}
|
||||
|
||||
int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
|
||||
int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp)
|
||||
{
|
||||
struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
|
||||
|
||||
return mlx4_table_get(dev, &mr_table->dmpt_table, index);
|
||||
return mlx4_table_get(dev, &mr_table->dmpt_table, index, gfp);
|
||||
}
|
||||
|
||||
static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
|
||||
static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp)
|
||||
{
|
||||
u64 param = 0;
|
||||
|
||||
@@ -379,7 +503,7 @@ static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
|
||||
MLX4_CMD_TIME_CLASS_A,
|
||||
MLX4_CMD_WRAPPED);
|
||||
}
|
||||
return __mlx4_mpt_alloc_icm(dev, index);
|
||||
return __mlx4_mpt_alloc_icm(dev, index, gfp);
|
||||
}
|
||||
|
||||
void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
|
||||
@@ -433,8 +557,8 @@ static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
|
||||
key_to_hw_index(mr->key) &
|
||||
(dev->caps.num_mpts - 1));
|
||||
if (err) {
|
||||
mlx4_warn(dev, "HW2SW_MPT failed (%d).", err);
|
||||
mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n");
|
||||
mlx4_warn(dev, "HW2SW_MPT failed (%d), MR has MWs bound to it\n",
|
||||
err);
|
||||
return err;
|
||||
}
|
||||
|
||||
@@ -460,13 +584,58 @@ int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_mr_free);
|
||||
|
||||
void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr)
|
||||
{
|
||||
mlx4_mtt_cleanup(dev, &mr->mtt);
|
||||
mr->mtt.order = -1;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup);
|
||||
|
||||
int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
|
||||
u64 iova, u64 size, int npages,
|
||||
int page_shift, struct mlx4_mpt_entry *mpt_entry)
|
||||
{
|
||||
int err;
|
||||
|
||||
err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
mpt_entry->start = cpu_to_be64(iova);
|
||||
mpt_entry->length = cpu_to_be64(size);
|
||||
mpt_entry->entity_size = cpu_to_be32(page_shift);
|
||||
mpt_entry->flags &= ~(cpu_to_be32(MLX4_MPT_FLAG_FREE |
|
||||
MLX4_MPT_FLAG_SW_OWNS));
|
||||
if (mr->mtt.order < 0) {
|
||||
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
|
||||
mpt_entry->mtt_addr = 0;
|
||||
} else {
|
||||
mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
|
||||
&mr->mtt));
|
||||
if (mr->mtt.page_shift == 0)
|
||||
mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order);
|
||||
}
|
||||
if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
|
||||
/* fast register MR in free state */
|
||||
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
|
||||
mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
|
||||
MLX4_MPT_PD_FLAG_RAE);
|
||||
} else {
|
||||
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
|
||||
}
|
||||
mr->enabled = MLX4_MPT_EN_SW;
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_write);
|
||||
|
||||
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
|
||||
{
|
||||
struct mlx4_cmd_mailbox *mailbox;
|
||||
struct mlx4_mpt_entry *mpt_entry;
|
||||
int err;
|
||||
|
||||
err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key));
|
||||
err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key), GFP_KERNEL);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@@ -476,9 +645,6 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
|
||||
goto err_table;
|
||||
}
|
||||
mpt_entry = mailbox->buf;
|
||||
|
||||
memset(mpt_entry, 0, sizeof *mpt_entry);
|
||||
|
||||
mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO |
|
||||
MLX4_MPT_FLAG_REGION |
|
||||
mr->access);
|
||||
@@ -542,13 +708,13 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
|
||||
if (!mtts)
|
||||
return -ENOMEM;
|
||||
|
||||
dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
|
||||
dma_sync_single_for_cpu(&dev->persist->pdev->dev, dma_handle,
|
||||
npages * sizeof (u64), DMA_TO_DEVICE);
|
||||
|
||||
for (i = 0; i < npages; ++i)
|
||||
mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
|
||||
|
||||
dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
|
||||
dma_sync_single_for_device(&dev->persist->pdev->dev, dma_handle,
|
||||
npages * sizeof (u64), DMA_TO_DEVICE);
|
||||
|
||||
return 0;
|
||||
@@ -627,13 +793,14 @@ int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
|
||||
EXPORT_SYMBOL_GPL(mlx4_write_mtt);
|
||||
|
||||
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
|
||||
struct mlx4_buf *buf)
|
||||
struct mlx4_buf *buf, gfp_t gfp)
|
||||
{
|
||||
u64 *page_list;
|
||||
int err;
|
||||
int i;
|
||||
|
||||
page_list = kmalloc(buf->npages * sizeof *page_list, GFP_KERNEL);
|
||||
page_list = kmalloc(buf->npages * sizeof *page_list,
|
||||
gfp);
|
||||
if (!page_list)
|
||||
return -ENOMEM;
|
||||
|
||||
@@ -655,6 +822,12 @@ int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type,
|
||||
{
|
||||
u32 index;
|
||||
|
||||
if ((type == MLX4_MW_TYPE_1 &&
|
||||
!(dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)) ||
|
||||
(type == MLX4_MW_TYPE_2 &&
|
||||
!(dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)))
|
||||
return -ENOTSUPP;
|
||||
|
||||
index = mlx4_mpt_reserve(dev);
|
||||
if (index == -1)
|
||||
return -ENOMEM;
|
||||
@@ -674,7 +847,7 @@ int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw)
|
||||
struct mlx4_mpt_entry *mpt_entry;
|
||||
int err;
|
||||
|
||||
err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key));
|
||||
err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key), GFP_KERNEL);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@@ -685,11 +858,9 @@ int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw)
|
||||
}
|
||||
mpt_entry = mailbox->buf;
|
||||
|
||||
memset(mpt_entry, 0, sizeof(*mpt_entry));
|
||||
|
||||
/* Note that the MLX4_MPT_FLAG_REGION bit in mpt_entry->flags is turned
|
||||
* off, thus creating a memory window and not a memory region.
|
||||
*/
|
||||
* off, thus creating a memory window and not a memory region.
|
||||
*/
|
||||
mpt_entry->key = cpu_to_be32(key_to_hw_index(mw->key));
|
||||
mpt_entry->pd_flags = cpu_to_be32(mw->pd);
|
||||
if (mw->type == MLX4_MW_TYPE_2) {
|
||||
@@ -759,8 +930,8 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
|
||||
return err;
|
||||
|
||||
err = mlx4_buddy_init(&mr_table->mtt_buddy,
|
||||
ilog2(div_u64(dev->caps.num_mtts,
|
||||
(1 << log_mtts_per_seg))));
|
||||
ilog2((u32)dev->caps.num_mtts /
|
||||
(1 << log_mtts_per_seg)));
|
||||
if (err)
|
||||
goto err_buddy;
|
||||
|
||||
@@ -769,7 +940,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
|
||||
mlx4_alloc_mtt_range(dev,
|
||||
fls(dev->caps.reserved_mtts - 1));
|
||||
if (priv->reserved_mtts < 0) {
|
||||
mlx4_warn(dev, "MTT table of order %u is too small.\n",
|
||||
mlx4_warn(dev, "MTT table of order %u is too small\n",
|
||||
mr_table->mtt_buddy.max_order);
|
||||
err = -ENOMEM;
|
||||
goto err_reserve_mtts;
|
||||
@@ -849,13 +1020,13 @@ int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list
|
||||
/* Make sure MPT status is visible before writing MTT entries */
|
||||
wmb();
|
||||
|
||||
dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
|
||||
dma_sync_single_for_cpu(&dev->persist->pdev->dev, fmr->dma_handle,
|
||||
npages * sizeof(u64), DMA_TO_DEVICE);
|
||||
|
||||
for (i = 0; i < npages; ++i)
|
||||
fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
|
||||
|
||||
dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle,
|
||||
dma_sync_single_for_device(&dev->persist->pdev->dev, fmr->dma_handle,
|
||||
npages * sizeof(u64), DMA_TO_DEVICE);
|
||||
|
||||
fmr->mpt->key = cpu_to_be32(key);
|
||||
@@ -879,7 +1050,7 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
|
||||
int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
|
||||
{
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
int err = -ENOMEM, ret;
|
||||
int err = -ENOMEM;
|
||||
|
||||
if (max_maps > dev->caps.max_fmr_maps)
|
||||
return -EINVAL;
|
||||
@@ -913,9 +1084,7 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
|
||||
return 0;
|
||||
|
||||
err_free:
|
||||
ret = mlx4_mr_free(dev, &fmr->mr);
|
||||
if (ret)
|
||||
mlx4_err(dev, "Error deregistering MR. The system may have become unstable.");
|
||||
(void) mlx4_mr_free(dev, &fmr->mr);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
|
||||
@@ -941,34 +1110,30 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
|
||||
void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
|
||||
u32 *lkey, u32 *rkey)
|
||||
{
|
||||
u32 key;
|
||||
struct mlx4_cmd_mailbox *mailbox;
|
||||
int err;
|
||||
|
||||
if (!fmr->maps)
|
||||
return;
|
||||
|
||||
key = key_to_hw_index(fmr->mr.key) & (dev->caps.num_mpts - 1);
|
||||
|
||||
*(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW;
|
||||
|
||||
/* Make sure MPT status is visible before changing MPT fields */
|
||||
wmb();
|
||||
|
||||
fmr->mr.key = hw_index_to_key(key);
|
||||
|
||||
fmr->mpt->key = cpu_to_be32(key);
|
||||
fmr->mpt->lkey = cpu_to_be32(key);
|
||||
fmr->mpt->length = 0;
|
||||
fmr->mpt->start = 0;
|
||||
|
||||
/* Make sure MPT data is visible before changing MPT status */
|
||||
wmb();
|
||||
|
||||
*(u8 *)fmr->mpt = MLX4_MPT_STATUS_HW;
|
||||
|
||||
/* Make sure MPT satus is visible */
|
||||
wmb();
|
||||
|
||||
fmr->maps = 0;
|
||||
|
||||
mailbox = mlx4_alloc_cmd_mailbox(dev);
|
||||
if (IS_ERR(mailbox)) {
|
||||
err = PTR_ERR(mailbox);
|
||||
pr_warn("mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n", err);
|
||||
return;
|
||||
}
|
||||
|
||||
err = mlx4_HW2SW_MPT(dev, NULL,
|
||||
key_to_hw_index(fmr->mr.key) &
|
||||
(dev->caps.num_mpts - 1));
|
||||
mlx4_free_cmd_mailbox(dev, mailbox);
|
||||
if (err) {
|
||||
pr_warn("mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", err);
|
||||
return;
|
||||
}
|
||||
fmr->mr.enabled = MLX4_MPT_EN_SW;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
|
||||
|
||||
@@ -990,7 +1155,7 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_free);
|
||||
|
||||
int mlx4_SYNC_TPT(struct mlx4_dev *dev)
|
||||
{
|
||||
return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000,
|
||||
MLX4_CMD_NATIVE);
|
||||
return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT,
|
||||
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);
|
||||
|
@@ -151,11 +151,13 @@ int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
return -ENOMEM;

if (mlx4_is_slave(dev))
offset = uar->index % ((int) pci_resource_len(dev->pdev, 2) /
offset = uar->index % ((int)pci_resource_len(dev->persist->pdev,
2) /
dev->caps.uar_page_size);
else
offset = uar->index;
uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + offset;
uar->pfn = (pci_resource_start(dev->persist->pdev, 2) >> PAGE_SHIFT)
+ offset;
uar->map = NULL;
return 0;
}
@@ -167,7 +169,6 @@ void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar)
}
EXPORT_SYMBOL_GPL(mlx4_uar_free);

#ifndef CONFIG_PPC
int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -186,9 +187,9 @@ int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node)
err = -ENOMEM;
goto out;
}
uar = kmalloc_node(sizeof *uar, GFP_KERNEL, node);
uar = kmalloc_node(sizeof(*uar), GFP_KERNEL, node);
if (!uar) {
uar = kmalloc(sizeof *uar, GFP_KERNEL);
uar = kmalloc(sizeof(*uar), GFP_KERNEL);
if (!uar) {
err = -ENOMEM;
goto out;
@@ -204,7 +205,9 @@ int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node)
goto free_uar;
}

uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT, PAGE_SIZE);
uar->bf_map = io_mapping_map_wc(priv->bf_mapping,
uar->index << PAGE_SHIFT,
PAGE_SIZE);
if (!uar->bf_map) {
err = -ENOMEM;
goto unamp_uar;
@@ -213,7 +216,6 @@ int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node)
list_add(&uar->bf_list, &priv->bf_list);
}

bf->uar = uar;
idx = ffz(uar->free_bf_bmap);
uar->free_bf_bmap |= 1 << idx;
bf->uar = uar;
@@ -267,26 +269,16 @@ void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf)
}
EXPORT_SYMBOL_GPL(mlx4_bf_free);

#else
int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node)
{
memset(bf, 0, sizeof *bf);
return -ENOSYS;
}
EXPORT_SYMBOL_GPL(mlx4_bf_alloc);

void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf)
{
return;
}
EXPORT_SYMBOL_GPL(mlx4_bf_free);
#endif

int mlx4_init_uar_table(struct mlx4_dev *dev)
{
if (dev->caps.num_uars <= 128) {
mlx4_err(dev, "Only %d UAR pages (need more than 128)\n",
dev->caps.num_uars);
int num_reserved_uar = mlx4_get_num_reserved_uar(dev);

mlx4_dbg(dev, "uar_page_shift = %d", dev->uar_page_shift);
mlx4_dbg(dev, "Effective reserved_uars=%d", dev->caps.reserved_uars);

if (dev->caps.num_uars <= num_reserved_uar) {
mlx4_err(dev, "Only %d UAR pages (need more than %d)\n",
dev->caps.num_uars, num_reserved_uar);
mlx4_err(dev, "Increase firmware log2_uar_bar_megabytes?\n");
return -ENODEV;
}

File diff suppressed because it is too large
@@ -76,13 +76,12 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
u64 size;
u64 start;
int type;
u64 num;
u32 num;
int log_num;
};

u64 total_size = 0;
struct mlx4_resource *profile;
struct mlx4_resource tmp;
int i, j;

profile = kcalloc(MLX4_RES_NUM, sizeof(*profile), GFP_KERNEL);
@@ -107,13 +106,11 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
profile[MLX4_RES_AUXC].num = request->num_qp;
profile[MLX4_RES_SRQ].num = request->num_srq;
profile[MLX4_RES_CQ].num = request->num_cq;
profile[MLX4_RES_EQ].num = mlx4_is_mfunc(dev) ?
dev->phys_caps.num_phys_eqs :
profile[MLX4_RES_EQ].num = mlx4_is_mfunc(dev) ? dev->phys_caps.num_phys_eqs :
min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
profile[MLX4_RES_DMPT].num = request->num_mpt;
profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS;
profile[MLX4_RES_MTT].num = ((u64)request->num_mtt_segs) *
(1 << log_mtts_per_seg);
profile[MLX4_RES_MTT].num = request->num_mtt * (1 << log_mtts_per_seg);
profile[MLX4_RES_MCG].num = request->num_mcg;

for (i = 0; i < MLX4_RES_NUM; ++i) {
@@ -132,11 +129,8 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
*/
for (i = MLX4_RES_NUM; i > 0; --i)
for (j = 1; j < i; ++j) {
if (profile[j].size > profile[j - 1].size) {
tmp = profile[j];
profile[j] = profile[j - 1];
profile[j - 1] = tmp;
}
if (profile[j].size > profile[j - 1].size)
swap(profile[j], profile[j - 1]);
}

for (i = 0; i < MLX4_RES_NUM; ++i) {
@@ -146,18 +140,17 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
}

if (total_size > dev_cap->max_icm_sz) {
mlx4_err(dev, "Profile requires 0x%llx bytes; "
"won't fit in 0x%llx bytes of context memory.\n",
(unsigned long long) total_size,
(unsigned long long) dev_cap->max_icm_sz);
mlx4_err(dev, "Profile requires 0x%llx bytes; won't fit in 0x%llx bytes of context memory\n",
(unsigned long long) total_size,
(unsigned long long) dev_cap->max_icm_sz);
kfree(profile);
return -ENOMEM;
}

if (profile[i].size)
mlx4_dbg(dev, " profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, "
"size 0x%10llx\n",
i, res_name[profile[i].type], profile[i].log_num,
mlx4_dbg(dev, " profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, size 0x%10llx\n",
i, res_name[profile[i].type],
profile[i].log_num,
(unsigned long long) profile[i].start,
(unsigned long long) profile[i].size);
}
@@ -200,15 +193,16 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
break;
case MLX4_RES_EQ:
if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
init_hca->log_num_eqs = 0x1f;
init_hca->eqc_base = profile[i].start;
init_hca->num_sys_eqs = dev_cap->num_sys_eqs;
init_hca->log_num_eqs = 0x1f;
init_hca->eqc_base = profile[i].start;
init_hca->num_sys_eqs = dev_cap->num_sys_eqs;
} else {
dev->caps.num_eqs = roundup_pow_of_two(
min_t(unsigned,
dev_cap->max_eqs, MAX_MSIX));
init_hca->eqc_base = profile[i].start;
init_hca->log_num_eqs = ilog2(dev->caps.num_eqs);
dev->caps.num_eqs = roundup_pow_of_two(
min_t(unsigned,
dev_cap->max_eqs,
MAX_MSIX));
init_hca->eqc_base = profile[i].start;
init_hca->log_num_eqs = ilog2(dev->caps.num_eqs);
}
break;
case MLX4_RES_DMPT:

@@ -43,9 +43,7 @@
|
||||
#include "mlx4.h"
|
||||
#include "icm.h"
|
||||
|
||||
/*
|
||||
* QP to support BF should have bits 6,7 cleared
|
||||
*/
|
||||
/* QP to support BF should have bits 6,7 cleared */
|
||||
#define MLX4_BF_QP_SKIP_MASK 0xc0
|
||||
#define MLX4_MAX_BF_QP_RANGE 0x40
|
||||
|
||||
@@ -170,6 +168,12 @@ static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
|
||||
context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
|
||||
}
|
||||
|
||||
if ((cur_state == MLX4_QP_STATE_RTR) &&
|
||||
(new_state == MLX4_QP_STATE_RTS) &&
|
||||
dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
|
||||
context->roce_entropy =
|
||||
cpu_to_be16(mlx4_qp_roce_entropy(dev, qp->qpn));
|
||||
|
||||
*(__be32 *) mailbox->buf = cpu_to_be32(optpar);
|
||||
memcpy(mailbox->buf + 8, context, sizeof *context);
|
||||
|
||||
@@ -216,19 +220,25 @@ EXPORT_SYMBOL_GPL(mlx4_qp_modify);
|
||||
int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
|
||||
int *base, u8 flags)
|
||||
{
|
||||
int bf_qp = !!(flags & (u8) MLX4_RESERVE_BF_QP);
|
||||
u32 uid;
|
||||
int bf_qp = !!(flags & (u8)MLX4_RESERVE_ETH_BF_QP);
|
||||
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
struct mlx4_qp_table *qp_table = &priv->qp_table;
|
||||
|
||||
/* Only IPoIB uses a large cnt. In this case, just allocate
|
||||
* as usual, ignoring bf skipping, since IPoIB does not run over RoCE
|
||||
*/
|
||||
if (cnt > MLX4_MAX_BF_QP_RANGE && bf_qp)
|
||||
bf_qp = 0;
|
||||
return -ENOMEM;
|
||||
|
||||
*base = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align,
|
||||
bf_qp ? MLX4_BF_QP_SKIP_MASK : 0);
|
||||
uid = MLX4_QP_TABLE_ZONE_GENERAL;
|
||||
if (flags & (u8)MLX4_RESERVE_A0_QP) {
|
||||
if (bf_qp)
|
||||
uid = MLX4_QP_TABLE_ZONE_RAW_ETH;
|
||||
else
|
||||
uid = MLX4_QP_TABLE_ZONE_RSS;
|
||||
}
|
||||
|
||||
*base = mlx4_zone_alloc_entries(qp_table->zones, uid, cnt, align,
|
||||
bf_qp ? MLX4_BF_QP_SKIP_MASK : 0, NULL);
|
||||
if (*base == -1)
|
||||
return -ENOMEM;
|
||||
|
||||
@@ -246,7 +256,7 @@ int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
|
||||
flags &= dev->caps.alloc_res_qp_mask;
|
||||
|
||||
if (mlx4_is_mfunc(dev)) {
|
||||
set_param_l(&in_param, (((u32) flags) << 24) | (u32) cnt);
|
||||
set_param_l(&in_param, (((u32)flags) << 24) | (u32)cnt);
|
||||
set_param_h(&in_param, align);
|
||||
err = mlx4_cmd_imm(dev, in_param, &out_param,
|
||||
RES_QP, RES_OP_RESERVE,
|
||||
@@ -269,7 +279,7 @@ void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
|
||||
|
||||
if (mlx4_is_qp_reserved(dev, (u32) base_qpn))
|
||||
return;
|
||||
mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, MLX4_USE_RR);
|
||||
mlx4_zone_free_entries_unique(qp_table->zones, base_qpn, cnt);
|
||||
}
|
||||
|
||||
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
|
||||
@@ -284,37 +294,37 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
|
||||
MLX4_CMD_FREE_RES,
|
||||
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
|
||||
if (err) {
|
||||
mlx4_warn(dev, "Failed to release qp range"
|
||||
" base:%d cnt:%d\n", base_qpn, cnt);
|
||||
mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n",
|
||||
base_qpn, cnt);
|
||||
}
|
||||
} else
|
||||
__mlx4_qp_release_range(dev, base_qpn, cnt);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
|
||||
|
||||
int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
|
||||
int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
|
||||
{
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
struct mlx4_qp_table *qp_table = &priv->qp_table;
|
||||
int err;
|
||||
|
||||
err = mlx4_table_get(dev, &qp_table->qp_table, qpn);
|
||||
err = mlx4_table_get(dev, &qp_table->qp_table, qpn, gfp);
|
||||
if (err)
|
||||
goto err_out;
|
||||
|
||||
err = mlx4_table_get(dev, &qp_table->auxc_table, qpn);
|
||||
err = mlx4_table_get(dev, &qp_table->auxc_table, qpn, gfp);
|
||||
if (err)
|
||||
goto err_put_qp;
|
||||
|
||||
err = mlx4_table_get(dev, &qp_table->altc_table, qpn);
|
||||
err = mlx4_table_get(dev, &qp_table->altc_table, qpn, gfp);
|
||||
if (err)
|
||||
goto err_put_auxc;
|
||||
|
||||
err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn);
|
||||
err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn, gfp);
|
||||
if (err)
|
||||
goto err_put_altc;
|
||||
|
||||
err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn);
|
||||
err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn, gfp);
|
||||
if (err)
|
||||
goto err_put_rdmarc;
|
||||
|
||||
@@ -336,7 +346,7 @@ int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
|
||||
return err;
|
||||
}
|
||||
|
||||
static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
|
||||
static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
|
||||
{
|
||||
u64 param = 0;
|
||||
|
||||
@@ -346,7 +356,7 @@ static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
|
||||
MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A,
|
||||
MLX4_CMD_WRAPPED);
|
||||
}
|
||||
return __mlx4_qp_alloc_icm(dev, qpn);
|
||||
return __mlx4_qp_alloc_icm(dev, qpn, gfp);
|
||||
}
|
||||
|
||||
void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
|
||||
@@ -375,7 +385,7 @@ static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
|
||||
__mlx4_qp_free_icm(dev, qpn);
|
||||
}
|
||||
|
||||
int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
|
||||
int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
|
||||
{
|
||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||
struct mlx4_qp_table *qp_table = &priv->qp_table;
|
||||
@@ -386,7 +396,7 @@ int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
|
||||
|
||||
qp->qpn = qpn;
|
||||
|
||||
err = mlx4_qp_alloc_icm(dev, qpn);
|
||||
err = mlx4_qp_alloc_icm(dev, qpn, gfp);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@@ -409,6 +419,75 @@ int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
|
||||
|
||||
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
|
||||
|
||||
int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
|
||||
enum mlx4_update_qp_attr attr,
|
||||
struct mlx4_update_qp_params *params)
|
||||
{
|
||||
struct mlx4_cmd_mailbox *mailbox;
|
||||
struct mlx4_update_qp_context *cmd;
|
||||
u64 pri_addr_path_mask = 0;
|
||||
u64 qp_mask = 0;
|
||||
int err = 0;
|
||||
|
||||
if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
|
||||
return -EINVAL;
|
||||
|
||||
mailbox = mlx4_alloc_cmd_mailbox(dev);
|
||||
if (IS_ERR(mailbox))
|
||||
return PTR_ERR(mailbox);
|
||||
|
||||
cmd = (struct mlx4_update_qp_context *)mailbox->buf;
|
||||
|
||||
if (attr & MLX4_UPDATE_QP_SMAC) {
|
||||
pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
|
||||
cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
|
||||
}
|
||||
|
||||
if (attr & MLX4_UPDATE_QP_ETH_SRC_CHECK_MC_LB) {
|
||||
if (!(dev->caps.flags2
|
||||
& MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
|
||||
mlx4_warn(dev,
|
||||
"Trying to set src check LB, but it isn't supported\n");
|
||||
err = -ENOTSUPP;
|
||||
goto out;
|
||||
}
|
||||
pri_addr_path_mask |=
|
||||
1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB;
|
||||
if (params->flags &
|
||||
MLX4_UPDATE_QP_PARAMS_FLAGS_ETH_CHECK_MC_LB) {
|
||||
cmd->qp_context.pri_path.fl |=
|
||||
MLX4_FL_ETH_SRC_CHECK_MC_LB;
|
||||
}
|
||||
}
|
||||
|
||||
if (attr & MLX4_UPDATE_QP_VSD) {
|
||||
qp_mask |= 1ULL << MLX4_UPD_QP_MASK_VSD;
|
||||
if (params->flags & MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE)
|
||||
cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN);
|
||||
}
|
||||
|
||||
if (attr & MLX4_UPDATE_QP_RATE_LIMIT) {
|
||||
qp_mask |= 1ULL << MLX4_UPD_QP_MASK_RATE_LIMIT;
|
||||
cmd->qp_context.rate_limit_params = cpu_to_be16((params->rate_unit << 14) | params->rate_val);
|
||||
}
|
||||
|
||||
if (attr & MLX4_UPDATE_QP_QOS_VPORT) {
|
||||
qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP;
|
||||
cmd->qp_context.qos_vport = params->qos_vport;
|
||||
}
|
||||
|
||||
cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
|
||||
cmd->qp_mask = cpu_to_be64(qp_mask);
|
||||
|
||||
err = mlx4_cmd(dev, mailbox->dma, qpn & 0xffffff, 0,
|
||||
MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
|
||||
MLX4_CMD_NATIVE);
|
||||
out:
|
||||
mlx4_free_cmd_mailbox(dev, mailbox);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx4_update_qp);
|
||||
|
||||
void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
|
||||
{
|
||||
struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
|
||||
@@ -436,6 +515,227 @@ static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
|
||||
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
|
||||
}
|
||||
|
||||
#define MLX4_QP_TABLE_RSS_ETH_PRIORITY 2
|
||||
#define MLX4_QP_TABLE_RAW_ETH_PRIORITY 1
|
||||
#define MLX4_QP_TABLE_RAW_ETH_SIZE 256
|
||||

static int mlx4_create_zones(struct mlx4_dev *dev,
			     u32 reserved_bottom_general,
			     u32 reserved_top_general,
			     u32 reserved_bottom_rss,
			     u32 start_offset_rss,
			     u32 max_table_offset)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	struct mlx4_bitmap (*bitmap)[MLX4_QP_TABLE_ZONE_NUM] = NULL;
	int bitmap_initialized = 0;
	u32 last_offset;
	int k;
	int err;

	qp_table->zones = mlx4_zone_allocator_create(MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP);

	if (NULL == qp_table->zones)
		return -ENOMEM;

	bitmap = kmalloc(sizeof(*bitmap), GFP_KERNEL);

	if (NULL == bitmap) {
		err = -ENOMEM;
		goto free_zone;
	}

	err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_GENERAL, dev->caps.num_qps,
			       (1 << 23) - 1, reserved_bottom_general,
			       reserved_top_general);

	if (err)
		goto free_bitmap;

	++bitmap_initialized;

	err = mlx4_zone_add_one(qp_table->zones, *bitmap + MLX4_QP_TABLE_ZONE_GENERAL,
				MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO |
				MLX4_ZONE_USE_RR, 0,
				0, qp_table->zones_uids + MLX4_QP_TABLE_ZONE_GENERAL);

	if (err)
		goto free_bitmap;

	err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_RSS,
			       reserved_bottom_rss,
			       reserved_bottom_rss - 1,
			       dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
			       reserved_bottom_rss - start_offset_rss);

	if (err)
		goto free_bitmap;

	++bitmap_initialized;

	err = mlx4_zone_add_one(qp_table->zones, *bitmap + MLX4_QP_TABLE_ZONE_RSS,
				MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO |
				MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO |
				MLX4_ZONE_USE_RR, MLX4_QP_TABLE_RSS_ETH_PRIORITY,
				0, qp_table->zones_uids + MLX4_QP_TABLE_ZONE_RSS);

	if (err)
		goto free_bitmap;

	last_offset = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
	/* We have a single zone for the A0 steering QPs area of the FW. This area
	 * needs to be split into subareas. One set of subareas is for RSS QPs
	 * (in which qp number bits 6 and/or 7 are set); the other set of subareas
	 * is for RAW_ETH QPs, which require that both bits 6 and 7 are zero.
	 * Currently, the values returned by the FW (A0 steering area starting qp number
	 * and A0 steering area size) are such that there are only two subareas -- one
	 * for RSS and one for RAW_ETH.
	 */
	for (k = MLX4_QP_TABLE_ZONE_RSS + 1; k < sizeof(*bitmap)/sizeof((*bitmap)[0]);
	     k++) {
		int size;
		u32 offset = start_offset_rss;
		u32 bf_mask;
		u32 requested_size;

		/* Assuming MLX4_BF_QP_SKIP_MASK is consecutive ones, this calculates
		 * a mask of all LSB bits set until (and not including) the first
		 * set bit of MLX4_BF_QP_SKIP_MASK. For example, if MLX4_BF_QP_SKIP_MASK
		 * is 0xc0, bf_mask will be 0x3f.
		 */
		bf_mask = (MLX4_BF_QP_SKIP_MASK & ~(MLX4_BF_QP_SKIP_MASK - 1)) - 1;
		requested_size = min((u32)MLX4_QP_TABLE_RAW_ETH_SIZE, bf_mask + 1);

		if (((last_offset & MLX4_BF_QP_SKIP_MASK) &&
		     ((int)(max_table_offset - last_offset)) >=
		     roundup_pow_of_two(MLX4_BF_QP_SKIP_MASK)) ||
		    (!(last_offset & MLX4_BF_QP_SKIP_MASK) &&
		     !((last_offset + requested_size - 1) &
		       MLX4_BF_QP_SKIP_MASK)))
			size = requested_size;
		else {
			u32 candidate_offset =
				(last_offset | MLX4_BF_QP_SKIP_MASK | bf_mask) + 1;

			if (last_offset & MLX4_BF_QP_SKIP_MASK)
				last_offset = candidate_offset;

			/* From this point, the BF bits are 0 */

			if (last_offset > max_table_offset) {
				/* need to skip */
				size = -1;
			} else {
				size = min3(max_table_offset - last_offset,
					    bf_mask - (last_offset & bf_mask),
					    requested_size);
				if (size < requested_size) {
					int candidate_size;

					candidate_size = min3(
						max_table_offset - candidate_offset,
						bf_mask - (last_offset & bf_mask),
						requested_size);

					/* We will not take this path if last_offset was
					 * already set above to candidate_offset
					 */
					if (candidate_size > size) {
						last_offset = candidate_offset;
						size = candidate_size;
					}
				}
			}
		}

		if (size > 0) {
			/* mlx4_bitmap_alloc_range will find a contiguous range of "size"
			 * QPs in which both bits 6 and 7 are zero, because we pass it the
			 * MLX4_BF_SKIP_MASK).
			 */
			offset = mlx4_bitmap_alloc_range(
					*bitmap + MLX4_QP_TABLE_ZONE_RSS,
					size, 1,
					MLX4_BF_QP_SKIP_MASK);

			if (offset == (u32)-1) {
				err = -ENOMEM;
				break;
			}

			last_offset = offset + size;

			err = mlx4_bitmap_init(*bitmap + k, roundup_pow_of_two(size),
					       roundup_pow_of_two(size) - 1, 0,
					       roundup_pow_of_two(size) - size);
		} else {
			/* Add an empty bitmap, we'll allocate from different zones (since
			 * at least one is reserved)
			 */
			err = mlx4_bitmap_init(*bitmap + k, 1,
					       MLX4_QP_TABLE_RAW_ETH_SIZE - 1, 0,
					       0);
			mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0);
		}

		if (err)
			break;

		++bitmap_initialized;

		err = mlx4_zone_add_one(qp_table->zones, *bitmap + k,
					MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO |
					MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO |
					MLX4_ZONE_USE_RR, MLX4_QP_TABLE_RAW_ETH_PRIORITY,
					offset, qp_table->zones_uids + k);

		if (err)
			break;
	}

	if (err)
		goto free_bitmap;

	qp_table->bitmap_gen = *bitmap;

	return err;

free_bitmap:
	for (k = 0; k < bitmap_initialized; k++)
		mlx4_bitmap_cleanup(*bitmap + k);
	kfree(bitmap);
free_zone:
	mlx4_zone_allocator_destroy(qp_table->zones);
	return err;
}

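A small aside on the bf_mask arithmetic used in mlx4_create_zones() above: for a mask of consecutive ones, (m & ~(m - 1)) isolates the lowest set bit, so subtracting one yields all bits below it. A minimal standalone check (not part of the diff), reusing the 0xc0 example from the code comment:

/*
 * Illustration only, not part of the diff.  0xc0 is the example value
 * from the code comment, standing in for MLX4_BF_QP_SKIP_MASK.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t skip_mask = 0xc0; /* stand-in for MLX4_BF_QP_SKIP_MASK */
	uint32_t bf_mask = (skip_mask & ~(skip_mask - 1)) - 1;

	assert(bf_mask == 0x3f); /* all bits below bit 6 */
	return 0;
}
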
static void mlx4_cleanup_qp_zones(struct mlx4_dev *dev)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;

	if (qp_table->zones) {
		int i;

		for (i = 0;
		     i < sizeof(qp_table->zones_uids)/sizeof(qp_table->zones_uids[0]);
		     i++) {
			struct mlx4_bitmap *bitmap =
				mlx4_zone_get_bitmap(qp_table->zones,
						     qp_table->zones_uids[i]);

			mlx4_zone_remove_one(qp_table->zones, qp_table->zones_uids[i]);
			if (NULL == bitmap)
				continue;

			mlx4_bitmap_cleanup(bitmap);
		}
		mlx4_zone_allocator_destroy(qp_table->zones);
		kfree(qp_table->bitmap_gen);
		qp_table->bitmap_gen = NULL;
		qp_table->zones = NULL;
	}
}

int mlx4_init_qp_table(struct mlx4_dev *dev)
|
||||
{
|
||||
struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
|
||||
@ -443,49 +743,56 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
|
||||
int reserved_from_top = 0;
|
||||
int reserved_from_bot;
|
||||
int k;
|
||||
int fixed_reserved_from_bot_rv = 0;
|
||||
int bottom_reserved_for_rss_bitmap;
|
||||
u32 max_table_offset = dev->caps.dmfs_high_rate_qpn_base +
|
||||
dev->caps.dmfs_high_rate_qpn_range;
|
||||
|
||||
spin_lock_init(&qp_table->lock);
|
||||
INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
|
||||
if (mlx4_is_slave(dev))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* We reserve 2 extra QPs per port for the special QPs. The
|
||||
/* We reserve 2 extra QPs per port for the special QPs. The
|
||||
* block of special QPs must be aligned to a multiple of 8, so
|
||||
* round up.
|
||||
*
|
||||
* We also reserve the MSB of the 24-bit QP number to indicate
|
||||
* that a QP is an XRC QP.
|
||||
*/
|
||||
dev->phys_caps.base_sqpn =
|
||||
ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8);
|
||||
for (k = 0; k <= MLX4_QP_REGION_BOTTOM; k++)
|
||||
fixed_reserved_from_bot_rv += dev->caps.reserved_qps_cnt[k];
|
||||
|
||||
if (fixed_reserved_from_bot_rv < max_table_offset)
|
||||
fixed_reserved_from_bot_rv = max_table_offset;
|
||||
|
||||
/* We reserve at least 1 extra for bitmaps that we don't have enough space for*/
|
||||
bottom_reserved_for_rss_bitmap =
|
||||
roundup_pow_of_two(fixed_reserved_from_bot_rv + 1);
|
||||
dev->phys_caps.base_sqpn = ALIGN(bottom_reserved_for_rss_bitmap, 8);
|
||||
|
||||
{
|
||||
int sort[MLX4_NUM_QP_REGION];
|
||||
int i, j, tmp;
|
||||
int i, j;
|
||||
int last_base = dev->caps.num_qps;
|
||||
|
||||
for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
|
||||
sort[i] = i;
|
||||
|
||||
for (i = MLX4_NUM_QP_REGION; i > 0; --i) {
|
||||
for (j = 2; j < i; ++j) {
|
||||
for (i = MLX4_NUM_QP_REGION; i > MLX4_QP_REGION_BOTTOM; --i) {
|
||||
for (j = MLX4_QP_REGION_BOTTOM + 2; j < i; ++j) {
|
||||
if (dev->caps.reserved_qps_cnt[sort[j]] >
|
||||
dev->caps.reserved_qps_cnt[sort[j - 1]]) {
|
||||
tmp = sort[j];
|
||||
sort[j] = sort[j - 1];
|
||||
sort[j - 1] = tmp;
|
||||
}
|
||||
dev->caps.reserved_qps_cnt[sort[j - 1]])
|
||||
swap(sort[j], sort[j - 1]);
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 1; i < MLX4_NUM_QP_REGION; ++i) {
|
||||
for (i = MLX4_QP_REGION_BOTTOM + 1; i < MLX4_NUM_QP_REGION; ++i) {
|
||||
last_base -= dev->caps.reserved_qps_cnt[sort[i]];
|
||||
dev->caps.reserved_qps_base[sort[i]] = last_base;
|
||||
reserved_from_top +=
|
||||
dev->caps.reserved_qps_cnt[sort[i]];
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/* Reserve 8 real SQPs in both native and SRIOV modes.
|
||||
@ -500,14 +807,15 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
|
||||
*/
|
||||
reserved_from_bot = mlx4_num_reserved_sqps(dev);
|
||||
if (reserved_from_bot + reserved_from_top > dev->caps.num_qps) {
|
||||
mlx4_err(dev, "Number of reserved QPs is higher than number "
|
||||
"of QPs, increase the value of log_num_qp\n");
|
||||
mlx4_err(dev, "Number of reserved QPs is higher than number of QPs\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
|
||||
(1 << 23) - 1, reserved_from_bot,
|
||||
reserved_from_top);
|
||||
err = mlx4_create_zones(dev, reserved_from_bot, reserved_from_bot,
|
||||
bottom_reserved_for_rss_bitmap,
|
||||
fixed_reserved_from_bot_rv,
|
||||
max_table_offset);
|
||||
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@ -543,7 +851,8 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
|
||||
err = mlx4_CONF_SPECIAL_QP(dev, dev->phys_caps.base_sqpn);
|
||||
if (err)
|
||||
goto err_mem;
|
||||
return 0;
|
||||
|
||||
return err;
|
||||
|
||||
err_mem:
|
||||
kfree(dev->caps.qp0_tunnel);
|
||||
@ -552,6 +861,7 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
|
||||
kfree(dev->caps.qp1_proxy);
|
||||
dev->caps.qp0_tunnel = dev->caps.qp0_proxy =
|
||||
dev->caps.qp1_tunnel = dev->caps.qp1_proxy = NULL;
|
||||
mlx4_cleanup_qp_zones(dev);
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -561,7 +871,8 @@ void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
|
||||
return;
|
||||
|
||||
mlx4_CONF_SPECIAL_QP(dev, 0);
|
||||
mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap);
|
||||
|
||||
mlx4_cleanup_qp_zones(dev);
|
||||
}
|
||||
|
||||
int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
|
||||
@ -601,11 +912,12 @@ int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
|
||||
for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
|
||||
context->flags &= cpu_to_be32(~(0xf << 28));
|
||||
context->flags |= cpu_to_be32(states[i + 1] << 28);
|
||||
if (states[i + 1] != MLX4_QP_STATE_RTR)
|
||||
context->params2 &= ~MLX4_QP_BIT_FPP;
|
||||
err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
|
||||
context, 0, 0, qp);
|
||||
if (err) {
|
||||
mlx4_err(dev, "Failed to bring QP to state: "
|
||||
"%d with error: %d\n",
|
||||
mlx4_err(dev, "Failed to bring QP to state: %d with error: %d\n",
|
||||
states[i + 1], err);
|
||||
return err;
|
||||
}
|
||||
@ -616,3 +928,23 @@ int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_qp_to_ready);

u16 mlx4_qp_roce_entropy(struct mlx4_dev *dev, u32 qpn)
{
	struct mlx4_qp_context context;
	struct mlx4_qp qp;
	int err;

	qp.qpn = qpn;
	err = mlx4_qp_query(dev, &qp, &context);
	if (!err) {
		u32 dest_qpn = be32_to_cpu(context.remote_qpn) & 0xffffff;
		u16 folded_dst = folded_qp(dest_qpn);
		u16 folded_src = folded_qp(qpn);

		return (dest_qpn != qpn) ?
			((folded_dst ^ folded_src) | 0xC000) :
			folded_src | 0xC000;
	}
	return 0xdead;
}

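Note on mlx4_qp_roce_entropy(): folded_qp() is defined elsewhere in the driver, but regardless of what it returns, the OR with 0xC000 keeps the result in the 0xC000 to 0xFFFF range, as does the 0xdead error value. A minimal standalone sketch (not part of the diff; the folded values are made up):

/*
 * Illustration only, not part of the diff.  The folded values below are
 * made-up inputs; folded_qp() itself is defined elsewhere in the driver.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t folded_src = 0x1234; /* hypothetical folded_qp(qpn) */
	uint16_t folded_dst = 0x0f0f; /* hypothetical folded_qp(dest_qpn) */
	uint16_t entropy = (uint16_t)((folded_dst ^ folded_src) | 0xC000);

	assert(entropy >= 0xC000);
	return 0;
}
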
@ -72,29 +72,29 @@ int mlx4_reset(struct mlx4_dev *dev)
|
||||
hca_header = kmalloc(256, GFP_KERNEL);
|
||||
if (!hca_header) {
|
||||
err = -ENOMEM;
|
||||
mlx4_err(dev, "Couldn't allocate memory to save HCA "
|
||||
"PCI header, aborting.\n");
|
||||
mlx4_err(dev, "Couldn't allocate memory to save HCA PCI header, aborting\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
pcie_cap = pci_pcie_cap(dev->pdev);
|
||||
pcie_cap = pci_pcie_cap(dev->persist->pdev);
|
||||
|
||||
for (i = 0; i < 64; ++i) {
|
||||
if (i == 22 || i == 23)
|
||||
continue;
|
||||
if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) {
|
||||
if (pci_read_config_dword(dev->persist->pdev, i * 4,
|
||||
hca_header + i)) {
|
||||
err = -ENODEV;
|
||||
mlx4_err(dev, "Couldn't save HCA "
|
||||
"PCI header, aborting.\n");
|
||||
mlx4_err(dev, "Couldn't save HCA PCI header, aborting\n");
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
reset = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_RESET_BASE,
|
||||
reset = ioremap(pci_resource_start(dev->persist->pdev, 0) +
|
||||
MLX4_RESET_BASE,
|
||||
MLX4_RESET_SIZE);
|
||||
if (!reset) {
|
||||
err = -ENOMEM;
|
||||
mlx4_err(dev, "Couldn't map HCA reset register, aborting.\n");
|
||||
mlx4_err(dev, "Couldn't map HCA reset register, aborting\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
@ -119,13 +119,13 @@ int mlx4_reset(struct mlx4_dev *dev)
|
||||
writel(MLX4_RESET_VALUE, reset + MLX4_RESET_OFFSET);
|
||||
iounmap(reset);
|
||||
|
||||
/* wait half a second before accessing device */
|
||||
msleep(500);
|
||||
/* Docs say to wait one second before accessing device */
|
||||
msleep(1000);
|
||||
|
||||
end = jiffies + MLX4_RESET_TIMEOUT_JIFFIES;
|
||||
do {
|
||||
if (!pci_read_config_word(dev->pdev, PCI_VENDOR_ID, &vendor) &&
|
||||
vendor != 0xffff)
|
||||
if (!pci_read_config_word(dev->persist->pdev, PCI_VENDOR_ID,
|
||||
&vendor) && vendor != 0xffff)
|
||||
break;
|
||||
|
||||
msleep(1);
|
||||
@ -133,27 +133,26 @@ int mlx4_reset(struct mlx4_dev *dev)
|
||||
|
||||
if (vendor == 0xffff) {
|
||||
err = -ENODEV;
|
||||
mlx4_err(dev, "PCI device did not come back after reset, "
|
||||
"aborting.\n");
|
||||
mlx4_err(dev, "PCI device did not come back after reset, aborting\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Now restore the PCI headers */
|
||||
if (pcie_cap) {
|
||||
devctl = hca_header[(pcie_cap + PCI_EXP_DEVCTL) / 4];
|
||||
if (pcie_capability_write_word(dev->pdev, PCI_EXP_DEVCTL,
|
||||
if (pcie_capability_write_word(dev->persist->pdev,
|
||||
PCI_EXP_DEVCTL,
|
||||
devctl)) {
|
||||
err = -ENODEV;
|
||||
mlx4_err(dev, "Couldn't restore HCA PCI Express "
|
||||
"Device Control register, aborting.\n");
|
||||
mlx4_err(dev, "Couldn't restore HCA PCI Express Device Control register, aborting\n");
|
||||
goto out;
|
||||
}
|
||||
linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4];
|
||||
if (pcie_capability_write_word(dev->pdev, PCI_EXP_LNKCTL,
|
||||
if (pcie_capability_write_word(dev->persist->pdev,
|
||||
PCI_EXP_LNKCTL,
|
||||
linkctl)) {
|
||||
err = -ENODEV;
|
||||
mlx4_err(dev, "Couldn't restore HCA PCI Express "
|
||||
"Link control register, aborting.\n");
|
||||
mlx4_err(dev, "Couldn't restore HCA PCI Express Link control register, aborting\n");
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
@ -162,19 +161,19 @@ int mlx4_reset(struct mlx4_dev *dev)
|
||||
if (i * 4 == PCI_COMMAND)
|
||||
continue;
|
||||
|
||||
if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) {
|
||||
if (pci_write_config_dword(dev->persist->pdev, i * 4,
|
||||
hca_header[i])) {
|
||||
err = -ENODEV;
|
||||
mlx4_err(dev, "Couldn't restore HCA reg %x, "
|
||||
"aborting.\n", i);
|
||||
mlx4_err(dev, "Couldn't restore HCA reg %x, aborting\n",
|
||||
i);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
if (pci_write_config_dword(dev->pdev, PCI_COMMAND,
|
||||
if (pci_write_config_dword(dev->persist->pdev, PCI_COMMAND,
|
||||
hca_header[PCI_COMMAND / 4])) {
|
||||
err = -ENODEV;
|
||||
mlx4_err(dev, "Couldn't restore HCA COMMAND, "
|
||||
"aborting.\n");
|
||||
mlx4_err(dev, "Couldn't restore HCA COMMAND, aborting\n");
|
||||
goto out;
|
||||
}

File diff suppressed because it is too large
@ -53,7 +53,7 @@ int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
	}

	if (out_param > 2) {
		mlx4_err(dev, "Sense returned illegal value: 0x%llx\n", (unsigned long long)out_param);
		mlx4_err(dev, "Sense returned illegal value: 0x%llx\n", (long long)out_param);
		return -EINVAL;
	}

@ -98,6 +98,10 @@ static void mlx4_sense_port(struct work_struct *work)
	enum mlx4_port_type stype[MLX4_MAX_PORTS];

	mutex_lock(&priv->port_mutex);
	if (sense->gone != 0) {
		mutex_unlock(&priv->port_mutex);
		return;
	}
	mlx4_do_sense_ports(dev, stype, &dev->caps.port_type[1]);

	if (mlx4_check_port_params(dev, stype))
@ -107,9 +111,9 @@ static void mlx4_sense_port(struct work_struct *work)
		mlx4_err(dev, "Failed to change port_types\n");

sense_again:
	mutex_unlock(&priv->port_mutex);
	queue_delayed_work(mlx4_wq , &sense->sense_poll,
			   round_jiffies_relative(MLX4_SENSE_RANGE));
	mutex_unlock(&priv->port_mutex);
}

void mlx4_start_sense(struct mlx4_dev *dev)
@ -120,12 +124,22 @@ void mlx4_start_sense(struct mlx4_dev *dev)
	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP))
		return;

	mutex_lock(&priv->port_mutex);
	sense->gone = 0;
	queue_delayed_work(mlx4_wq , &sense->sense_poll,
			   round_jiffies_relative(MLX4_SENSE_RANGE));
	mutex_unlock(&priv->port_mutex);
}

void mlx4_stop_sense(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_sense *sense = &priv->sense;

	mutex_lock(&priv->port_mutex);
	sense->gone = 1;
	mutex_unlock(&priv->port_mutex);

	cancel_delayed_work_sync(&mlx4_priv(dev)->sense.sense_poll);
}

|
@ -35,6 +35,7 @@
|
||||
#include <dev/mlx4/srq.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/rcupdate.h>
|
||||
|
||||
#include "mlx4.h"
|
||||
#include "icm.h"
|
||||
@ -44,15 +45,12 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
|
||||
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
|
||||
struct mlx4_srq *srq;
|
||||
|
||||
spin_lock(&srq_table->lock);
|
||||
|
||||
rcu_read_lock();
|
||||
srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1));
|
||||
rcu_read_unlock();
|
||||
if (srq)
|
||||
atomic_inc(&srq->refcount);
|
||||
|
||||
spin_unlock(&srq_table->lock);
|
||||
|
||||
if (!srq) {
|
||||
else {
|
||||
mlx4_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
|
||||
return;
|
||||
}
|
||||
@ -102,11 +100,11 @@ int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
|
||||
if (*srqn == -1)
|
||||
return -ENOMEM;
|
||||
|
||||
err = mlx4_table_get(dev, &srq_table->table, *srqn);
|
||||
err = mlx4_table_get(dev, &srq_table->table, *srqn, GFP_KERNEL);
|
||||
if (err)
|
||||
goto err_out;
|
||||
|
||||
err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn);
|
||||
err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn, GFP_KERNEL);
|
||||
if (err)
|
||||
goto err_put;
|
||||
return 0;
|
||||
@ -187,8 +185,6 @@ int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
|
||||
}
|
||||
|
||||
srq_context = mailbox->buf;
|
||||
memset(srq_context, 0, sizeof *srq_context);
|
||||
|
||||
srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) |
|
||||
srq->srqn);
|
||||
srq_context->logstride = srq->wqe_shift - 4;
|
||||
@ -302,12 +298,11 @@ struct mlx4_srq *mlx4_srq_lookup(struct mlx4_dev *dev, u32 srqn)
|
||||
{
|
||||
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
|
||||
struct mlx4_srq *srq;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&srq_table->lock, flags);
|
||||
rcu_read_lock();
|
||||
srq = radix_tree_lookup(&srq_table->tree,
|
||||
srqn & (dev->caps.num_srqs - 1));
|
||||
spin_unlock_irqrestore(&srq_table->lock, flags);
|
||||
rcu_read_unlock();
|
||||
|
||||
return srq;
|
||||
}
|
||||
|
@ -1,323 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2010, 2014 Mellanox Technologies. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <asm/atomic.h>
|
||||
|
||||
#include "mlx4.h"
|
||||
|
||||
#if defined(CONFIG_X86) && defined(CONFIG_APM_MODULE)
|
||||
|
||||
/* Each CPU is put into a group. In most cases, the group number is
|
||||
* equal to the CPU number of one of the CPUs in the group. The
|
||||
* exception is group NR_CPUS which is the default group. This is
|
||||
* protected by sys_tune_startup_mutex. */
|
||||
DEFINE_PER_CPU(int, idle_cpu_group) = NR_CPUS;
|
||||
|
||||
/* For each group, a count of the number of CPUs in the group which
|
||||
* are known to be busy. A busy CPU might be running the busy loop
|
||||
* below or general kernel code. The count is decremented on entry to
|
||||
* the old pm_idle handler and incremented on exit. The aim is to
|
||||
* avoid the count going to zero or negative. This situation can
|
||||
* occur temporarily during module unload or CPU hot-plug but
|
||||
* normality will be restored when the affected CPUs next exit the
|
||||
* idle loop. */
|
||||
static atomic_t busy_cpu_count[NR_CPUS+1];
|
||||
|
||||
/* A workqueue item to be executed to cause the CPU to exit from the
|
||||
* idle loop. */
|
||||
DEFINE_PER_CPU(struct work_struct, sys_tune_cpu_work);
|
||||
|
||||
#define sys_tune_set_state(CPU,STATE) \
|
||||
do { } while(0)
|
||||
|
||||
|
||||
/* A mutex to protect most of the module datastructures. */
|
||||
static DEFINE_MUTEX(sys_tune_startup_mutex);
|
||||
|
||||
/* The old pm_idle handler. */
|
||||
static void (*old_pm_idle)(void) = NULL;
|
||||
|
||||
static void sys_tune_pm_idle(void)
|
||||
{
|
||||
atomic_t *busy_cpus_ptr;
|
||||
int busy_cpus;
|
||||
int cpu = smp_processor_id();
|
||||
|
||||
busy_cpus_ptr = &(busy_cpu_count[per_cpu(idle_cpu_group, cpu)]);
|
||||
|
||||
sys_tune_set_state(cpu, 2);
|
||||
|
||||
local_irq_enable();
|
||||
while (!need_resched()) {
|
||||
busy_cpus = atomic_read(busy_cpus_ptr);
|
||||
|
||||
/* If other CPUs in this group are busy then let this
|
||||
* CPU go idle. We mustn't let the number of busy
|
||||
* CPUs drop below 1. */
|
||||
if ( busy_cpus > 1 &&
|
||||
old_pm_idle != NULL &&
|
||||
( atomic_cmpxchg(busy_cpus_ptr, busy_cpus,
|
||||
busy_cpus-1) == busy_cpus ) ) {
|
||||
local_irq_disable();
|
||||
sys_tune_set_state(cpu, 3);
|
||||
/* This check might not be necessary, but it
|
||||
* seems safest to include it because there
|
||||
* might be a kernel version which requires
|
||||
* it. */
|
||||
if (need_resched())
|
||||
local_irq_enable();
|
||||
else
|
||||
old_pm_idle();
|
||||
/* This CPU is busy again. */
|
||||
sys_tune_set_state(cpu, 1);
|
||||
atomic_add(1, busy_cpus_ptr);
|
||||
return;
|
||||
}
|
||||
|
||||
cpu_relax();
|
||||
}
|
||||
sys_tune_set_state(cpu, 0);
|
||||
}
|
||||
|
||||
|
||||
void sys_tune_work_func(struct work_struct *work)
|
||||
{
|
||||
/* Do nothing. Since this function is running in process
|
||||
* context, the idle thread isn't running on this CPU. */
|
||||
}
|
||||
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
static void sys_tune_smp_call(void *info)
|
||||
{
|
||||
schedule_work(&get_cpu_var(sys_tune_cpu_work));
|
||||
put_cpu_var(sys_tune_cpu_work);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
static void sys_tune_refresh(void)
|
||||
{
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
|
||||
on_each_cpu(&sys_tune_smp_call, NULL, 0, 1);
|
||||
#else
|
||||
on_each_cpu(&sys_tune_smp_call, NULL, 1);
|
||||
#endif
|
||||
}
|
||||
#else
|
||||
static void sys_tune_refresh(void)
|
||||
{
|
||||
/* The current thread is executing on the one and only CPU so
|
||||
* the idle thread isn't running. */
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
|
||||
static int sys_tune_cpu_group(int cpu)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
const cpumask_t *mask;
|
||||
int other_cpu;
|
||||
int group;
|
||||
|
||||
#if defined(topology_thread_cpumask) && defined(ST_HAVE_EXPORTED_CPU_SIBLING_MAP)
|
||||
/* Keep one hyperthread busy per core. */
|
||||
mask = topology_thread_cpumask(cpu);
|
||||
#else
|
||||
return cpu;
|
||||
#endif
|
||||
for_each_cpu_mask(cpu, *(mask)) {
|
||||
group = per_cpu(idle_cpu_group, other_cpu);
|
||||
if (group != NR_CPUS)
|
||||
return group;
|
||||
}
|
||||
#endif
|
||||
|
||||
return cpu;
|
||||
}
|
||||
|
||||
|
||||
static void sys_tune_add_cpu(int cpu)
|
||||
{
|
||||
int group;
|
||||
|
||||
/* Do nothing if this CPU has already been added. */
|
||||
if (per_cpu(idle_cpu_group, cpu) != NR_CPUS)
|
||||
return;
|
||||
|
||||
group = sys_tune_cpu_group(cpu);
|
||||
per_cpu(idle_cpu_group, cpu) = group;
|
||||
atomic_inc(&(busy_cpu_count[group]));
|
||||
|
||||
}
|
||||
|
||||
static void sys_tune_del_cpu(int cpu)
|
||||
{
|
||||
|
||||
int group;
|
||||
|
||||
if (per_cpu(idle_cpu_group, cpu) == NR_CPUS)
|
||||
return;
|
||||
|
||||
group = per_cpu(idle_cpu_group, cpu);
|
||||
/* If the CPU was busy, this can cause the count to drop to
|
||||
* zero. To rectify this, we need to cause one of the other
|
||||
* CPUs in the group to exit the idle loop. If the CPU was
|
||||
* not busy then this causes the contribution for this CPU to
|
||||
* go to -1 which can cause the overall count to drop to zero
|
||||
* or go negative. To rectify this situation we need to cause
|
||||
* this CPU to exit the idle loop. */
|
||||
atomic_dec(&(busy_cpu_count[group]));
|
||||
per_cpu(idle_cpu_group, cpu) = NR_CPUS;
|
||||
|
||||
}
|
||||
|
||||
|
||||
static int sys_tune_cpu_notify(struct notifier_block *self,
|
||||
unsigned long action, void *hcpu)
|
||||
{
|
||||
int cpu = (long)hcpu;
|
||||
|
||||
switch(action) {
|
||||
#ifdef CPU_ONLINE_FROZEN
|
||||
case CPU_ONLINE_FROZEN:
|
||||
#endif
|
||||
case CPU_ONLINE:
|
||||
mutex_lock(&sys_tune_startup_mutex);
|
||||
sys_tune_add_cpu(cpu);
|
||||
mutex_unlock(&sys_tune_startup_mutex);
|
||||
/* The CPU might have already entered the idle loop in
|
||||
* the wrong group. Make sure it exits the idle loop
|
||||
* so that it picks up the correct group. */
|
||||
sys_tune_refresh();
|
||||
break;
|
||||
|
||||
#ifdef CPU_DEAD_FROZEN
|
||||
case CPU_DEAD_FROZEN:
|
||||
#endif
|
||||
case CPU_DEAD:
|
||||
mutex_lock(&sys_tune_startup_mutex);
|
||||
sys_tune_del_cpu(cpu);
|
||||
mutex_unlock(&sys_tune_startup_mutex);
|
||||
/* The deleted CPU may have been the only busy CPU in
|
||||
* the group. Make sure one of the other CPUs in the
|
||||
* group exits the idle loop. */
|
||||
sys_tune_refresh();
|
||||
break;
|
||||
}
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
|
||||
static struct notifier_block sys_tune_cpu_nb = {
|
||||
.notifier_call = sys_tune_cpu_notify,
|
||||
};
|
||||
|
||||
|
||||
static void sys_tune_ensure_init(void)
|
||||
{
|
||||
BUG_ON (old_pm_idle != NULL);
|
||||
|
||||
/* Atomically update pm_idle to &sys_tune_pm_idle. The old value
|
||||
* is stored in old_pm_idle before installing the new
|
||||
* handler. */
|
||||
do {
|
||||
old_pm_idle = pm_idle;
|
||||
} while (cmpxchg(&pm_idle, old_pm_idle, &sys_tune_pm_idle) !=
|
||||
old_pm_idle);
|
||||
}
|
||||
#endif
|
||||
|
||||
void sys_tune_fini(void)
|
||||
{
|
||||
#if defined(CONFIG_X86) && defined(CONFIG_APM_MODULE)
|
||||
void (*old)(void);
|
||||
int cpu;
|
||||
|
||||
unregister_cpu_notifier(&sys_tune_cpu_nb);
|
||||
|
||||
mutex_lock(&sys_tune_startup_mutex);
|
||||
|
||||
|
||||
old = cmpxchg(&pm_idle, &sys_tune_pm_idle, old_pm_idle);
|
||||
|
||||
for_each_online_cpu(cpu)
|
||||
sys_tune_del_cpu(cpu);
|
||||
|
||||
mutex_unlock(&sys_tune_startup_mutex);
|
||||
|
||||
/* Our handler may still be executing on other CPUs.
|
||||
* Schedule this thread on all CPUs to make sure all
|
||||
* idle threads get interrupted. */
|
||||
sys_tune_refresh();
|
||||
|
||||
/* Make sure the work item has finished executing on all CPUs.
|
||||
* This in turn ensures that all idle threads have been
|
||||
* interrupted. */
|
||||
flush_scheduled_work();
|
||||
#endif /* CONFIG_X86 */
|
||||
}
|
||||
|
||||
void sys_tune_init(void)
|
||||
{
|
||||
#if defined(CONFIG_X86) && defined(CONFIG_APM_MODULE)
|
||||
int cpu;
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
INIT_WORK(&per_cpu(sys_tune_cpu_work, cpu),
|
||||
sys_tune_work_func);
|
||||
}
|
||||
|
||||
/* Start by registering the handler to ensure we don't miss
|
||||
* any updates. */
|
||||
register_cpu_notifier(&sys_tune_cpu_nb);
|
||||
|
||||
mutex_lock(&sys_tune_startup_mutex);
|
||||
|
||||
for_each_online_cpu(cpu)
|
||||
sys_tune_add_cpu(cpu);
|
||||
|
||||
sys_tune_ensure_init();
|
||||
|
||||
|
||||
mutex_unlock(&sys_tune_startup_mutex);
|
||||
|
||||
/* Ensure our idle handler starts to run. */
|
||||
sys_tune_refresh();
|
||||
#endif
|
||||
}
|
||||
|
@ -75,6 +75,7 @@
|
||||
#define MIN_RX_RINGS 4
|
||||
#define TXBB_SIZE 64
|
||||
#define HEADROOM (2048 / TXBB_SIZE + 1)
|
||||
#define INIT_OWNER_BIT 0xffffffff
|
||||
#define STAMP_STRIDE 64
|
||||
#define STAMP_DWORDS (STAMP_STRIDE / 4)
|
||||
#define STAMP_SHIFT 31
|
||||
@ -131,11 +132,13 @@ enum mlx4_en_alloc_type {
|
||||
#define MAX_TX_RINGS (MLX4_EN_MAX_TX_RING_P_UP * \
|
||||
MLX4_EN_NUM_UP)
|
||||
|
||||
#define MLX4_EN_NO_VLAN 0xffff
|
||||
|
||||
#define MLX4_EN_DEF_TX_RING_SIZE 1024
|
||||
#define MLX4_EN_DEF_RX_RING_SIZE 1024
|
||||
|
||||
/* Target number of bytes to coalesce with interrupt moderation */
|
||||
#define MLX4_EN_RX_COAL_TARGET 0x20000
|
||||
#define MLX4_EN_RX_COAL_TARGET 44
|
||||
#define MLX4_EN_RX_COAL_TIME 0x10
|
||||
|
||||
#define MLX4_EN_TX_COAL_PKTS 64
|
||||
@ -192,6 +195,13 @@ enum mlx4_en_alloc_type {
|
||||
#define GET_AVG_PERF_COUNTER(cnt) (0)
|
||||
#endif /* MLX4_EN_PERF_STAT */
|
||||
|
||||
/* Constants for TX flow */
|
||||
enum {
|
||||
MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
|
||||
MAX_BF = 256,
|
||||
MIN_PKT_LEN = 17,
|
||||
};
|
||||
|
||||
/*
|
||||
* Configurables
|
||||
*/
|
||||
@ -264,7 +274,6 @@ struct mlx4_en_tx_ring {
|
||||
int blocked;
|
||||
struct mlx4_en_tx_info *tx_info;
|
||||
u8 queue_index;
|
||||
cpuset_t affinity_mask;
|
||||
struct buf_ring *br;
|
||||
u32 last_nr_txbb;
|
||||
struct mlx4_qp qp;
|
||||
@ -272,14 +281,14 @@ struct mlx4_en_tx_ring {
	int qpn;
	enum mlx4_qp_state qp_state;
	struct mlx4_srq dummy;
	unsigned long bytes;
	unsigned long packets;
	unsigned long tx_csum;
	unsigned long queue_stopped;
	unsigned long oversized_packets;
	unsigned long wake_queue;
	unsigned long tso_packets;
	unsigned long defrag_attempts;
	u64 bytes;
	u64 packets;
	u64 tx_csum;
	u64 queue_stopped;
	u64 oversized_packets;
	u64 wake_queue;
	u64 tso_packets;
	u64 defrag_attempts;
	struct mlx4_bf bf;
	bool bf_enabled;
	int hwtstamp_tx_type;
@ -322,16 +331,16 @@ struct mlx4_en_rx_ring {
	int qpn;
	u8 *buf;
	struct mlx4_en_rx_mbuf *mbuf;
	unsigned long errors;
	unsigned long bytes;
	unsigned long packets;
	u64 errors;
	u64 bytes;
	u64 packets;
#ifdef LL_EXTENDED_STATS
	unsigned long yields;
	unsigned long misses;
	unsigned long cleaned;
	u64 yields;
	u64 misses;
	u64 cleaned;
#endif
	unsigned long csum_ok;
	unsigned long csum_none;
	u64 csum_ok;
	u64 csum_none;
	int hwtstamp_rx_filter;
	int numa_node;
	struct lro_ctrl lro;
@ -385,14 +394,14 @@ struct mlx4_en_cq {
|
||||
|
||||
#ifdef CONFIG_NET_RX_BUSY_POLL
|
||||
unsigned int state;
|
||||
#define MLX4_EN_CQ_STATEIDLE 0
|
||||
#define MLX4_EN_CQ_STATENAPI 1 /* NAPI owns this CQ */
|
||||
#define MLX4_EN_CQ_STATEPOLL 2 /* poll owns this CQ */
|
||||
#define MLX4_CQ_LOCKED (MLX4_EN_CQ_STATENAPI | MLX4_EN_CQ_STATEPOLL)
|
||||
#define MLX4_EN_CQ_STATENAPI_YIELD 4 /* NAPI yielded this CQ */
|
||||
#define MLX4_EN_CQ_STATEPOLL_YIELD 8 /* poll yielded this CQ */
|
||||
#define CQ_YIELD (MLX4_EN_CQ_STATENAPI_YIELD | MLX4_EN_CQ_STATEPOLL_YIELD)
|
||||
#define CQ_USER_PEND (MLX4_EN_CQ_STATEPOLL | MLX4_EN_CQ_STATEPOLL_YIELD)
|
||||
#define MLX4_EN_CQ_STATE_IDLE 0
|
||||
#define MLX4_EN_CQ_STATE_NAPI 1 /* NAPI owns this CQ */
|
||||
#define MLX4_EN_CQ_STATE_POLL 2 /* poll owns this CQ */
|
||||
#define MLX4_CQ_LOCKED (MLX4_EN_CQ_STATE_NAPI | MLX4_EN_CQ_STATE_POLL)
|
||||
#define MLX4_EN_CQ_STATE_NAPI_YIELD 4 /* NAPI yielded this CQ */
|
||||
#define MLX4_EN_CQ_STATE_POLL_YIELD 8 /* poll yielded this CQ */
|
||||
#define CQ_YIELD (MLX4_EN_CQ_STATE_NAPI_YIELD | MLX4_EN_CQ_STATE_POLL_YIELD)
|
||||
#define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD)
|
||||
spinlock_t poll_lock; /* protects from LLS/napi conflicts */
|
||||
#endif /* CONFIG_NET_RX_BUSY_POLL */
|
||||
};
|
||||
@ -408,6 +417,7 @@ struct mlx4_en_port_profile {
|
||||
u8 tx_pause;
|
||||
u8 tx_ppp;
|
||||
int rss_rings;
|
||||
int inline_thold;
|
||||
};
|
||||
|
||||
struct mlx4_en_profile {
|
||||
@ -451,24 +461,30 @@ struct mlx4_en_rss_map {
|
||||
enum mlx4_qp_state indir_state;
|
||||
};
|
||||
|
||||
enum mlx4_en_port_flag {
|
||||
MLX4_EN_PORT_ANC = 1<<0, /* Auto-negotiation complete */
|
||||
MLX4_EN_PORT_ANE = 1<<1, /* Auto-negotiation enabled */
|
||||
};
|
||||
|
||||
struct mlx4_en_port_state {
|
||||
int link_state;
|
||||
int link_speed;
|
||||
int transciver;
|
||||
int autoneg;
|
||||
int transceiver;
|
||||
u32 flags;
|
||||
};
|
||||
|
||||
enum mlx4_en_mclist_act {
|
||||
MCLIST_NONE,
|
||||
MCLIST_REM,
|
||||
MCLIST_ADD,
|
||||
enum mlx4_en_addr_list_act {
|
||||
MLX4_ADDR_LIST_NONE,
|
||||
MLX4_ADDR_LIST_REM,
|
||||
MLX4_ADDR_LIST_ADD,
|
||||
};
|
||||
|
||||
struct mlx4_en_mc_list {
|
||||
struct mlx4_en_addr_list {
|
||||
struct list_head list;
|
||||
enum mlx4_en_mclist_act action;
|
||||
enum mlx4_en_addr_list_act action;
|
||||
u8 addr[ETH_ALEN];
|
||||
u64 reg_id;
|
||||
u64 tunnel_reg_id;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_MLX4_EN_DCB
|
||||
@ -476,6 +492,7 @@ struct mlx4_en_mc_list {
|
||||
#define MLX4_EN_BW_MIN 1
|
||||
#define MLX4_EN_BW_MAX 100 /* Utilize 100% of the line */
|
||||
|
||||
#define MLX4_EN_TC_VENDOR 0
|
||||
#define MLX4_EN_TC_ETS 7
|
||||
|
||||
#endif
|
||||
@ -541,6 +558,7 @@ struct mlx4_en_priv {
|
||||
bool port_up;
|
||||
int port;
|
||||
int registered;
|
||||
int gone;
|
||||
int allocated;
|
||||
int stride;
|
||||
unsigned char current_mac[ETH_ALEN + 2];
|
||||
@ -570,12 +588,17 @@ struct mlx4_en_priv {
|
||||
struct mlx4_en_perf_stats pstats;
|
||||
struct mlx4_en_pkt_stats pkstats;
|
||||
struct mlx4_en_pkt_stats pkstats_last;
|
||||
struct mlx4_en_flow_stats flowstats[MLX4_NUM_PRIORITIES];
|
||||
struct mlx4_en_flow_stats_rx rx_priority_flowstats[MLX4_NUM_PRIORITIES];
|
||||
struct mlx4_en_flow_stats_tx tx_priority_flowstats[MLX4_NUM_PRIORITIES];
|
||||
struct mlx4_en_flow_stats_rx rx_flowstats;
|
||||
struct mlx4_en_flow_stats_tx tx_flowstats;
|
||||
struct mlx4_en_port_stats port_stats;
|
||||
struct mlx4_en_vport_stats vport_stats;
|
||||
struct mlx4_en_vf_stats vf_stats;
|
||||
struct list_head mc_list;
|
||||
struct list_head curr_list;
|
||||
struct list_head uc_list;
|
||||
struct list_head curr_mc_list;
|
||||
struct list_head curr_uc_list;
|
||||
u64 broadcast_id;
|
||||
struct mlx4_en_stat_out_mbox hw_stats;
|
||||
int vids[128];
|
||||
@ -592,8 +615,6 @@ struct mlx4_en_priv {
|
||||
struct sysctl_oid *stat_sysctl;
|
||||
struct sysctl_ctx_list conf_ctx;
|
||||
struct sysctl_ctx_list stat_ctx;
|
||||
#define MLX4_EN_MAC_HASH_IDX 5
|
||||
struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];
|
||||
|
||||
#ifdef CONFIG_MLX4_EN_DCB
|
||||
struct ieee_ets ets;
|
||||
@ -606,6 +627,7 @@ struct mlx4_en_priv {
|
||||
struct list_head filters;
|
||||
struct hlist_head filter_hash[1 << MLX4_EN_FILTER_HASH_SHIFT];
|
||||
#endif
|
||||
u64 tunnel_reg_id;
|
||||
struct en_port *vf_ports[MLX4_MAX_NUM_VF];
|
||||
unsigned long last_ifq_jiffies;
|
||||
u64 if_counters_rx_errors;
|
||||
@ -623,11 +645,16 @@ struct mlx4_mac_entry {
|
||||
u64 reg_id;
|
||||
};
|
||||
|
||||
static inline struct mlx4_cqe *mlx4_en_get_cqe(u8 *buf, int idx, int cqe_sz)
|
||||
{
|
||||
return (struct mlx4_cqe *)(buf + idx * cqe_sz);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_NET_RX_BUSY_POLL
|
||||
static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq)
|
||||
{
|
||||
spin_lock_init(&cq->poll_lock);
|
||||
cq->state = MLX4_EN_CQ_STATEIDLE;
|
||||
cq->state = MLX4_EN_CQ_STATE_IDLE;
|
||||
}
|
||||
|
||||
/* called from the device poll rutine to get ownership of a cq */
|
||||
@ -636,12 +663,12 @@ static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq)
|
||||
int rc = true;
|
||||
spin_lock(&cq->poll_lock);
|
||||
if (cq->state & MLX4_CQ_LOCKED) {
|
||||
WARN_ON(cq->state & MLX4_EN_CQ_STATENAPI);
|
||||
cq->state |= MLX4_EN_CQ_STATENAPI_YIELD;
|
||||
WARN_ON(cq->state & MLX4_EN_CQ_STATE_NAPI);
|
||||
cq->state |= MLX4_EN_CQ_STATE_NAPI_YIELD;
|
||||
rc = false;
|
||||
} else
|
||||
/* we don't care if someone yielded */
|
||||
cq->state = MLX4_EN_CQ_STATENAPI;
|
||||
cq->state = MLX4_EN_CQ_STATE_NAPI;
|
||||
spin_unlock(&cq->poll_lock);
|
||||
return rc;
|
||||
}
|
||||
@ -651,12 +678,12 @@ static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq)
|
||||
{
|
||||
int rc = false;
|
||||
spin_lock(&cq->poll_lock);
|
||||
WARN_ON(cq->state & (MLX4_EN_CQ_STATEPOLL |
|
||||
MLX4_EN_CQ_STATENAPI_YIELD));
|
||||
WARN_ON(cq->state & (MLX4_EN_CQ_STATE_POLL |
|
||||
MLX4_EN_CQ_STATE_NAPI_YIELD));
|
||||
|
||||
if (cq->state & MLX4_EN_CQ_STATEPOLL_YIELD)
|
||||
if (cq->state & MLX4_EN_CQ_STATE_POLL_YIELD)
|
||||
rc = true;
|
||||
cq->state = MLX4_EN_CQ_STATEIDLE;
|
||||
cq->state = MLX4_EN_CQ_STATE_IDLE;
|
||||
spin_unlock(&cq->poll_lock);
|
||||
return rc;
|
||||
}
|
||||
@ -671,14 +698,14 @@ static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq)
|
||||
struct mlx4_en_priv *priv = netdev_priv(dev);
|
||||
struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
|
||||
|
||||
cq->state |= MLX4_EN_CQ_STATEPOLL_YIELD;
|
||||
cq->state |= MLX4_EN_CQ_STATE_POLL_YIELD;
|
||||
rc = false;
|
||||
#ifdef LL_EXTENDED_STATS
|
||||
rx_ring->yields++;
|
||||
#endif
|
||||
} else
|
||||
/* preserve yield marks */
|
||||
cq->state |= MLX4_EN_CQ_STATEPOLL;
|
||||
cq->state |= MLX4_EN_CQ_STATE_POLL;
|
||||
spin_unlock_bh(&cq->poll_lock);
|
||||
return rc;
|
||||
}
|
||||
@ -688,17 +715,17 @@ static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
|
||||
{
|
||||
int rc = false;
|
||||
spin_lock_bh(&cq->poll_lock);
|
||||
WARN_ON(cq->state & (MLX4_EN_CQ_STATENAPI));
|
||||
WARN_ON(cq->state & (MLX4_EN_CQ_STATE_NAPI));
|
||||
|
||||
if (cq->state & MLX4_EN_CQ_STATEPOLL_YIELD)
|
||||
if (cq->state & MLX4_EN_CQ_STATE_POLL_YIELD)
|
||||
rc = true;
|
||||
cq->state = MLX4_EN_CQ_STATEIDLE;
|
||||
cq->state = MLX4_EN_CQ_STATE_IDLE;
|
||||
spin_unlock_bh(&cq->poll_lock);
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* true if a socket is polling, even if it did not get the lock */
|
||||
static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq)
|
||||
static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq)
|
||||
{
|
||||
WARN_ON(!(cq->state & MLX4_CQ_LOCKED));
|
||||
return cq->state & CQ_USER_PEND;
|
||||
@ -728,7 +755,7 @@ static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq)
|
||||
static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
@ -770,6 +797,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
|
||||
int cq, int user_prio);
|
||||
void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
|
||||
struct mlx4_en_tx_ring *ring);
|
||||
void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
|
||||
void mlx4_en_qflush(struct ifnet *dev);
|
||||
|
||||
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
|
||||
@ -883,10 +911,10 @@ enum {
|
||||
{ \
|
||||
if ((priv)->registered) \
|
||||
printk(level "%s: %s: " format, DRV_NAME, \
|
||||
(priv->dev)->if_xname, ## arg); \
|
||||
(priv)->dev->if_xname, ## arg); \
|
||||
else \
|
||||
printk(level "%s: %s: Port %d: " format, \
|
||||
DRV_NAME, dev_name(&priv->mdev->pdev->dev), \
|
||||
DRV_NAME, dev_name(&(priv)->mdev->pdev->dev), \
|
||||
(priv)->port, ## arg); \
|
||||
}
|
||||
|
||||
@ -905,12 +933,12 @@ do { \
|
||||
|
||||
#define mlx4_err(mdev, format, arg...) \
|
||||
pr_err("%s %s: " format, DRV_NAME, \
|
||||
dev_name(&mdev->pdev->dev), ##arg)
|
||||
dev_name(&(mdev)->pdev->dev), ##arg)
|
||||
#define mlx4_info(mdev, format, arg...) \
|
||||
pr_info("%s %s: " format, DRV_NAME, \
|
||||
dev_name(&mdev->pdev->dev), ##arg)
|
||||
dev_name(&(mdev)->pdev->dev), ##arg)
|
||||
#define mlx4_warn(mdev, format, arg...) \
|
||||
pr_warning("%s %s: " format, DRV_NAME, \
|
||||
dev_name(&mdev->pdev->dev), ##arg)
|
||||
dev_name(&(mdev)->pdev->dev), ##arg)
|
||||
|
||||
#endif
|
||||
|
@ -53,7 +53,30 @@ enum {
|
||||
MLX4_MCAST_ENABLE = 2,
|
||||
};
|
||||
|
||||
enum mlx4_link_mode {
|
||||
MLX4_1000BASE_CX_SGMII = 0,
|
||||
MLX4_1000BASE_KX = 1,
|
||||
MLX4_10GBASE_CX4 = 2,
|
||||
MLX4_10GBASE_KX4 = 3,
|
||||
MLX4_10GBASE_KR = 4,
|
||||
MLX4_20GBASE_KR2 = 5,
|
||||
MLX4_40GBASE_CR4 = 6,
|
||||
MLX4_40GBASE_KR4 = 7,
|
||||
MLX4_56GBASE_KR4 = 8,
|
||||
MLX4_10GBASE_CR = 12,
|
||||
MLX4_10GBASE_SR = 13,
|
||||
MLX4_40GBASE_SR4 = 15,
|
||||
MLX4_56GBASE_CR4 = 17,
|
||||
MLX4_56GBASE_SR4 = 18,
|
||||
MLX4_100BASE_TX = 24,
|
||||
MLX4_1000BASE_T = 25,
|
||||
MLX4_10GBASE_T = 26,
|
||||
};
|
||||
|
||||
#define MLX4_PROT_MASK(link_mode) (1<<(link_mode))
|
||||
|
||||
enum {
|
||||
MLX4_EN_100M_SPEED = 0x04,
|
||||
MLX4_EN_10G_SPEED_XAUI = 0x00,
|
||||
MLX4_EN_10G_SPEED_XFI = 0x01,
|
||||
MLX4_EN_1G_SPEED = 0x02,
|
||||
@ -66,12 +89,13 @@ enum {
|
||||
struct mlx4_en_query_port_context {
|
||||
u8 link_up;
|
||||
#define MLX4_EN_LINK_UP_MASK 0x80
|
||||
#define MLX4_EN_ANC_MASK 0x40
|
||||
u8 autoneg;
|
||||
#define MLX4_EN_AUTONEG_MASK 0x80
|
||||
__be16 mtu;
|
||||
u8 reserved2;
|
||||
u8 link_speed;
|
||||
#define MLX4_EN_SPEED_MASK 0x6b
|
||||
#define MLX4_EN_SPEED_MASK 0x6f
|
||||
u16 reserved3[5];
|
||||
__be64 mac;
|
||||
u8 transceiver;
|
||||
@ -559,5 +583,4 @@ struct mlx4_en_stat_out_mbox {
|
||||
__be32 TDROP;
|
||||
};
|
||||
|
||||
|
||||
#endif
|
||||
|
@ -31,13 +31,14 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/rcupdate.h>
|
||||
|
||||
#include <dev/mlx4/cq.h>
|
||||
#include <dev/mlx4/qp.h>
|
||||
#include <dev/mlx4/cmd.h>
|
||||
|
||||
#include "en.h"
|
||||
|
||||
|
||||
static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event)
|
||||
{
|
||||
return;
|
||||
@ -53,11 +54,11 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
|
||||
struct mlx4_en_cq *cq;
|
||||
int err;
|
||||
|
||||
cq = kzalloc_node(sizeof(struct mlx4_en_cq), GFP_KERNEL, node);
|
||||
cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, node);
|
||||
if (!cq) {
|
||||
cq = kzalloc(sizeof(struct mlx4_en_cq), GFP_KERNEL);
|
||||
cq = kzalloc(sizeof(*cq), GFP_KERNEL);
|
||||
if (!cq) {
|
||||
en_err(priv, "Failed to allocate CW struture\n");
|
||||
en_err(priv, "Failed to allocate CQ structure\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
@ -80,6 +81,7 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
|
||||
|
||||
cq->ring = ring;
|
||||
cq->is_tx = mode;
|
||||
cq->vector = mdev->dev->caps.num_comp_vectors;
|
||||
spin_lock_init(&cq->lock);
|
||||
|
||||
err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
|
||||
@ -91,7 +93,7 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
|
||||
if (err)
|
||||
goto err_res;
|
||||
|
||||
cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf;
|
||||
cq->buf = (struct mlx4_cqe *)cq->wqres.buf.direct.buf;
|
||||
*pcq = cq;
|
||||
|
||||
return 0;
|
||||
@ -100,17 +102,17 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
|
||||
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
|
||||
err_cq:
|
||||
kfree(cq);
|
||||
*pcq = NULL;
|
||||
return err;
|
||||
}
|
||||
|
||||
|
||||
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
|
||||
int cq_idx)
|
||||
{
|
||||
struct mlx4_en_dev *mdev = priv->mdev;
|
||||
int err = 0;
|
||||
char name[25];
|
||||
int timestamp_en = 0;
|
||||
bool assigned_eq = false;
|
||||
|
||||
cq->dev = mdev->pndev[priv->port];
|
||||
cq->mcq.set_ci_db = cq->wqres.db.db;
|
||||
@ -120,22 +122,19 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
|
||||
memset(cq->buf, 0, cq->buf_size);
|
||||
|
||||
if (cq->is_tx == RX) {
|
||||
if (mdev->dev->caps.comp_pool) {
|
||||
if (!cq->vector) {
|
||||
sprintf(name, "%s-%d", if_name(priv->dev),
|
||||
cq->ring);
|
||||
/* Set IRQ for specific name (per ring) */
|
||||
if (mlx4_assign_eq(mdev->dev, name, &cq->vector)) {
|
||||
cq->vector = (cq->ring + 1 + priv->port)
|
||||
% mdev->dev->caps.num_comp_vectors;
|
||||
mlx4_warn(mdev, "Failed Assigning an EQ to "
|
||||
"%s ,Falling back to legacy EQ's\n",
|
||||
name);
|
||||
}
|
||||
if (!mlx4_is_eq_vector_valid(mdev->dev, priv->port,
|
||||
cq->vector)) {
|
||||
cq->vector = cq_idx % mdev->dev->caps.num_comp_vectors;
|
||||
|
||||
err = mlx4_assign_eq(mdev->dev, priv->port,
|
||||
&cq->vector);
|
||||
if (err) {
|
||||
mlx4_err(mdev, "Failed assigning an EQ to CQ vector %d\n",
|
||||
cq->vector);
|
||||
goto free_eq;
|
||||
}
|
||||
} else {
|
||||
cq->vector = (cq->ring + 1 + priv->port) %
|
||||
mdev->dev->caps.num_comp_vectors;
|
||||
|
||||
assigned_eq = true;
|
||||
}
|
||||
} else {
|
||||
struct mlx4_en_cq *rx_cq;
|
||||
@ -150,11 +149,12 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
|
||||
|
||||
if (!cq->is_tx)
|
||||
cq->size = priv->rx_ring[cq->ring]->actual_size;
|
||||
|
||||
err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt,
|
||||
&mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
|
||||
cq->vector, 0, timestamp_en);
|
||||
if (err)
|
||||
return err;
|
||||
goto free_eq;
|
||||
|
||||
cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
|
||||
cq->mcq.event = mlx4_en_cq_event;
|
||||
@ -167,6 +167,12 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
|
||||
|
||||
|
||||
return 0;
|
||||
|
||||
free_eq:
|
||||
if (assigned_eq)
|
||||
mlx4_release_eq(mdev->dev, cq->vector);
|
||||
cq->vector = mdev->dev->caps.num_comp_vectors;
|
||||
return err;
|
||||
}
|
||||
|
||||
void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
|
||||
@ -178,24 +184,28 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
|
||||
taskqueue_free(cq->tq);
|
||||
mlx4_en_unmap_buffer(&cq->wqres.buf);
|
||||
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
|
||||
if (priv->mdev->dev->caps.comp_pool && cq->vector)
|
||||
if (mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector) &&
|
||||
cq->is_tx == RX)
|
||||
mlx4_release_eq(priv->mdev->dev, cq->vector);
|
||||
cq->vector = 0;
|
||||
cq->buf_size = 0;
|
||||
cq->buf = NULL;
|
||||
kfree(cq);
|
||||
*pcq = NULL;
|
||||
}
|
||||
|
||||
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
|
||||
{
|
||||
struct mlx4_en_dev *mdev = priv->mdev;
|
||||
taskqueue_drain(cq->tq, &cq->cq_task);
|
||||
if (!cq->is_tx) {
|
||||
synchronize_rcu();
|
||||
} else {
|
||||
del_timer_sync(&cq->timer);
|
||||
}
|
||||
|
||||
taskqueue_drain(cq->tq, &cq->cq_task);
|
||||
if (cq->is_tx)
|
||||
del_timer_sync(&cq->timer);
|
||||
|
||||
mlx4_cq_free(mdev->dev, &cq->mcq);
|
||||
mlx4_cq_free(priv->mdev->dev, &cq->mcq);
|
||||
}
|
||||
|
||||
|
||||
/* Set rx cq moderation parameters */
|
||||
int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
|
||||
{
|
||||
|
@ -58,7 +58,7 @@
|
||||
|
||||
/* Enable RSS UDP traffic */
|
||||
MLX4_EN_PARM_INT(udp_rss, 1,
|
||||
"Enable RSS for incoming UDP traffic");
|
||||
"Enable RSS for incoming UDP traffic or disabled (0)");
|
||||
|
||||
/* Priority pausing */
|
||||
MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
|
||||
@ -66,9 +66,11 @@ MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
|
||||
MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
|
||||
" Per priority bit mask");
|
||||
|
||||
#define MAX_PFC_TX 0xff
|
||||
#define MAX_PFC_RX 0xff
|
||||
MLX4_EN_PARM_INT(inline_thold, MAX_INLINE,
|
||||
"Threshold for using inline data (range: 17-104, default: 104)");
|
||||
|
||||
#define MAX_PFC_TX 0xff
|
||||
#define MAX_PFC_RX 0xff
|
||||
|
||||
static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
|
||||
{
|
||||
@ -93,6 +95,7 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
|
||||
params->prof[i].tx_ring_num = params->num_tx_rings_p_up *
|
||||
MLX4_EN_NUM_UP;
|
||||
params->prof[i].rss_rings = 0;
|
||||
params->prof[i].inline_thold = inline_thold;
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -142,7 +145,7 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
|
||||
static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
|
||||
{
|
||||
struct mlx4_en_dev *mdev = endev_ptr;
|
||||
int i, ret;
|
||||
int i;
|
||||
|
||||
mutex_lock(&mdev->state_lock);
|
||||
mdev->device_up = false;
|
||||
@ -154,28 +157,34 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
|
||||
|
||||
flush_workqueue(mdev->workqueue);
|
||||
destroy_workqueue(mdev->workqueue);
|
||||
ret = mlx4_mr_free(dev, &mdev->mr);
|
||||
if (ret)
|
||||
mlx4_err(mdev, "Error deregistering MR. The system may have become unstable.");
|
||||
(void) mlx4_mr_free(dev, &mdev->mr);
|
||||
iounmap(mdev->uar_map);
|
||||
mlx4_uar_free(dev, &mdev->priv_uar);
|
||||
mlx4_pd_free(dev, mdev->priv_pdn);
|
||||
kfree(mdev);
|
||||
}
|
||||
|
||||
static void mlx4_en_activate(struct mlx4_dev *dev, void *ctx)
|
||||
{
|
||||
int i;
|
||||
struct mlx4_en_dev *mdev = ctx;
|
||||
|
||||
/* Create a netdev for each port */
|
||||
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
|
||||
mlx4_info(mdev, "Activating port:%d\n", i);
|
||||
if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
|
||||
mdev->pndev[i] = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static void *mlx4_en_add(struct mlx4_dev *dev)
|
||||
{
|
||||
struct mlx4_en_dev *mdev;
|
||||
int i;
|
||||
int err;
|
||||
|
||||
mdev = kzalloc(sizeof *mdev, GFP_KERNEL);
|
||||
if (!mdev) {
|
||||
dev_err(&dev->pdev->dev, "Device struct alloc failed, "
|
||||
"aborting.\n");
|
||||
err = -ENOMEM;
|
||||
mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
|
||||
if (!mdev)
|
||||
goto err_free_res;
|
||||
}
|
||||
|
||||
if (mlx4_pd_alloc(dev, &mdev->priv_pdn))
|
||||
goto err_free_dev;
|
||||
@ -190,8 +199,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
|
||||
spin_lock_init(&mdev->uar_lock);
|
||||
|
||||
mdev->dev = dev;
|
||||
mdev->dma_device = &(dev->pdev->dev);
|
||||
mdev->pdev = dev->pdev;
|
||||
mdev->dma_device = &dev->persist->pdev->dev;
|
||||
mdev->pdev = dev->persist->pdev;
|
||||
mdev->device_up = false;
|
||||
|
||||
mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
|
||||
@ -211,9 +220,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
|
||||
}
|
||||
|
||||
/* Build device profile according to supplied module parameters */
|
||||
err = mlx4_en_get_profile(mdev);
|
||||
if (err) {
|
||||
mlx4_err(mdev, "Bad module parameters, aborting.\n");
|
||||
if (mlx4_en_get_profile(mdev)) {
|
||||
mlx4_err(mdev, "Bad module parameters, aborting\n");
|
||||
goto err_mr;
|
||||
}
|
||||
|
||||
@ -222,50 +230,25 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
|
||||
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
|
||||
mdev->port_cnt++;
|
||||
|
||||
|
||||
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
|
||||
if (!dev->caps.comp_pool) {
|
||||
mdev->profile.prof[i].rx_ring_num =
|
||||
rounddown_pow_of_two(max_t(int, MIN_RX_RINGS,
|
||||
min_t(int,
|
||||
dev->caps.num_comp_vectors,
|
||||
DEF_RX_RINGS)));
|
||||
} else {
|
||||
mdev->profile.prof[i].rx_ring_num = rounddown_pow_of_two(
|
||||
min_t(int, dev->caps.comp_pool /
|
||||
dev->caps.num_ports, MAX_MSIX_P_PORT));
|
||||
}
|
||||
}
|
||||
/* Set default number of RX rings*/
|
||||
mlx4_en_set_num_rx_rings(mdev);
|
||||
|
||||
/* Create our own workqueue for reset/multicast tasks
|
||||
* Note: we cannot use the shared workqueue because of deadlocks caused
|
||||
* by the rtnl lock */
|
||||
mdev->workqueue = create_singlethread_workqueue("mlx4_en");
|
||||
if (!mdev->workqueue) {
|
||||
err = -ENOMEM;
|
||||
if (!mdev->workqueue)
|
||||
goto err_mr;
|
||||
}
|
||||
|
||||
/* At this stage all non-port specific tasks are complete:
|
||||
* mark the card state as up */
|
||||
mutex_init(&mdev->state_lock);
|
||||
mdev->device_up = true;
|
||||
|
||||
/* Setup ports */
|
||||
|
||||
/* Create a netdev for each port */
|
||||
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
|
||||
mlx4_info(mdev, "Activating port:%d\n", i);
|
||||
if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
|
||||
mdev->pndev[i] = NULL;
|
||||
}
|
||||
|
||||
return mdev;
|
||||
|
||||
err_mr:
|
||||
err = mlx4_mr_free(dev, &mdev->mr);
|
||||
if (err)
|
||||
mlx4_err(mdev, "Error deregistering MR. The system may have become unstable.");
|
||||
(void) mlx4_mr_free(dev, &mdev->mr);
|
||||
err_map:
|
||||
if (mdev->uar_map)
|
||||
iounmap(mdev->uar_map);
|
||||
@ -285,45 +268,40 @@ static struct mlx4_interface mlx4_en_interface = {
|
||||
.event = mlx4_en_event,
|
||||
.get_dev = mlx4_en_get_netdev,
|
||||
.protocol = MLX4_PROT_ETH,
|
||||
.activate = mlx4_en_activate,
|
||||
};
|
||||
|
||||
static void mlx4_en_verify_params(void)
|
||||
{
|
||||
if (pfctx > MAX_PFC_TX) {
|
||||
pr_warn("mlx4_en: WARNING: illegal module parameter pfctx 0x%x - "
|
||||
"should be in range 0-0x%x, will be changed to default (0)\n",
|
||||
pfctx, MAX_PFC_TX);
|
||||
pfctx = 0;
|
||||
}
|
||||
if (pfctx > MAX_PFC_TX) {
|
||||
pr_warn("mlx4_en: WARNING: illegal module parameter pfctx 0x%x - should be in range 0-0x%x, will be changed to default (0)\n",
|
||||
pfctx, MAX_PFC_TX);
|
||||
pfctx = 0;
|
||||
}
|
||||
|
||||
if (pfcrx > MAX_PFC_RX) {
|
||||
pr_warn("mlx4_en: WARNING: illegal module parameter pfcrx 0x%x - "
|
||||
"should be in range 0-0x%x, will be changed to default (0)\n",
|
||||
pfcrx, MAX_PFC_RX);
|
||||
pfcrx = 0;
|
||||
}
|
||||
if (pfcrx > MAX_PFC_RX) {
|
||||
pr_warn("mlx4_en: WARNING: illegal module parameter pfcrx 0x%x - should be in range 0-0x%x, will be changed to default (0)\n",
|
||||
pfcrx, MAX_PFC_RX);
|
||||
pfcrx = 0;
|
||||
}
|
||||
|
||||
if (inline_thold < MIN_PKT_LEN || inline_thold > MAX_INLINE) {
|
||||
pr_warn("mlx4_en: WARNING: illegal module parameter inline_thold %d - should be in range %d-%d, will be changed to default (%d)\n",
|
||||
inline_thold, MIN_PKT_LEN, MAX_INLINE, MAX_INLINE);
|
||||
inline_thold = MAX_INLINE;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static int __init mlx4_en_init(void)
|
||||
{
|
||||
mlx4_en_verify_params();
|
||||
mlx4_en_verify_params();
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
int err = 0;
|
||||
err = mlx4_en_register_debugfs();
|
||||
if (err)
|
||||
pr_err(KERN_ERR "Failed to register debugfs\n");
|
||||
#endif
|
||||
return mlx4_register_interface(&mlx4_en_interface);
|
||||
}
|
||||
|
||||
static void __exit mlx4_en_cleanup(void)
|
||||
{
|
||||
mlx4_unregister_interface(&mlx4_en_interface);
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
mlx4_en_unregister_debugfs();
|
||||
#endif
|
||||
}
|
||||
|
||||
module_init(mlx4_en_init);
|
||||
|
@ -34,6 +34,7 @@
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/compat.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif
@ -73,7 +74,7 @@ static int mlx4_en_low_latency_recv(struct napi_struct *napi)

	done = mlx4_en_process_rx_cq(dev, cq, 4);
#ifdef LL_EXTENDED_STATS
	if (done)
	if (likely(done))
		rx_ring->cleaned += done;
	else
		rx_ring->misses++;
@ -118,7 +119,7 @@ static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
	case IPPROTO_TCP:
		return MLX4_NET_TRANS_RULE_ID_TCP;
	default:
		return -EPROTONOSUPPORT;
		return MLX4_NET_TRANS_RULE_NUM;
	}
};
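The return type of mlx4_ip_proto_to_trans_rule_id() is an enum, so the old convention of returning -EPROTONOSUPPORT and testing id < 0 is fragile: the compiler may pick an unsigned underlying type for the enum, and the negative check then never fires. Returning the MLX4_NET_TRANS_RULE_NUM sentinel and testing id >= MLX4_NET_TRANS_RULE_NUM avoids that. A small standalone illustration of the sentinel check (the enum values here are invented for the example):

#include <stdio.h>
#include <netinet/in.h>		/* IPPROTO_UDP, IPPROTO_TCP */

enum trans_rule_id {		/* made-up stand-in for mlx4_net_trans_rule_id */
	RULE_ID_UDP,
	RULE_ID_TCP,
	RULE_ID_NUM,		/* sentinel: "no rule for this protocol" */
};

static enum trans_rule_id ip_proto_to_rule_id(unsigned char ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_UDP:
		return RULE_ID_UDP;
	case IPPROTO_TCP:
		return RULE_ID_TCP;
	default:
		return RULE_ID_NUM;	/* sentinel instead of a negative errno */
	}
}

int main(void)
{
	enum trans_rule_id id = ip_proto_to_rule_id(253);

	/* Compare against the sentinel; a "< 0" test could silently be
	 * impossible if the enum's underlying type is unsigned. */
	if (id >= RULE_ID_NUM)
		puts("unsupported ip protocol, ignoring filter");
	return 0;
}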
@ -165,7 +166,7 @@ static void mlx4_en_filter_work(struct work_struct *work)
|
||||
int rc;
|
||||
__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
|
||||
|
||||
if (spec_tcp_udp.id < 0) {
|
||||
if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
|
||||
en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
|
||||
filter->ip_proto);
|
||||
goto ignore;
|
||||
@ -344,8 +345,7 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
|
||||
return ret;
|
||||
}
|
||||
|
||||
void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
|
||||
struct mlx4_en_rx_ring *rx_ring)
|
||||
void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
|
||||
{
|
||||
struct mlx4_en_filter *filter, *tmp;
|
||||
LIST_HEAD(del_list);
|
||||
@ -450,6 +450,25 @@ static void mlx4_en_vlan_rx_kill_vid(void *arg, struct net_device *dev, u16 vid)
|
||||
|
||||
}
|
||||
|
||||
static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
|
||||
int qpn, u64 *reg_id)
|
||||
{
|
||||
int err;
|
||||
|
||||
if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
|
||||
priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
|
||||
return 0; /* do nothing */
|
||||
|
||||
err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
|
||||
MLX4_DOMAIN_NIC, reg_id);
|
||||
if (err) {
|
||||
en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
|
||||
return err;
|
||||
}
|
||||
en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, (long long)*reg_id);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
|
||||
unsigned char *mac, int *qpn, u64 *reg_id)
|
||||
{
|
||||
@ -533,10 +552,8 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
|
||||
{
|
||||
struct mlx4_en_dev *mdev = priv->mdev;
|
||||
struct mlx4_dev *dev = mdev->dev;
|
||||
struct mlx4_mac_entry *entry;
|
||||
int index = 0;
|
||||
int err = 0;
|
||||
u64 reg_id;
|
||||
int *qpn = &priv->base_qpn;
|
||||
u64 mac = mlx4_mac_to_u64(IF_LLADDR(priv->dev));
|
||||
|
||||
@ -556,39 +573,15 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
|
||||
return 0;
|
||||
}
|
||||
|
||||
err = mlx4_qp_reserve_range(dev, 1, 1, qpn, 0);
|
||||
err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP);
|
||||
en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
|
||||
if (err) {
|
||||
en_err(priv, "Failed to reserve qp for mac registration\n");
|
||||
goto qp_err;
|
||||
mlx4_unregister_mac(dev, priv->port, mac);
|
||||
return err;
|
||||
}
|
||||
|
||||
err = mlx4_en_uc_steer_add(priv, IF_LLADDR(priv->dev), qpn, ®_id);
|
||||
if (err)
|
||||
goto steer_err;
|
||||
|
||||
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
|
||||
if (!entry) {
|
||||
err = -ENOMEM;
|
||||
goto alloc_err;
|
||||
}
|
||||
memcpy(entry->mac, IF_LLADDR(priv->dev), sizeof(entry->mac));
|
||||
entry->reg_id = reg_id;
|
||||
|
||||
hlist_add_head(&entry->hlist,
|
||||
&priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);
|
||||
|
||||
return 0;
|
||||
|
||||
alloc_err:
|
||||
mlx4_en_uc_steer_release(priv, IF_LLADDR(priv->dev), *qpn, reg_id);
|
||||
|
||||
steer_err:
|
||||
mlx4_qp_release_range(dev, *qpn, 1);
|
||||
|
||||
qp_err:
|
||||
mlx4_unregister_mac(dev, priv->port, mac);
|
||||
return err;
|
||||
}
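mlx4_en_get_qp() unwinds in strict reverse order of acquisition when something fails: the unicast steering rule is released, then the reserved QP range, and finally the MAC registration. The same layered goto-unwind pattern as a self-contained sketch (the resource names here are invented, not the driver's helpers):

#include <stdio.h>
#include <stdlib.h>

/* Invented stand-ins for MAC registration, QP reservation and steering. */
static int register_mac(void)   { puts("register mac");   return 0; }
static int reserve_qp(void)     { puts("reserve qp");      return 0; }
static int add_steer_rule(void) { puts("add steer rule");  return -1; } /* force failure */
static void release_qp(void)     { puts("release qp"); }
static void unregister_mac(void) { puts("unregister mac"); }

static int setup(void)
{
	int err;

	err = register_mac();
	if (err)
		return err;
	err = reserve_qp();
	if (err)
		goto mac_err;
	err = add_steer_rule();
	if (err)
		goto qp_err;
	return 0;

	/* Unwind in the reverse order of acquisition. */
qp_err:
	release_qp();
mac_err:
	unregister_mac();
	return err;
}

int main(void)
{
	return setup() ? EXIT_FAILURE : EXIT_SUCCESS;
}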
static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
|
||||
@ -596,34 +589,13 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
|
||||
struct mlx4_en_dev *mdev = priv->mdev;
|
||||
struct mlx4_dev *dev = mdev->dev;
|
||||
int qpn = priv->base_qpn;
|
||||
u64 mac;
|
||||
|
||||
if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
|
||||
mac = mlx4_mac_to_u64(IF_LLADDR(priv->dev));
|
||||
u64 mac = mlx4_mac_to_u64(IF_LLADDR(priv->dev));
|
||||
en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
|
||||
IF_LLADDR(priv->dev));
|
||||
mlx4_unregister_mac(dev, priv->port, mac);
|
||||
} else {
|
||||
struct mlx4_mac_entry *entry;
|
||||
struct hlist_node *tmp;
|
||||
struct hlist_head *bucket;
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
|
||||
bucket = &priv->mac_hash[i];
|
||||
hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
|
||||
mac = mlx4_mac_to_u64(entry->mac);
|
||||
en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
|
||||
entry->mac);
|
||||
mlx4_en_uc_steer_release(priv, entry->mac,
|
||||
qpn, entry->reg_id);
|
||||
|
||||
mlx4_unregister_mac(dev, priv->port, mac);
|
||||
hlist_del(&entry->hlist);
|
||||
kfree(entry);
|
||||
}
|
||||
}
|
||||
|
||||
en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
|
||||
priv->port, qpn);
|
||||
mlx4_qp_release_range(dev, qpn, 1);
|
||||
@ -631,10 +603,48 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
|
||||
}
|
||||
}
|
||||
|
||||
static void mlx4_en_clear_list(struct net_device *dev)
|
||||
static void mlx4_en_clear_uclist(struct net_device *dev)
|
||||
{
|
||||
struct mlx4_en_priv *priv = netdev_priv(dev);
|
||||
struct mlx4_en_mc_list *tmp, *mc_to_del;
|
||||
struct mlx4_en_addr_list *tmp, *uc_to_del;
|
||||
|
||||
list_for_each_entry_safe(uc_to_del, tmp, &priv->uc_list, list) {
|
||||
list_del(&uc_to_del->list);
|
||||
kfree(uc_to_del);
|
||||
}
|
||||
}
|
||||
|
||||
static void mlx4_en_cache_uclist(struct net_device *dev)
|
||||
{
|
||||
struct mlx4_en_priv *priv = netdev_priv(dev);
|
||||
struct mlx4_en_addr_list *tmp;
|
||||
struct ifaddr *ifa;
|
||||
|
||||
mlx4_en_clear_uclist(dev);
|
||||
|
||||
if_addr_rlock(dev);
|
||||
TAILQ_FOREACH(ifa, &dev->if_addrhead, ifa_link) {
|
||||
if (ifa->ifa_addr->sa_family != AF_LINK)
|
||||
continue;
|
||||
if (((struct sockaddr_dl *)ifa->ifa_addr)->sdl_alen !=
|
||||
ETHER_ADDR_LEN)
|
||||
continue;
|
||||
tmp = kzalloc(sizeof(struct mlx4_en_addr_list), GFP_ATOMIC);
|
||||
if (tmp == NULL) {
|
||||
en_err(priv, "Failed to allocate address list\n");
|
||||
break;
|
||||
}
|
||||
memcpy(tmp->addr,
|
||||
LLADDR((struct sockaddr_dl *)ifa->ifa_addr), ETH_ALEN);
|
||||
list_add_tail(&tmp->list, &priv->uc_list);
|
||||
}
|
||||
if_addr_runlock(dev);
|
||||
}
|
||||
|
||||
static void mlx4_en_clear_mclist(struct net_device *dev)
|
||||
{
|
||||
struct mlx4_en_priv *priv = netdev_priv(dev);
|
||||
struct mlx4_en_addr_list *tmp, *mc_to_del;
|
||||
|
||||
list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
|
||||
list_del(&mc_to_del->list);
|
||||
@ -644,35 +654,36 @@ static void mlx4_en_clear_list(struct net_device *dev)
|
||||
|
||||
static void mlx4_en_cache_mclist(struct net_device *dev)
|
||||
{
|
||||
struct ifmultiaddr *ifma;
|
||||
struct mlx4_en_mc_list *tmp;
|
||||
struct mlx4_en_priv *priv = netdev_priv(dev);
|
||||
struct mlx4_en_addr_list *tmp;
|
||||
struct ifmultiaddr *ifma;
|
||||
|
||||
if_maddr_rlock(dev);
|
||||
TAILQ_FOREACH(ifma, &dev->if_multiaddrs, ifma_link) {
|
||||
if (ifma->ifma_addr->sa_family != AF_LINK)
|
||||
continue;
|
||||
if (((struct sockaddr_dl *)ifma->ifma_addr)->sdl_alen !=
|
||||
ETHER_ADDR_LEN)
|
||||
continue;
|
||||
/* Make sure the list didn't grow. */
|
||||
tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
|
||||
mlx4_en_clear_mclist(dev);
|
||||
|
||||
if_maddr_rlock(dev);
|
||||
TAILQ_FOREACH(ifma, &dev->if_multiaddrs, ifma_link) {
|
||||
if (ifma->ifma_addr->sa_family != AF_LINK)
|
||||
continue;
|
||||
if (((struct sockaddr_dl *)ifma->ifma_addr)->sdl_alen !=
|
||||
ETHER_ADDR_LEN)
|
||||
continue;
|
||||
tmp = kzalloc(sizeof(struct mlx4_en_addr_list), GFP_ATOMIC);
|
||||
if (tmp == NULL) {
|
||||
en_err(priv, "Failed to allocate multicast list\n");
|
||||
en_err(priv, "Failed to allocate address list\n");
|
||||
break;
|
||||
}
|
||||
memcpy(tmp->addr,
|
||||
LLADDR((struct sockaddr_dl *)ifma->ifma_addr), ETH_ALEN);
|
||||
list_add_tail(&tmp->list, &priv->mc_list);
|
||||
}
|
||||
if_maddr_runlock(dev);
|
||||
}
|
||||
if_maddr_runlock(dev);
|
||||
}
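mlx4_en_cache_uclist() and mlx4_en_cache_mclist() copy the ifnet's address tables into private lists while holding the address read lock, so the later firmware commands (which hold the command semaphore and can sleep) run against a stable snapshot without the lock held. A rough userspace analogue of the snapshot step, with a pthread rwlock standing in for if_addr_rlock():

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN  6
#define MAX_ADDRS 8

/* Shared table, normally mutated by other threads under the same lock. */
static pthread_rwlock_t addr_lock = PTHREAD_RWLOCK_INITIALIZER;
static unsigned char if_addrs[MAX_ADDRS][ETH_ALEN] = {
	{ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
	{ 0x00, 0x11, 0x22, 0x33, 0x44, 0x66 },
};
static int if_addr_count = 2;

int main(void)
{
	unsigned char snapshot[MAX_ADDRS][ETH_ALEN];
	int i, n;

	/* Take the snapshot under the read lock... */
	pthread_rwlock_rdlock(&addr_lock);
	n = if_addr_count;
	memcpy(snapshot, if_addrs, sizeof(if_addrs));
	pthread_rwlock_unlock(&addr_lock);

	/* ...then do the slow "program the hardware" part without the lock. */
	for (i = 0; i < n; i++)
		printf("program filter %02x:%02x:%02x:%02x:%02x:%02x\n",
		    snapshot[i][0], snapshot[i][1], snapshot[i][2],
		    snapshot[i][3], snapshot[i][4], snapshot[i][5]);
	return 0;
}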
static void update_mclist_flags(struct mlx4_en_priv *priv,
static void update_addr_list_flags(struct mlx4_en_priv *priv,
				   struct list_head *dst,
				   struct list_head *src)
{
	struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
	struct mlx4_en_addr_list *dst_tmp, *src_tmp, *new_mc;
	bool found;

	/* Find all the entries that should be removed from dst,
@ -687,7 +698,7 @@ static void update_mclist_flags(struct mlx4_en_priv *priv,
		}
	}
	if (!found)
		dst_tmp->action = MCLIST_REM;
		dst_tmp->action = MLX4_ADDR_LIST_REM;
	}

	/* Add entries that exist in src but not in dst
@ -697,21 +708,21 @@ static void update_mclist_flags(struct mlx4_en_priv *priv,
		found = false;
		list_for_each_entry(dst_tmp, dst, list) {
			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
				dst_tmp->action = MCLIST_NONE;
				dst_tmp->action = MLX4_ADDR_LIST_NONE;
				found = true;
				break;
			}
		}
		if (!found) {
			new_mc = kmalloc(sizeof(struct mlx4_en_mc_list),
			new_mc = kmalloc(sizeof(struct mlx4_en_addr_list),
					 GFP_KERNEL);
			if (!new_mc) {
				en_err(priv, "Failed to allocate current multicast list\n");
				return;
			}
			memcpy(new_mc, src_tmp,
			       sizeof(struct mlx4_en_mc_list));
			new_mc->action = MCLIST_ADD;
			       sizeof(struct mlx4_en_addr_list));
			new_mc->action = MLX4_ADDR_LIST_ADD;
			list_add_tail(&new_mc->list, dst);
		}
	}
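update_addr_list_flags() reconciles the cached list against the freshly snapshotted one: entries present in dst but missing from src are tagged MLX4_ADDR_LIST_REM, entries present in both become MLX4_ADDR_LIST_NONE, and entries only in src are copied into dst tagged MLX4_ADDR_LIST_ADD, so the caller can then detach and attach exactly the deltas. A compact standalone version of the same diffing logic over plain arrays (names and sizes are illustrative only):

#include <stdio.h>
#include <string.h>

enum action { LIST_NONE, LIST_ADD, LIST_REM };

struct addr {
	char mac[18];
	enum action action;
};

/* Mark removals and no-ops in dst, then append additions found only in src. */
static int reconcile(struct addr *dst, int ndst, const struct addr *src, int nsrc)
{
	int i, j, found;

	for (i = 0; i < ndst; i++) {
		found = 0;
		for (j = 0; j < nsrc; j++)
			if (strcmp(dst[i].mac, src[j].mac) == 0)
				found = 1;
		dst[i].action = found ? LIST_NONE : LIST_REM;
	}
	for (j = 0; j < nsrc; j++) {
		found = 0;
		for (i = 0; i < ndst; i++)
			if (strcmp(dst[i].mac, src[j].mac) == 0)
				found = 1;
		if (!found) {
			strcpy(dst[ndst].mac, src[j].mac);
			dst[ndst].action = LIST_ADD;
			ndst++;
		}
	}
	return ndst;
}

int main(void)
{
	struct addr curr[8] = { { "00:11:22:33:44:55" }, { "00:11:22:33:44:66" } };
	struct addr want[8] = { { "00:11:22:33:44:66" }, { "00:11:22:33:44:77" } };
	int i, n = reconcile(curr, 2, want, 2);

	for (i = 0; i < n; i++)
		printf("%s -> %s\n", curr[i].mac,
		    curr[i].action == LIST_ADD ? "attach" :
		    curr[i].action == LIST_REM ? "detach" : "keep");
	return 0;
}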
@ -731,6 +742,7 @@ static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
|
||||
struct mlx4_en_dev *mdev)
|
||||
{
|
||||
int err = 0;
|
||||
|
||||
if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
|
||||
priv->flags |= MLX4_EN_FLAG_PROMISC;
|
||||
|
||||
@ -833,7 +845,7 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
|
||||
struct net_device *dev,
|
||||
struct mlx4_en_dev *mdev)
|
||||
{
|
||||
struct mlx4_en_mc_list *mclist, *tmp;
|
||||
struct mlx4_en_addr_list *addr_list, *tmp;
|
||||
u8 mc_list[16] = {0};
|
||||
int err = 0;
|
||||
u64 mcast_addr = 0;
|
||||
@ -893,6 +905,28 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
|
||||
priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
|
||||
}
|
||||
|
||||
/* Update unicast list */
|
||||
mlx4_en_cache_uclist(dev);
|
||||
|
||||
update_addr_list_flags(priv, &priv->curr_uc_list, &priv->uc_list);
|
||||
|
||||
list_for_each_entry_safe(addr_list, tmp, &priv->curr_uc_list, list) {
|
||||
if (addr_list->action == MLX4_ADDR_LIST_REM) {
|
||||
mlx4_en_uc_steer_release(priv, addr_list->addr,
|
||||
priv->rss_map.indir_qp.qpn,
|
||||
addr_list->reg_id);
|
||||
/* remove from list */
|
||||
list_del(&addr_list->list);
|
||||
kfree(addr_list);
|
||||
} else if (addr_list->action == MLX4_ADDR_LIST_ADD) {
|
||||
err = mlx4_en_uc_steer_add(priv, addr_list->addr,
|
||||
&priv->rss_map.indir_qp.qpn,
|
||||
&addr_list->reg_id);
|
||||
if (err)
|
||||
en_err(priv, "Fail to add unicast address\n");
|
||||
}
|
||||
}
|
||||
|
||||
err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
|
||||
0, MLX4_MCAST_DISABLE);
|
||||
if (err)
|
||||
@ -905,8 +939,8 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
|
||||
/* Update multicast list - we cache all addresses so they won't
|
||||
* change while HW is updated holding the command semaphor */
|
||||
mlx4_en_cache_mclist(dev);
|
||||
list_for_each_entry(mclist, &priv->mc_list, list) {
|
||||
mcast_addr = mlx4_mac_to_u64(mclist->addr);
|
||||
list_for_each_entry(addr_list, &priv->mc_list, list) {
|
||||
mcast_addr = mlx4_mac_to_u64(addr_list->addr);
|
||||
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
|
||||
mcast_addr, 0, MLX4_MCAST_CONFIG);
|
||||
}
|
||||
@ -915,26 +949,33 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
|
||||
if (err)
|
||||
en_err(priv, "Failed enabling multicast filter\n");
|
||||
|
||||
update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
|
||||
list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
|
||||
if (mclist->action == MCLIST_REM) {
|
||||
update_addr_list_flags(priv, &priv->curr_mc_list, &priv->mc_list);
|
||||
|
||||
list_for_each_entry_safe(addr_list, tmp, &priv->curr_mc_list, list) {
|
||||
if (addr_list->action == MLX4_ADDR_LIST_REM) {
|
||||
/* detach this address and delete from list */
|
||||
memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
|
||||
memcpy(&mc_list[10], addr_list->addr, ETH_ALEN);
|
||||
mc_list[5] = priv->port;
|
||||
err = mlx4_multicast_detach(mdev->dev,
|
||||
&priv->rss_map.indir_qp,
|
||||
mc_list,
|
||||
MLX4_PROT_ETH,
|
||||
mclist->reg_id);
|
||||
addr_list->reg_id);
|
||||
if (err)
|
||||
en_err(priv, "Fail to detach multicast address\n");
|
||||
|
||||
if (addr_list->tunnel_reg_id) {
|
||||
err = mlx4_flow_detach(priv->mdev->dev, addr_list->tunnel_reg_id);
|
||||
if (err)
|
||||
en_err(priv, "Failed to detach multicast address\n");
|
||||
}
|
||||
|
||||
/* remove from list */
|
||||
list_del(&mclist->list);
|
||||
kfree(mclist);
|
||||
} else if (mclist->action == MCLIST_ADD) {
|
||||
list_del(&addr_list->list);
|
||||
kfree(addr_list);
|
||||
} else if (addr_list->action == MLX4_ADDR_LIST_ADD) {
|
||||
/* attach the address */
|
||||
memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
|
||||
memcpy(&mc_list[10], addr_list->addr, ETH_ALEN);
|
||||
/* needed for B0 steering support */
|
||||
mc_list[5] = priv->port;
|
||||
err = mlx4_multicast_attach(mdev->dev,
|
||||
@ -942,10 +983,14 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
|
||||
mc_list,
|
||||
priv->port, 0,
|
||||
MLX4_PROT_ETH,
|
||||
&mclist->reg_id);
|
||||
&addr_list->reg_id);
|
||||
if (err)
|
||||
en_err(priv, "Fail to attach multicast address\n");
|
||||
|
||||
err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
|
||||
&addr_list->tunnel_reg_id);
|
||||
if (err)
|
||||
en_err(priv, "Failed to attach multicast address\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -958,7 +1003,6 @@ static void mlx4_en_do_set_rx_mode(struct work_struct *work)
|
||||
struct mlx4_en_dev *mdev = priv->mdev;
|
||||
struct net_device *dev = priv->dev;
|
||||
|
||||
|
||||
mutex_lock(&mdev->state_lock);
|
||||
if (!mdev->device_up) {
|
||||
en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
|
||||
@ -998,24 +1042,6 @@ static void mlx4_en_do_set_rx_mode(struct work_struct *work)
|
||||
mutex_unlock(&mdev->state_lock);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_NET_POLL_CONTROLLER
|
||||
static void mlx4_en_netpoll(struct net_device *dev)
|
||||
{
|
||||
struct mlx4_en_priv *priv = netdev_priv(dev);
|
||||
struct mlx4_en_cq *cq;
|
||||
unsigned long flags;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < priv->rx_ring_num; i++) {
|
||||
cq = priv->rx_cq[i];
|
||||
spin_lock_irqsave(&cq->lock, flags);
|
||||
napi_synchronize(&cq->napi);
|
||||
mlx4_en_process_rx_cq(dev, cq, 0);
|
||||
spin_unlock_irqrestore(&cq->lock, flags);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
static void mlx4_en_watchdog_timeout(void *arg)
|
||||
{
|
||||
struct mlx4_en_priv *priv = arg;
|
||||
@ -1038,10 +1064,10 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
|
||||
/* If we haven't received a specific coalescing setting
|
||||
* (module param), we set the moderation parameters as follows:
|
||||
* - moder_cnt is set to the number of mtu sized packets to
|
||||
* satisfy our coelsing target.
|
||||
* satisfy our coalescing target.
|
||||
* - moder_time is set to a fixed value.
|
||||
*/
|
||||
priv->rx_frames = MLX4_EN_RX_COAL_TARGET / priv->dev->if_mtu + 1;
|
||||
priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
|
||||
priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
|
||||
priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
|
||||
priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
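The default RX moderation no longer derives the frame count from the MTU; rx_frames is now simply MLX4_EN_RX_COAL_TARGET and rx_usecs stays MLX4_EN_RX_COAL_TIME. For reference, the removed heuristic computed how many MTU-sized packets fit a coalescing byte budget; the arithmetic, with made-up constants (the real MLX4_EN_RX_COAL_* values may differ):

#include <stdio.h>

int main(void)
{
	unsigned int coal_target_bytes = 0x20000;	/* hypothetical byte budget */
	unsigned int mtu = 1500;

	/* Old default: packets per interrupt derived from the MTU. */
	unsigned int rx_frames_old = coal_target_bytes / mtu + 1;
	/* New default: a fixed frame count, independent of the MTU. */
	unsigned int rx_frames_new = 44;		/* placeholder constant */

	printf("old=%u new=%u\n", rx_frames_old, rx_frames_new);
	return 0;
}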
@ -1126,6 +1152,7 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
|
||||
priv->last_moder_time[ring] = moder_time;
|
||||
cq = priv->rx_cq[ring];
|
||||
cq->moder_time = moder_time;
|
||||
cq->moder_cnt = priv->rx_frames;
|
||||
err = mlx4_en_set_cq_moder(priv, cq);
|
||||
if (err)
|
||||
en_err(priv, "Failed modifying moderation for cq:%d\n",
|
||||
@ -1149,7 +1176,10 @@ static void mlx4_en_do_get_stats(struct work_struct *work)
|
||||
mutex_lock(&mdev->state_lock);
|
||||
if (mdev->device_up) {
|
||||
if (priv->port_up) {
|
||||
err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
|
||||
if (mlx4_is_slave(mdev->dev))
|
||||
err = mlx4_en_get_vport_stats(mdev, priv->port);
|
||||
else
|
||||
err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
|
||||
if (err)
|
||||
en_dbg(HW, priv, "Could not update stats\n");
|
||||
|
||||
@ -1236,7 +1266,9 @@ int mlx4_en_start_port(struct net_device *dev)
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&priv->mc_list);
|
||||
INIT_LIST_HEAD(&priv->curr_list);
|
||||
INIT_LIST_HEAD(&priv->uc_list);
|
||||
INIT_LIST_HEAD(&priv->curr_mc_list);
|
||||
INIT_LIST_HEAD(&priv->curr_uc_list);
|
||||
INIT_LIST_HEAD(&priv->ethtool_list);
|
||||
|
||||
/* Calculate Rx buf size */
|
||||
@ -1281,12 +1313,8 @@ int mlx4_en_start_port(struct net_device *dev)
|
||||
}
|
||||
mdev->mac_removed[priv->port] = 0;
|
||||
|
||||
/* gets default allocated counter index from func cap */
|
||||
/* or sink counter index if no resources */
|
||||
priv->counter_index = mdev->dev->caps.def_counter_index[priv->port - 1];
|
||||
|
||||
en_dbg(DRV, priv, "%s: default counter index %d for port %d\n",
|
||||
__func__, priv->counter_index, priv->port);
|
||||
priv->counter_index =
|
||||
mlx4_get_default_counter_index(mdev->dev, priv->port);
|
||||
|
||||
err = mlx4_en_config_rss_steer(priv);
|
||||
if (err) {
|
||||
@ -1332,7 +1360,7 @@ int mlx4_en_start_port(struct net_device *dev)
|
||||
|
||||
/* Set initial ownership of all Tx TXBBs to SW (1) */
|
||||
for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
|
||||
*((u32 *) (tx_ring->buf + j)) = 0xffffffff;
|
||||
*((u32 *) (tx_ring->buf + j)) = INIT_OWNER_BIT;
|
||||
++tx_index;
|
||||
}
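TX ring initialization stamps an ownership word at every STAMP_STRIDE bytes so that all TXBBs start out owned by software; only the constant changed here (INIT_OWNER_BIT instead of the literal 0xffffffff). A standalone sketch of striding a buffer with an ownership marker, with arbitrary buffer size, stride and marker value:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BUF_SIZE     512
#define STAMP_STRIDE 64			/* illustrative stride */
#define OWNER_BIT    0xffffffffU	/* illustrative "owned by SW" marker */

int main(void)
{
	uint8_t buf[BUF_SIZE];
	uint32_t j, owner, owner_val = OWNER_BIT;

	memset(buf, 0, sizeof(buf));

	/* Stamp one 32-bit owner word at the start of every stride. */
	for (j = 0; j < BUF_SIZE; j += STAMP_STRIDE)
		memcpy(buf + j, &owner_val, sizeof(owner_val));

	/* Verify the first stamp. */
	memcpy(&owner, buf, sizeof(owner));
	printf("owner word at offset 0: 0x%08x\n", owner);
	return 0;
}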
@ -1415,7 +1443,7 @@ void mlx4_en_stop_port(struct net_device *dev)
|
||||
{
|
||||
struct mlx4_en_priv *priv = netdev_priv(dev);
|
||||
struct mlx4_en_dev *mdev = priv->mdev;
|
||||
struct mlx4_en_mc_list *mclist, *tmp;
|
||||
struct mlx4_en_addr_list *addr_list, *tmp;
|
||||
int i;
|
||||
u8 mc_list[16] = {0};
|
||||
|
||||
@ -1433,10 +1461,7 @@ void mlx4_en_stop_port(struct net_device *dev)
|
||||
|
||||
/* Set port as not active */
|
||||
priv->port_up = false;
|
||||
if (priv->counter_index != 0xff) {
|
||||
mlx4_counter_free(mdev->dev, priv->port, priv->counter_index);
|
||||
priv->counter_index = 0xff;
|
||||
}
|
||||
priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
|
||||
|
||||
/* Promsicuous mode */
|
||||
if (mdev->dev->caps.steering_mode ==
|
||||
@ -1464,21 +1489,33 @@ void mlx4_en_stop_port(struct net_device *dev)
|
||||
}
|
||||
}
|
||||
|
||||
/* Detach All unicasts */
|
||||
list_for_each_entry(addr_list, &priv->curr_uc_list, list) {
|
||||
mlx4_en_uc_steer_release(priv, addr_list->addr,
|
||||
priv->rss_map.indir_qp.qpn,
|
||||
addr_list->reg_id);
|
||||
}
|
||||
mlx4_en_clear_uclist(dev);
|
||||
list_for_each_entry_safe(addr_list, tmp, &priv->curr_uc_list, list) {
|
||||
list_del(&addr_list->list);
|
||||
kfree(addr_list);
|
||||
}
|
||||
|
||||
/* Detach All multicasts */
|
||||
memset(&mc_list[10], 0xff, ETH_ALEN);
|
||||
mc_list[5] = priv->port; /* needed for B0 steering support */
|
||||
mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
|
||||
MLX4_PROT_ETH, priv->broadcast_id);
|
||||
list_for_each_entry(mclist, &priv->curr_list, list) {
|
||||
memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
|
||||
list_for_each_entry(addr_list, &priv->curr_mc_list, list) {
|
||||
memcpy(&mc_list[10], addr_list->addr, ETH_ALEN);
|
||||
mc_list[5] = priv->port;
|
||||
mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
|
||||
mc_list, MLX4_PROT_ETH, mclist->reg_id);
|
||||
mc_list, MLX4_PROT_ETH, addr_list->reg_id);
|
||||
}
|
||||
mlx4_en_clear_list(dev);
|
||||
list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
|
||||
list_del(&mclist->list);
|
||||
kfree(mclist);
|
||||
mlx4_en_clear_mclist(dev);
|
||||
list_for_each_entry_safe(addr_list, tmp, &priv->curr_mc_list, list) {
|
||||
list_del(&addr_list->list);
|
||||
kfree(addr_list);
|
||||
}
|
||||
|
||||
/* Flush multicast filter */
|
||||
@ -1716,10 +1753,16 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
|
||||
|
||||
en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
|
||||
|
||||
if (priv->vlan_attach != NULL)
|
||||
EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach);
|
||||
if (priv->vlan_detach != NULL)
|
||||
EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach);
|
||||
/* don't allow more IOCTLs */
|
||||
priv->gone = 1;
|
||||
|
||||
/* XXX wait a bit to allow IOCTL handlers to complete */
|
||||
pause("W", hz);
|
||||
|
||||
if (priv->vlan_attach != NULL)
|
||||
EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach);
|
||||
if (priv->vlan_detach != NULL)
|
||||
EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach);
|
||||
|
||||
/* Unregister device - this will close the port if it was up */
|
||||
if (priv->registered) {
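Detach now sets priv->gone before anything is torn down and then waits roughly a second (pause("W", hz)) so ioctl handlers already past the check can drain; the ioctl entry point itself bails out with ENXIO once gone is set. A simplified userspace model of that handshake (names and the fixed grace period are illustrative only):

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int gone;		/* stands in for priv->gone */

static int fake_ioctl(void)
{
	/* New ioctls are refused as soon as the device is being detached. */
	if (atomic_load(&gone))
		return ENXIO;
	/* ... normal ioctl work would happen here ... */
	return 0;
}

int main(void)
{
	printf("ioctl before detach: %d\n", fake_ioctl());

	atomic_store(&gone, 1);	/* "don't allow more IOCTLs" */
	sleep(1);		/* grace period, like pause("W", hz) */

	printf("ioctl after detach: %d\n", fake_ioctl());
	return 0;
}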
@ -1805,9 +1848,12 @@ static int mlx4_en_calc_media(struct mlx4_en_priv *priv)
|
||||
if (priv->last_link_state == MLX4_DEV_EVENT_PORT_DOWN)
|
||||
return (active);
|
||||
active |= IFM_FDX;
|
||||
trans_type = priv->port_state.transciver;
|
||||
trans_type = priv->port_state.transceiver;
|
||||
/* XXX I don't know all of the transceiver values. */
|
||||
switch (priv->port_state.link_speed) {
|
||||
case 100:
|
||||
active |= IFM_100_T;
|
||||
break;
|
||||
case 1000:
|
||||
active |= IFM_1000_T;
|
||||
break;
|
||||
@ -1904,10 +1950,15 @@ static int mlx4_en_ioctl(struct ifnet *dev, u_long command, caddr_t data)
|
||||
error = 0;
|
||||
mask = 0;
|
||||
priv = dev->if_softc;
|
||||
|
||||
/* check if detaching */
|
||||
if (priv == NULL || priv->gone != 0)
|
||||
return (ENXIO);
|
||||
|
||||
mdev = priv->mdev;
|
||||
ifr = (struct ifreq *) data;
|
||||
switch (command) {
|
||||
|
||||
switch (command) {
|
||||
case SIOCSIFMTU:
|
||||
error = -mlx4_en_change_mtu(dev, ifr->ifr_mtu);
|
||||
break;
|
||||
@ -2156,9 +2207,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
|
||||
}
|
||||
#endif
|
||||
|
||||
for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
|
||||
INIT_HLIST_HEAD(&priv->mac_hash[i]);
|
||||
|
||||
/* Query for default mac and max mtu */
|
||||
priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
|
||||
priv->mac = mdev->dev->caps.def_mac[priv->port];
|
||||
@ -2602,7 +2650,6 @@ static void mlx4_en_sysctl_conf(struct mlx4_en_priv *priv)
|
||||
struct sysctl_oid *coal;
|
||||
struct sysctl_oid_list *coal_list;
|
||||
const char *pnameunit;
|
||||
|
||||
dev = priv->dev;
|
||||
ctx = &priv->conf_ctx;
|
||||
pnameunit = device_get_nameunit(priv->mdev->pdev->dev.bsddev);
|
||||
@ -2641,7 +2688,6 @@ static void mlx4_en_sysctl_conf(struct mlx4_en_priv *priv)
|
||||
SYSCTL_ADD_STRING(ctx, node_list, OID_AUTO, "device_name",
|
||||
CTLFLAG_RD, __DECONST(void *, pnameunit), 0,
|
||||
"PCI device name");
|
||||
|
||||
/* Add coalescer configuration. */
|
||||
coal = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO,
|
||||
"coalesce", CTLFLAG_RD, NULL, "Interrupt coalesce configuration");
|
||||
@ -2700,124 +2746,123 @@ static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv)
|
||||
&priv->pstats.rx_coal_avg, "RX average coalesced completions");
|
||||
#endif
|
||||
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tso_packets", CTLFLAG_RD,
|
||||
&priv->port_stats.tso_packets, "TSO packets sent");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "queue_stopped", CTLFLAG_RD,
|
||||
&priv->port_stats.queue_stopped, "Queue full");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "wake_queue", CTLFLAG_RD,
|
||||
&priv->port_stats.wake_queue, "Queue resumed after full");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_timeout", CTLFLAG_RD,
|
||||
&priv->port_stats.tx_timeout, "Transmit timeouts");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_oversized_packets", CTLFLAG_RD,
|
||||
&priv->port_stats.oversized_packets, "TX oversized packets, m_defrag failed");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_alloc_failed", CTLFLAG_RD,
|
||||
&priv->port_stats.rx_alloc_failed, "RX failed to allocate mbuf");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_chksum_good", CTLFLAG_RD,
|
||||
&priv->port_stats.rx_chksum_good, "RX checksum offload success");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_chksum_none", CTLFLAG_RD,
|
||||
&priv->port_stats.rx_chksum_none, "RX without checksum offload");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_chksum_offload",
|
||||
CTLFLAG_RD, &priv->port_stats.tx_chksum_offload,
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tso_packets", CTLFLAG_RD,
|
||||
&priv->port_stats.tso_packets, 0, "TSO packets sent");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "queue_stopped", CTLFLAG_RD,
|
||||
&priv->port_stats.queue_stopped, 0, "Queue full");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "wake_queue", CTLFLAG_RD,
|
||||
&priv->port_stats.wake_queue, 0, "Queue resumed after full");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_timeout", CTLFLAG_RD,
|
||||
&priv->port_stats.tx_timeout, 0, "Transmit timeouts");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_oversized_packets", CTLFLAG_RD,
|
||||
&priv->port_stats.oversized_packets, 0, "TX oversized packets, m_defrag failed");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_alloc_failed", CTLFLAG_RD,
|
||||
&priv->port_stats.rx_alloc_failed, 0, "RX failed to allocate mbuf");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_chksum_good", CTLFLAG_RD,
|
||||
&priv->port_stats.rx_chksum_good, 0, "RX checksum offload success");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_chksum_none", CTLFLAG_RD,
|
||||
&priv->port_stats.rx_chksum_none, 0, "RX without checksum offload");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_chksum_offload",
|
||||
CTLFLAG_RD, &priv->port_stats.tx_chksum_offload, 0,
|
||||
"TX checksum offloads");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "defrag_attempts", CTLFLAG_RD,
|
||||
&priv->port_stats.defrag_attempts, "Oversized chains defragged");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "defrag_attempts",
|
||||
CTLFLAG_RD, &priv->port_stats.defrag_attempts, 0,
|
||||
"Oversized chains defragged");
|
||||
|
||||
/* Could strdup the names and add in a loop. This is simpler. */
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_bytes", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_bytes, "RX Bytes");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_packets, "RX packets");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_multicast_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_multicast_packets, "RX Multicast Packets");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_broadcast_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_broadcast_packets, "RX Broadcast Packets");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_errors", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_errors, "RX Errors");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_dropped", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_dropped, "RX Dropped");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_length_errors", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_length_errors, "RX Length Errors");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_over_errors", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_over_errors, "RX Over Errors");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_crc_errors", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_crc_errors, "RX CRC Errors");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_jabbers", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_jabbers, "RX Jabbers");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_bytes", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_bytes, 0, "RX Bytes");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_packets, 0, "RX packets");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_multicast_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_multicast_packets, 0, "RX Multicast Packets");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_broadcast_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_broadcast_packets, 0, "RX Broadcast Packets");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_errors", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_errors, 0, "RX Errors");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_dropped", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_dropped, 0, "RX Dropped");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_length_errors", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_length_errors, 0, "RX Length Errors");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_over_errors", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_over_errors, 0, "RX Over Errors");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_crc_errors", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_crc_errors, 0, "RX CRC Errors");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_jabbers", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_jabbers, 0, "RX Jabbers");
|
||||
|
||||
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_in_range_length_error", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_in_range_length_error, "RX IN_Range Length Error");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_out_range_length_error",
|
||||
CTLFLAG_RD, &priv->pkstats.rx_out_range_length_error,
|
||||
"RX Out Range Length Error");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_lt_64_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_lt_64_bytes_packets, "RX Lt 64 Bytes Packets");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_127_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_127_bytes_packets, "RX 127 bytes Packets");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_255_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_255_bytes_packets, "RX 255 bytes Packets");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_511_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_511_bytes_packets, "RX 511 bytes Packets");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_1023_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_1023_bytes_packets, "RX 1023 bytes Packets");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_1518_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_1518_bytes_packets, "RX 1518 bytes Packets");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_1522_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_1522_bytes_packets, "RX 1522 bytes Packets");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_1548_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_1548_bytes_packets, "RX 1548 bytes Packets");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_gt_1548_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_gt_1548_bytes_packets,
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_in_range_length_error", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_in_range_length_error, 0, "RX IN_Range Length Error");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_out_range_length_error",
|
||||
CTLFLAG_RD, &priv->pkstats.rx_out_range_length_error, 0,
|
||||
"RX Out Range Length Error");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_lt_64_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_lt_64_bytes_packets, 0, "RX Lt 64 Bytes Packets");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_127_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_127_bytes_packets, 0, "RX 127 bytes Packets");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_255_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_255_bytes_packets, 0, "RX 255 bytes Packets");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_511_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_511_bytes_packets, 0, "RX 511 bytes Packets");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1023_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_1023_bytes_packets, 0, "RX 1023 bytes Packets");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1518_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_1518_bytes_packets, 0, "RX 1518 bytes Packets");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1522_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_1522_bytes_packets, 0, "RX 1522 bytes Packets");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1548_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_1548_bytes_packets, 0, "RX 1548 bytes Packets");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_gt_1548_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.rx_gt_1548_bytes_packets, 0,
|
||||
"RX Greater Then 1548 bytes Packets");
|
||||
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.tx_packets, "TX packets");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_bytes", CTLFLAG_RD,
|
||||
&priv->pkstats.tx_bytes, "TX Bytes");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_multicast_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.tx_multicast_packets, "TX Multicast Packets");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_broadcast_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.tx_broadcast_packets, "TX Broadcast Packets");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_errors", CTLFLAG_RD,
|
||||
&priv->pkstats.tx_errors, "TX Errors");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_dropped", CTLFLAG_RD,
|
||||
&priv->pkstats.tx_dropped, "TX Dropped");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_lt_64_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.tx_lt_64_bytes_packets, "TX Less Then 64 Bytes Packets");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_127_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.tx_127_bytes_packets, "TX 127 Bytes Packets");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_255_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.tx_255_bytes_packets, "TX 255 Bytes Packets");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_511_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.tx_511_bytes_packets, "TX 511 Bytes Packets");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_1023_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.tx_1023_bytes_packets, "TX 1023 Bytes Packets");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_1518_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.tx_1518_bytes_packets, "TX 1518 Bytes Packets");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_1522_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.tx_1522_bytes_packets, "TX 1522 Bytes Packets");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_1548_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.tx_1548_bytes_packets, "TX 1548 Bytes Packets");
|
||||
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_gt_1548_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.tx_gt_1548_bytes_packets,
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.tx_packets, 0, "TX packets");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_bytes", CTLFLAG_RD,
|
||||
&priv->pkstats.tx_bytes, 0, "TX Bytes");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_multicast_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.tx_multicast_packets, 0, "TX Multicast Packets");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_broadcast_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.tx_broadcast_packets, 0, "TX Broadcast Packets");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_errors", CTLFLAG_RD,
|
||||
&priv->pkstats.tx_errors, 0, "TX Errors");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_dropped", CTLFLAG_RD,
|
||||
&priv->pkstats.tx_dropped, 0, "TX Dropped");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_lt_64_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.tx_lt_64_bytes_packets, 0, "TX Less Then 64 Bytes Packets");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_127_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.tx_127_bytes_packets, 0, "TX 127 Bytes Packets");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_255_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.tx_255_bytes_packets, 0, "TX 255 Bytes Packets");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_511_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.tx_511_bytes_packets, 0, "TX 511 Bytes Packets");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1023_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.tx_1023_bytes_packets, 0, "TX 1023 Bytes Packets");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1518_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.tx_1518_bytes_packets, 0, "TX 1518 Bytes Packets");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1522_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.tx_1522_bytes_packets, 0, "TX 1522 Bytes Packets");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1548_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.tx_1548_bytes_packets, 0, "TX 1548 Bytes Packets");
|
||||
SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_gt_1548_bytes_packets", CTLFLAG_RD,
|
||||
&priv->pkstats.tx_gt_1548_bytes_packets, 0,
|
||||
"TX Greater Then 1548 Bytes Packets");
|
||||
|
||||
|
||||
|
||||
for (i = 0; i < priv->tx_ring_num; i++) {
|
||||
tx_ring = priv->tx_ring[i];
|
||||
snprintf(namebuf, sizeof(namebuf), "tx_ring%d", i);
|
||||
ring_node = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, namebuf,
|
||||
CTLFLAG_RD, NULL, "TX Ring");
|
||||
ring_list = SYSCTL_CHILDREN(ring_node);
|
||||
SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "packets",
|
||||
CTLFLAG_RD, &tx_ring->packets, "TX packets");
|
||||
SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "bytes",
|
||||
CTLFLAG_RD, &tx_ring->bytes, "TX bytes");
|
||||
SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "tso_packets",
|
||||
CTLFLAG_RD, &tx_ring->tso_packets, "TSO packets");
|
||||
SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "defrag_attempts",
|
||||
CTLFLAG_RD, &tx_ring->defrag_attempts, "Oversized chains defragged");
|
||||
SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "packets",
|
||||
CTLFLAG_RD, &tx_ring->packets, 0, "TX packets");
|
||||
SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "bytes",
|
||||
CTLFLAG_RD, &tx_ring->bytes, 0, "TX bytes");
|
||||
SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "tso_packets",
|
||||
CTLFLAG_RD, &tx_ring->tso_packets, 0, "TSO packets");
|
||||
SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "defrag_attempts",
|
||||
CTLFLAG_RD, &tx_ring->defrag_attempts, 0,
|
||||
"Oversized chains defragged");
|
||||
}
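These counters were switched from SYSCTL_ADD_ULONG to SYSCTL_ADD_U64 so they stay 64-bit even where unsigned long is 32 bits; at 10 Gb/s a 32-bit byte counter wraps in a few seconds. A tiny demonstration of the wrap (plain C, not sysctl code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t bytes32 = UINT32_MAX - 100;	/* nearly full 32-bit counter */
	uint64_t bytes64 = UINT32_MAX - 100;

	/* Account one more 1500-byte frame on each counter. */
	bytes32 += 1500;
	bytes64 += 1500;

	printf("32-bit counter: %u (wrapped)\n", bytes32);
	printf("64-bit counter: %llu\n", (unsigned long long)bytes64);
	return 0;
}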
for (i = 0; i < priv->rx_ring_num; i++) {
|
||||
@ -2826,11 +2871,11 @@ static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv)
|
||||
ring_node = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, namebuf,
|
||||
CTLFLAG_RD, NULL, "RX Ring");
|
||||
ring_list = SYSCTL_CHILDREN(ring_node);
|
||||
SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "packets",
|
||||
CTLFLAG_RD, &rx_ring->packets, "RX packets");
|
||||
SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "bytes",
|
||||
CTLFLAG_RD, &rx_ring->bytes, "RX bytes");
|
||||
SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "error",
|
||||
CTLFLAG_RD, &rx_ring->errors, "RX soft errors");
|
||||
SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "packets",
|
||||
CTLFLAG_RD, &rx_ring->packets, 0, "RX packets");
|
||||
SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "bytes",
|
||||
CTLFLAG_RD, &rx_ring->bytes, 0, "RX bytes");
|
||||
SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "error",
|
||||
CTLFLAG_RD, &rx_ring->errors, 0, "RX soft errors");
|
||||
}
|
||||
}
|
||||
|
@ -39,7 +39,6 @@
|
||||
|
||||
#include "en_port.h"
|
||||
#include "en.h"
|
||||
#define EN_IFQ_MIN_INTERVAL 3000
|
||||
|
||||
|
||||
int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
|
||||
@ -57,7 +56,6 @@ int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
|
||||
return PTR_ERR(mailbox);
|
||||
|
||||
filter = mailbox->buf;
|
||||
memset(filter, 0, sizeof(*filter));
|
||||
for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) {
|
||||
entry = 0;
|
||||
for (j = 0; j < 32; j++) {
|
||||
@ -84,7 +82,6 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
|
||||
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
|
||||
if (IS_ERR(mailbox))
|
||||
return PTR_ERR(mailbox);
|
||||
memset(mailbox->buf, 0, sizeof(*qport_context));
|
||||
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
|
||||
MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
|
||||
MLX4_CMD_WRAPPED);
|
||||
@ -96,6 +93,9 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
|
||||
* already synchronized, no need in locking */
|
||||
state->link_state = !!(qport_context->link_up & MLX4_EN_LINK_UP_MASK);
|
||||
switch (qport_context->link_speed & MLX4_EN_SPEED_MASK) {
|
||||
case MLX4_EN_100M_SPEED:
|
||||
state->link_speed = 100;
|
||||
break;
|
||||
case MLX4_EN_1G_SPEED:
|
||||
state->link_speed = 1000;
|
||||
break;
|
||||
@ -116,14 +116,39 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
|
||||
state->link_speed = -1;
|
||||
break;
|
||||
}
|
||||
state->transciver = qport_context->transceiver;
|
||||
state->autoneg = !!(qport_context->autoneg & MLX4_EN_AUTONEG_MASK);
|
||||
|
||||
state->transceiver = qport_context->transceiver;
|
||||
|
||||
state->flags = 0; /* Reset and recalculate the port flags */
|
||||
state->flags |= (qport_context->link_up & MLX4_EN_ANC_MASK) ?
|
||||
MLX4_EN_PORT_ANC : 0;
|
||||
state->flags |= (qport_context->autoneg & MLX4_EN_AUTONEG_MASK) ?
|
||||
MLX4_EN_PORT_ANE : 0;
|
||||
|
||||
out:
|
||||
mlx4_free_cmd_mailbox(mdev->dev, mailbox);
|
||||
return err;
|
||||
}
/* Each counter set is located in struct mlx4_en_stat_out_mbox
 * with a const offset between its prio components.
 * This function runs over a counter set and sum all of it's prio components.
 */
static u64 en_stats_adder(__be64 *start, __be64 *next, int num)
{
	__be64 *curr = start;
	u64 ret = 0;
	int i;
	int offset = next - start;

	for (i = 0; i < num; i++) {
		ret += be64_to_cpu(*curr);
		curr += offset;
	}

	return ret;
}
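en_stats_adder() exploits the mailbox layout: the per-priority components of one counter (e.g. RTOT_prio_0 through RTOT_prio_7) sit at a constant stride, so the pointer difference between the first two components gives the step for all of them. Below, a standalone model over a fabricated flat layout; the real code passes the addresses of two struct members and byte-swaps each element with be64_to_cpu() because the mailbox is big-endian:

#include <stdint.h>
#include <stdio.h>

static uint64_t stats_adder(const uint64_t *start, const uint64_t *next, int num)
{
	const uint64_t *curr = start;
	uint64_t ret = 0;
	int i, offset = next - start;	/* stride in elements, not bytes */

	for (i = 0; i < num; i++) {
		ret += *curr;		/* the driver wraps this in be64_to_cpu() */
		curr += offset;
	}
	return ret;
}

int main(void)
{
	/* Fabricated mailbox: counter A's prio components at indexes 0,2,4,6,
	 * counter B's at 1,3,5,7, so consecutive A components are 2 apart. */
	uint64_t mbox[8] = { 1, 10, 2, 20, 3, 30, 4, 40 };

	/* Sum the four prio components of counter A: 1 + 2 + 3 + 4. */
	printf("A total = %llu\n",
	    (unsigned long long)stats_adder(&mbox[0], &mbox[2], 4));
	return 0;
}

The statistics assignments further down (priv->pkstats.rx_errors, tx_errors, rx_multicast_packets and so on) then just call en_stats_adder() with the first two prio members of each counter set instead of writing out the nine-term sums by hand.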
static void mlx4_en_fold_software_stats(struct net_device *dev)
|
||||
{
|
||||
struct mlx4_en_priv *priv = netdev_priv(dev);
|
||||
@ -159,51 +184,20 @@ static void mlx4_en_fold_software_stats(struct net_device *dev)
|
||||
|
||||
int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
|
||||
{
|
||||
struct mlx4_counter tmp_vport_stats;
|
||||
struct mlx4_en_stat_out_mbox *mlx4_en_stats;
|
||||
struct mlx4_en_stat_out_flow_control_mbox *flowstats;
|
||||
struct net_device *dev = mdev->pndev[port];
|
||||
struct mlx4_en_priv *priv = netdev_priv(dev);
|
||||
struct mlx4_en_vport_stats *vport_stats = &priv->vport_stats;
|
||||
struct mlx4_cmd_mailbox *mailbox = NULL;
|
||||
struct mlx4_cmd_mailbox *mailbox_flow = NULL;
|
||||
struct mlx4_cmd_mailbox *mailbox;
|
||||
u64 in_mod = reset << 8 | port;
|
||||
int err;
|
||||
int i;
|
||||
int do_if_stat = 1;
|
||||
unsigned long period = (unsigned long) (jiffies - priv->last_ifq_jiffies);
|
||||
struct mlx4_en_vport_stats tmp_vport_stats;
|
||||
|
||||
if (jiffies_to_msecs(period) < EN_IFQ_MIN_INTERVAL ||
|
||||
priv->counter_index == 0xff)
|
||||
do_if_stat = 0;
|
||||
int i, counter_index;
|
||||
|
||||
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
|
||||
if (IS_ERR(mailbox)) {
|
||||
err = PTR_ERR(mailbox);
|
||||
goto mailbox_out;
|
||||
}
|
||||
|
||||
mailbox_flow = mlx4_alloc_cmd_mailbox(mdev->dev);
|
||||
if (IS_ERR(mailbox_flow)) {
|
||||
mlx4_free_cmd_mailbox(mdev->dev, mailbox);
|
||||
err = PTR_ERR(mailbox_flow);
|
||||
goto mailbox_out;
|
||||
}
|
||||
|
||||
/* 0xffs indicates invalid value */
|
||||
memset(mailbox_flow->buf, 0xff, sizeof(*flowstats) *
|
||||
MLX4_NUM_PRIORITIES);
|
||||
|
||||
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) {
|
||||
memset(mailbox_flow->buf, 0, sizeof(*flowstats));
|
||||
err = mlx4_cmd_box(mdev->dev, 0, mailbox_flow->dma,
|
||||
in_mod | 1<<12, 0, MLX4_CMD_DUMP_ETH_STATS,
|
||||
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
|
||||
|
||||
if (err)
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (IS_ERR(mailbox))
|
||||
return PTR_ERR(mailbox);
|
||||
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
|
||||
MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
|
||||
MLX4_CMD_NATIVE);
|
||||
@ -220,292 +214,83 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
|
||||
priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
|
||||
priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
|
||||
}
|
||||
|
||||
priv->port_stats.tx_chksum_offload = 0;
|
||||
priv->port_stats.queue_stopped = 0;
|
||||
priv->port_stats.wake_queue = 0;
|
||||
priv->port_stats.oversized_packets = 0;
|
||||
priv->port_stats.tso_packets = 0;
|
||||
priv->port_stats.defrag_attempts = 0;
|
||||
|
||||
for (i = 0; i < priv->tx_ring_num; i++) {
|
||||
priv->port_stats.tx_chksum_offload += priv->tx_ring[i]->tx_csum;
|
||||
priv->port_stats.queue_stopped += priv->tx_ring[i]->queue_stopped;
|
||||
priv->port_stats.wake_queue += priv->tx_ring[i]->wake_queue;
|
||||
priv->port_stats.oversized_packets += priv->tx_ring[i]->oversized_packets;
|
||||
priv->port_stats.tso_packets += priv->tx_ring[i]->tso_packets;
|
||||
priv->port_stats.defrag_attempts += priv->tx_ring[i]->defrag_attempts;
|
||||
const struct mlx4_en_tx_ring *ring;
|
||||
ring = priv->tx_ring[i];
|
||||
|
||||
priv->port_stats.tx_chksum_offload += ring->tx_csum;
|
||||
priv->port_stats.queue_stopped += ring->queue_stopped;
|
||||
priv->port_stats.wake_queue += ring->wake_queue;
|
||||
priv->port_stats.oversized_packets += ring->oversized_packets;
|
||||
priv->port_stats.tso_packets += ring->tso_packets;
|
||||
priv->port_stats.defrag_attempts += ring->defrag_attempts;
|
||||
}
|
||||
/* RX Statistics */
|
||||
priv->pkstats.rx_packets = be64_to_cpu(mlx4_en_stats->RTOT_prio_0) +
|
||||
be64_to_cpu(mlx4_en_stats->RTOT_prio_1) +
|
||||
be64_to_cpu(mlx4_en_stats->RTOT_prio_2) +
|
||||
be64_to_cpu(mlx4_en_stats->RTOT_prio_3) +
|
||||
be64_to_cpu(mlx4_en_stats->RTOT_prio_4) +
|
||||
be64_to_cpu(mlx4_en_stats->RTOT_prio_5) +
|
||||
be64_to_cpu(mlx4_en_stats->RTOT_prio_6) +
|
||||
be64_to_cpu(mlx4_en_stats->RTOT_prio_7) +
|
||||
be64_to_cpu(mlx4_en_stats->RTOT_novlan);
|
||||
priv->pkstats.rx_bytes = be64_to_cpu(mlx4_en_stats->ROCT_prio_0) +
|
||||
be64_to_cpu(mlx4_en_stats->ROCT_prio_1) +
|
||||
be64_to_cpu(mlx4_en_stats->ROCT_prio_2) +
|
||||
be64_to_cpu(mlx4_en_stats->ROCT_prio_3) +
|
||||
be64_to_cpu(mlx4_en_stats->ROCT_prio_4) +
|
||||
be64_to_cpu(mlx4_en_stats->ROCT_prio_5) +
|
||||
be64_to_cpu(mlx4_en_stats->ROCT_prio_6) +
|
||||
be64_to_cpu(mlx4_en_stats->ROCT_prio_7) +
|
||||
be64_to_cpu(mlx4_en_stats->ROCT_novlan);
|
||||
priv->pkstats.rx_multicast_packets = be64_to_cpu(mlx4_en_stats->MCAST_prio_0) +
|
||||
be64_to_cpu(mlx4_en_stats->MCAST_prio_1) +
|
||||
be64_to_cpu(mlx4_en_stats->MCAST_prio_2) +
|
||||
be64_to_cpu(mlx4_en_stats->MCAST_prio_3) +
|
||||
be64_to_cpu(mlx4_en_stats->MCAST_prio_4) +
|
||||
be64_to_cpu(mlx4_en_stats->MCAST_prio_5) +
|
||||
be64_to_cpu(mlx4_en_stats->MCAST_prio_6) +
|
||||
be64_to_cpu(mlx4_en_stats->MCAST_prio_7) +
|
||||
be64_to_cpu(mlx4_en_stats->MCAST_novlan);
|
||||
priv->pkstats.rx_broadcast_packets = be64_to_cpu(mlx4_en_stats->RBCAST_prio_0) +
|
||||
be64_to_cpu(mlx4_en_stats->RBCAST_prio_1) +
|
||||
be64_to_cpu(mlx4_en_stats->RBCAST_prio_2) +
|
||||
be64_to_cpu(mlx4_en_stats->RBCAST_prio_3) +
|
||||
be64_to_cpu(mlx4_en_stats->RBCAST_prio_4) +
|
||||
be64_to_cpu(mlx4_en_stats->RBCAST_prio_5) +
|
||||
be64_to_cpu(mlx4_en_stats->RBCAST_prio_6) +
|
||||
be64_to_cpu(mlx4_en_stats->RBCAST_prio_7) +
|
||||
be64_to_cpu(mlx4_en_stats->RBCAST_novlan);
|
||||
priv->pkstats.rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
|
||||
be32_to_cpu(mlx4_en_stats->RJBBR) +
|
||||
be32_to_cpu(mlx4_en_stats->RCRC) +
|
||||
be32_to_cpu(mlx4_en_stats->RRUNT) +
|
||||
be64_to_cpu(mlx4_en_stats->RInRangeLengthErr) +
|
||||
be64_to_cpu(mlx4_en_stats->ROutRangeLengthErr) +
|
||||
be32_to_cpu(mlx4_en_stats->RSHORT) +
|
||||
be64_to_cpu(mlx4_en_stats->RGIANT_prio_0) +
|
||||
be64_to_cpu(mlx4_en_stats->RGIANT_prio_1) +
|
||||
be64_to_cpu(mlx4_en_stats->RGIANT_prio_2) +
|
||||
be64_to_cpu(mlx4_en_stats->RGIANT_prio_3) +
|
||||
be64_to_cpu(mlx4_en_stats->RGIANT_prio_4) +
|
||||
be64_to_cpu(mlx4_en_stats->RGIANT_prio_5) +
|
||||
be64_to_cpu(mlx4_en_stats->RGIANT_prio_6) +
|
||||
be64_to_cpu(mlx4_en_stats->RGIANT_prio_7) +
|
||||
be64_to_cpu(mlx4_en_stats->RGIANT_novlan);
|
||||
priv->pkstats.rx_dropped = be32_to_cpu(mlx4_en_stats->RdropOvflw);
|
||||
|
||||
priv->pkstats.rx_errors =
|
||||
be64_to_cpu(mlx4_en_stats->PCS) +
|
||||
be32_to_cpu(mlx4_en_stats->RJBBR) +
|
||||
be32_to_cpu(mlx4_en_stats->RCRC) +
|
||||
be32_to_cpu(mlx4_en_stats->RRUNT) +
|
||||
be64_to_cpu(mlx4_en_stats->RInRangeLengthErr) +
|
||||
be64_to_cpu(mlx4_en_stats->ROutRangeLengthErr) +
|
||||
be32_to_cpu(mlx4_en_stats->RSHORT) +
|
||||
en_stats_adder(&mlx4_en_stats->RGIANT_prio_0,
|
||||
&mlx4_en_stats->RGIANT_prio_1,
|
||||
NUM_PRIORITIES);
|
||||
priv->pkstats.tx_errors =
|
||||
en_stats_adder(&mlx4_en_stats->TGIANT_prio_0,
|
||||
&mlx4_en_stats->TGIANT_prio_1,
|
||||
NUM_PRIORITIES);
|
||||
priv->pkstats.rx_multicast_packets =
|
||||
en_stats_adder(&mlx4_en_stats->MCAST_prio_0,
|
||||
&mlx4_en_stats->MCAST_prio_1,
|
||||
NUM_PRIORITIES);
|
||||
priv->pkstats.rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP);
|
||||
priv->pkstats.rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
|
||||
priv->pkstats.rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
|
||||
priv->pkstats.rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
|
||||
priv->pkstats.rx_jabbers = be32_to_cpu(mlx4_en_stats->RJBBR);
|
||||
priv->pkstats.rx_in_range_length_error = be64_to_cpu(mlx4_en_stats->RInRangeLengthErr);
|
||||
priv->pkstats.rx_out_range_length_error = be64_to_cpu(mlx4_en_stats->ROutRangeLengthErr);
|
||||
priv->pkstats.rx_lt_64_bytes_packets = be64_to_cpu(mlx4_en_stats->R64_prio_0) +
|
||||
be64_to_cpu(mlx4_en_stats->R64_prio_1) +
|
||||
be64_to_cpu(mlx4_en_stats->R64_prio_2) +
|
||||
be64_to_cpu(mlx4_en_stats->R64_prio_3) +
|
||||
be64_to_cpu(mlx4_en_stats->R64_prio_4) +
|
||||
be64_to_cpu(mlx4_en_stats->R64_prio_5) +
|
||||
be64_to_cpu(mlx4_en_stats->R64_prio_6) +
|
||||
be64_to_cpu(mlx4_en_stats->R64_prio_7) +
|
||||
be64_to_cpu(mlx4_en_stats->R64_novlan);
|
||||
priv->pkstats.rx_127_bytes_packets = be64_to_cpu(mlx4_en_stats->R127_prio_0) +
|
||||
be64_to_cpu(mlx4_en_stats->R127_prio_1) +
|
||||
be64_to_cpu(mlx4_en_stats->R127_prio_2) +
|
||||
be64_to_cpu(mlx4_en_stats->R127_prio_3) +
|
||||
be64_to_cpu(mlx4_en_stats->R127_prio_4) +
|
||||
be64_to_cpu(mlx4_en_stats->R127_prio_5) +
|
||||
be64_to_cpu(mlx4_en_stats->R127_prio_6) +
|
||||
be64_to_cpu(mlx4_en_stats->R127_prio_7) +
|
||||
be64_to_cpu(mlx4_en_stats->R127_novlan);
|
||||
priv->pkstats.rx_255_bytes_packets = be64_to_cpu(mlx4_en_stats->R255_prio_0) +
|
||||
be64_to_cpu(mlx4_en_stats->R255_prio_1) +
|
||||
be64_to_cpu(mlx4_en_stats->R255_prio_2) +
|
||||
be64_to_cpu(mlx4_en_stats->R255_prio_3) +
|
||||
be64_to_cpu(mlx4_en_stats->R255_prio_4) +
|
||||
be64_to_cpu(mlx4_en_stats->R255_prio_5) +
|
||||
be64_to_cpu(mlx4_en_stats->R255_prio_6) +
|
||||
be64_to_cpu(mlx4_en_stats->R255_prio_7) +
|
||||
be64_to_cpu(mlx4_en_stats->R255_novlan);
|
||||
priv->pkstats.rx_511_bytes_packets = be64_to_cpu(mlx4_en_stats->R511_prio_0) +
|
||||
be64_to_cpu(mlx4_en_stats->R511_prio_1) +
|
||||
be64_to_cpu(mlx4_en_stats->R511_prio_2) +
|
||||
be64_to_cpu(mlx4_en_stats->R511_prio_3) +
|
||||
be64_to_cpu(mlx4_en_stats->R511_prio_4) +
|
||||
be64_to_cpu(mlx4_en_stats->R511_prio_5) +
|
||||
be64_to_cpu(mlx4_en_stats->R511_prio_6) +
|
||||
be64_to_cpu(mlx4_en_stats->R511_prio_7) +
|
||||
be64_to_cpu(mlx4_en_stats->R511_novlan);
|
||||
priv->pkstats.rx_1023_bytes_packets = be64_to_cpu(mlx4_en_stats->R1023_prio_0) +
|
||||
be64_to_cpu(mlx4_en_stats->R1023_prio_1) +
|
||||
be64_to_cpu(mlx4_en_stats->R1023_prio_2) +
|
||||
be64_to_cpu(mlx4_en_stats->R1023_prio_3) +
|
||||
be64_to_cpu(mlx4_en_stats->R1023_prio_4) +
|
||||
be64_to_cpu(mlx4_en_stats->R1023_prio_5) +
|
||||
be64_to_cpu(mlx4_en_stats->R1023_prio_6) +
|
||||
be64_to_cpu(mlx4_en_stats->R1023_prio_7) +
|
||||
be64_to_cpu(mlx4_en_stats->R1023_novlan);
|
||||
priv->pkstats.rx_1518_bytes_packets = be64_to_cpu(mlx4_en_stats->R1518_prio_0) +
|
||||
be64_to_cpu(mlx4_en_stats->R1518_prio_1) +
|
||||
be64_to_cpu(mlx4_en_stats->R1518_prio_2) +
|
||||
be64_to_cpu(mlx4_en_stats->R1518_prio_3) +
|
||||
be64_to_cpu(mlx4_en_stats->R1518_prio_4) +
|
||||
be64_to_cpu(mlx4_en_stats->R1518_prio_5) +
|
||||
be64_to_cpu(mlx4_en_stats->R1518_prio_6) +
|
||||
be64_to_cpu(mlx4_en_stats->R1518_prio_7) +
|
||||
be64_to_cpu(mlx4_en_stats->R1518_novlan);
|
||||
priv->pkstats.rx_1522_bytes_packets = be64_to_cpu(mlx4_en_stats->R1522_prio_0) +
|
||||
be64_to_cpu(mlx4_en_stats->R1522_prio_1) +
|
||||
be64_to_cpu(mlx4_en_stats->R1522_prio_2) +
|
||||
be64_to_cpu(mlx4_en_stats->R1522_prio_3) +
|
||||
be64_to_cpu(mlx4_en_stats->R1522_prio_4) +
|
||||
be64_to_cpu(mlx4_en_stats->R1522_prio_5) +
|
||||
be64_to_cpu(mlx4_en_stats->R1522_prio_6) +
|
||||
be64_to_cpu(mlx4_en_stats->R1522_prio_7) +
|
||||
be64_to_cpu(mlx4_en_stats->R1522_novlan);
|
||||
priv->pkstats.rx_1548_bytes_packets = be64_to_cpu(mlx4_en_stats->R1548_prio_0) +
|
||||
be64_to_cpu(mlx4_en_stats->R1548_prio_1) +
|
||||
be64_to_cpu(mlx4_en_stats->R1548_prio_2) +
|
||||
be64_to_cpu(mlx4_en_stats->R1548_prio_3) +
|
||||
be64_to_cpu(mlx4_en_stats->R1548_prio_4) +
|
||||
be64_to_cpu(mlx4_en_stats->R1548_prio_5) +
|
||||
be64_to_cpu(mlx4_en_stats->R1548_prio_6) +
|
||||
be64_to_cpu(mlx4_en_stats->R1548_prio_7) +
|
||||
be64_to_cpu(mlx4_en_stats->R1548_novlan);
|
||||
priv->pkstats.rx_gt_1548_bytes_packets = be64_to_cpu(mlx4_en_stats->R2MTU_prio_0) +
|
||||
be64_to_cpu(mlx4_en_stats->R2MTU_prio_1) +
|
||||
be64_to_cpu(mlx4_en_stats->R2MTU_prio_2) +
|
||||
be64_to_cpu(mlx4_en_stats->R2MTU_prio_3) +
|
||||
be64_to_cpu(mlx4_en_stats->R2MTU_prio_4) +
|
||||
be64_to_cpu(mlx4_en_stats->R2MTU_prio_5) +
|
||||
be64_to_cpu(mlx4_en_stats->R2MTU_prio_6) +
|
||||
be64_to_cpu(mlx4_en_stats->R2MTU_prio_7) +
|
||||
be64_to_cpu(mlx4_en_stats->R2MTU_novlan);
|
||||
priv->pkstats.rx_dropped = be32_to_cpu(mlx4_en_stats->RdropOvflw);
|
||||
priv->pkstats.tx_dropped = be32_to_cpu(mlx4_en_stats->TDROP);
|
||||
|
||||
/* Tx Stats */
|
||||
priv->pkstats.tx_packets = be64_to_cpu(mlx4_en_stats->TTOT_prio_0) +
|
||||
be64_to_cpu(mlx4_en_stats->TTOT_prio_1) +
|
||||
be64_to_cpu(mlx4_en_stats->TTOT_prio_2) +
|
||||
be64_to_cpu(mlx4_en_stats->TTOT_prio_3) +
|
||||
be64_to_cpu(mlx4_en_stats->TTOT_prio_4) +
|
||||
be64_to_cpu(mlx4_en_stats->TTOT_prio_5) +
|
||||
be64_to_cpu(mlx4_en_stats->TTOT_prio_6) +
|
||||
be64_to_cpu(mlx4_en_stats->TTOT_prio_7) +
|
||||
be64_to_cpu(mlx4_en_stats->TTOT_novlan);
|
||||
priv->pkstats.tx_bytes = be64_to_cpu(mlx4_en_stats->TOCT_prio_0) +
|
||||
be64_to_cpu(mlx4_en_stats->TOCT_prio_1) +
|
||||
be64_to_cpu(mlx4_en_stats->TOCT_prio_2) +
|
||||
be64_to_cpu(mlx4_en_stats->TOCT_prio_3) +
|
||||
be64_to_cpu(mlx4_en_stats->TOCT_prio_4) +
|
||||
be64_to_cpu(mlx4_en_stats->TOCT_prio_5) +
|
||||
be64_to_cpu(mlx4_en_stats->TOCT_prio_6) +
|
||||
be64_to_cpu(mlx4_en_stats->TOCT_prio_7) +
|
||||
be64_to_cpu(mlx4_en_stats->TOCT_novlan);
|
||||
priv->pkstats.tx_multicast_packets = be64_to_cpu(mlx4_en_stats->TMCAST_prio_0) +
|
||||
be64_to_cpu(mlx4_en_stats->TMCAST_prio_1) +
|
||||
be64_to_cpu(mlx4_en_stats->TMCAST_prio_2) +
|
||||
be64_to_cpu(mlx4_en_stats->TMCAST_prio_3) +
|
||||
be64_to_cpu(mlx4_en_stats->TMCAST_prio_4) +
|
||||
be64_to_cpu(mlx4_en_stats->TMCAST_prio_5) +
|
||||
be64_to_cpu(mlx4_en_stats->TMCAST_prio_6) +
|
||||
be64_to_cpu(mlx4_en_stats->TMCAST_prio_7) +
|
||||
be64_to_cpu(mlx4_en_stats->TMCAST_novlan);
|
||||
priv->pkstats.tx_broadcast_packets = be64_to_cpu(mlx4_en_stats->TBCAST_prio_0) +
|
||||
be64_to_cpu(mlx4_en_stats->TBCAST_prio_1) +
|
||||
be64_to_cpu(mlx4_en_stats->TBCAST_prio_2) +
|
||||
be64_to_cpu(mlx4_en_stats->TBCAST_prio_3) +
|
||||
be64_to_cpu(mlx4_en_stats->TBCAST_prio_4) +
|
||||
be64_to_cpu(mlx4_en_stats->TBCAST_prio_5) +
|
||||
be64_to_cpu(mlx4_en_stats->TBCAST_prio_6) +
|
||||
be64_to_cpu(mlx4_en_stats->TBCAST_prio_7) +
|
||||
be64_to_cpu(mlx4_en_stats->TBCAST_novlan);
|
||||
priv->pkstats.tx_errors = be64_to_cpu(mlx4_en_stats->TGIANT_prio_0) +
|
||||
be64_to_cpu(mlx4_en_stats->TGIANT_prio_1) +
|
||||
be64_to_cpu(mlx4_en_stats->TGIANT_prio_2) +
|
||||
be64_to_cpu(mlx4_en_stats->TGIANT_prio_3) +
|
||||
be64_to_cpu(mlx4_en_stats->TGIANT_prio_4) +
|
||||
be64_to_cpu(mlx4_en_stats->TGIANT_prio_5) +
|
||||
be64_to_cpu(mlx4_en_stats->TGIANT_prio_6) +
|
||||
be64_to_cpu(mlx4_en_stats->TGIANT_prio_7) +
|
||||
be64_to_cpu(mlx4_en_stats->TGIANT_novlan);
|
||||
priv->pkstats.tx_dropped = be32_to_cpu(mlx4_en_stats->TDROP) -
|
||||
priv->pkstats.tx_errors;
|
||||
priv->pkstats.tx_lt_64_bytes_packets = be64_to_cpu(mlx4_en_stats->T64_prio_0) +
|
||||
be64_to_cpu(mlx4_en_stats->T64_prio_1) +
|
||||
be64_to_cpu(mlx4_en_stats->T64_prio_2) +
|
||||
be64_to_cpu(mlx4_en_stats->T64_prio_3) +
|
||||
be64_to_cpu(mlx4_en_stats->T64_prio_4) +
|
||||
be64_to_cpu(mlx4_en_stats->T64_prio_5) +
|
||||
be64_to_cpu(mlx4_en_stats->T64_prio_6) +
|
||||
be64_to_cpu(mlx4_en_stats->T64_prio_7) +
|
||||
be64_to_cpu(mlx4_en_stats->T64_novlan);
|
||||
priv->pkstats.tx_127_bytes_packets = be64_to_cpu(mlx4_en_stats->T127_prio_0) +
|
||||
be64_to_cpu(mlx4_en_stats->T127_prio_1) +
|
||||
be64_to_cpu(mlx4_en_stats->T127_prio_2) +
|
||||
be64_to_cpu(mlx4_en_stats->T127_prio_3) +
|
||||
be64_to_cpu(mlx4_en_stats->T127_prio_4) +
|
||||
be64_to_cpu(mlx4_en_stats->T127_prio_5) +
|
||||
be64_to_cpu(mlx4_en_stats->T127_prio_6) +
|
||||
be64_to_cpu(mlx4_en_stats->T127_prio_7) +
|
||||
be64_to_cpu(mlx4_en_stats->T127_novlan);
|
||||
priv->pkstats.tx_255_bytes_packets = be64_to_cpu(mlx4_en_stats->T255_prio_0) +
|
||||
be64_to_cpu(mlx4_en_stats->T255_prio_1) +
|
||||
be64_to_cpu(mlx4_en_stats->T255_prio_2) +
|
||||
be64_to_cpu(mlx4_en_stats->T255_prio_3) +
|
||||
be64_to_cpu(mlx4_en_stats->T255_prio_4) +
|
||||
be64_to_cpu(mlx4_en_stats->T255_prio_5) +
|
||||
be64_to_cpu(mlx4_en_stats->T255_prio_6) +
|
||||
be64_to_cpu(mlx4_en_stats->T255_prio_7) +
|
||||
be64_to_cpu(mlx4_en_stats->T255_novlan);
|
||||
priv->pkstats.tx_511_bytes_packets = be64_to_cpu(mlx4_en_stats->T511_prio_0) +
|
||||
be64_to_cpu(mlx4_en_stats->T511_prio_1) +
|
||||
be64_to_cpu(mlx4_en_stats->T511_prio_2) +
|
||||
be64_to_cpu(mlx4_en_stats->T511_prio_3) +
|
||||
be64_to_cpu(mlx4_en_stats->T511_prio_4) +
|
||||
be64_to_cpu(mlx4_en_stats->T511_prio_5) +
|
||||
be64_to_cpu(mlx4_en_stats->T511_prio_6) +
|
||||
be64_to_cpu(mlx4_en_stats->T511_prio_7) +
|
||||
be64_to_cpu(mlx4_en_stats->T511_novlan);
|
||||
priv->pkstats.tx_1023_bytes_packets = be64_to_cpu(mlx4_en_stats->T1023_prio_0) +
|
||||
be64_to_cpu(mlx4_en_stats->T1023_prio_1) +
|
||||
be64_to_cpu(mlx4_en_stats->T1023_prio_2) +
|
||||
be64_to_cpu(mlx4_en_stats->T1023_prio_3) +
|
||||
be64_to_cpu(mlx4_en_stats->T1023_prio_4) +
|
||||
be64_to_cpu(mlx4_en_stats->T1023_prio_5) +
|
||||
be64_to_cpu(mlx4_en_stats->T1023_prio_6) +
|
||||
be64_to_cpu(mlx4_en_stats->T1023_prio_7) +
|
||||
be64_to_cpu(mlx4_en_stats->T1023_novlan);
|
||||
priv->pkstats.tx_1518_bytes_packets = be64_to_cpu(mlx4_en_stats->T1518_prio_0) +
|
||||
be64_to_cpu(mlx4_en_stats->T1518_prio_1) +
|
||||
be64_to_cpu(mlx4_en_stats->T1518_prio_2) +
|
||||
be64_to_cpu(mlx4_en_stats->T1518_prio_3) +
|
||||
be64_to_cpu(mlx4_en_stats->T1518_prio_4) +
|
||||
be64_to_cpu(mlx4_en_stats->T1518_prio_5) +
|
||||
be64_to_cpu(mlx4_en_stats->T1518_prio_6) +
|
||||
be64_to_cpu(mlx4_en_stats->T1518_prio_7) +
|
||||
be64_to_cpu(mlx4_en_stats->T1518_novlan);
|
||||
priv->pkstats.tx_1522_bytes_packets = be64_to_cpu(mlx4_en_stats->T1522_prio_0) +
|
||||
be64_to_cpu(mlx4_en_stats->T1522_prio_1) +
|
||||
be64_to_cpu(mlx4_en_stats->T1522_prio_2) +
|
||||
be64_to_cpu(mlx4_en_stats->T1522_prio_3) +
|
||||
be64_to_cpu(mlx4_en_stats->T1522_prio_4) +
|
||||
be64_to_cpu(mlx4_en_stats->T1522_prio_5) +
|
||||
be64_to_cpu(mlx4_en_stats->T1522_prio_6) +
|
||||
be64_to_cpu(mlx4_en_stats->T1522_prio_7) +
|
||||
be64_to_cpu(mlx4_en_stats->T1522_novlan);
|
||||
priv->pkstats.tx_1548_bytes_packets = be64_to_cpu(mlx4_en_stats->T1548_prio_0) +
|
||||
be64_to_cpu(mlx4_en_stats->T1548_prio_1) +
|
||||
be64_to_cpu(mlx4_en_stats->T1548_prio_2) +
|
||||
be64_to_cpu(mlx4_en_stats->T1548_prio_3) +
|
||||
be64_to_cpu(mlx4_en_stats->T1548_prio_4) +
|
||||
be64_to_cpu(mlx4_en_stats->T1548_prio_5) +
|
||||
be64_to_cpu(mlx4_en_stats->T1548_prio_6) +
|
||||
be64_to_cpu(mlx4_en_stats->T1548_prio_7) +
|
||||
be64_to_cpu(mlx4_en_stats->T1548_novlan);
|
||||
priv->pkstats.tx_gt_1548_bytes_packets = be64_to_cpu(mlx4_en_stats->T2MTU_prio_0) +
|
||||
be64_to_cpu(mlx4_en_stats->T2MTU_prio_1) +
|
||||
be64_to_cpu(mlx4_en_stats->T2MTU_prio_2) +
|
||||
be64_to_cpu(mlx4_en_stats->T2MTU_prio_3) +
|
||||
be64_to_cpu(mlx4_en_stats->T2MTU_prio_4) +
|
||||
be64_to_cpu(mlx4_en_stats->T2MTU_prio_5) +
|
||||
be64_to_cpu(mlx4_en_stats->T2MTU_prio_6) +
|
||||
be64_to_cpu(mlx4_en_stats->T2MTU_prio_7) +
|
||||
be64_to_cpu(mlx4_en_stats->T2MTU_novlan);
|
||||
/* RX stats */
priv->pkstats.rx_packets = en_stats_adder(&mlx4_en_stats->RTOT_prio_0,
&mlx4_en_stats->RTOT_prio_1,
NUM_PRIORITIES);
priv->pkstats.rx_bytes = en_stats_adder(&mlx4_en_stats->ROCT_prio_0,
&mlx4_en_stats->ROCT_prio_1,
NUM_PRIORITIES);
priv->pkstats.rx_broadcast_packets =
en_stats_adder(&mlx4_en_stats->RBCAST_prio_0,
&mlx4_en_stats->RBCAST_prio_1,
NUM_PRIORITIES);
priv->pkstats.rx_jabbers = be32_to_cpu(mlx4_en_stats->RJBBR);
priv->pkstats.rx_in_range_length_error =
be64_to_cpu(mlx4_en_stats->RInRangeLengthErr);
priv->pkstats.rx_out_range_length_error =
be64_to_cpu(mlx4_en_stats->ROutRangeLengthErr);

/* Tx stats */
priv->pkstats.tx_packets = en_stats_adder(&mlx4_en_stats->TTOT_prio_0,
&mlx4_en_stats->TTOT_prio_1,
NUM_PRIORITIES);
priv->pkstats.tx_bytes = en_stats_adder(&mlx4_en_stats->TOCT_prio_0,
&mlx4_en_stats->TOCT_prio_1,
NUM_PRIORITIES);
priv->pkstats.tx_multicast_packets =
en_stats_adder(&mlx4_en_stats->TMCAST_prio_0,
&mlx4_en_stats->TMCAST_prio_1,
NUM_PRIORITIES);
priv->pkstats.tx_broadcast_packets =
en_stats_adder(&mlx4_en_stats->TBCAST_prio_0,
&mlx4_en_stats->TBCAST_prio_1,
NUM_PRIORITIES);

priv->pkstats.rx_prio[0][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_0);
priv->pkstats.rx_prio[0][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_0);
@ -544,49 +329,22 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
priv->pkstats.tx_prio[8][0] = be64_to_cpu(mlx4_en_stats->TTOT_novlan);
priv->pkstats.tx_prio[8][1] = be64_to_cpu(mlx4_en_stats->TOCT_novlan);

flowstats = mailbox_flow->buf;

for (i = 0; i < MLX4_NUM_PRIORITIES; i++) {
priv->flowstats[i].rx_pause =
be64_to_cpu(flowstats[i].rx_pause);
priv->flowstats[i].rx_pause_duration =
be64_to_cpu(flowstats[i].rx_pause_duration);
priv->flowstats[i].rx_pause_transition =
be64_to_cpu(flowstats[i].rx_pause_transition);
priv->flowstats[i].tx_pause =
be64_to_cpu(flowstats[i].tx_pause);
priv->flowstats[i].tx_pause_duration =
be64_to_cpu(flowstats[i].tx_pause_duration);
priv->flowstats[i].tx_pause_transition =
be64_to_cpu(flowstats[i].tx_pause_transition);
}

mlx4_en_fold_software_stats(dev);

spin_unlock(&priv->stats_lock);

memset(&tmp_vport_stats, 0, sizeof(tmp_vport_stats));
counter_index = mlx4_get_default_counter_index(mdev->dev, port);
err = mlx4_get_counter_stats(mdev->dev, counter_index,
&tmp_vport_stats, reset);

err = mlx4_get_vport_ethtool_stats(mdev->dev, port,
&tmp_vport_stats, reset);
spin_lock(&priv->stats_lock);
if (!err) {
/* ethtool stats format */
vport_stats->rx_unicast_packets = tmp_vport_stats.rx_unicast_packets;
vport_stats->rx_unicast_bytes = tmp_vport_stats.rx_unicast_bytes;
vport_stats->rx_multicast_packets = tmp_vport_stats.rx_multicast_packets;
vport_stats->rx_multicast_bytes = tmp_vport_stats.rx_multicast_bytes;
vport_stats->rx_broadcast_packets = tmp_vport_stats.rx_broadcast_packets;
vport_stats->rx_broadcast_bytes = tmp_vport_stats.rx_broadcast_bytes;
vport_stats->rx_dropped = tmp_vport_stats.rx_dropped;
vport_stats->rx_errors = tmp_vport_stats.rx_errors;
vport_stats->tx_unicast_packets = tmp_vport_stats.tx_unicast_packets;
vport_stats->tx_unicast_bytes = tmp_vport_stats.tx_unicast_bytes;
vport_stats->tx_multicast_packets = tmp_vport_stats.tx_multicast_packets;
vport_stats->tx_multicast_bytes = tmp_vport_stats.tx_multicast_bytes;
vport_stats->tx_broadcast_packets = tmp_vport_stats.tx_broadcast_packets;
vport_stats->tx_broadcast_bytes = tmp_vport_stats.tx_broadcast_bytes;
vport_stats->tx_errors = tmp_vport_stats.tx_errors;
vport_stats->rx_bytes = be64_to_cpu(tmp_vport_stats.rx_bytes);
vport_stats->rx_frames = be64_to_cpu(tmp_vport_stats.rx_frames);
vport_stats->tx_bytes = be64_to_cpu(tmp_vport_stats.tx_bytes);
vport_stats->tx_frames = be64_to_cpu(tmp_vport_stats.tx_frames);
}

#if __FreeBSD_version >= 1100000
@ -623,13 +381,128 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)

spin_unlock(&priv->stats_lock);

out:
mlx4_free_cmd_mailbox(mdev->dev, mailbox_flow);
mlx4_free_cmd_mailbox(mdev->dev, mailbox);
/* 0xffs indicates invalid value */
memset(mailbox->buf, 0xff, sizeof(*flowstats) * MLX4_NUM_PRIORITIES);

mailbox_out:
if (do_if_stat)
priv->last_ifq_jiffies = jiffies;
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) {
memset(mailbox->buf, 0,
sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma,
in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL,
0, MLX4_CMD_DUMP_ETH_STATS,
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
if (err)
goto out;
}

flowstats = mailbox->buf;

spin_lock(&priv->stats_lock);

for (i = 0; i < MLX4_NUM_PRIORITIES; i++) {
priv->rx_priority_flowstats[i].rx_pause =
be64_to_cpu(flowstats[i].rx_pause);
priv->rx_priority_flowstats[i].rx_pause_duration =
be64_to_cpu(flowstats[i].rx_pause_duration);
priv->rx_priority_flowstats[i].rx_pause_transition =
be64_to_cpu(flowstats[i].rx_pause_transition);
priv->tx_priority_flowstats[i].tx_pause =
be64_to_cpu(flowstats[i].tx_pause);
priv->tx_priority_flowstats[i].tx_pause_duration =
be64_to_cpu(flowstats[i].tx_pause_duration);
priv->tx_priority_flowstats[i].tx_pause_transition =
be64_to_cpu(flowstats[i].tx_pause_transition);
}

/* if pfc is not in use, all priorities counters have the same value */
priv->rx_flowstats.rx_pause =
be64_to_cpu(flowstats[0].rx_pause);
priv->rx_flowstats.rx_pause_duration =
be64_to_cpu(flowstats[0].rx_pause_duration);
priv->rx_flowstats.rx_pause_transition =
be64_to_cpu(flowstats[0].rx_pause_transition);
priv->tx_flowstats.tx_pause =
be64_to_cpu(flowstats[0].tx_pause);
priv->tx_flowstats.tx_pause_duration =
be64_to_cpu(flowstats[0].tx_pause_duration);
priv->tx_flowstats.tx_pause_transition =
be64_to_cpu(flowstats[0].tx_pause_transition);

spin_unlock(&priv->stats_lock);

out:
mlx4_free_cmd_mailbox(mdev->dev, mailbox);
return err;
}

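The per-priority totals above are produced by en_stats_adder(), whose definition is outside this hunk. As a hedged sketch only (an assumption about its shape, not the committed code), such a helper needs just the address of the prio_0 counter, the address of the prio_1 counter to derive the stride, and the number of priorities to walk:

/* Sketch of a per-priority adder: sums 'num' big-endian 64-bit counters
 * laid out at a fixed stride in the firmware statistics block. */
static u64 en_stats_adder(__be64 *start, __be64 *next, int num)
{
	__be64 *curr = start;
	u64 ret = 0;
	int offset = next - start;	/* stride between consecutive priorities */
	int i;

	for (i = 0; i < num; i++) {
		ret += be64_to_cpu(*curr);
		curr += offset;
	}
	return ret;
}
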
int mlx4_en_get_vport_stats(struct mlx4_en_dev *mdev, u8 port)
{
struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
struct mlx4_counter tmp_vport_stats;
struct mlx4_en_vf_stats *vf_stats = &priv->vf_stats;
int err, i, counter_index;

spin_lock(&priv->stats_lock);

priv->pkstats.rx_packets = 0;
priv->pkstats.rx_bytes = 0;
priv->port_stats.rx_chksum_good = 0;
priv->port_stats.rx_chksum_none = 0;
for (i = 0; i < priv->rx_ring_num; i++) {
priv->pkstats.rx_packets += priv->rx_ring[i]->packets;
priv->pkstats.rx_bytes += priv->rx_ring[i]->bytes;
priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
}
priv->pkstats.tx_packets = 0;
priv->pkstats.tx_bytes = 0;
priv->port_stats.tx_chksum_offload = 0;
priv->port_stats.queue_stopped = 0;
priv->port_stats.wake_queue = 0;

for (i = 0; i < priv->tx_ring_num; i++) {
const struct mlx4_en_tx_ring *ring = priv->tx_ring[i];

priv->pkstats.tx_packets += ring->packets;
priv->pkstats.tx_bytes += ring->bytes;
priv->port_stats.tx_chksum_offload += ring->tx_csum;
priv->port_stats.queue_stopped += ring->queue_stopped;
priv->port_stats.wake_queue += ring->wake_queue;
priv->port_stats.oversized_packets += priv->tx_ring[i]->oversized_packets;
}

spin_unlock(&priv->stats_lock);

memset(&tmp_vport_stats, 0, sizeof(tmp_vport_stats));

counter_index = mlx4_get_default_counter_index(mdev->dev, port);
err = mlx4_get_counter_stats(mdev->dev, counter_index,
&tmp_vport_stats, 0);

if (!err) {
spin_lock(&priv->stats_lock);

vf_stats->rx_bytes = be64_to_cpu(tmp_vport_stats.rx_bytes);
vf_stats->rx_frames = be64_to_cpu(tmp_vport_stats.rx_frames);
vf_stats->tx_bytes = be64_to_cpu(tmp_vport_stats.tx_bytes);
vf_stats->tx_frames = be64_to_cpu(tmp_vport_stats.tx_frames);

priv->pkstats.rx_packets = vf_stats->rx_frames;
priv->pkstats.rx_bytes = vf_stats->rx_bytes;
priv->pkstats.tx_packets = vf_stats->tx_frames;
priv->pkstats.tx_bytes = vf_stats->tx_bytes;

/* PF&VFs are not expected to report errors in ifconfig.
* rx_errors will be reported in PF's ethtool statistics,
* see: mlx4_en_DUMP_ETH_STATS
*/
priv->pkstats.rx_errors = 0;
priv->pkstats.rx_dropped = 0;
priv->pkstats.tx_dropped = 0;
priv->pkstats.rx_multicast_packets = 0;

spin_unlock(&priv->stats_lock);
}

return err;
}

@ -70,7 +70,7 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
/* disable multicast loopback to qp with same counter */
context->pri_path.fl |= MLX4_FL_ETH_SRC_CHECK_MC_LB;
context->pri_path.vlan_control |=
MLX4_VLAN_CTRL_ETH_SRC_CHECK_IF_COUNTER;
MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
}

context->cqn_send = cpu_to_be32(cqn);

@ -252,6 +252,26 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
}
}

void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
{
int i;
int num_of_eqs;
int num_rx_rings;
struct mlx4_dev *dev = mdev->dev;

mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
num_of_eqs = max_t(int, MIN_RX_RINGS,
min_t(int,
mlx4_get_eqs_per_port(mdev->dev, i),
DEF_RX_RINGS));

num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
num_of_eqs;
mdev->profile.prof[i].rx_ring_num =
rounddown_pow_of_two(num_rx_rings);
}
}

void mlx4_en_calc_rx_buf(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
@ -259,7 +279,7 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
MLX4_NET_IP_ALIGN;

if (eff_mtu > MJUM16BYTES) {
en_err(priv, "MTU(%d) is too big\n", dev->if_mtu);
en_err(priv, "MTU(%u) is too big\n", (unsigned)dev->if_mtu);
eff_mtu = MJUM16BYTES;
} else if (eff_mtu > MJUM9BYTES) {
eff_mtu = MJUM16BYTES;
@ -399,7 +419,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
__be32 *ptr = (__be32 *)ring->buf;
__be32 stamp = cpu_to_be32(1 << STAMP_SHIFT);
*ptr = stamp;
/* Move pointer to start of rx section */
/* Move pointer to start of rx section */
ring->buf += TXBB_SIZE;
}

@ -607,7 +627,7 @@ mlx4_en_rss_hash(__be16 status, int udp_rss)
* was added in the beginning of each cqe (the real data is in the corresponding 32B).
* The following calc ensures that when factor==1, it means we are aligned to 64B
* and we get the real cqe data*/
#define CQE_FACTOR_INDEX(index, factor) ((index << factor) + factor)
#define CQE_FACTOR_INDEX(index, factor) (((index) << (factor)) + (factor))
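The parenthesized rewrite of CQE_FACTOR_INDEX is purely defensive against operator-precedence surprises; the index mapping itself is unchanged. Restating the comment above as a worked example of the arithmetic:

/* factor == 0 (32-byte CQEs):  CQE_FACTOR_INDEX(i, 0) == (i << 0) + 0 == i
 * factor == 1 (64-byte CQEs):  CQE_FACTOR_INDEX(i, 1) == (i << 1) + 1 == 2 * i + 1,
 * i.e. the second 32-byte half of each 64-byte entry, which holds the real CQE data. */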
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
@ -676,7 +696,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
M_HASHTYPE_SET(mb, mlx4_en_rss_hash(cqe->status, udp_rss));
mb->m_pkthdr.rcvif = dev;
if (be32_to_cpu(cqe->vlan_my_qpn) &
MLX4_CQE_VLAN_PRESENT_MASK) {
MLX4_CQE_CVLAN_PRESENT_MASK) {
mb->m_pkthdr.ether_vtag = be16_to_cpu(cqe->sl_vid);
mb->m_flags |= M_VLANTAG;
}
@ -802,7 +822,7 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
return -ENOMEM;
}

err = mlx4_qp_alloc(mdev->dev, qpn, qp);
err = mlx4_qp_alloc(mdev->dev, qpn, qp, GFP_KERNEL);
if (err) {
en_err(priv, "Failed to allocate qp #%x\n", qpn);
goto out;
@ -842,7 +862,7 @@ int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
en_err(priv, "Failed reserving drop qpn\n");
return err;
}
err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp);
err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp, GFP_KERNEL);
if (err) {
en_err(priv, "Failed allocating drop qp\n");
mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
@ -930,7 +950,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
}

/* Configure RSS indirection qp */
err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp, GFP_KERNEL);
if (err) {
en_err(priv, "Failed to allocate RSS indirection QP\n");
goto rss_err;

@ -52,17 +52,6 @@

#include "en.h"

enum {
MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
MAX_BF = 256,
MIN_PKT_LEN = 17,
};

static int inline_thold __read_mostly = MAX_INLINE;

module_param_named(inline_thold, inline_thold, uint, 0444);
MODULE_PARM_DESC(inline_thold, "threshold for using inline data");

int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring, u32 size,
u16 stride, int node, int queue_idx)
@ -101,7 +90,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->size = size;
ring->size_mask = size - 1;
ring->stride = stride;
ring->inline_thold = MAX(MIN_PKT_LEN, MIN(inline_thold, MAX_INLINE));
ring->inline_thold = MAX(MIN_PKT_LEN, MIN(priv->prof->inline_thold, MAX_INLINE));
mtx_init(&ring->tx_lock.m, "mlx4 tx", NULL, MTX_DEF);
mtx_init(&ring->comp_lock.m, "mlx4 comp", NULL, MTX_DEF);

@ -163,13 +152,13 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);

err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn,
MLX4_RESERVE_BF_QP);
MLX4_RESERVE_ETH_BF_QP);
if (err) {
en_err(priv, "failed reserving qp for TX ring\n");
goto err_map;
}

err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL);
if (err) {
en_err(priv, "Failed allocating qp %d\n", ring->qpn);
goto err_reserve;
@ -185,8 +174,6 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
} else
ring->bf_enabled = true;
ring->queue_index = queue_idx;
if (queue_idx < priv->num_tx_rings_p_up )
CPU_SET(queue_idx, &ring->affinity_mask);

*pring = ring;
return 0;
@ -447,8 +434,8 @@ static int mlx4_en_process_tx_cq(struct net_device *dev,
ring->blocked = 0;
if (atomic_fetchadd_int(&priv->blocked, -1) == 1)
atomic_clear_int(&dev->if_drv_flags ,IFF_DRV_OACTIVE);
ring->wake_queue++;
priv->port_stats.wake_queue++;
ring->wake_queue++;
}
return (0);
}
@ -752,7 +739,7 @@ static int mlx4_en_xmit(struct mlx4_en_priv *priv, int tx_ind, struct mbuf **mbp
/* check for VLAN tag */
if (mb->m_flags & M_VLANTAG) {
tx_desc->ctrl.vlan_tag = cpu_to_be16(mb->m_pkthdr.ether_vtag);
tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN;
tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN;
} else {
tx_desc->ctrl.vlan_tag = 0;
tx_desc->ctrl.ins_vlan = 0;
@ -930,7 +917,7 @@ static int mlx4_en_xmit(struct mlx4_en_priv *priv, int tx_ind, struct mbuf **mbp
ring->prod += tx_info->nr_txbb;

if (ring->bf_enabled && bf_size <= MAX_BF &&
(tx_desc->ctrl.ins_vlan != MLX4_WQE_CTRL_INS_VLAN)) {
(tx_desc->ctrl.ins_vlan != MLX4_WQE_CTRL_INS_CVLAN)) {

/* store doorbell number */
*(volatile __be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);

@ -771,9 +771,6 @@ int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr,
u8 *mac, int *is_mcast, u8 port);

int mlx4_ib_query_if_stat(struct mlx4_ib_dev *dev, u32 counter_index,
union mlx4_counter *counter, u8 clear);

static inline int mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
{
u8 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3;

@ -105,12 +105,8 @@ int mlx4_ib_modify_cq(struct ib_cq *cq,
if (cq_attr->cq_cap_flags & IB_CQ_TIMESTAMP)
return -ENOTSUPP;

if (cq_attr->cq_cap_flags & IB_CQ_IGNORE_OVERRUN) {
if (dev->dev->caps.cq_flags & MLX4_DEV_CAP_CQ_FLAG_IO)
err = mlx4_cq_ignore_overrun(dev->dev, &mcq->mcq);
else
err = -ENOSYS;
}
if (cq_attr->cq_cap_flags & IB_CQ_IGNORE_OVERRUN)
return -ENOSYS;
}

if (!err)
@ -127,7 +123,7 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
int err;

err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
PAGE_SIZE * 2, &buf->buf);
PAGE_SIZE * 2, &buf->buf, GFP_KERNEL);

if (err)
goto out;
@ -138,7 +134,7 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
if (err)
goto err_buf;

err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf, GFP_KERNEL);
if (err)
goto err_mtt;

@ -248,7 +244,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,

uar = &to_mucontext(context)->uar;
} else {
err = mlx4_db_alloc(dev->dev, &cq->db, 1);
err = mlx4_db_alloc(dev->dev, &cq->db, 1, GFP_KERNEL);
if (err)
goto err_cq;

@ -509,13 +505,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)

int mlx4_ib_ignore_overrun_cq(struct ib_cq *ibcq)
{
struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
struct mlx4_ib_cq *cq = to_mcq(ibcq);

if (dev->dev->caps.fw_ver < MLX4_FW_VER_IGNORE_OVERRUN_CQ)
return -ENOSYS;

return mlx4_cq_ignore_overrun(dev->dev, &cq->mcq);
return -ENOSYS;
}

int mlx4_ib_destroy_cq(struct ib_cq *cq)
@ -862,6 +852,8 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
}

if (timestamp_en) {
const struct mlx4_ts_cqe *ts_cqe =
(const struct mlx4_ts_cqe *)cqe;
/* currently, only CQ_CREATE_WITH_TIMESTAMPING_RAW is
* supported. CQ_CREATE_WITH_TIMESTAMPING_SYS isn't
* supported */
@ -869,9 +861,9 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
wc->ts.timestamp = 0;
} else {
wc->ts.timestamp =
((u64)(be32_to_cpu(cqe->timestamp_16_47)
+ !cqe->timestamp_0_15) << 16)
| be16_to_cpu(cqe->timestamp_0_15);
((u64)(be32_to_cpu(ts_cqe->timestamp_hi)
+ !ts_cqe->timestamp_lo) << 16)
| be16_to_cpu(ts_cqe->timestamp_lo);
wc->wc_flags |= IB_WC_WITH_TIMESTAMP;
}
} else {
@ -895,7 +887,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
wc->wc_flags |= IB_WC_WITH_SL;
}
if ((be32_to_cpu(cqe->vlan_my_qpn) &
MLX4_CQE_VLAN_PRESENT_MASK) && !timestamp_en) {
MLX4_CQE_CVLAN_PRESENT_MASK) && !timestamp_en) {
wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
MLX4_CQE_VID_MASK;
wc->wc_flags |= IB_WC_WITH_VLAN;

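The poll-one hunk above switches the timestamp extraction to the dedicated mlx4_ts_cqe view and assembles a 48-bit value from timestamp_hi/timestamp_lo; the `+ !ts_cqe->timestamp_lo` term bumps the high part when the low word is zero, presumably to match the hardware's rollover convention. A worked example of the arithmetic (illustration only):

/* hi = 0x00ABCDEF, lo = 0x1234:
 *   ((u64)(0x00ABCDEF + !0x1234) << 16) | 0x1234 == 0x00ABCDEF1234
 * hi = 0x00ABCDEF, lo = 0x0000 (low word is zero, so !lo == 1):
 *   ((u64)(0x00ABCDEF + 1) << 16) | 0x0000       == 0x00ABCDF00000 */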
@ -788,204 +788,51 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
|
||||
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
|
||||
}
|
||||
|
||||
static void edit_counter_ext(struct mlx4_if_stat_extended *cnt, void *counters,
|
||||
__be16 attr_id)
|
||||
static void edit_counter(struct mlx4_counter *cnt, void *counters,
|
||||
__be16 attr_id)
|
||||
{
|
||||
switch (attr_id) {
|
||||
case IB_PMA_PORT_COUNTERS:
|
||||
{
|
||||
struct ib_pma_portcounters *pma_cnt =
|
||||
(struct ib_pma_portcounters *)counters;
|
||||
pma_cnt->port_xmit_data =
|
||||
cpu_to_be32((be64_to_cpu(cnt->counters[0].
|
||||
IfTxUnicastOctets) +
|
||||
be64_to_cpu(cnt->counters[0].
|
||||
IfTxMulticastOctets) +
|
||||
be64_to_cpu(cnt->counters[0].
|
||||
IfTxBroadcastOctets) +
|
||||
be64_to_cpu(cnt->counters[0].
|
||||
IfTxDroppedOctets)) >> 2);
|
||||
pma_cnt->port_rcv_data =
|
||||
cpu_to_be32((be64_to_cpu(cnt->counters[0].
|
||||
IfRxUnicastOctets) +
|
||||
be64_to_cpu(cnt->counters[0].
|
||||
IfRxMulticastOctets) +
|
||||
be64_to_cpu(cnt->counters[0].
|
||||
IfRxBroadcastOctets) +
|
||||
be64_to_cpu(cnt->counters[0].
|
||||
IfRxNoBufferOctets) +
|
||||
be64_to_cpu(cnt->counters[0].
|
||||
IfRxErrorOctets)) >> 2);
|
||||
pma_cnt->port_xmit_packets =
|
||||
cpu_to_be32(be64_to_cpu(cnt->counters[0].
|
||||
IfTxUnicastFrames) +
|
||||
be64_to_cpu(cnt->counters[0].
|
||||
IfTxMulticastFrames) +
|
||||
be64_to_cpu(cnt->counters[0].
|
||||
IfTxBroadcastFrames) +
|
||||
be64_to_cpu(cnt->counters[0].
|
||||
IfTxDroppedFrames));
|
||||
pma_cnt->port_rcv_packets =
|
||||
cpu_to_be32(be64_to_cpu(cnt->counters[0].
|
||||
IfRxUnicastFrames) +
|
||||
be64_to_cpu(cnt->counters[0].
|
||||
IfRxMulticastFrames) +
|
||||
be64_to_cpu(cnt->counters[0].
|
||||
IfRxBroadcastFrames) +
|
||||
be64_to_cpu(cnt->counters[0].
|
||||
IfRxNoBufferFrames) +
|
||||
be64_to_cpu(cnt->counters[0].
|
||||
IfRxErrorFrames));
|
||||
pma_cnt->port_rcv_errors = cpu_to_be32(be64_to_cpu(cnt->
|
||||
counters[0].
|
||||
IfRxErrorFrames));
|
||||
break;
|
||||
}
|
||||
(struct ib_pma_portcounters *)counters;
|
||||
|
||||
case IB_PMA_PORT_COUNTERS_EXT:
|
||||
{
|
||||
struct ib_pma_portcounters_ext *pma_cnt_ext =
|
||||
(struct ib_pma_portcounters_ext *)counters;
|
||||
|
||||
pma_cnt_ext->port_xmit_data =
|
||||
cpu_to_be64((be64_to_cpu(cnt->counters[0].
|
||||
IfTxUnicastOctets) +
|
||||
be64_to_cpu(cnt->counters[0].
|
||||
IfTxMulticastOctets) +
|
||||
be64_to_cpu(cnt->counters[0].
|
||||
IfTxBroadcastOctets) +
|
||||
be64_to_cpu(cnt->counters[0].
|
||||
IfTxDroppedOctets)) >> 2);
|
||||
pma_cnt_ext->port_rcv_data =
|
||||
cpu_to_be64((be64_to_cpu(cnt->counters[0].
|
||||
IfRxUnicastOctets) +
|
||||
be64_to_cpu(cnt->counters[0].
|
||||
IfRxMulticastOctets) +
|
||||
be64_to_cpu(cnt->counters[0].
|
||||
IfRxBroadcastOctets) +
|
||||
be64_to_cpu(cnt->counters[0].
|
||||
IfRxNoBufferOctets) +
|
||||
be64_to_cpu(cnt->counters[0].
|
||||
IfRxErrorOctets)) >> 2);
|
||||
pma_cnt_ext->port_xmit_packets =
|
||||
cpu_to_be64(be64_to_cpu(cnt->counters[0].
|
||||
IfTxUnicastFrames) +
|
||||
be64_to_cpu(cnt->counters[0].
|
||||
IfTxMulticastFrames) +
|
||||
be64_to_cpu(cnt->counters[0].
|
||||
IfTxBroadcastFrames) +
|
||||
be64_to_cpu(cnt->counters[0].
|
||||
IfTxDroppedFrames));
|
||||
pma_cnt_ext->port_rcv_packets =
|
||||
cpu_to_be64(be64_to_cpu(cnt->counters[0].
|
||||
IfRxUnicastFrames) +
|
||||
be64_to_cpu(cnt->counters[0].
|
||||
IfRxMulticastFrames) +
|
||||
be64_to_cpu(cnt->counters[0].
|
||||
IfRxBroadcastFrames) +
|
||||
be64_to_cpu(cnt->counters[0].
|
||||
IfRxNoBufferFrames) +
|
||||
be64_to_cpu(cnt->counters[0].
|
||||
IfRxErrorFrames));
|
||||
pma_cnt_ext->port_unicast_xmit_packets = cnt->counters[0].
|
||||
IfTxUnicastFrames;
|
||||
pma_cnt_ext->port_unicast_rcv_packets = cnt->counters[0].
|
||||
IfRxUnicastFrames;
|
||||
pma_cnt_ext->port_multicast_xmit_packets =
|
||||
cpu_to_be64(be64_to_cpu(cnt->counters[0].
|
||||
IfTxMulticastFrames) +
|
||||
be64_to_cpu(cnt->counters[0].
|
||||
IfTxBroadcastFrames));
|
||||
pma_cnt_ext->port_multicast_rcv_packets =
|
||||
cpu_to_be64(be64_to_cpu(cnt->counters[0].
|
||||
IfTxMulticastFrames) +
|
||||
be64_to_cpu(cnt->counters[0].
|
||||
IfTxBroadcastFrames));
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
default:
|
||||
pr_warn("Unsupported attr_id 0x%x\n", attr_id);
|
||||
break;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static void edit_counter(struct mlx4_if_stat_basic *cnt, void *counters,
__be16 attr_id)
{
switch (attr_id) {
case IB_PMA_PORT_COUNTERS:
{
struct ib_pma_portcounters *pma_cnt =
(struct ib_pma_portcounters *) counters;
pma_cnt->port_xmit_data =
cpu_to_be32(be64_to_cpu(
cnt->counters[0].IfTxOctets) >> 2);
pma_cnt->port_rcv_data =
cpu_to_be32(be64_to_cpu(
cnt->counters[0].IfRxOctets) >> 2);
pma_cnt->port_xmit_packets =
cpu_to_be32(be64_to_cpu(cnt->counters[0].IfTxFrames));
pma_cnt->port_rcv_packets =
cpu_to_be32(be64_to_cpu(cnt->counters[0].IfRxFrames));
ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
(be64_to_cpu(cnt->tx_bytes) >> 2));
ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
(be64_to_cpu(cnt->rx_bytes) >> 2));
ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
be64_to_cpu(cnt->tx_frames));
ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
be64_to_cpu(cnt->rx_frames));
break;
}
case IB_PMA_PORT_COUNTERS_EXT:
{
struct ib_pma_portcounters_ext *pma_cnt_ext =
(struct ib_pma_portcounters_ext *) counters;
(struct ib_pma_portcounters_ext *)counters;

pma_cnt_ext->port_xmit_data =
cpu_to_be64((be64_to_cpu(cnt->counters[0].
IfTxOctets) >> 2));
pma_cnt_ext->port_rcv_data =
cpu_to_be64((be64_to_cpu(cnt->counters[0].
IfRxOctets) >> 2));
pma_cnt_ext->port_xmit_packets = cnt->counters[0].IfTxFrames;
pma_cnt_ext->port_rcv_packets = cnt->counters[0].IfRxFrames;
cpu_to_be64(be64_to_cpu(cnt->tx_bytes) >> 2);
pma_cnt_ext->port_rcv_data =
cpu_to_be64(be64_to_cpu(cnt->rx_bytes) >> 2);
pma_cnt_ext->port_xmit_packets = cnt->tx_frames;
pma_cnt_ext->port_rcv_packets = cnt->rx_frames;
break;
}
default:
pr_warn("Unsupported attr_id 0x%x\n", attr_id);
break;
}
}

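The rewritten edit_counter() above now feeds 64-bit values from struct mlx4_counter into the 32-bit PMA fields through ASSIGN_32BIT_COUNTER(), which is defined outside this hunk. A plausible sketch of such a macro, assuming it saturates at U32_MAX rather than silently truncating (an assumption, not the committed definition):

/* Hypothetical sketch: saturating assignment of a 64-bit value
 * into a __be32 PMA counter field. */
#define ASSIGN_32BIT_COUNTER(counter, value) do {	\
	if ((value) > (u64)U32_MAX)			\
		(counter) = cpu_to_be32(U32_MAX);	\
	else						\
		(counter) = cpu_to_be32(value);		\
} while (0)
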
int mlx4_ib_query_if_stat(struct mlx4_ib_dev *dev, u32 counter_index,
|
||||
union mlx4_counter *counter, u8 clear)
|
||||
{
|
||||
struct mlx4_cmd_mailbox *mailbox;
|
||||
int err;
|
||||
u32 inmod = counter_index | ((clear & 1) << 31);
|
||||
|
||||
mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
|
||||
if (IS_ERR(mailbox))
|
||||
return IB_MAD_RESULT_FAILURE;
|
||||
|
||||
err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
|
||||
MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
|
||||
MLX4_CMD_NATIVE);
|
||||
if (!err)
|
||||
memcpy(counter, mailbox->buf, MLX4_IF_STAT_SZ(1));
|
||||
|
||||
mlx4_free_cmd_mailbox(dev->dev, mailbox);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
|
||||
struct ib_wc *in_wc, struct ib_grh *in_grh,
|
||||
struct ib_mad *in_mad, struct ib_mad *out_mad)
|
||||
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
|
||||
const struct ib_mad *in_mad, struct ib_mad *out_mad)
|
||||
{
|
||||
struct mlx4_counter counter_stats;
|
||||
struct mlx4_ib_dev *dev = to_mdev(ibdev);
|
||||
int err;
|
||||
u32 counter_index = dev->counters[port_num - 1].counter_index & 0xffff;
|
||||
u8 mode;
|
||||
char counter_buf[MLX4_IF_STAT_SZ(1)] __aligned(8);
|
||||
union mlx4_counter *counter = (union mlx4_counter *)
|
||||
counter_buf;
|
||||
|
||||
if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
|
||||
return -EINVAL;
|
||||
@ -996,23 +843,21 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
|
||||
memset(out_mad->data, 0, sizeof out_mad->data);
|
||||
err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
|
||||
} else {
|
||||
if (mlx4_ib_query_if_stat(dev, counter_index, counter, 0))
|
||||
memset(&counter_stats, 0, sizeof(counter_stats));
|
||||
err = mlx4_get_counter_stats(dev->dev,
|
||||
counter_index,
|
||||
&counter_stats, 0);
|
||||
if (err)
|
||||
return IB_MAD_RESULT_FAILURE;
|
||||
|
||||
memset(out_mad->data, 0, sizeof(out_mad->data));
|
||||
mode = counter->control.cnt_mode & 0xFF;
|
||||
err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
|
||||
switch (mode & 0xf) {
|
||||
switch (counter_stats.counter_mode & 0xf) {
|
||||
case 0:
|
||||
edit_counter((void *)counter,
|
||||
edit_counter(&counter_stats,
|
||||
(void *)(out_mad->data + 40),
|
||||
in_mad->mad_hdr.attr_id);
|
||||
break;
|
||||
case 1:
|
||||
edit_counter_ext((void *)counter,
|
||||
(void *)(out_mad->data + 40),
|
||||
in_mad->mad_hdr.attr_id);
|
||||
break;
|
||||
default:
|
||||
err = IB_MAD_RESULT_FAILURE;
|
||||
}
|
||||
@ -1106,7 +951,7 @@ static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num)
|
||||
|
||||
if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
|
||||
mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
|
||||
MLX4_EQ_PORT_INFO_LID_CHANGE_MASK, 0, 0);
|
||||
MLX4_EQ_PORT_INFO_LID_CHANGE_MASK);
|
||||
}
|
||||
|
||||
static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
|
||||
@ -1118,7 +963,7 @@ static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
|
||||
if (!dev->sriov.is_going_down) {
|
||||
mlx4_ib_mcg_port_cleanup(&dev->sriov.demux[port_num - 1], 0);
|
||||
mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
|
||||
MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK, 0, 0);
|
||||
MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK);
|
||||
}
|
||||
}
|
||||
mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_CLIENT_REREGISTER);
|
||||
@ -1204,11 +1049,6 @@ void handle_port_mgmt_change_event(struct work_struct *work)
|
||||
u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid);
|
||||
u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf;
|
||||
update_sm_ah(dev, port, lid, sl);
|
||||
mlx4_ib_dispatch_event(dev, port, IB_EVENT_SM_CHANGE);
|
||||
if (mlx4_is_master(dev->dev))
|
||||
mlx4_gen_slaves_port_mgt_ev(dev->dev, port,
|
||||
changed_attr & MSTR_SM_CHANGE_MASK,
|
||||
lid, sl);
|
||||
}
|
||||
|
||||
/* Check if it is a lid change event */
|
||||
@ -1221,7 +1061,7 @@ void handle_port_mgmt_change_event(struct work_struct *work)
|
||||
/*if master, notify all slaves*/
|
||||
if (mlx4_is_master(dev->dev))
|
||||
mlx4_gen_slaves_port_mgt_ev(dev->dev, port,
|
||||
MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK, 0, 0);
|
||||
MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK);
|
||||
}
|
||||
|
||||
if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK)
|
||||
@ -1436,7 +1276,7 @@ static int get_slave_base_gid_ix(struct mlx4_ib_dev *dev, int slave, int port)
|
||||
return slave;
|
||||
|
||||
gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
|
||||
vfs = dev->dev->num_vfs;
|
||||
vfs = dev->dev->persist->num_vfs;
|
||||
|
||||
if (slave == 0)
|
||||
return 0;
|
||||
|
@ -77,28 +77,6 @@ int mlx4_ib_sm_guid_assign = 1;
|
||||
module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
|
||||
MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 1)");
|
||||
|
||||
enum {
|
||||
MAX_NUM_STR_BITMAP = 1 << 15,
|
||||
DEFAULT_TBL_VAL = -1
|
||||
};
|
||||
|
||||
static struct mlx4_dbdf2val_lst dev_assign_str = {
|
||||
.name = "dev_assign_str param",
|
||||
.num_vals = 1,
|
||||
.def_val = {DEFAULT_TBL_VAL},
|
||||
.range = {0, MAX_NUM_STR_BITMAP - 1}
|
||||
};
|
||||
module_param_string(dev_assign_str, dev_assign_str.str,
|
||||
sizeof(dev_assign_str.str), 0444);
|
||||
MODULE_PARM_DESC(dev_assign_str,
|
||||
"Map device function numbers to IB device numbers (e.g. '0000:04:00.0-0,002b:1c:0b.a-1,...').\n"
|
||||
"\t\tHexadecimal digits for the device function (e.g. 002b:1c:0b.a) and decimal for IB device numbers (e.g. 1).\n"
|
||||
"\t\tMax supported devices - 32");
|
||||
|
||||
|
||||
static unsigned long *dev_num_str_bitmap;
|
||||
static spinlock_t dev_num_str_lock;
|
||||
|
||||
static const char mlx4_ib_version[] =
|
||||
DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
|
||||
DRV_VERSION "\n";
|
||||
@ -117,8 +95,6 @@ struct dev_rec {
|
||||
int nr;
|
||||
};
|
||||
|
||||
static int dr_active;
|
||||
|
||||
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
|
||||
|
||||
static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev, struct net_device*,
|
||||
@ -169,6 +145,7 @@ int mlx4_ib_query_device(struct ib_device *ibdev,
|
||||
struct ib_smp *in_mad = NULL;
|
||||
struct ib_smp *out_mad = NULL;
|
||||
int err = -ENOMEM;
|
||||
struct mlx4_clock_params clock_params;
|
||||
|
||||
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
|
||||
out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
|
||||
@ -213,8 +190,6 @@ int mlx4_ib_query_device(struct ib_device *ibdev,
|
||||
props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
|
||||
if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
|
||||
props->device_cap_flags |= IB_DEVICE_XRC;
|
||||
if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_CROSS_CHANNEL)
|
||||
props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;
|
||||
|
||||
if (check_flow_steering_support(dev->dev))
|
||||
props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
|
||||
@ -235,7 +210,7 @@ int mlx4_ib_query_device(struct ib_device *ibdev,
|
||||
}
|
||||
props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
|
||||
0xffffff;
|
||||
props->vendor_part_id = dev->dev->pdev->device;
|
||||
props->vendor_part_id = dev->dev->persist->pdev->device;
|
||||
props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
|
||||
memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
|
||||
|
||||
@ -267,9 +242,12 @@ int mlx4_ib_query_device(struct ib_device *ibdev,
|
||||
props->max_mcast_grp;
|
||||
props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
|
||||
props->hca_core_clock = dev->dev->caps.hca_core_clock;
|
||||
|
||||
if (!mlx4_is_slave(dev->dev))
|
||||
err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
|
||||
if (dev->dev->caps.hca_core_clock > 0)
|
||||
props->comp_mask |= IB_DEVICE_ATTR_WITH_HCA_CORE_CLOCK;
|
||||
if (dev->dev->caps.cq_timestamp) {
|
||||
if (!err && !mlx4_is_slave(dev->dev)) {
|
||||
props->timestamp_mask = 0xFFFFFFFFFFFF;
|
||||
props->comp_mask |= IB_DEVICE_ATTR_WITH_TIMESTAMP_MASK;
|
||||
}
|
||||
@ -831,8 +809,9 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
|
||||
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
|
||||
|
||||
if (io_remap_pfn_range(vma, vma->vm_start,
|
||||
(pci_resource_start(dev->dev->pdev,
|
||||
params.bar) + params.offset)
|
||||
(pci_resource_start(dev->dev->persist->pdev,
|
||||
params.bar) +
|
||||
params.offset)
|
||||
>> PAGE_SHIFT,
|
||||
PAGE_SIZE, vma->vm_page_prot))
|
||||
return -EAGAIN;
|
||||
@ -1088,12 +1067,12 @@ static int parse_flow_attr(struct mlx4_dev *dev,
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
if (map_sw_to_hw_steering_id(dev, type) < 0 ||
|
||||
hw_rule_sz(dev, type) < 0)
|
||||
if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
|
||||
mlx4_hw_rule_sz(dev, type) < 0)
|
||||
return -EINVAL;
|
||||
mlx4_spec->id = cpu_to_be16(map_sw_to_hw_steering_id(dev, type));
|
||||
mlx4_spec->size = hw_rule_sz(dev, type) >> 2;
|
||||
return hw_rule_sz(dev, type);
|
||||
mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
|
||||
mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
|
||||
return mlx4_hw_rule_sz(dev, type);
|
||||
}
|
||||
|
||||
static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
|
||||
@ -1125,7 +1104,8 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
|
||||
pr_err("Invalid domain value.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
|
||||
|
||||
if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
|
||||
return -EINVAL;
|
||||
|
||||
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
|
||||
@ -1136,7 +1116,7 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
|
||||
|
||||
ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
|
||||
flow_attr->priority);
|
||||
ctrl->type = map_sw_to_hw_steering_mode(mdev->dev, flow_type);
|
||||
ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
|
||||
ctrl->port = flow_attr->port;
|
||||
ctrl->qpn = cpu_to_be32(qp->qp_num);
|
||||
|
||||
@ -1511,7 +1491,7 @@ static ssize_t show_hca(struct device *device, struct device_attribute *attr,
|
||||
{
|
||||
struct mlx4_ib_dev *dev =
|
||||
container_of(device, struct mlx4_ib_dev, ib_dev.dev);
|
||||
return sprintf(buf, "MT%d\n", dev->dev->pdev->device);
|
||||
return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
|
||||
}
|
||||
|
||||
static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
|
||||
@ -1541,33 +1521,16 @@ static ssize_t show_board(struct device *device, struct device_attribute *attr,
|
||||
dev->dev->board_id);
|
||||
}
|
||||
|
||||
static ssize_t show_vsd(struct device *device, struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct mlx4_ib_dev *dev =
|
||||
container_of(device, struct mlx4_ib_dev, ib_dev.dev);
|
||||
ssize_t len = MLX4_VSD_LEN;
|
||||
|
||||
if (dev->dev->vsd_vendor_id == PCI_VENDOR_ID_MELLANOX)
|
||||
len = sprintf(buf, "%.*s\n", MLX4_VSD_LEN, dev->dev->vsd);
|
||||
else
|
||||
memcpy(buf, dev->dev->vsd, MLX4_VSD_LEN);
|
||||
|
||||
return len;
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
|
||||
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
|
||||
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
|
||||
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
|
||||
static DEVICE_ATTR(vsd, S_IRUGO, show_vsd, NULL);
|
||||
|
||||
static struct device_attribute *mlx4_class_attributes[] = {
|
||||
&dev_attr_hw_rev,
|
||||
&dev_attr_fw_ver,
|
||||
&dev_attr_hca_type,
|
||||
&dev_attr_board_id,
|
||||
&dev_attr_vsd
|
||||
&dev_attr_board_id
|
||||
};
|
||||
|
||||
static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, struct net_device *dev, u8 port)
|
||||
@ -1951,7 +1914,8 @@ static void init_pkeys(struct mlx4_ib_dev *ibdev)
|
||||
int i;
|
||||
|
||||
if (mlx4_is_master(ibdev->dev)) {
|
||||
for (slave = 0; slave <= ibdev->dev->num_vfs; ++slave) {
|
||||
for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
|
||||
++slave) {
|
||||
for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
|
||||
for (i = 0;
|
||||
i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
|
||||
@ -1978,82 +1942,52 @@ static void init_pkeys(struct mlx4_ib_dev *ibdev)
|
||||
|
||||
static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
|
||||
{
|
||||
char name[32];
|
||||
int eq_per_port = 0;
|
||||
int added_eqs = 0;
|
||||
int total_eqs = 0;
|
||||
int i, j, eq;
|
||||
int i, j, eq = 0, total_eqs = 0;
|
||||
|
||||
/* Legacy mode or comp_pool is not large enough */
|
||||
if (dev->caps.comp_pool == 0 ||
|
||||
dev->caps.num_ports > dev->caps.comp_pool)
|
||||
return;
|
||||
|
||||
eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/
|
||||
dev->caps.num_ports);
|
||||
|
||||
/* Init eq table */
|
||||
added_eqs = 0;
|
||||
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
|
||||
added_eqs += eq_per_port;
|
||||
|
||||
total_eqs = dev->caps.num_comp_vectors + added_eqs;
|
||||
|
||||
ibdev->eq_table = kzalloc(total_eqs * sizeof(int), GFP_KERNEL);
|
||||
ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
|
||||
sizeof(ibdev->eq_table[0]), GFP_KERNEL);
|
||||
if (!ibdev->eq_table)
|
||||
return;
|
||||
|
||||
ibdev->eq_added = added_eqs;
|
||||
|
||||
eq = 0;
|
||||
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
|
||||
for (j = 0; j < eq_per_port; j++) {
|
||||
sprintf(name, "mlx4-ib-%d-%d@%d:%d:%d:%d", i, j,
|
||||
pci_get_domain(dev->pdev->dev.bsddev),
|
||||
pci_get_bus(dev->pdev->dev.bsddev),
|
||||
PCI_SLOT(dev->pdev->devfn),
|
||||
PCI_FUNC(dev->pdev->devfn));
|
||||
|
||||
/* Set IRQ for specific name (per ring) */
|
||||
if (mlx4_assign_eq(dev, name,
|
||||
&ibdev->eq_table[eq])) {
|
||||
/* Use legacy (same as mlx4_en driver) */
|
||||
pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq);
|
||||
ibdev->eq_table[eq] =
|
||||
(eq % dev->caps.num_comp_vectors);
|
||||
}
|
||||
eq++;
|
||||
for (i = 1; i <= dev->caps.num_ports; i++) {
|
||||
for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
|
||||
j++, total_eqs++) {
|
||||
if (i > 1 && mlx4_is_eq_shared(dev, total_eqs))
|
||||
continue;
|
||||
ibdev->eq_table[eq] = total_eqs;
|
||||
if (!mlx4_assign_eq(dev, i,
|
||||
&ibdev->eq_table[eq]))
|
||||
eq++;
|
||||
else
|
||||
ibdev->eq_table[eq] = -1;
|
||||
}
|
||||
}
|
||||
|
||||
/* Fill the reset of the vector with legacy EQ */
|
||||
for (i = 0, eq = added_eqs; i < dev->caps.num_comp_vectors; i++)
|
||||
ibdev->eq_table[eq++] = i;
|
||||
for (i = eq; i < dev->caps.num_comp_vectors;
|
||||
ibdev->eq_table[i++] = -1)
|
||||
;
|
||||
|
||||
/* Advertise the new number of EQs to clients */
|
||||
ibdev->ib_dev.num_comp_vectors = total_eqs;
|
||||
ibdev->ib_dev.num_comp_vectors = eq;
|
||||
}
|
||||
|
||||
static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
|
||||
{
|
||||
int i;
|
||||
int total_eqs = ibdev->ib_dev.num_comp_vectors;
|
||||
|
||||
/* no additional eqs were added */
|
||||
/* no eqs were allocated */
|
||||
if (!ibdev->eq_table)
|
||||
return;
|
||||
|
||||
/* Reset the advertised EQ number */
|
||||
ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
|
||||
ibdev->ib_dev.num_comp_vectors = 0;
|
||||
|
||||
/* Free only the added eqs */
|
||||
for (i = 0; i < ibdev->eq_added; i++) {
|
||||
/* Don't free legacy eqs if used */
|
||||
if (ibdev->eq_table[i] <= dev->caps.num_comp_vectors)
|
||||
continue;
|
||||
for (i = 0; i < total_eqs; i++)
|
||||
mlx4_release_eq(dev, ibdev->eq_table[i]);
|
||||
}
|
||||
|
||||
kfree(ibdev->eq_table);
|
||||
ibdev->eq_table = NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -2079,8 +2013,8 @@ static size_t show_diag_rprt(struct device *device, char *buf,
|
||||
struct mlx4_ib_dev *dev = container_of(device, struct mlx4_ib_dev,
|
||||
ib_dev.dev);
|
||||
|
||||
ret = mlx4_query_diag_counters(dev->dev, 1, op_modifier,
|
||||
&counter_offset, &diag_counter);
|
||||
ret = mlx4_query_diag_counters(dev->dev, op_modifier,
|
||||
&counter_offset, &diag_counter, 1, 0);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -2095,8 +2029,8 @@ static ssize_t clear_diag_counters(struct device *device,
|
||||
struct mlx4_ib_dev *dev = container_of(device, struct mlx4_ib_dev,
|
||||
ib_dev.dev);
|
||||
|
||||
ret = mlx4_query_diag_counters(dev->dev, 0, MLX4_DIAG_RPRT_CLEAR_DIAGS,
|
||||
NULL, NULL);
|
||||
ret = mlx4_query_diag_counters(dev->dev, MLX4_DIAG_RPRT_CLEAR_DIAGS,
|
||||
NULL, NULL, 0, 0);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -2172,63 +2106,6 @@ static struct attribute_group diag_counters_group = {
|
||||
.attrs = diag_rprt_attrs
|
||||
};
|
||||
|
||||
static void init_dev_assign(void)
|
||||
{
|
||||
int i = 1;
|
||||
|
||||
spin_lock_init(&dev_num_str_lock);
|
||||
if (mlx4_fill_dbdf2val_tbl(&dev_assign_str))
|
||||
return;
|
||||
dev_num_str_bitmap =
|
||||
kmalloc(BITS_TO_LONGS(MAX_NUM_STR_BITMAP) * sizeof(long),
|
||||
GFP_KERNEL);
|
||||
if (!dev_num_str_bitmap) {
|
||||
pr_warn("bitmap alloc failed -- cannot apply dev_assign_str parameter\n");
|
||||
return;
|
||||
}
|
||||
bitmap_zero(dev_num_str_bitmap, MAX_NUM_STR_BITMAP);
|
||||
while ((i < MLX4_DEVS_TBL_SIZE) && (dev_assign_str.tbl[i].dbdf !=
|
||||
MLX4_ENDOF_TBL)) {
|
||||
if (bitmap_allocate_region(dev_num_str_bitmap,
|
||||
dev_assign_str.tbl[i].val[0], 0))
|
||||
goto err;
|
||||
i++;
|
||||
}
|
||||
dr_active = 1;
|
||||
return;
|
||||
|
||||
err:
|
||||
kfree(dev_num_str_bitmap);
|
||||
dev_num_str_bitmap = NULL;
|
||||
pr_warn("mlx4_ib: The value of 'dev_assign_str' parameter "
|
||||
"is incorrect. The parameter value is discarded!");
|
||||
}
|
||||
|
||||
static int mlx4_ib_dev_idx(struct mlx4_dev *dev)
|
||||
{
|
||||
int i, val;
|
||||
|
||||
if (!dr_active)
|
||||
return -1;
|
||||
if (!dev)
|
||||
return -1;
|
||||
if (mlx4_get_val(dev_assign_str.tbl, dev->pdev, 0, &val))
|
||||
return -1;
|
||||
|
||||
if (val != DEFAULT_TBL_VAL) {
|
||||
dev->flags |= MLX4_FLAG_DEV_NUM_STR;
|
||||
return val;
|
||||
}
|
||||
|
||||
spin_lock(&dev_num_str_lock);
|
||||
i = bitmap_find_free_region(dev_num_str_bitmap, MAX_NUM_STR_BITMAP, 0);
|
||||
spin_unlock(&dev_num_str_lock);
|
||||
if (i >= 0)
|
||||
return i;
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
|
||||
struct ib_port_immutable *immutable)
|
||||
{
|
||||
@ -2242,7 +2119,7 @@ static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
|
||||
} else {
|
||||
if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
|
||||
immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
|
||||
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCEV2)
|
||||
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
|
||||
immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
|
||||
RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
|
||||
immutable->core_cap_flags |= RDMA_CORE_PORT_RAW_PACKET;
|
||||
@ -2268,7 +2145,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
|
||||
int i, j;
|
||||
int err;
|
||||
struct mlx4_ib_iboe *iboe;
|
||||
int dev_idx;
|
||||
|
||||
pr_info_once("%s", mlx4_ib_version);
|
||||
|
||||
@ -2281,7 +2157,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
|
||||
|
||||
ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
|
||||
if (!ibdev) {
|
||||
dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
|
||||
dev_err(&dev->persist->pdev->dev,
|
||||
"Device struct alloc failed\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -2303,11 +2180,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
|
||||
|
||||
ibdev->dev = dev;
|
||||
|
||||
dev_idx = mlx4_ib_dev_idx(dev);
|
||||
if (dev_idx >= 0)
|
||||
sprintf(ibdev->ib_dev.name, "mlx4_%d", dev_idx);
|
||||
else
|
||||
strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
|
||||
strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
|
||||
|
||||
ibdev->ib_dev.owner = THIS_MODULE;
|
||||
ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
|
||||
@ -2315,7 +2188,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
|
||||
ibdev->num_ports = num_ports;
|
||||
ibdev->ib_dev.phys_port_cnt = ibdev->num_ports;
|
||||
ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
|
||||
ibdev->ib_dev.dma_device = &dev->pdev->dev;
|
||||
ibdev->ib_dev.dma_device = &dev->persist->pdev->dev;
|
||||
|
||||
if (dev->caps.userspace_caps)
|
||||
ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
|
||||
@ -2457,18 +2330,20 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
|
||||
IB_LINK_LAYER_ETHERNET) {
|
||||
if (mlx4_is_slave(dev)) {
|
||||
ibdev->counters[i].status = mlx4_counter_alloc(ibdev->dev,
|
||||
i + 1,
|
||||
&ibdev->counters[i].counter_index);
|
||||
if (ibdev->counters[i].status)
|
||||
ibdev->counters[i].counter_index = mlx4_get_default_counter_index(dev,
|
||||
i + 1);
|
||||
} else {/* allocating the PF IB default counter indices reserved in mlx4_init_counters_table */
|
||||
ibdev->counters[i].counter_index = ((i + 1) << 1) - 1;
|
||||
ibdev->counters[i].status = 0;
|
||||
}
|
||||
|
||||
dev_info(&dev->pdev->dev,
|
||||
dev_info(&dev->persist->pdev->dev,
|
||||
"%s: allocated counter index %d for port %d\n",
|
||||
__func__, ibdev->counters[i].counter_index, i+1);
|
||||
} else {
|
||||
ibdev->counters[i].counter_index = MLX4_SINK_COUNTER_INDEX;
|
||||
ibdev->counters[i].counter_index = MLX4_SINK_COUNTER_INDEX(dev);
|
||||
ibdev->counters[i].status = -ENOSPC;
|
||||
}
|
||||
}
|
||||
@ -2489,7 +2364,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
|
||||
sizeof(long),
|
||||
GFP_KERNEL);
|
||||
if (!ibdev->ib_uc_qpns_bitmap) {
|
||||
dev_err(&dev->pdev->dev, "bit map alloc failed\n");
|
||||
dev_err(&dev->persist->pdev->dev,
|
||||
"bit map alloc failed\n");
|
||||
goto err_steer_qp_release;
|
||||
}
|
||||
|
||||
@ -2591,7 +2467,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
|
||||
if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i) ==
|
||||
IB_LINK_LAYER_ETHERNET) {
|
||||
mlx4_counter_free(ibdev->dev,
|
||||
i,
|
||||
ibdev->counters[i - 1].counter_index);
|
||||
}
|
||||
}
|
||||
@ -2678,7 +2553,6 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
|
||||
{
|
||||
struct mlx4_ib_dev *ibdev = ibdev_ptr;
|
||||
int p, j;
|
||||
int dev_idx, ret;
|
||||
|
||||
if (ibdev->iboe.nb_inet.notifier_call) {
|
||||
if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
|
||||
@ -2695,19 +2569,7 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
|
||||
mlx4_class_attributes[j]);
|
||||
}
|
||||
|
||||
|
||||
dev_idx = -1;
|
||||
if (dr_active && !(ibdev->dev->flags & MLX4_FLAG_DEV_NUM_STR)) {
|
||||
ret = sscanf(ibdev->ib_dev.name, "mlx4_%d", &dev_idx);
|
||||
if (ret != 1)
|
||||
dev_idx = -1;
|
||||
}
|
||||
ib_unregister_device(&ibdev->ib_dev);
|
||||
if (dev_idx >= 0) {
|
||||
spin_lock(&dev_num_str_lock);
|
||||
bitmap_release_region(dev_num_str_bitmap, dev_idx, 0);
|
||||
spin_unlock(&dev_num_str_lock);
|
||||
}
|
||||
|
||||
if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
|
||||
mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
|
||||
@ -2726,7 +2588,6 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
|
||||
if (mlx4_ib_port_link_layer(&ibdev->ib_dev, p + 1) ==
|
||||
IB_LINK_LAYER_ETHERNET) {
|
||||
mlx4_counter_free(ibdev->dev,
|
||||
p + 1,
|
||||
ibdev->counters[p].counter_index);
|
||||
}
|
||||
}
|
||||
@ -2883,8 +2744,6 @@ static int __init mlx4_ib_init(void)
|
||||
if (err)
|
||||
goto clean_proc;
|
||||
|
||||
init_dev_assign();
|
||||
|
||||
err = mlx4_register_interface(&mlx4_ib_interface);
|
||||
if (err)
|
||||
goto clean_mcg;
|
||||
@ -2904,8 +2763,6 @@ static void __exit mlx4_ib_cleanup(void)
|
||||
mlx4_unregister_interface(&mlx4_ib_interface);
|
||||
mlx4_ib_mcg_destroy();
|
||||
destroy_workqueue(wq);
|
||||
|
||||
kfree(dev_num_str_bitmap);
|
||||
}
|
||||
|
||||
module_init_order(mlx4_ib_init, SI_ORDER_MIDDLE);
|
||||
|
@ -315,7 +315,7 @@ int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem,
u64 start_va,
int *num_of_mtts)
{
u64 block_shift = MLX4_MAX_MTT_SHIFT;
u64 block_shift = 31;
u64 current_block_len = 0;
u64 current_block_start = 0;
u64 misalignment_bits;
@ -763,7 +763,7 @@ struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device
if (!mfrpl->ibfrpl.page_list)
goto err_free;

mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->pdev->dev,
mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->persist->pdev->dev,
size, &mfrpl->map,
GFP_KERNEL);
if (!mfrpl->mapped_page_list)
@ -785,7 +785,7 @@ void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
int size = page_list->max_page_list_len * sizeof (u64);

dma_free_coherent(&dev->dev->pdev->dev, size, mfrpl->mapped_page_list,
dma_free_coherent(&dev->dev->persist->pdev->dev, size, mfrpl->mapped_page_list,
mfrpl->map);
kfree(mfrpl->ibfrpl.page_list);
kfree(mfrpl);

@ -641,7 +641,7 @@ static int init_qpg_parent(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *pqp,
err = mlx4_ib_steer_qp_alloc(dev, tss_align_num, &tss_base);
else
err = mlx4_qp_reserve_range(dev->dev, tss_align_num,
tss_align_num, &tss_base, MLX4_RESERVE_BF_QP);
tss_align_num, &tss_base, MLX4_RESERVE_ETH_BF_QP);
if (err)
goto err1;

@ -801,7 +801,7 @@ static int alloc_qpn_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
* VLAN insertion. */
if (attr->qp_type == IB_QPT_RAW_PACKET) {
err = mlx4_qp_reserve_range(dev->dev, 1, 1, qpn,
MLX4_RESERVE_BF_QP);
MLX4_RESERVE_ETH_BF_QP);
} else {
if(qp->flags & MLX4_IB_QP_NETIF)
err = mlx4_ib_steer_qp_alloc(dev, 1, qpn);
@ -1016,7 +1016,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
goto err;

if (mlx4_ib_qp_has_rq(init_attr)) {
err = mlx4_db_alloc(dev->dev, &qp->db, 0);
err = mlx4_db_alloc(dev->dev, &qp->db, 0, GFP_KERNEL);
if (err)
goto err;

@ -1033,7 +1033,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
} else
qp->bf.uar = &dev->priv_uar;

if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf)) {
if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf, GFP_KERNEL)) {
err = -ENOMEM;
goto err_db;
}
@ -1043,7 +1043,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
if (err)
goto err_buf;

err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf);
err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf, GFP_KERNEL);
if (err)
goto err_mtt;

@ -1070,7 +1070,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
goto err_proxy;
}

err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp, GFP_KERNEL);
if (err)
goto err_qpn;
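
Note: the create_qp_common() hunks above all add an explicit GFP argument to the mlx4 allocation helpers. Put together, the updated call sequence reads roughly as the sketch below; dev, qp, qpn and the error labels are assumed from the surrounding function, this is not code taken from the commit:

	/* doorbell record for QPs that own a receive queue */
	err = mlx4_db_alloc(dev->dev, &qp->db, 0, GFP_KERNEL);
	if (err)
		goto err;
	/* work-queue buffer backing the QP */
	if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf,
	    GFP_KERNEL)) {
		err = -ENOMEM;
		goto err_db;
	}
	/* write the MTT entries describing the buffer, then create the HW QP */
	err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf, GFP_KERNEL);
	if (err)
		goto err_mtt;
	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp, GFP_KERNEL);
	if (err)
		goto err_qpn;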

@ -1408,12 +1408,10 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
return ERR_PTR(-EINVAL);
}

if ((mlx4_qp_flags &
if (mlx4_qp_flags &
(MLX4_IB_QP_CAP_CROSS_CHANNEL |
MLX4_IB_QP_CAP_MANAGED_SEND |
MLX4_IB_QP_CAP_MANAGED_RECV)) &&
!(to_mdev(device)->dev->caps.flags &
MLX4_DEV_CAP_FLAG_CROSS_CHANNEL)) {
MLX4_IB_QP_CAP_MANAGED_RECV)) {
pr_debug("%s Does not support cross-channel operations\n",
to_mdev(device)->ib_dev.name);
return ERR_PTR(-EINVAL);
@ -1956,27 +1954,6 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
optpar |= MLX4_QP_OPTPAR_RWE | MLX4_QP_OPTPAR_RRE | MLX4_QP_OPTPAR_RAE;
}

if (attr_mask & IB_M_EXT_CLASS_1)
context->params2 |= cpu_to_be32(MLX4_QP_BIT_COLL_MASTER);

/* for now we enable also sqe on send */
if (attr_mask & IB_M_EXT_CLASS_2) {
context->params2 |= cpu_to_be32(MLX4_QP_BIT_COLL_SYNC_SQ);
context->params2 |= cpu_to_be32(MLX4_QP_BIT_COLL_MASTER);
}

if (attr_mask & IB_M_EXT_CLASS_3)
context->params2 |= cpu_to_be32(MLX4_QP_BIT_COLL_SYNC_RQ);

if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
context->params2 |= (qp->flags & MLX4_IB_QP_CAP_CROSS_CHANNEL ?
cpu_to_be32(MLX4_QP_BIT_COLL_MASTER) : 0);
context->params2 |= (qp->flags & MLX4_IB_QP_CAP_MANAGED_SEND ?
cpu_to_be32(MLX4_QP_BIT_COLL_MASTER | MLX4_QP_BIT_COLL_SYNC_SQ) : 0);
context->params2 |= (qp->flags & MLX4_IB_QP_CAP_MANAGED_RECV ?
cpu_to_be32(MLX4_QP_BIT_COLL_MASTER | MLX4_QP_BIT_COLL_SYNC_RQ) : 0);
}

if (ibqp->srq)
context->params2 |= cpu_to_be32(MLX4_QP_BIT_RIC);

@ -2067,7 +2044,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
sqd_event = 0;

if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
context->rlkey |= (1 << 4);
context->rlkey_roce_mode |= (1 << 4);

if ((attr_mask & IB_QP_GROUP_RSS) &&
(qp->qpg_data->rss_child_count > 1)) {
@ -2153,29 +2130,6 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
if (is_sqp(dev, qp))
store_sqp_attrs(to_msqp(qp), attr, attr_mask);

/* Set 'ignore_cq_overrun' bits for collectives offload */
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
if (attr_mask & (IB_M_EXT_CLASS_2 | IB_M_EXT_CLASS_3)) {
err = mlx4_ib_ignore_overrun_cq(ibqp->send_cq);
if (err) {
pr_err("Failed to set ignore CQ "
"overrun for QP 0x%x's send CQ\n",
ibqp->qp_num);
goto out;
}

if (ibqp->recv_cq != ibqp->send_cq) {
err = mlx4_ib_ignore_overrun_cq(ibqp->recv_cq);
if (err) {
pr_err("Failed to set ignore "
"CQ overrun for QP 0x%x's recv "
"CQ\n", ibqp->qp_num);
goto out;
}
}
}
}

/*
* If we moved QP0 to RTR, bring the IB link up; if we moved
* QP0 to RESET or ERROR, bring the link back down.
@ -2333,8 +2287,7 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
ll = rdma_port_get_link_layer(&dev->ib_dev, port);
}

if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
attr_mask & ~IB_M_QP_MOD_VEND_MASK, ll)) {
if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask, ll)) {
pr_debug("qpn 0x%x: invalid attribute mask specified "
"for transition %d to %d. qp_type %d,"
" attr_mask 0x%x\n",
@ -2343,12 +2296,6 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
goto out;
}

if ((attr_mask & IB_M_QP_MOD_VEND_MASK) && !dev->dev->caps.sync_qp) {
pr_err("extended verbs are not supported by %s\n",
dev->ib_dev.name);
goto out;
}

if ((attr_mask & IB_QP_PORT) &&
(attr->port_num == 0 || attr->port_num > dev->num_ports)) {
pr_debug("qpn 0x%x: invalid port number (%d) specified "

@ -134,13 +134,14 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
if (err)
goto err_mtt;
} else {
err = mlx4_db_alloc(dev->dev, &srq->db, 0);
err = mlx4_db_alloc(dev->dev, &srq->db, 0, GFP_KERNEL);
if (err)
goto err_srq;

*srq->db.db = 0;

if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf,
GFP_KERNEL)) {
err = -ENOMEM;
goto err_db;
}
@ -165,7 +166,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
if (err)
goto err_buf;

err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf);
err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf, GFP_KERNEL);
if (err)
goto err_mtt;

@ -375,7 +375,7 @@ static void get_name(struct mlx4_ib_dev *dev, char *name, int i, int max)
char base_name[9];

/* pci_name format is: bus:dev:func -> xxxx:yy:zz.n */
strlcpy(name, pci_name(dev->dev->pdev), max);
strlcpy(name, pci_name(dev->dev->persist->pdev), max);
strncpy(base_name, name, 8); /*till xxxx:yy:*/
base_name[8] = '\0';
/* with no ARI only 3 last bits are used so when the fn is higher than 8
@ -688,7 +688,7 @@ static int register_pkey_tree(struct mlx4_ib_dev *device)
if (!mlx4_is_master(device->dev))
return 0;

for (i = 0; i <= device->dev->num_vfs; ++i)
for (i = 0; i <= device->dev->persist->num_vfs; ++i)
register_one_pkey_tree(device, i);

return 0;
@ -703,7 +703,7 @@ static void unregister_pkey_tree(struct mlx4_ib_dev *device)
if (!mlx4_is_master(device->dev))
return;

for (slave = device->dev->num_vfs; slave >= 0; --slave) {
for (slave = device->dev->persist->num_vfs; slave >= 0; --slave) {
list_for_each_entry_safe(p, t,
&device->pkeys.pkey_port_list[slave],
entry) {

@ -38,22 +38,12 @@
#include <dev/mlx4/device.h>

#define MLX4_INVALID_LKEY 0x100

#define DS_SIZE_ALIGNMENT 16

#define SET_BYTE_COUNT(byte_count) cpu_to_be32(byte_count)
#define SET_LSO_MSS(mss_hdr_size) cpu_to_be32(mss_hdr_size)
#define DS_BYTE_COUNT_MASK cpu_to_be32(0x7fffffff)

enum ib_m_qp_attr_mask {
IB_M_EXT_CLASS_1 = 1 << 28,
IB_M_EXT_CLASS_2 = 1 << 29,
IB_M_EXT_CLASS_3 = 1 << 30,

IB_M_QP_MOD_VEND_MASK = (IB_M_EXT_CLASS_1 | IB_M_EXT_CLASS_2 |
IB_M_EXT_CLASS_3)
};

enum mlx4_qp_optpar {
MLX4_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0,
MLX4_QP_OPTPAR_RRE = 1 << 1,
@ -70,7 +60,8 @@ enum mlx4_qp_optpar {
MLX4_QP_OPTPAR_RNR_RETRY = 1 << 13,
MLX4_QP_OPTPAR_ACK_TIMEOUT = 1 << 14,
MLX4_QP_OPTPAR_SCHED_QUEUE = 1 << 16,
MLX4_QP_OPTPAR_COUNTER_INDEX = 1 << 20
MLX4_QP_OPTPAR_COUNTER_INDEX = 1 << 20,
MLX4_QP_OPTPAR_VLAN_STRIPPING = 1 << 21,
};

enum mlx4_qp_state {
@ -109,10 +100,8 @@ enum {
MLX4_QP_BIT_RRE = 1 << 15,
MLX4_QP_BIT_RWE = 1 << 14,
MLX4_QP_BIT_RAE = 1 << 13,
MLX4_QP_BIT_FPP = 1 << 3,
MLX4_QP_BIT_RIC = 1 << 4,
MLX4_QP_BIT_COLL_SYNC_RQ = 1 << 2,
MLX4_QP_BIT_COLL_SYNC_SQ = 1 << 1,
MLX4_QP_BIT_COLL_MASTER = 1 << 0
};

enum {
@ -126,25 +115,34 @@ enum {
MLX4_RSS_TCP_IPV4 = 1 << 4,
MLX4_RSS_IPV4 = 1 << 5,

MLX4_RSS_BY_OUTER_HEADERS = 0 << 6,
MLX4_RSS_BY_INNER_HEADERS = 2 << 6,
MLX4_RSS_BY_INNER_HEADERS_IPONLY = 3 << 6,

/* offset of mlx4_rss_context within mlx4_qp_context.pri_path */
MLX4_RSS_OFFSET_IN_QPC_PRI_PATH = 0x24,
/* offset of being RSS indirection QP within mlx4_qp_context.flags */
MLX4_RSS_QPC_FLAG_OFFSET = 13,
};

#define MLX4_EN_RSS_KEY_SIZE 40

struct mlx4_rss_context {
__be32 base_qpn;
__be32 default_qpn;
u16 reserved;
u8 hash_fn;
u8 flags;
__be32 rss_key[10];
__be32 rss_key[MLX4_EN_RSS_KEY_SIZE / sizeof(__be32)];
__be32 base_qpn_udp;
};
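
Note: with the definitions above, the new array dimension evaluates to the same ten 32-bit words as the literal it replaces: MLX4_EN_RSS_KEY_SIZE / sizeof(__be32) = 40 / 4 = 10.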

struct mlx4_qp_path {
u8 fl;
u8 vlan_control;
union {
u8 vlan_control;
u8 control;
};
u8 disable_pkey_check;
u8 pkey_index;
u8 counter_index;
@ -161,31 +159,38 @@ struct mlx4_qp_path {
u8 feup;
u8 fvl_rx;
u8 reserved4[2];
u8 dmac[6];
u8 dmac[ETH_ALEN];
};

enum { /* fl */
MLX4_FL_CV = 1 << 6,
MLX4_FL_SV = 1 << 5,
MLX4_FL_ETH_HIDE_CQE_VLAN = 1 << 2,
MLX4_FL_ETH_SRC_CHECK_MC_LB = 1 << 1,
MLX4_FL_ETH_SRC_CHECK_UC_LB = 1 << 0,
};

enum { /* control */
MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER = 1 << 7,
};

enum { /* vlan_control */
MLX4_VLAN_CTRL_ETH_SRC_CHECK_IF_COUNTER = 1 << 7,
MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED = 1 << 6,
MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED = 1 << 5, /* 802.1p priority tag */
MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED = 1 << 4,
MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED = 1 << 2,
MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED = 1 << 1,/* 802.1p priorty tag*/
MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED = 1 << 1, /* 802.1p priority tag */
MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED = 1 << 0
};

enum { /* feup */
MLX4_FEUP_FORCE_ETH_UP = 1 << 6, /* force Eth UP */
MLX4_FSM_FORCE_ETH_SRC_MAC = 1 << 5, /* force Source MAC */
MLX4_FVL_FORCE_ETH_VLAN = 1 << 3 /* force Eth vlan */
MLX4_FEUP_FORCE_ETH_UP = 1 << 6, /* force Eth UP */
MLX4_FSM_FORCE_ETH_SRC_MAC = 1 << 5, /* force Source MAC */
MLX4_FVL_FORCE_ETH_VLAN = 1 << 3 /* force Eth vlan */
};

enum { /* fvl_rx */
MLX4_FVL_RX_FORCE_ETH_VLAN = 1 << 0 /* enforce Eth rx vlan */
MLX4_FVL_RX_FORCE_ETH_VLAN = 1 << 0 /* enforce Eth rx vlan */
};

struct mlx4_qp_context {
@ -194,7 +199,7 @@ struct mlx4_qp_context {
u8 mtu_msgmax;
u8 rq_size_stride;
u8 sq_size_stride;
u8 rlkey;
u8 rlkey_roce_mode;
__be32 usr_page;
__be32 local_qpn;
__be32 remote_qpn;
@ -204,7 +209,8 @@ struct mlx4_qp_context {
u32 reserved1;
__be32 next_send_psn;
__be32 cqn_send;
u32 reserved2[2];
__be16 roce_entropy;
__be16 reserved2[3];
__be32 last_acked_psn;
__be32 ssn;
__be32 params2;
@ -217,14 +223,17 @@ struct mlx4_qp_context {
__be32 msn;
__be16 rq_wqe_counter;
__be16 sq_wqe_counter;
u32 reserved3[2];
u32 reserved3;
__be16 rate_limit_params;
u8 reserved4;
u8 qos_vport;
__be32 param3;
__be32 nummmcpeers_basemkey;
u8 log_page_size;
u8 reserved4[2];
u8 reserved5[2];
u8 mtt_base_addr_h;
__be32 mtt_base_addr_l;
u32 reserved5[10];
u32 reserved6[10];
};

struct mlx4_update_qp_context {
@ -239,6 +248,8 @@ struct mlx4_update_qp_context {
enum {
MLX4_UPD_QP_MASK_PM_STATE = 32,
MLX4_UPD_QP_MASK_VSD = 33,
MLX4_UPD_QP_MASK_QOS_VPP = 34,
MLX4_UPD_QP_MASK_RATE_LIMIT = 35,
};

enum {
@ -261,13 +272,13 @@ enum {
MLX4_UPD_QP_PATH_MASK_FVL_RX = 16 + 32,
MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_UC_LB = 18 + 32,
MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB = 19 + 32,
MLX4_UPD_QP_PATH_MASK_SV = 22 + 32,
};

enum { /* param3 */
MLX4_STRIP_VLAN = 1 << 30
MLX4_STRIP_VLAN = 1 << 30
};

/* Which firmware version adds support for NEC (NoErrorCompletion) bit */
#define MLX4_FW_VER_WQE_CTRL_NEC mlx4_fw_ver(2, 2, 232)

@ -275,21 +286,29 @@ enum {
MLX4_WQE_CTRL_OWN = 1 << 31,
MLX4_WQE_CTRL_NEC = 1 << 29,
MLX4_WQE_CTRL_RR = 1 << 6,
MLX4_WQE_CTRL_IIP = 1 << 28,
MLX4_WQE_CTRL_ILP = 1 << 27,
MLX4_WQE_CTRL_FENCE = 1 << 6,
MLX4_WQE_CTRL_CQ_UPDATE = 3 << 2,
MLX4_WQE_CTRL_SOLICITED = 1 << 1,
MLX4_WQE_CTRL_IP_CSUM = 1 << 4,
MLX4_WQE_CTRL_TCP_UDP_CSUM = 1 << 5,
MLX4_WQE_CTRL_INS_VLAN = 1 << 6,
MLX4_WQE_CTRL_INS_CVLAN = 1 << 6,
MLX4_WQE_CTRL_INS_SVLAN = 1 << 7,
MLX4_WQE_CTRL_STRONG_ORDER = 1 << 7,
MLX4_WQE_CTRL_FORCE_LOOPBACK = 1 << 0,
};

struct mlx4_wqe_ctrl_seg {
__be32 owner_opcode;
__be16 vlan_tag;
u8 ins_vlan;
u8 fence_size;
union {
struct {
__be16 vlan_tag;
u8 ins_vlan;
u8 fence_size;
};
__be32 bf_qpn;
};
/*
* High 24 bits are SRC remote buffer; low 8 bits are flags:
* [7] SO (strong ordering)
@ -342,7 +361,7 @@ struct mlx4_wqe_datagram_seg {
__be32 dqpn;
__be32 qkey;
__be16 vlan;
u8 mac[6];
u8 mac[ETH_ALEN];
};

struct mlx4_wqe_lso_seg {
@ -351,8 +370,8 @@ struct mlx4_wqe_lso_seg {
};

enum mlx4_wqe_bind_seg_flags2 {
MLX4_WQE_BIND_TYPE_2 = (1<<31),
MLX4_WQE_BIND_ZERO_BASED = (1<<30),
MLX4_WQE_BIND_ZERO_BASED = (1 << 30),
MLX4_WQE_BIND_TYPE_2 = (1 << 31),
};

struct mlx4_wqe_bind_seg {
@ -433,6 +452,31 @@ struct mlx4_wqe_inline_seg {
__be32 byte_count;
};

enum mlx4_update_qp_attr {
MLX4_UPDATE_QP_SMAC = 1 << 0,
MLX4_UPDATE_QP_VSD = 1 << 1,
MLX4_UPDATE_QP_RATE_LIMIT = 1 << 2,
MLX4_UPDATE_QP_QOS_VPORT = 1 << 3,
MLX4_UPDATE_QP_ETH_SRC_CHECK_MC_LB = 1 << 4,
MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 5) - 1
};

enum mlx4_update_qp_params_flags {
MLX4_UPDATE_QP_PARAMS_FLAGS_ETH_CHECK_MC_LB = 1 << 0,
MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE = 1 << 1,
};

struct mlx4_update_qp_params {
u8 smac_index;
u8 qos_vport;
u32 flags;
u16 rate_unit;
u16 rate_val;
};

int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
enum mlx4_update_qp_attr attr,
struct mlx4_update_qp_params *params);
int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
@ -452,4 +496,14 @@ static inline struct mlx4_qp *__mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)

void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp);

static inline u16 folded_qp(u32 q)
{
u16 res;

res = ((q & 0xff) ^ ((q & 0xff0000) >> 16)) | (q & 0xff00);
return res;
}
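
Note: as a worked example of the fold above, folded_qp(0x123456) computes ((0x56 ^ 0x12) | 0x3400) = 0x3444.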

u16 mlx4_qp_roce_entropy(struct mlx4_dev *dev, u32 qpn);

#endif /* MLX4_QP_H */
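
Note: the mlx4_update_qp() prototype and mlx4_update_qp_params structure added above can be driven roughly as in the sketch below; dev and qpn are placeholders for illustration, not values taken from the commit:

	/* Sketch: enable VLAN-stripping disable (VSD) on one QP. */
	struct mlx4_update_qp_params params = {
		.flags = MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE,
	};
	int err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
	if (err)
		pr_err("mlx4_update_qp failed: %d\n", err);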

@ -33,100 +33,75 @@
#ifndef _MLX4_STATS_
#define _MLX4_STATS_

#ifdef MLX4_EN_PERF_STAT
#define NUM_PERF_STATS NUM_PERF_COUNTERS
#else
#define NUM_PERF_STATS 0
#endif

#define NUM_PRIORITIES 9
#define NUM_PRIORITY_STATS 2

struct mlx4_en_pkt_stats {
unsigned long rx_packets;
unsigned long rx_bytes;
unsigned long rx_multicast_packets;
unsigned long rx_broadcast_packets;
unsigned long rx_errors;
unsigned long rx_dropped;
unsigned long rx_length_errors;
unsigned long rx_over_errors;
unsigned long rx_crc_errors;
unsigned long rx_jabbers;
unsigned long rx_in_range_length_error;
unsigned long rx_out_range_length_error;
unsigned long rx_lt_64_bytes_packets;
unsigned long rx_127_bytes_packets;
unsigned long rx_255_bytes_packets;
unsigned long rx_511_bytes_packets;
unsigned long rx_1023_bytes_packets;
unsigned long rx_1518_bytes_packets;
unsigned long rx_1522_bytes_packets;
unsigned long rx_1548_bytes_packets;
unsigned long rx_gt_1548_bytes_packets;
unsigned long tx_packets;
unsigned long tx_bytes;
unsigned long tx_multicast_packets;
unsigned long tx_broadcast_packets;
unsigned long tx_errors;
unsigned long tx_dropped;
unsigned long tx_lt_64_bytes_packets;
unsigned long tx_127_bytes_packets;
unsigned long tx_255_bytes_packets;
unsigned long tx_511_bytes_packets;
unsigned long tx_1023_bytes_packets;
unsigned long tx_1518_bytes_packets;
unsigned long tx_1522_bytes_packets;
unsigned long tx_1548_bytes_packets;
unsigned long tx_gt_1548_bytes_packets;
unsigned long rx_prio[NUM_PRIORITIES][NUM_PRIORITY_STATS];
unsigned long tx_prio[NUM_PRIORITIES][NUM_PRIORITY_STATS];
u64 rx_packets;
u64 rx_bytes;
u64 rx_multicast_packets;
u64 rx_broadcast_packets;
u64 rx_errors;
u64 rx_dropped;
u64 rx_length_errors;
u64 rx_over_errors;
u64 rx_crc_errors;
u64 rx_jabbers;
u64 rx_in_range_length_error;
u64 rx_out_range_length_error;
u64 rx_lt_64_bytes_packets;
u64 rx_127_bytes_packets;
u64 rx_255_bytes_packets;
u64 rx_511_bytes_packets;
u64 rx_1023_bytes_packets;
u64 rx_1518_bytes_packets;
u64 rx_1522_bytes_packets;
u64 rx_1548_bytes_packets;
u64 rx_gt_1548_bytes_packets;
u64 tx_packets;
u64 tx_bytes;
u64 tx_multicast_packets;
u64 tx_broadcast_packets;
u64 tx_errors;
u64 tx_dropped;
u64 tx_lt_64_bytes_packets;
u64 tx_127_bytes_packets;
u64 tx_255_bytes_packets;
u64 tx_511_bytes_packets;
u64 tx_1023_bytes_packets;
u64 tx_1518_bytes_packets;
u64 tx_1522_bytes_packets;
u64 tx_1548_bytes_packets;
u64 tx_gt_1548_bytes_packets;
u64 rx_prio[NUM_PRIORITIES][NUM_PRIORITY_STATS];
u64 tx_prio[NUM_PRIORITIES][NUM_PRIORITY_STATS];
};

struct mlx4_en_vf_stats {
unsigned long rx_packets;
unsigned long rx_bytes;
unsigned long rx_multicast_packets;
unsigned long rx_broadcast_packets;
unsigned long rx_errors;
unsigned long rx_dropped;
unsigned long tx_packets;
unsigned long tx_bytes;
unsigned long tx_multicast_packets;
unsigned long tx_broadcast_packets;
unsigned long tx_errors;
u64 rx_frames;
u64 rx_bytes;
u64 tx_frames;
u64 tx_bytes;
};

struct mlx4_en_vport_stats {
unsigned long rx_unicast_packets;
unsigned long rx_unicast_bytes;
unsigned long rx_multicast_packets;
unsigned long rx_multicast_bytes;
unsigned long rx_broadcast_packets;
unsigned long rx_broadcast_bytes;
unsigned long rx_dropped;
unsigned long rx_errors;
unsigned long tx_unicast_packets;
unsigned long tx_unicast_bytes;
unsigned long tx_multicast_packets;
unsigned long tx_multicast_bytes;
unsigned long tx_broadcast_packets;
unsigned long tx_broadcast_bytes;
unsigned long tx_errors;
u64 rx_frames;
u64 rx_bytes;
u64 tx_frames;
u64 tx_bytes;
};

struct mlx4_en_port_stats {
unsigned long tso_packets;
unsigned long queue_stopped;
unsigned long wake_queue;
unsigned long tx_timeout;
unsigned long oversized_packets;
unsigned long rx_alloc_failed;
unsigned long rx_chksum_good;
unsigned long rx_chksum_none;
unsigned long tx_chksum_offload;
unsigned long defrag_attempts;
u64 tso_packets;
u64 queue_stopped;
u64 wake_queue;
u64 tx_timeout;
u64 oversized_packets;
u64 rx_alloc_failed;
u64 rx_chksum_good;
u64 rx_chksum_none;
u64 tx_chksum_offload;
u64 defrag_attempts;
};

struct mlx4_en_perf_stats {
@ -138,16 +113,19 @@ struct mlx4_en_perf_stats {
u32 napi_quota;
};

struct mlx4_en_flow_stats {
#define MLX4_NUM_PRIORITIES 8

struct mlx4_en_flow_stats_rx {
u64 rx_pause;
u64 rx_pause_duration;
u64 rx_pause_transition;
};

struct mlx4_en_flow_stats_tx {
u64 tx_pause;
u64 tx_pause_duration;
u64 tx_pause_transition;
};
#define MLX4_NUM_PRIORITIES 8

struct mlx4_en_stat_out_flow_control_mbox {
/* Total number of PAUSE frames received from the far-end port */
@ -170,8 +148,12 @@ struct mlx4_en_stat_out_flow_control_mbox {
__be64 reserved[2];
};

enum {
MLX4_DUMP_ETH_STATS_FLOW_CONTROL = 1 << 12
};

int mlx4_get_vport_ethtool_stats(struct mlx4_dev *dev, int port,
struct mlx4_en_vport_stats *vport_stats,
int reset);
struct mlx4_en_vport_stats *vport_stats,
int reset, int *read_counters);

#endif
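
Note: mlx4_get_vport_ethtool_stats() above gains a read_counters output parameter next to the existing reset flag. A usage sketch, assuming mdev->dev is the underlying struct mlx4_dev and port comes from the calling mlx4en(4) code:

	struct mlx4_en_vport_stats vstats;
	int read_counters = 0;
	int err;

	memset(&vstats, 0, sizeof(vstats));
	/* reset == 0: read the counters without clearing them */
	err = mlx4_get_vport_ethtool_stats(mdev->dev, port, &vstats,
	    0, &read_counters);
	if (err == 0 && read_counters != 0)
		printf("vport: rx %ju frames, tx %ju frames\n",
		    (uintmax_t)vstats.rx_frames, (uintmax_t)vstats.tx_frames);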

@ -10,6 +10,7 @@ SRCS= device_if.h bus_if.h vnode_if.h pci_if.h \
mlx4_cq.c \
mlx4_eq.c \
mlx4_fw.c \
mlx4_fw_qos.c \
mlx4_icm.c \
mlx4_intf.c \
mlx4_main.c \
@ -22,8 +23,7 @@ SRCS= device_if.h bus_if.h vnode_if.h pci_if.h \
mlx4_reset.c \
mlx4_sense.c \
mlx4_srq.c \
mlx4_resource_tracker.c \
mlx4_sys_tune.c
mlx4_resource_tracker.c

CFLAGS+= -I${SRCTOP}/sys/ofed/include
CFLAGS+= -I${SRCTOP}/sys/compat/linuxkpi/common/include
@ -31,5 +31,3 @@ CFLAGS+= -I${SRCTOP}/sys/compat/linuxkpi/common/include
.include <bsd.kmod.mk>

CFLAGS+= -Wno-cast-qual -Wno-pointer-arith

CWARNFLAGS.mlx4_mcg.c= -Wno-unused