Update oce driver to 11.0.50.0

Submitted by:	Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
This commit is contained in:
Josh Paetzel 2016-09-21 22:53:16 +00:00
parent d80cb37fb7
commit 764c812d84
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=306148
7 changed files with 2062 additions and 494 deletions

View File

@ -393,6 +393,11 @@ oce_create_nw_interface(POCE_SOFTC sc)
if (IS_SH(sc) || IS_XE201(sc))
capab_flags |= MBX_RX_IFACE_FLAGS_MULTICAST;
if (sc->enable_hwlro) {
capab_flags |= MBX_RX_IFACE_FLAGS_LRO;
capab_en_flags |= MBX_RX_IFACE_FLAGS_LRO;
}
/* enable capabilities controlled via driver startup parameters */
if (is_rss_enabled(sc))
capab_en_flags |= MBX_RX_IFACE_FLAGS_RSS;

View File

@ -111,6 +111,9 @@
#define PD_MPU_MBOX_DB 0x0160
#define PD_MQ_DB 0x0140
#define DB_OFFSET 0xc0
#define DB_LRO_RQ_ID_MASK 0x7FF
/* EQE completion types */
#define EQ_MINOR_CODE_COMPLETION 0x00
#define EQ_MINOR_CODE_OTHER 0x01
@ -180,6 +183,7 @@
#define ASYNC_EVENT_GRP5 0x5
#define ASYNC_EVENT_CODE_DEBUG 0x6
#define ASYNC_EVENT_PVID_STATE 0x3
#define ASYNC_EVENT_OS2BMC 0x5
#define ASYNC_EVENT_DEBUG_QNQ 0x1
#define ASYNC_EVENT_CODE_SLIPORT 0x11
#define VLAN_VID_MASK 0x0FFF
@ -722,6 +726,34 @@ struct oce_async_cqe_link_state {
} u0;
};
/* OS2BMC async event */
struct oce_async_evt_grp5_os2bmc {
union {
struct {
uint32_t lrn_enable:1;
uint32_t lrn_disable:1;
uint32_t mgmt_enable:1;
uint32_t mgmt_disable:1;
uint32_t rsvd0:12;
uint32_t vlan_tag:16;
uint32_t arp_filter:1;
uint32_t dhcp_client_filt:1;
uint32_t dhcp_server_filt:1;
uint32_t net_bios_filt:1;
uint32_t rsvd1:3;
uint32_t bcast_filt:1;
uint32_t ipv6_nbr_filt:1;
uint32_t ipv6_ra_filt:1;
uint32_t ipv6_ras_filt:1;
uint32_t rsvd2[4];
uint32_t mcast_filt:1;
uint32_t rsvd3:16;
uint32_t evt_tag;
uint32_t dword3;
} s;
uint32_t dword[4];
} u;
};
/* PVID async event */
struct oce_async_event_grp5_pvid_state {
@ -1396,7 +1428,7 @@ typedef union oce_cq_ctx_u {
uint32_t dw5rsvd3:1;
uint32_t eventable:1;
/* dw6 */
uint32_t eq_id:8;
uint32_t eq_id:16;
uint32_t dw6rsvd1:15;
uint32_t armed:1;
/* dw7 */
@ -2403,8 +2435,8 @@ struct oce_nic_hdr_wqe {
uint32_t tcpcs:1;
uint32_t udpcs:1;
uint32_t ipcs:1;
uint32_t rsvd3:1;
uint32_t rsvd2:1;
uint32_t mgmt:1;
uint32_t lso6:1;
uint32_t forward:1;
uint32_t crc:1;
uint32_t event:1;
@ -2426,8 +2458,8 @@ struct oce_nic_hdr_wqe {
uint32_t event:1;
uint32_t crc:1;
uint32_t forward:1;
uint32_t rsvd2:1;
uint32_t rsvd3:1;
uint32_t lso6:1;
uint32_t mgmt:1;
uint32_t ipcs:1;
uint32_t udpcs:1;
uint32_t tcpcs:1;
@ -3010,6 +3042,53 @@ struct oce_rxf_stats_v0 {
uint32_t rsvd1[6];
};
struct oce_port_rxf_stats_v2 {
uint32_t rsvd0[10];
uint32_t roce_bytes_received_lsd;
uint32_t roce_bytes_received_msd;
uint32_t rsvd1[5];
uint32_t roce_frames_received;
uint32_t rx_crc_errors;
uint32_t rx_alignment_symbol_errors;
uint32_t rx_pause_frames;
uint32_t rx_priority_pause_frames;
uint32_t rx_control_frames;
uint32_t rx_in_range_errors;
uint32_t rx_out_range_errors;
uint32_t rx_frame_too_long;
uint32_t rx_address_match_errors;
uint32_t rx_dropped_too_small;
uint32_t rx_dropped_too_short;
uint32_t rx_dropped_header_too_small;
uint32_t rx_dropped_tcp_length;
uint32_t rx_dropped_runt;
uint32_t rsvd2[10];
uint32_t rx_ip_checksum_errs;
uint32_t rx_tcp_checksum_errs;
uint32_t rx_udp_checksum_errs;
uint32_t rsvd3[7];
uint32_t rx_switched_unicast_packets;
uint32_t rx_switched_multicast_packets;
uint32_t rx_switched_broadcast_packets;
uint32_t rsvd4[3];
uint32_t tx_pauseframes;
uint32_t tx_priority_pauseframes;
uint32_t tx_controlframes;
uint32_t rsvd5[10];
uint32_t rxpp_fifo_overflow_drop;
uint32_t rx_input_fifo_overflow_drop;
uint32_t pmem_fifo_overflow_drop;
uint32_t jabber_events;
uint32_t rsvd6[3];
uint32_t rx_drops_payload_size;
uint32_t rx_drops_clipped_header;
uint32_t rx_drops_crc;
uint32_t roce_drops_payload_len;
uint32_t roce_drops_crc;
uint32_t rsvd7[19];
};
struct oce_port_rxf_stats_v1 {
uint32_t rsvd0[12];
uint32_t rx_crc_errors;
@ -3046,6 +3125,20 @@ struct oce_port_rxf_stats_v1 {
uint32_t rsvd5[3];
};
struct oce_rxf_stats_v2 {
struct oce_port_rxf_stats_v2 port[4];
uint32_t rsvd0[2];
uint32_t rx_drops_no_pbuf;
uint32_t rx_drops_no_txpb;
uint32_t rx_drops_no_erx_descr;
uint32_t rx_drops_no_tpre_descr;
uint32_t rsvd1[6];
uint32_t rx_drops_too_many_frags;
uint32_t rx_drops_invalid_ring;
uint32_t forwarded_packets;
uint32_t rx_drops_mtu;
uint32_t rsvd2[35];
};
struct oce_rxf_stats_v1 {
struct oce_port_rxf_stats_v1 port[4];
@ -3062,6 +3155,11 @@ struct oce_rxf_stats_v1 {
uint32_t rsvd2[14];
};
struct oce_erx_stats_v2 {
uint32_t rx_drops_no_fragments[136];
uint32_t rsvd[3];
};
struct oce_erx_stats_v1 {
uint32_t rx_drops_no_fragments[68];
uint32_t rsvd[4];
@ -3078,6 +3176,15 @@ struct oce_pmem_stats {
uint32_t rsvd[5];
};
struct oce_hw_stats_v2 {
struct oce_rxf_stats_v2 rxf;
uint32_t rsvd0[OCE_TXP_SW_SZ];
struct oce_erx_stats_v2 erx;
struct oce_pmem_stats pmem;
uint32_t rsvd1[18];
};
struct oce_hw_stats_v1 {
struct oce_rxf_stats_v1 rxf;
uint32_t rsvd0[OCE_TXP_SW_SZ];
@ -3093,32 +3200,22 @@ struct oce_hw_stats_v0 {
struct oce_pmem_stats pmem;
};
struct mbx_get_nic_stats_v0 {
struct mbx_hdr hdr;
union {
struct {
uint32_t rsvd0;
} req;
union {
struct oce_hw_stats_v0 stats;
} rsp;
} params;
};
struct mbx_get_nic_stats {
struct mbx_hdr hdr;
union {
struct {
uint32_t rsvd0;
} req;
struct {
struct oce_hw_stats_v1 stats;
} rsp;
} params;
};
#define MBX_GET_NIC_STATS(version) \
struct mbx_get_nic_stats_v##version { \
struct mbx_hdr hdr; \
union { \
struct { \
uint32_t rsvd0; \
} req; \
union { \
struct oce_hw_stats_v##version stats; \
} rsp; \
} params; \
}
MBX_GET_NIC_STATS(0);
MBX_GET_NIC_STATS(1);
MBX_GET_NIC_STATS(2);
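For reference, the instantiation MBX_GET_NIC_STATS(2) above expands to the following (shown for illustration only; the v0/v1 expansions likewise match the hand-written structs they replace):
/* Illustration: expansion of MBX_GET_NIC_STATS(2) */
struct mbx_get_nic_stats_v2 {
        struct mbx_hdr hdr;
        union {
                struct {
                        uint32_t rsvd0;
                } req;
                union {
                        struct oce_hw_stats_v2 stats;
                } rsp;
        } params;
};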
/* [18(0x12)] NIC_GET_PPORT_STATS */
struct pport_stats {
@ -3728,3 +3825,373 @@ enum OCE_QUEUE_RX_STATS {
QUEUE_RX_BUFFER_ERRORS = 8,
QUEUE_RX_N_WORDS = 10
};
/* HW LRO structures */
struct mbx_nic_query_lro_capabilities {
struct mbx_hdr hdr;
union {
struct {
uint32_t rsvd[6];
} req;
struct {
#ifdef _BIG_ENDIAN
uint32_t lro_flags;
uint16_t lro_rq_cnt;
uint16_t plro_max_offload;
uint32_t rsvd[4];
#else
uint32_t lro_flags;
uint16_t plro_max_offload;
uint16_t lro_rq_cnt;
uint32_t rsvd[4];
#endif
} rsp;
} params;
};
struct mbx_nic_set_iface_lro_config {
struct mbx_hdr hdr;
union {
struct {
#ifdef _BIG_ENDIAN
uint32_t lro_flags;
uint32_t iface_id;
uint32_t max_clsc_byte_cnt;
uint32_t max_clsc_seg_cnt;
uint32_t max_clsc_usec_delay;
uint32_t min_clsc_frame_byte_cnt;
uint32_t rsvd[2];
#else
uint32_t lro_flags;
uint32_t iface_id;
uint32_t max_clsc_byte_cnt;
uint32_t max_clsc_seg_cnt;
uint32_t max_clsc_usec_delay;
uint32_t min_clsc_frame_byte_cnt;
uint32_t rsvd[2];
#endif
} req;
struct {
#ifdef _BIG_ENDIAN
uint32_t lro_flags;
uint32_t rsvd[7];
#else
uint32_t lro_flags;
uint32_t rsvd[7];
#endif
} rsp;
} params;
};
struct mbx_create_nic_rq_v2 {
struct mbx_hdr hdr;
union {
struct {
#ifdef _BIG_ENDIAN
uint8_t num_pages;
uint8_t frag_size;
uint16_t cq_id;
uint32_t if_id;
uint16_t page_size;
uint16_t max_frame_size;
uint16_t rsvd;
uint16_t pd_id;
uint16_t rsvd1;
uint16_t rq_flags;
uint16_t hds_fixed_offset;
uint8_t hds_start;
uint8_t hds_frag;
uint16_t hds_backfill_size;
uint16_t hds_frag_size;
uint32_t rbq_id;
uint32_t rsvd2[8];
struct phys_addr pages[2];
#else
uint16_t cq_id;
uint8_t frag_size;
uint8_t num_pages;
uint32_t if_id;
uint16_t max_frame_size;
uint16_t page_size;
uint16_t pd_id;
uint16_t rsvd;
uint16_t rq_flags;
uint16_t rsvd1;
uint8_t hds_frag;
uint8_t hds_start;
uint16_t hds_fixed_offset;
uint16_t hds_frag_size;
uint16_t hds_backfill_size;
uint32_t rbq_id;
uint32_t rsvd2[8];
struct phys_addr pages[2];
#endif
} req;
struct {
#ifdef _BIG_ENDIAN
uint8_t rsvd0;
uint8_t rss_cpuid;
uint16_t rq_id;
uint8_t db_format;
uint8_t db_reg_set;
uint16_t rsvd1;
uint32_t db_offset;
uint32_t rsvd2;
uint16_t rsvd3;
uint16_t rq_flags;
#else
uint16_t rq_id;
uint8_t rss_cpuid;
uint8_t rsvd0;
uint16_t rsvd1;
uint8_t db_reg_set;
uint8_t db_format;
uint32_t db_offset;
uint32_t rsvd2;
uint16_t rq_flags;
uint16_t rsvd3;
#endif
} rsp;
} params;
};
struct mbx_delete_nic_rq_v1 {
struct mbx_hdr hdr;
union {
struct {
#ifdef _BIG_ENDIAN
uint16_t bypass_flush;
uint16_t rq_id;
uint16_t rsvd;
uint16_t rq_flags;
#else
uint16_t rq_id;
uint16_t bypass_flush;
uint16_t rq_flags;
uint16_t rsvd;
#endif
} req;
struct {
uint32_t rsvd[2];
} rsp;
} params;
};
struct nic_hwlro_singleton_cqe {
#ifdef _BIG_ENDIAN
/* dw 0 */
uint32_t ip_opt:1;
uint32_t vtp:1;
uint32_t pkt_size:14;
uint32_t vlan_tag:16;
/* dw 1 */
uint32_t num_frags:3;
uint32_t rsvd1:3;
uint32_t frag_index:10;
uint32_t rsvd:8;
uint32_t ipv6_frame:1;
uint32_t l4_cksum_pass:1;
uint32_t ip_cksum_pass:1;
uint32_t udpframe:1;
uint32_t tcpframe:1;
uint32_t ipframe:1;
uint32_t rss_hp:1;
uint32_t error:1;
/* dw 2 */
uint32_t valid:1;
uint32_t cqe_type:2;
uint32_t debug:7;
uint32_t rsvd4:6;
uint32_t data_offset:8;
uint32_t rsvd3:3;
uint32_t rss_bank:1;
uint32_t qnq:1;
uint32_t rsvd2:3;
/* dw 3 */
uint32_t rss_hash_value;
#else
/* dw 0 */
uint32_t vlan_tag:16;
uint32_t pkt_size:14;
uint32_t vtp:1;
uint32_t ip_opt:1;
/* dw 1 */
uint32_t error:1;
uint32_t rss_hp:1;
uint32_t ipframe:1;
uint32_t tcpframe:1;
uint32_t udpframe:1;
uint32_t ip_cksum_pass:1;
uint32_t l4_cksum_pass:1;
uint32_t ipv6_frame:1;
uint32_t rsvd:8;
uint32_t frag_index:10;
uint32_t rsvd1:3;
uint32_t num_frags:3;
/* dw 2 */
uint32_t rsvd2:3;
uint32_t qnq:1;
uint32_t rss_bank:1;
uint32_t rsvd3:3;
uint32_t data_offset:8;
uint32_t rsvd4:6;
uint32_t debug:7;
uint32_t cqe_type:2;
uint32_t valid:1;
/* dw 3 */
uint32_t rss_hash_value;
#endif
};
struct nic_hwlro_cqe_part1 {
#ifdef _BIG_ENDIAN
/* dw 0 */
uint32_t tcp_timestamp_val;
/* dw 1 */
uint32_t tcp_timestamp_ecr;
/* dw 2 */
uint32_t valid:1;
uint32_t cqe_type:2;
uint32_t rsvd3:7;
uint32_t rss_policy:4;
uint32_t rsvd2:2;
uint32_t data_offset:8;
uint32_t rsvd1:1;
uint32_t lro_desc:1;
uint32_t lro_timer_pop:1;
uint32_t rss_bank:1;
uint32_t qnq:1;
uint32_t rsvd:2;
uint32_t rss_flush:1;
/* dw 3 */
uint32_t rss_hash_value;
#else
/* dw 0 */
uint32_t tcp_timestamp_val;
/* dw 1 */
uint32_t tcp_timestamp_ecr;
/* dw 2 */
uint32_t rss_flush:1;
uint32_t rsvd:2;
uint32_t qnq:1;
uint32_t rss_bank:1;
uint32_t lro_timer_pop:1;
uint32_t lro_desc:1;
uint32_t rsvd1:1;
uint32_t data_offset:8;
uint32_t rsvd2:2;
uint32_t rss_policy:4;
uint32_t rsvd3:7;
uint32_t cqe_type:2;
uint32_t valid:1;
/* dw 3 */
uint32_t rss_hash_value;
#endif
};
struct nic_hwlro_cqe_part2 {
#ifdef _BIG_ENDIAN
/* dw 0 */
uint32_t ip_opt:1;
uint32_t vtp:1;
uint32_t pkt_size:14;
uint32_t vlan_tag:16;
/* dw 1 */
uint32_t tcp_window:16;
uint32_t coalesced_size:16;
/* dw 2 */
uint32_t valid:1;
uint32_t cqe_type:2;
uint32_t rsvd:2;
uint32_t push:1;
uint32_t ts_opt:1;
uint32_t threshold:1;
uint32_t seg_cnt:8;
uint32_t frame_lifespan:8;
uint32_t ipv6_frame:1;
uint32_t l4_cksum_pass:1;
uint32_t ip_cksum_pass:1;
uint32_t udpframe:1;
uint32_t tcpframe:1;
uint32_t ipframe:1;
uint32_t rss_hp:1;
uint32_t error:1;
/* dw 3 */
uint32_t tcp_ack_num;
#else
/* dw 0 */
uint32_t vlan_tag:16;
uint32_t pkt_size:14;
uint32_t vtp:1;
uint32_t ip_opt:1;
/* dw 1 */
uint32_t coalesced_size:16;
uint32_t tcp_window:16;
/* dw 2 */
uint32_t error:1;
uint32_t rss_hp:1;
uint32_t ipframe:1;
uint32_t tcpframe:1;
uint32_t udpframe:1;
uint32_t ip_cksum_pass:1;
uint32_t l4_cksum_pass:1;
uint32_t ipv6_frame:1;
uint32_t frame_lifespan:8;
uint32_t seg_cnt:8;
uint32_t threshold:1;
uint32_t ts_opt:1;
uint32_t push:1;
uint32_t rsvd:2;
uint32_t cqe_type:2;
uint32_t valid:1;
/* dw 3 */
uint32_t tcp_ack_num;
#endif
};
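To illustrate how these HW LRO CQE fields are consumed, the number of receive fragments covered by one completion follows from pkt_size (singleton CQE) or coalesced_size (part2 CQE) and the RQ fragment size; a minimal sketch, mirroring the cleanup logic added to oce_queue.c later in this commit (the helper name is illustrative):
/* Minimal sketch (not driver code): fragments consumed by one HW LRO
 * completion, e.g. with rq->cfg.frag_size == 2048. */
static inline int
example_hwlro_num_frags(uint32_t size, uint32_t frag_size)
{
        int num_frags;

        num_frags = size / frag_size;
        if (size % frag_size)
                num_frags++;            /* partial trailing fragment */
        return (num_frags);
}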

File diff suppressed because it is too large

View File

@ -85,13 +85,14 @@
#include <netinet/tcp.h>
#include <netinet/sctp.h>
#include <netinet/tcp_lro.h>
#include <netinet/icmp6.h>
#include <machine/bus.h>
#include "oce_hw.h"
/* OCE device driver module component revision information */
#define COMPONENT_REVISION "10.0.664.0"
#define COMPONENT_REVISION "11.0.50.0"
/* OCE devices supported by this driver */
#define PCI_VENDOR_EMULEX 0x10df /* Emulex */
@ -142,7 +143,6 @@ extern int mp_ncpus; /* system's total active cpu cores */
#define OCE_DEFAULT_WQ_EQD 16
#define OCE_MAX_PACKET_Q 16
#define OCE_RQ_BUF_SIZE 2048
#define OCE_LSO_MAX_SIZE (64 * 1024)
#define LONG_TIMEOUT 30
#define OCE_MAX_JUMBO_FRAME_SIZE 9018
@ -150,11 +150,15 @@ extern int mp_ncpus; /* system's total active cpu cores */
ETHER_VLAN_ENCAP_LEN - \
ETHER_HDR_LEN)
#define OCE_RDMA_VECTORS 2
#define OCE_MAX_TX_ELEMENTS 29
#define OCE_MAX_TX_DESC 1024
#define OCE_MAX_TX_SIZE 65535
#define OCE_MAX_TSO_SIZE (65535 - ETHER_HDR_LEN)
#define OCE_MAX_RX_SIZE 4096
#define OCE_MAX_RQ_POSTS 255
#define OCE_HWLRO_MAX_RQ_POSTS 64
#define OCE_DEFAULT_PROMISCUOUS 0
@ -503,7 +507,7 @@ struct oce_drv_stats {
#define INTR_RATE_LWM 10000
#define OCE_MAX_EQD 128u
#define OCE_MIN_EQD 50u
#define OCE_MIN_EQD 0u
struct oce_set_eqd {
uint32_t eq_id;
@ -518,7 +522,8 @@ struct oce_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
uint32_t cur_eqd; /* in usecs */
uint32_t et_eqd; /* configured value when aic is off */
uint64_t ticks;
uint64_t intr_prev;
uint64_t prev_rxpkts;
uint64_t prev_txreqs;
};
#define MAX_LOCK_DESC_LEN 32
@ -609,7 +614,8 @@ struct oce_eq {
enum cq_len {
CQ_LEN_256 = 256,
CQ_LEN_512 = 512,
CQ_LEN_1024 = 1024
CQ_LEN_1024 = 1024,
CQ_LEN_2048 = 2048
};
struct cq_config {
@ -685,6 +691,7 @@ struct oce_tx_queue_stats {
struct oce_wq {
OCE_LOCK tx_lock;
OCE_LOCK tx_compl_lock;
void *parent;
oce_ring_buffer_t *ring;
struct oce_cq *cq;
@ -730,6 +737,7 @@ struct oce_rx_queue_stats {
uint32_t rx_frags;
uint32_t prev_rx_frags;
uint32_t rx_fps;
uint32_t rx_drops_no_frags; /* HW has no fetched frags */
};
@ -744,8 +752,6 @@ struct oce_rq {
void *pad1;
bus_dma_tag_t tag;
struct oce_packet_desc pckts[OCE_RQ_PACKET_ARRAY_SIZE];
uint32_t packets_in;
uint32_t packets_out;
uint32_t pending;
#ifdef notdef
struct mbuf *head;
@ -757,6 +763,8 @@ struct oce_rq {
struct oce_rx_queue_stats rx_stats;
struct lro_ctrl lro;
int lro_pkts_queued;
int islro;
struct nic_hwlro_cqe_part1 *cqe_firstpart;
};
@ -781,6 +789,7 @@ struct link_status {
#define OCE_FLAGS_XE201 0x00000400
#define OCE_FLAGS_BE2 0x00000800
#define OCE_FLAGS_SH 0x00001000
#define OCE_FLAGS_OS2BMC 0x00002000
#define OCE_DEV_BE2_CFG_BAR 1
#define OCE_DEV_CFG_BAR 0
@ -815,6 +824,7 @@ typedef struct oce_softc {
OCE_INTR_INFO intrs[OCE_MAX_EQ];
int intr_count;
int roce_intr_count;
struct ifnet *ifp;
@ -824,6 +834,7 @@ typedef struct oce_softc {
uint8_t duplex;
uint32_t qos_link_speed;
uint32_t speed;
uint32_t enable_hwlro;
char fw_version[32];
struct mac_address_format macaddr;
@ -881,9 +892,15 @@ typedef struct oce_softc {
uint16_t qnqid;
uint32_t pvid;
uint32_t max_vlans;
uint32_t bmc_filt_mask;
void *rdma_context;
uint32_t rdma_flags;
struct oce_softc *next;
} OCE_SOFTC, *POCE_SOFTC;
#define OCE_RDMA_FLAG_SUPPORTED 0x00000001
/**************************************************
@ -933,7 +950,7 @@ typedef struct oce_softc {
: (bus_space_write_1((sc)->devcfg_btag, \
(sc)->devcfg_bhandle,o,v)))
void oce_rx_flush_lro(struct oce_rq *rq);
/***********************************************************
* DMA memory functions
***********************************************************/
@ -983,6 +1000,9 @@ uint32_t oce_page_list(oce_ring_buffer_t *ring, struct phys_addr *pa_list);
* cleanup functions
***********************************************************/
void oce_stop_rx(POCE_SOFTC sc);
void oce_discard_rx_comp(struct oce_rq *rq, int num_frags);
void oce_rx_cq_clean(struct oce_rq *rq);
void oce_rx_cq_clean_hwlro(struct oce_rq *rq);
void oce_intr_free(POCE_SOFTC sc);
void oce_free_posted_rxbuf(struct oce_rq *rq);
#if defined(INET6) || defined(INET)
@ -1015,7 +1035,8 @@ int oce_rxf_set_promiscuous(POCE_SOFTC sc, uint8_t enable);
int oce_set_common_iface_rx_filter(POCE_SOFTC sc, POCE_DMA_MEM sgl);
int oce_get_link_status(POCE_SOFTC sc, struct link_status *link);
int oce_mbox_get_nic_stats_v0(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem);
int oce_mbox_get_nic_stats(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem);
int oce_mbox_get_nic_stats_v1(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem);
int oce_mbox_get_nic_stats_v2(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem);
int oce_mbox_get_pport_stats(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem,
uint32_t reset_stats);
int oce_mbox_get_vport_stats(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem,
@ -1086,10 +1107,16 @@ int oce_refresh_nic_stats(POCE_SOFTC sc);
int oce_stats_init(POCE_SOFTC sc);
void oce_stats_free(POCE_SOFTC sc);
/* hw lro functions */
int oce_mbox_nic_query_lro_capabilities(POCE_SOFTC sc, uint32_t *lro_rq_cnt, uint32_t *lro_flags);
int oce_mbox_nic_set_iface_lro_config(POCE_SOFTC sc, int enable);
int oce_mbox_create_rq_v2(struct oce_rq *rq);
/* Capabilities */
#define OCE_MODCAP_RSS 1
#define OCE_MAX_RSP_HANDLED 64
extern uint32_t oce_max_rsp_handled; /* max responses */
extern uint32_t oce_rq_buf_size;
#define OCE_MAC_LOOPBACK 0x0
#define OCE_PHY_LOOPBACK 0x1
@ -1159,3 +1186,80 @@ static inline int MPU_EP_SEMAPHORE(POCE_SOFTC sc)
#define IS_QNQ_OR_UMC(sc) ((sc->pvid && (sc->function_mode & FNM_UMC_MODE ))\
|| (sc->qnqid && (sc->function_mode & FNM_FLEX10_MODE)))
struct oce_rdma_info;
extern struct oce_rdma_if *oce_rdma_if;
/* OS2BMC related */
#define DHCP_CLIENT_PORT 68
#define DHCP_SERVER_PORT 67
#define NET_BIOS_PORT1 137
#define NET_BIOS_PORT2 138
#define DHCPV6_RAS_PORT 547
#define BMC_FILT_BROADCAST_ARP ((uint32_t)(1))
#define BMC_FILT_BROADCAST_DHCP_CLIENT ((uint32_t)(1 << 1))
#define BMC_FILT_BROADCAST_DHCP_SERVER ((uint32_t)(1 << 2))
#define BMC_FILT_BROADCAST_NET_BIOS ((uint32_t)(1 << 3))
#define BMC_FILT_BROADCAST ((uint32_t)(1 << 4))
#define BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER ((uint32_t)(1 << 5))
#define BMC_FILT_MULTICAST_IPV6_RA ((uint32_t)(1 << 6))
#define BMC_FILT_MULTICAST_IPV6_RAS ((uint32_t)(1 << 7))
#define BMC_FILT_MULTICAST ((uint32_t)(1 << 8))
#define ND_ROUTER_ADVERT 134
#define ND_NEIGHBOR_ADVERT 136
#define is_mc_allowed_on_bmc(sc, eh) \
(!is_multicast_filt_enabled(sc) && \
ETHER_IS_MULTICAST(eh->ether_dhost) && \
!ETHER_IS_BROADCAST(eh->ether_dhost))
#define is_bc_allowed_on_bmc(sc, eh) \
(!is_broadcast_filt_enabled(sc) && \
ETHER_IS_BROADCAST(eh->ether_dhost))
#define is_arp_allowed_on_bmc(sc, et) \
(is_arp(et) && is_arp_filt_enabled(sc))
#define is_arp(et) (et == ETHERTYPE_ARP)
#define is_arp_filt_enabled(sc) \
(sc->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))
#define is_dhcp_client_filt_enabled(sc) \
(sc->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)
#define is_dhcp_srvr_filt_enabled(sc) \
(sc->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)
#define is_nbios_filt_enabled(sc) \
(sc->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)
#define is_ipv6_na_filt_enabled(sc) \
(sc->bmc_filt_mask & \
BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)
#define is_ipv6_ra_filt_enabled(sc) \
(sc->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)
#define is_ipv6_ras_filt_enabled(sc) \
(sc->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)
#define is_broadcast_filt_enabled(sc) \
(sc->bmc_filt_mask & BMC_FILT_BROADCAST)
#define is_multicast_filt_enabled(sc) \
(sc->bmc_filt_mask & BMC_FILT_MULTICAST)
#define is_os2bmc_enabled(sc) (sc->flags & OCE_FLAGS_OS2BMC)
#define LRO_FLAGS_HASH_MODE 0x00000001
#define LRO_FLAGS_RSS_MODE 0x00000004
#define LRO_FLAGS_CLSC_IPV4 0x00000010
#define LRO_FLAGS_CLSC_IPV6 0x00000020
#define NIC_RQ_FLAGS_RSS 0x0001
#define NIC_RQ_FLAGS_LRO 0x0020
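A minimal sketch of how the OS2BMC filter macros above could be kept in sync with the grp5 async event defined in oce_hw.h; the helper name and the one-to-one bit mapping are assumptions for illustration, not the driver's actual event handler:
/* Hypothetical helper (illustration only): update sc->bmc_filt_mask from an
 * ASYNC_EVENT_OS2BMC notification, assuming each event bit maps directly to
 * the corresponding BMC_FILT_* flag. */
static void
example_update_bmc_filters(POCE_SOFTC sc,
    struct oce_async_evt_grp5_os2bmc *evt)
{
        uint32_t mask = 0;

        if (evt->u.s.arp_filter)
                mask |= BMC_FILT_BROADCAST_ARP;
        if (evt->u.s.dhcp_client_filt)
                mask |= BMC_FILT_BROADCAST_DHCP_CLIENT;
        if (evt->u.s.dhcp_server_filt)
                mask |= BMC_FILT_BROADCAST_DHCP_SERVER;
        if (evt->u.s.net_bios_filt)
                mask |= BMC_FILT_BROADCAST_NET_BIOS;
        if (evt->u.s.bcast_filt)
                mask |= BMC_FILT_BROADCAST;
        if (evt->u.s.ipv6_nbr_filt)
                mask |= BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER;
        if (evt->u.s.ipv6_ra_filt)
                mask |= BMC_FILT_MULTICAST_IPV6_RA;
        if (evt->u.s.ipv6_ras_filt)
                mask |= BMC_FILT_MULTICAST_IPV6_RAS;
        if (evt->u.s.mcast_filt)
                mask |= BMC_FILT_MULTICAST;

        sc->bmc_filt_mask = mask;
}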

View File

@ -495,6 +495,10 @@ oce_get_fw_config(POCE_SOFTC sc)
sc->asic_revision = HOST_32(fwcmd->params.rsp.asic_revision);
sc->port_id = HOST_32(fwcmd->params.rsp.port_id);
sc->function_mode = HOST_32(fwcmd->params.rsp.function_mode);
if ((sc->function_mode & (ULP_NIC_MODE | ULP_RDMA_MODE)) ==
(ULP_NIC_MODE | ULP_RDMA_MODE)) {
sc->rdma_flags = OCE_RDMA_FLAG_SUPPORTED;
}
sc->function_caps = HOST_32(fwcmd->params.rsp.function_caps);
if (fwcmd->params.rsp.ulp[0].ulp_mode & ULP_NIC_MODE) {
@ -767,7 +771,7 @@ oce_rss_itbl_init(POCE_SOFTC sc, struct mbx_config_nic_rss *fwcmd)
/* fill log2 value indicating the size of the CPU table */
if (rc == 0)
fwcmd->params.req.cpu_tbl_sz_log2 = LE_16(OCE_LOG2(i));
fwcmd->params.req.cpu_tbl_sz_log2 = LE_16(OCE_LOG2(INDIRECTION_TABLE_ENTRIES));
return rc;
}
@ -808,9 +812,15 @@ oce_config_nic_rss(POCE_SOFTC sc, uint32_t if_id, uint16_t enable_rss)
RSS_ENABLE_TCP_IPV4 |
RSS_ENABLE_IPV6 |
RSS_ENABLE_TCP_IPV6);
fwcmd->params.req.flush = OCE_FLUSH;
if(!sc->enable_hwlro)
fwcmd->params.req.flush = OCE_FLUSH;
else
fwcmd->params.req.flush = 0;
fwcmd->params.req.if_id = LE_32(if_id);
srandom(arc4random()); /* random entropy seed */
read_random(fwcmd->params.req.hash, sizeof(fwcmd->params.req.hash));
rc = oce_rss_itbl_init(sc, fwcmd);
@ -864,7 +874,7 @@ oce_rxf_set_promiscuous(POCE_SOFTC sc, uint8_t enable)
req->iface_flags = MBX_RX_IFACE_FLAGS_PROMISCUOUS;
if (enable & 0x02)
req->iface_flags = MBX_RX_IFACE_FLAGS_VLAN_PROMISCUOUS;
req->iface_flags |= MBX_RX_IFACE_FLAGS_VLAN_PROMISCUOUS;
req->if_id = sc->if_id;
@ -968,105 +978,59 @@ oce_get_link_status(POCE_SOFTC sc, struct link_status *link)
}
int
oce_mbox_get_nic_stats_v0(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem)
{
struct oce_mbx mbx;
struct mbx_get_nic_stats_v0 *fwcmd;
int rc = 0;
bzero(&mbx, sizeof(struct oce_mbx));
fwcmd = OCE_DMAPTR(pstats_dma_mem, struct mbx_get_nic_stats_v0);
bzero(fwcmd, sizeof(struct mbx_get_nic_stats_v0));
mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
MBX_SUBSYSTEM_NIC,
NIC_GET_STATS,
MBX_TIMEOUT_SEC,
sizeof(struct mbx_get_nic_stats_v0),
OCE_MBX_VER_V0);
mbx.u0.s.embedded = 0;
mbx.u0.s.sge_count = 1;
oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_PREWRITE);
mbx.payload.u0.u1.sgl[0].pa_lo = ADDR_LO(pstats_dma_mem->paddr);
mbx.payload.u0.u1.sgl[0].pa_hi = ADDR_HI(pstats_dma_mem->paddr);
mbx.payload.u0.u1.sgl[0].length = sizeof(struct mbx_get_nic_stats_v0);
mbx.payload_length = sizeof(struct mbx_get_nic_stats_v0);
DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
rc = oce_mbox_post(sc, &mbx, NULL);
oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_POSTWRITE);
if (!rc)
rc = fwcmd->hdr.u0.rsp.status;
if (rc)
device_printf(sc->dev,
"%s failed - cmd status: %d addi status: %d\n",
__FUNCTION__, rc,
fwcmd->hdr.u0.rsp.additional_status);
return rc;
}
/**
* @brief Function to get NIC statistics
* @param sc software handle to the device
* @param *stats pointer to where to store statistics
* @param reset_stats resets statistics of set
* @returns 0 on success, EIO on failure
* @note command deprecated in Lancer
* @param sc software handle to the device
* @param *stats pointer to where to store statistics
* @param reset_stats resets statistics of set
* @returns 0 on success, EIO on failure
* @note command deprecated in Lancer
*/
int
oce_mbox_get_nic_stats(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem)
{
struct oce_mbx mbx;
struct mbx_get_nic_stats *fwcmd;
int rc = 0;
bzero(&mbx, sizeof(struct oce_mbx));
fwcmd = OCE_DMAPTR(pstats_dma_mem, struct mbx_get_nic_stats);
bzero(fwcmd, sizeof(struct mbx_get_nic_stats));
mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
MBX_SUBSYSTEM_NIC,
NIC_GET_STATS,
MBX_TIMEOUT_SEC,
sizeof(struct mbx_get_nic_stats),
OCE_MBX_VER_V1);
mbx.u0.s.embedded = 0; /* stats too large for embedded mbx rsp */
mbx.u0.s.sge_count = 1; /* using scatter gather instead */
oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_PREWRITE);
mbx.payload.u0.u1.sgl[0].pa_lo = ADDR_LO(pstats_dma_mem->paddr);
mbx.payload.u0.u1.sgl[0].pa_hi = ADDR_HI(pstats_dma_mem->paddr);
mbx.payload.u0.u1.sgl[0].length = sizeof(struct mbx_get_nic_stats);
mbx.payload_length = sizeof(struct mbx_get_nic_stats);
DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
rc = oce_mbox_post(sc, &mbx, NULL);
oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_POSTWRITE);
if (!rc)
rc = fwcmd->hdr.u0.rsp.status;
if (rc)
device_printf(sc->dev,
"%s failed - cmd status: %d addi status: %d\n",
__FUNCTION__, rc,
fwcmd->hdr.u0.rsp.additional_status);
return rc;
#define OCE_MBOX_GET_NIC_STATS(sc, pstats_dma_mem, version) \
int \
oce_mbox_get_nic_stats_v##version(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem) \
{ \
struct oce_mbx mbx; \
struct mbx_get_nic_stats_v##version *fwcmd; \
int rc = 0; \
\
bzero(&mbx, sizeof(struct oce_mbx)); \
fwcmd = OCE_DMAPTR(pstats_dma_mem, struct mbx_get_nic_stats_v##version); \
bzero(fwcmd, sizeof(*fwcmd)); \
\
mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, \
MBX_SUBSYSTEM_NIC, \
NIC_GET_STATS, \
MBX_TIMEOUT_SEC, \
sizeof(*fwcmd), \
OCE_MBX_VER_V##version); \
\
mbx.u0.s.embedded = 0; /* stats too large for embedded mbx rsp */ \
mbx.u0.s.sge_count = 1; /* using scatter gather instead */ \
\
oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_PREWRITE); \
mbx.payload.u0.u1.sgl[0].pa_lo = ADDR_LO(pstats_dma_mem->paddr); \
mbx.payload.u0.u1.sgl[0].pa_hi = ADDR_HI(pstats_dma_mem->paddr); \
mbx.payload.u0.u1.sgl[0].length = sizeof(*fwcmd); \
mbx.payload_length = sizeof(*fwcmd); \
DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ); \
\
rc = oce_mbox_post(sc, &mbx, NULL); \
oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_POSTWRITE); \
if (!rc) \
rc = fwcmd->hdr.u0.rsp.status; \
if (rc) \
device_printf(sc->dev, \
"%s failed - cmd status: %d addi status: %d\n", \
__FUNCTION__, rc, \
fwcmd->hdr.u0.rsp.additional_status); \
return rc; \
}
OCE_MBOX_GET_NIC_STATS(sc, pstats_dma_mem, 0);
OCE_MBOX_GET_NIC_STATS(sc, pstats_dma_mem, 1);
OCE_MBOX_GET_NIC_STATS(sc, pstats_dma_mem, 2);
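Usage sketch for the macro-generated helpers (illustrative only; it mirrors the version selection that oce_refresh_nic_stats performs in oce_sysctl.c later in this commit):
/* Illustrative caller: fetch NIC stats into the pre-allocated DMA buffer
 * using the mailbox version that matches the ASIC generation. */
static int
example_fetch_nic_stats(POCE_SOFTC sc)
{
        int rc = ENXIO;

        if (IS_BE2(sc))
                rc = oce_mbox_get_nic_stats_v0(sc, &sc->stats_mem);
        else if (IS_BE3(sc))
                rc = oce_mbox_get_nic_stats_v1(sc, &sc->stats_mem);
        else if (IS_SH(sc))
                rc = oce_mbox_get_nic_stats_v2(sc, &sc->stats_mem);
        return (rc);
}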
/**
* @brief Function to get pport (physical port) statistics
@ -2220,3 +2184,149 @@ oce_get_func_config(POCE_SOFTC sc)
return rc;
}
/* hw lro functions */
int
oce_mbox_nic_query_lro_capabilities(POCE_SOFTC sc, uint32_t *lro_rq_cnt, uint32_t *lro_flags)
{
struct oce_mbx mbx;
struct mbx_nic_query_lro_capabilities *fwcmd;
int rc = 0;
bzero(&mbx, sizeof(struct oce_mbx));
fwcmd = (struct mbx_nic_query_lro_capabilities *)&mbx.payload;
mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
MBX_SUBSYSTEM_NIC,
0x20,MBX_TIMEOUT_SEC,
sizeof(struct mbx_nic_query_lro_capabilities),
OCE_MBX_VER_V0);
mbx.u0.s.embedded = 1;
mbx.payload_length = sizeof(struct mbx_nic_query_lro_capabilities);
rc = oce_mbox_post(sc, &mbx, NULL);
if (!rc)
rc = fwcmd->hdr.u0.rsp.status;
if (rc) {
device_printf(sc->dev,
"%s failed - cmd status: %d addi status: %d\n",
__FUNCTION__, rc,
fwcmd->hdr.u0.rsp.additional_status);
return rc;
}
if(lro_flags)
*lro_flags = HOST_32(fwcmd->params.rsp.lro_flags);
if(lro_rq_cnt)
*lro_rq_cnt = HOST_16(fwcmd->params.rsp.lro_rq_cnt);
return rc;
}
int
oce_mbox_nic_set_iface_lro_config(POCE_SOFTC sc, int enable)
{
struct oce_mbx mbx;
struct mbx_nic_set_iface_lro_config *fwcmd;
int rc = 0;
bzero(&mbx, sizeof(struct oce_mbx));
fwcmd = (struct mbx_nic_set_iface_lro_config *)&mbx.payload;
mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
MBX_SUBSYSTEM_NIC,
0x26,MBX_TIMEOUT_SEC,
sizeof(struct mbx_nic_set_iface_lro_config),
OCE_MBX_VER_V0);
mbx.u0.s.embedded = 1;
mbx.payload_length = sizeof(struct mbx_nic_set_iface_lro_config);
fwcmd->params.req.iface_id = sc->if_id;
fwcmd->params.req.lro_flags = 0;
if(enable) {
fwcmd->params.req.lro_flags = LRO_FLAGS_HASH_MODE | LRO_FLAGS_RSS_MODE;
fwcmd->params.req.lro_flags |= LRO_FLAGS_CLSC_IPV4 | LRO_FLAGS_CLSC_IPV6;
fwcmd->params.req.max_clsc_byte_cnt = 64*1024; /* min = 2974, max = 0xfa59 */
fwcmd->params.req.max_clsc_seg_cnt = 43; /* min = 2, max = 64 */
fwcmd->params.req.max_clsc_usec_delay = 18; /* min = 1, max = 256 */
fwcmd->params.req.min_clsc_frame_byte_cnt = 0; /* min = 1, max = 9014 */
}
rc = oce_mbox_post(sc, &mbx, NULL);
if (!rc)
rc = fwcmd->hdr.u0.rsp.status;
if (rc) {
device_printf(sc->dev,
"%s failed - cmd status: %d addi status: %d\n",
__FUNCTION__, rc,
fwcmd->hdr.u0.rsp.additional_status);
return rc;
}
return rc;
}
int
oce_mbox_create_rq_v2(struct oce_rq *rq)
{
struct oce_mbx mbx;
struct mbx_create_nic_rq_v2 *fwcmd;
POCE_SOFTC sc = rq->parent;
int rc = 0, num_pages = 0;
if (rq->qstate == QCREATED)
return 0;
bzero(&mbx, sizeof(struct oce_mbx));
fwcmd = (struct mbx_create_nic_rq_v2 *)&mbx.payload;
mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
MBX_SUBSYSTEM_NIC,
0x08, MBX_TIMEOUT_SEC,
sizeof(struct mbx_create_nic_rq_v2),
OCE_MBX_VER_V2);
/* oce_page_list will also prepare pages */
num_pages = oce_page_list(rq->ring, &fwcmd->params.req.pages[0]);
fwcmd->params.req.cq_id = rq->cq->cq_id;
fwcmd->params.req.frag_size = rq->cfg.frag_size/2048;
fwcmd->params.req.num_pages = num_pages;
fwcmd->params.req.if_id = sc->if_id;
fwcmd->params.req.max_frame_size = rq->cfg.mtu;
fwcmd->params.req.page_size = 1;
if(rq->cfg.is_rss_queue) {
fwcmd->params.req.rq_flags = (NIC_RQ_FLAGS_RSS | NIC_RQ_FLAGS_LRO);
}else {
device_printf(sc->dev,
"non rss lro queue should not be created \n");
goto error;
}
mbx.u0.s.embedded = 1;
mbx.payload_length = sizeof(struct mbx_create_nic_rq_v2);
rc = oce_mbox_post(sc, &mbx, NULL);
if (!rc)
rc = fwcmd->hdr.u0.rsp.status;
if (rc) {
device_printf(sc->dev,
"%s failed - cmd status: %d addi status: %d\n",
__FUNCTION__, rc,
fwcmd->hdr.u0.rsp.additional_status);
goto error;
}
rq->rq_id = HOST_16(fwcmd->params.rsp.rq_id);
rq->rss_cpuid = fwcmd->params.rsp.rss_cpuid;
error:
return rc;
}

View File

@ -66,7 +66,7 @@ static struct oce_mq *oce_mq_create(POCE_SOFTC sc,
struct oce_eq *eq, uint32_t q_len);
static void oce_mq_free(struct oce_mq *mq);
static int oce_destroy_q(POCE_SOFTC sc, struct oce_mbx
*mbx, size_t req_size, enum qtype qtype);
*mbx, size_t req_size, enum qtype qtype, int version);
struct oce_cq *oce_cq_create(POCE_SOFTC sc,
struct oce_eq *eq,
uint32_t q_len,
@ -120,9 +120,10 @@ oce_queue_init_all(POCE_SOFTC sc)
aic->min_eqd = OCE_MIN_EQD;
aic->et_eqd = OCE_MIN_EQD;
aic->enable = TRUE;
sc->eq[vector] = oce_eq_create(sc, sc->enable_hwlro ? EQ_LEN_2048 : EQ_LEN_1024,
EQE_SIZE_4,0, vector);
sc->eq[vector] = oce_eq_create(sc, EQ_LEN_1024, EQE_SIZE_4,
0, vector);
if (!sc->eq[vector])
goto error;
}
@ -169,6 +170,10 @@ oce_queue_release_all(POCE_SOFTC sc)
struct oce_rq *rq;
struct oce_eq *eq;
/* before deleting lro queues, we have to disable hwlro */
if(sc->enable_hwlro)
oce_mbox_nic_set_iface_lro_config(sc, 0);
for_all_rq_queues(sc, rq, i) {
if (rq) {
oce_rq_del(sc->rq[i]);
@ -254,6 +259,7 @@ oce_wq *oce_wq_init(POCE_SOFTC sc, uint32_t q_len, uint32_t wq_type)
LOCK_CREATE(&wq->tx_lock, "TX_lock");
LOCK_CREATE(&wq->tx_compl_lock, "WQ_HANDLER_LOCK");
#if __FreeBSD_version >= 800000
/* Allocate buf ring for multiqueue*/
@ -304,6 +310,7 @@ oce_wq_free(struct oce_wq *wq)
buf_ring_free(wq->br, M_DEVBUF);
LOCK_DESTROY(&wq->tx_lock);
LOCK_DESTROY(&wq->tx_compl_lock);
free(wq, M_DEVBUF);
}
@ -374,7 +381,7 @@ oce_wq_del(struct oce_wq *wq)
fwcmd = (struct mbx_delete_nic_wq *)&mbx.payload;
fwcmd->params.req.wq_id = wq->wq_id;
(void)oce_destroy_q(sc, &mbx,
sizeof(struct mbx_delete_nic_wq), QTYPE_WQ);
sizeof(struct mbx_delete_nic_wq), QTYPE_WQ, 0);
wq->qstate = QDELETED;
}
@ -422,20 +429,17 @@ oce_rq *oce_rq_init(POCE_SOFTC sc,
rq->cfg.eqd = 0;
rq->lro_pkts_queued = 0;
rq->cfg.is_rss_queue = rss;
rq->packets_in = 0;
rq->packets_out = 0;
rq->pending = 0;
rq->parent = (void *)sc;
rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
1, 0,
BUS_SPACE_MAXADDR,
BUS_SPACE_MAXADDR,
NULL, NULL,
OCE_MAX_RX_SIZE,
1, PAGE_SIZE, 0, NULL, NULL, &rq->tag);
1, 0,
BUS_SPACE_MAXADDR,
BUS_SPACE_MAXADDR,
NULL, NULL,
oce_rq_buf_size,
1, oce_rq_buf_size, 0, NULL, NULL, &rq->tag);
if (rc)
goto free_rq;
@ -512,10 +516,10 @@ oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq)
POCE_SOFTC sc = rq->parent;
struct oce_cq *cq;
cq = oce_cq_create(sc,
eq,
CQ_LEN_1024,
sizeof(struct oce_nic_rx_cqe), 0, 1, 0, 3);
cq = oce_cq_create(sc, eq,
sc->enable_hwlro ? CQ_LEN_2048 : CQ_LEN_1024,
sizeof(struct oce_nic_rx_cqe), 0, 1, 0, 3);
if (!cq)
return ENXIO;
@ -548,14 +552,20 @@ oce_rq_del(struct oce_rq *rq)
POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
struct oce_mbx mbx;
struct mbx_delete_nic_rq *fwcmd;
struct mbx_delete_nic_rq_v1 *fwcmd1;
if (rq->qstate == QCREATED) {
bzero(&mbx, sizeof(mbx));
fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
fwcmd->params.req.rq_id = rq->rq_id;
(void)oce_destroy_q(sc, &mbx,
sizeof(struct mbx_delete_nic_rq), QTYPE_RQ);
if(!rq->islro) {
fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
fwcmd->params.req.rq_id = rq->rq_id;
(void)oce_destroy_q(sc, &mbx, sizeof(struct mbx_delete_nic_rq), QTYPE_RQ, 0);
}else {
fwcmd1 = (struct mbx_delete_nic_rq_v1 *)&mbx.payload;
fwcmd1->params.req.rq_id = rq->rq_id;
fwcmd1->params.req.rq_flags = (NIC_RQ_FLAGS_RSS | NIC_RQ_FLAGS_LRO);
(void)oce_destroy_q(sc, &mbx, sizeof(struct mbx_delete_nic_rq_v1), QTYPE_RQ, 1);
}
rq->qstate = QDELETED;
}
@ -632,7 +642,7 @@ oce_eq_del(struct oce_eq *eq)
fwcmd = (struct mbx_destroy_common_eq *)&mbx.payload;
fwcmd->params.req.id = eq->eq_id;
(void)oce_destroy_q(sc, &mbx,
sizeof(struct mbx_destroy_common_eq), QTYPE_EQ);
sizeof(struct mbx_destroy_common_eq), QTYPE_EQ, 0);
}
if (eq->ring != NULL) {
@ -783,7 +793,7 @@ oce_mq_free(struct oce_mq *mq)
fwcmd->params.req.id = mq->mq_id;
(void) oce_destroy_q(sc, &mbx,
sizeof (struct mbx_destroy_common_mq),
QTYPE_MQ);
QTYPE_MQ, 0);
}
mq->qstate = QDELETED;
}
@ -810,7 +820,7 @@ oce_mq_free(struct oce_mq *mq)
*/
static int
oce_destroy_q(POCE_SOFTC sc, struct oce_mbx *mbx, size_t req_size,
enum qtype qtype)
enum qtype qtype, int version)
{
struct mbx_hdr *hdr = (struct mbx_hdr *)&mbx->payload;
int opcode;
@ -844,7 +854,7 @@ oce_destroy_q(POCE_SOFTC sc, struct oce_mbx *mbx, size_t req_size,
mbx_common_req_hdr_init(hdr, 0, 0, subsys,
opcode, MBX_TIMEOUT_SEC, req_size,
OCE_MBX_VER_V0);
version);
mbx->u0.s.embedded = 1;
mbx->payload_length = (uint32_t) req_size;
@ -932,7 +942,7 @@ oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq)
fwcmd = (struct mbx_destroy_common_cq *)&mbx.payload;
fwcmd->params.req.id = cq->cq_id;
(void)oce_destroy_q(sc, &mbx,
sizeof(struct mbx_destroy_common_cq), QTYPE_CQ);
sizeof(struct mbx_destroy_common_cq), QTYPE_CQ, 0);
/*NOW destroy the ring */
oce_destroy_ring_buffer(sc, cq->ring);
cq->ring = NULL;
@ -951,12 +961,17 @@ oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq)
int
oce_start_rq(struct oce_rq *rq)
{
POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
int rc;
rc = oce_alloc_rx_bufs(rq, rq->cfg.q_len);
if(sc->enable_hwlro)
rc = oce_alloc_rx_bufs(rq, 960);
else
rc = oce_alloc_rx_bufs(rq, rq->cfg.q_len - 1);
if (rc == 0)
oce_arm_cq(rq->parent, rq->cq->cq_id, 0, TRUE);
return rc;
}
@ -1148,7 +1163,7 @@ oce_free_posted_rxbuf(struct oce_rq *rq)
while (rq->pending) {
pd = &rq->pckts[rq->packets_out];
pd = &rq->pckts[rq->ring->cidx];
bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(rq->tag, pd->map);
if (pd->mbuf != NULL) {
@ -1156,44 +1171,179 @@ oce_free_posted_rxbuf(struct oce_rq *rq)
pd->mbuf = NULL;
}
if ((rq->packets_out + 1) == OCE_RQ_PACKET_ARRAY_SIZE)
rq->packets_out = 0;
else
rq->packets_out++;
RING_GET(rq->ring,1);
rq->pending--;
}
}
void
oce_rx_cq_clean_hwlro(struct oce_rq *rq)
{
struct oce_cq *cq = rq->cq;
POCE_SOFTC sc = rq->parent;
struct nic_hwlro_singleton_cqe *cqe;
struct nic_hwlro_cqe_part2 *cqe2;
int flush_wait = 0;
int flush_compl = 0;
int num_frags = 0;
for (;;) {
bus_dmamap_sync(cq->ring->dma.tag,cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe);
if(cqe->valid) {
if(cqe->cqe_type == 0) { /* singleton cqe */
/* we should not get singleton cqe after cqe1 on same rq */
if(rq->cqe_firstpart != NULL) {
device_printf(sc->dev, "Got singleton cqe after cqe1 \n");
goto exit_rx_cq_clean_hwlro;
}
num_frags = cqe->pkt_size / rq->cfg.frag_size;
if(cqe->pkt_size % rq->cfg.frag_size)
num_frags++;
oce_discard_rx_comp(rq, num_frags);
/* Check if CQE is flush completion */
if(!cqe->pkt_size)
flush_compl = 1;
cqe->valid = 0;
RING_GET(cq->ring, 1);
}else if(cqe->cqe_type == 0x1) { /* first part */
/* we should not get cqe1 after cqe1 on same rq */
if(rq->cqe_firstpart != NULL) {
device_printf(sc->dev, "Got cqe1 after cqe1 \n");
goto exit_rx_cq_clean_hwlro;
}
rq->cqe_firstpart = (struct nic_hwlro_cqe_part1 *)cqe;
RING_GET(cq->ring, 1);
}else if(cqe->cqe_type == 0x2) { /* second part */
cqe2 = (struct nic_hwlro_cqe_part2 *)cqe;
/* We should not get cqe2 without cqe1 */
if(rq->cqe_firstpart == NULL) {
device_printf(sc->dev, "Got cqe2 without cqe1 \n");
goto exit_rx_cq_clean_hwlro;
}
num_frags = cqe2->coalesced_size / rq->cfg.frag_size;
if(cqe2->coalesced_size % rq->cfg.frag_size)
num_frags++;
/* Flush completion will always come in singleton CQE */
oce_discard_rx_comp(rq, num_frags);
rq->cqe_firstpart->valid = 0;
cqe2->valid = 0;
rq->cqe_firstpart = NULL;
RING_GET(cq->ring, 1);
}
oce_arm_cq(sc, cq->cq_id, 1, FALSE);
if(flush_compl)
break;
}else {
if (flush_wait++ > 100) {
device_printf(sc->dev, "did not receive hwlro flush compl\n");
break;
}
oce_arm_cq(sc, cq->cq_id, 0, TRUE);
DELAY(1000);
}
}
/* After cleanup, leave the CQ in unarmed state */
oce_arm_cq(sc, cq->cq_id, 0, FALSE);
exit_rx_cq_clean_hwlro:
return;
}
void
oce_rx_cq_clean(struct oce_rq *rq)
{
struct oce_nic_rx_cqe *cqe;
struct oce_cq *cq;
POCE_SOFTC sc;
int flush_wait = 0;
int flush_compl = 0;
sc = rq->parent;
cq = rq->cq;
for (;;) {
bus_dmamap_sync(cq->ring->dma.tag,
cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
if(RQ_CQE_VALID(cqe)) {
DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));
oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
/* Check if CQE is flush completion */
if((cqe->u0.s.num_fragments==0)&&(cqe->u0.s.pkt_size == 0)&&(cqe->u0.s.error == 0))
flush_compl = 1;
RQ_CQE_INVALIDATE(cqe);
RING_GET(cq->ring, 1);
#if defined(INET6) || defined(INET)
if (IF_LRO_ENABLED(sc))
oce_rx_flush_lro(rq);
#endif
oce_arm_cq(sc, cq->cq_id, 1, FALSE);
if(flush_compl)
break;
}else {
if (flush_wait++ > 100) {
device_printf(sc->dev, "did not receive flush compl\n");
break;
}
oce_arm_cq(sc, cq->cq_id, 0, TRUE);
DELAY(1000);
}
}
/* After cleanup, leave the CQ in unarmed state */
oce_arm_cq(sc, cq->cq_id, 0, FALSE);
}
void
oce_stop_rx(POCE_SOFTC sc)
{
struct oce_mbx mbx;
struct mbx_delete_nic_rq *fwcmd;
struct oce_rq *rq;
int i = 0;
struct oce_mbx mbx;
struct mbx_delete_nic_rq *fwcmd;
struct mbx_delete_nic_rq_v1 *fwcmd1;
struct oce_rq *rq;
int i = 0;
/* before deleting disable hwlro */
if(sc->enable_hwlro)
oce_mbox_nic_set_iface_lro_config(sc, 0);
for_all_rq_queues(sc, rq, i) {
if (rq->qstate == QCREATED) {
/* Delete rxq in firmware */
for_all_rq_queues(sc, rq, i) {
if (rq->qstate == QCREATED) {
/* Delete rxq in firmware */
LOCK(&rq->rx_lock);
bzero(&mbx, sizeof(mbx));
fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
fwcmd->params.req.rq_id = rq->rq_id;
bzero(&mbx, sizeof(mbx));
if(!rq->islro) {
fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
fwcmd->params.req.rq_id = rq->rq_id;
(void)oce_destroy_q(sc, &mbx, sizeof(struct mbx_delete_nic_rq), QTYPE_RQ, 0);
}else {
fwcmd1 = (struct mbx_delete_nic_rq_v1 *)&mbx.payload;
fwcmd1->params.req.rq_id = rq->rq_id;
fwcmd1->params.req.rq_flags = (NIC_RQ_FLAGS_RSS | NIC_RQ_FLAGS_LRO);
(void)oce_destroy_q(sc, &mbx,
sizeof(struct mbx_delete_nic_rq), QTYPE_RQ);
(void)oce_destroy_q(sc,&mbx,sizeof(struct mbx_delete_nic_rq_v1),QTYPE_RQ,1);
}
rq->qstate = QDELETED;
rq->qstate = QDELETED;
DELAY(1);
/* Free posted RX buffers that are not used */
oce_free_posted_rxbuf(rq);
DELAY(1000);
}
}
if(!rq->islro)
oce_rx_cq_clean(rq);
else
oce_rx_cq_clean_hwlro(rq);
/* Free posted RX buffers that are not used */
oce_free_posted_rxbuf(rq);
UNLOCK(&rq->rx_lock);
}
}
}
@ -1207,16 +1357,28 @@ oce_start_rx(POCE_SOFTC sc)
for_all_rq_queues(sc, rq, i) {
if (rq->qstate == QCREATED)
continue;
rc = oce_mbox_create_rq(rq);
if((i == 0) || (!sc->enable_hwlro)) {
rc = oce_mbox_create_rq(rq);
if (rc)
goto error;
rq->islro = 0;
}else {
rc = oce_mbox_create_rq_v2(rq);
if (rc)
goto error;
rq->islro = 1;
}
/* reset queue pointers */
rq->qstate = QCREATED;
rq->pending = 0;
rq->ring->cidx = 0;
rq->ring->pidx = 0;
}
if(sc->enable_hwlro) {
rc = oce_mbox_nic_set_iface_lro_config(sc, 1);
if (rc)
goto error;
/* reset queue pointers */
rq->qstate = QCREATED;
rq->pending = 0;
rq->ring->cidx = 0;
rq->ring->pidx = 0;
rq->packets_in = 0;
rq->packets_out = 0;
}
DELAY(1);
@ -1229,6 +1391,7 @@ oce_start_rx(POCE_SOFTC sc)
}
DELAY(1);
return rc;
error:
device_printf(sc->dev, "Start RX failed\n");

View File

@ -43,6 +43,7 @@
static void copy_stats_to_sc_xe201(POCE_SOFTC sc);
static void copy_stats_to_sc_be3(POCE_SOFTC sc);
static void copy_stats_to_sc_be2(POCE_SOFTC sc);
static void copy_stats_to_sc_sh(POCE_SOFTC sc);
static int oce_sysctl_loopback(SYSCTL_HANDLER_ARGS);
static int oce_sys_aic_enable(SYSCTL_HANDLER_ARGS);
static int oce_be3_fwupgrade(POCE_SOFTC sc, const struct firmware *fw);
@ -182,6 +183,8 @@ oce_sys_aic_enable(SYSCTL_HANDLER_ARGS)
POCE_SOFTC sc = (struct oce_softc *)arg1;
struct oce_aic_obj *aic;
/* set current value for proper sysctl logging */
value = sc->aic_obj[0].enable;
status = sysctl_handle_int(oidp, &value, 0, req);
if (status || !req->newptr)
return status;
@ -482,34 +485,34 @@ oce_sh_be3_flashdata(POCE_SOFTC sc, const struct firmware *fw, int32_t num_imgs)
return rc;
}
#define UFI_TYPE2 2
#define UFI_TYPE3 3
#define UFI_TYPE3R 10
#define UFI_TYPE4 4
#define UFI_TYPE4R 11
#define UFI_TYPE2 2
#define UFI_TYPE3 3
#define UFI_TYPE3R 10
#define UFI_TYPE4 4
#define UFI_TYPE4R 11
static int oce_get_ufi_type(POCE_SOFTC sc,
const struct flash_file_hdr *fhdr)
const struct flash_file_hdr *fhdr)
{
if (fhdr == NULL)
goto be_get_ufi_exit;
if (fhdr == NULL)
goto be_get_ufi_exit;
if (IS_SH(sc) && fhdr->build[0] == '4') {
if (fhdr->asic_type_rev >= 0x10)
return UFI_TYPE4R;
else
return UFI_TYPE4;
} else if (IS_BE3(sc) && fhdr->build[0] == '3') {
if (fhdr->asic_type_rev == 0x10)
return UFI_TYPE3R;
else
return UFI_TYPE3;
} else if (IS_BE2(sc) && fhdr->build[0] == '2')
return UFI_TYPE2;
if (IS_SH(sc) && fhdr->build[0] == '4') {
if (fhdr->asic_type_rev >= 0x10)
return UFI_TYPE4R;
else
return UFI_TYPE4;
} else if (IS_BE3(sc) && fhdr->build[0] == '3') {
if (fhdr->asic_type_rev == 0x10)
return UFI_TYPE3R;
else
return UFI_TYPE3;
} else if (IS_BE2(sc) && fhdr->build[0] == '2')
return UFI_TYPE2;
be_get_ufi_exit:
device_printf(sc->dev,
"UFI and Interface are not compatible for flashing\n");
return -1;
device_printf(sc->dev,
"UFI and Interface are not compatible for flashing\n");
return -1;
}
@ -777,7 +780,11 @@ oce_add_stats_sysctls_be3(POCE_SOFTC sc,
SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "rxcp_err",
CTLFLAG_RD, &sc->rq[i]->rx_stats.rxcp_err, 0,
"Received Completion Errors");
if(IS_SH(sc)) {
SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "rx_drops_no_frags",
CTLFLAG_RD, &sc->rq[i]->rx_stats.rx_drops_no_frags, 0,
"num of packet drops due to no fragments");
}
}
rx_stats_node = SYSCTL_ADD_NODE(ctx,
@ -1372,10 +1379,10 @@ copy_stats_to_sc_be3(POCE_SOFTC sc)
struct oce_pmem_stats *pmem;
struct oce_rxf_stats_v1 *rxf_stats;
struct oce_port_rxf_stats_v1 *port_stats;
struct mbx_get_nic_stats *nic_mbx;
struct mbx_get_nic_stats_v1 *nic_mbx;
uint32_t port = sc->port_id;
nic_mbx = OCE_DMAPTR(&sc->stats_mem, struct mbx_get_nic_stats);
nic_mbx = OCE_DMAPTR(&sc->stats_mem, struct mbx_get_nic_stats_v1);
pmem = &nic_mbx->params.rsp.stats.pmem;
rxf_stats = &nic_mbx->params.rsp.stats.rxf;
port_stats = &nic_mbx->params.rsp.stats.rxf.port[port];
@ -1429,18 +1436,91 @@ copy_stats_to_sc_be3(POCE_SOFTC sc)
adapter_stats->eth_red_drops = pmem->eth_red_drops;
}
static void
copy_stats_to_sc_sh(POCE_SOFTC sc)
{
struct oce_be_stats *adapter_stats;
struct oce_pmem_stats *pmem;
struct oce_rxf_stats_v2 *rxf_stats;
struct oce_port_rxf_stats_v2 *port_stats;
struct mbx_get_nic_stats_v2 *nic_mbx;
struct oce_erx_stats_v2 *erx_stats;
uint32_t port = sc->port_id;
nic_mbx = OCE_DMAPTR(&sc->stats_mem, struct mbx_get_nic_stats_v2);
pmem = &nic_mbx->params.rsp.stats.pmem;
rxf_stats = &nic_mbx->params.rsp.stats.rxf;
erx_stats = &nic_mbx->params.rsp.stats.erx;
port_stats = &nic_mbx->params.rsp.stats.rxf.port[port];
adapter_stats = &sc->oce_stats_info.u0.be;
/* Update stats */
adapter_stats->pmem_fifo_overflow_drop =
port_stats->pmem_fifo_overflow_drop;
adapter_stats->rx_priority_pause_frames =
port_stats->rx_priority_pause_frames;
adapter_stats->rx_pause_frames = port_stats->rx_pause_frames;
adapter_stats->rx_crc_errors = port_stats->rx_crc_errors;
adapter_stats->rx_control_frames = port_stats->rx_control_frames;
adapter_stats->rx_in_range_errors = port_stats->rx_in_range_errors;
adapter_stats->rx_frame_too_long = port_stats->rx_frame_too_long;
adapter_stats->rx_dropped_runt = port_stats->rx_dropped_runt;
adapter_stats->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
adapter_stats->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
adapter_stats->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
adapter_stats->rx_dropped_tcp_length =
port_stats->rx_dropped_tcp_length;
adapter_stats->rx_dropped_too_small = port_stats->rx_dropped_too_small;
adapter_stats->rx_dropped_too_short = port_stats->rx_dropped_too_short;
adapter_stats->rx_out_range_errors = port_stats->rx_out_range_errors;
adapter_stats->rx_dropped_header_too_small =
port_stats->rx_dropped_header_too_small;
adapter_stats->rx_input_fifo_overflow_drop =
port_stats->rx_input_fifo_overflow_drop;
adapter_stats->rx_address_match_errors =
port_stats->rx_address_match_errors;
adapter_stats->rx_alignment_symbol_errors =
port_stats->rx_alignment_symbol_errors;
adapter_stats->rxpp_fifo_overflow_drop =
port_stats->rxpp_fifo_overflow_drop;
adapter_stats->tx_pauseframes = port_stats->tx_pauseframes;
adapter_stats->tx_controlframes = port_stats->tx_controlframes;
adapter_stats->jabber_events = port_stats->jabber_events;
adapter_stats->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
adapter_stats->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
adapter_stats->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
adapter_stats->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
adapter_stats->forwarded_packets = rxf_stats->forwarded_packets;
adapter_stats->rx_drops_mtu = rxf_stats->rx_drops_mtu;
adapter_stats->rx_drops_no_tpre_descr =
rxf_stats->rx_drops_no_tpre_descr;
adapter_stats->rx_drops_too_many_frags =
rxf_stats->rx_drops_too_many_frags;
adapter_stats->eth_red_drops = pmem->eth_red_drops;
/* populate erx stats */
for (int i = 0; i < sc->nrqs; i++)
sc->rq[i]->rx_stats.rx_drops_no_frags = erx_stats->rx_drops_no_fragments[sc->rq[i]->rq_id];
}
int
oce_stats_init(POCE_SOFTC sc)
{
int rc = 0, sz;
if (IS_BE(sc) || IS_SH(sc)) {
if (sc->flags & OCE_FLAGS_BE2)
sz = sizeof(struct mbx_get_nic_stats_v0);
else
sz = sizeof(struct mbx_get_nic_stats);
} else
int rc = 0, sz = 0;
if( IS_BE2(sc) )
sz = sizeof(struct mbx_get_nic_stats_v0);
else if( IS_BE3(sc) )
sz = sizeof(struct mbx_get_nic_stats_v1);
else if( IS_SH(sc))
sz = sizeof(struct mbx_get_nic_stats_v2);
else if( IS_XE201(sc) )
sz = sizeof(struct mbx_get_pport_stats);
rc = oce_dma_alloc(sc, sz, &sc->stats_mem, 0);
@ -1463,23 +1543,24 @@ oce_refresh_nic_stats(POCE_SOFTC sc)
{
int rc = 0, reset = 0;
if (IS_BE(sc) || IS_SH(sc)) {
if (sc->flags & OCE_FLAGS_BE2) {
rc = oce_mbox_get_nic_stats_v0(sc, &sc->stats_mem);
if (!rc)
copy_stats_to_sc_be2(sc);
} else {
rc = oce_mbox_get_nic_stats(sc, &sc->stats_mem);
if (!rc)
copy_stats_to_sc_be3(sc);
}
} else {
if( IS_BE2(sc) ) {
rc = oce_mbox_get_nic_stats_v0(sc, &sc->stats_mem);
if (!rc)
copy_stats_to_sc_be2(sc);
}else if( IS_BE3(sc) ) {
rc = oce_mbox_get_nic_stats_v1(sc, &sc->stats_mem);
if (!rc)
copy_stats_to_sc_be3(sc);
}else if( IS_SH(sc)) {
rc = oce_mbox_get_nic_stats_v2(sc, &sc->stats_mem);
if (!rc)
copy_stats_to_sc_sh(sc);
}else if( IS_XE201(sc) ){
rc = oce_mbox_get_pport_stats(sc, &sc->stats_mem, reset);
if (!rc)
copy_stats_to_sc_xe201(sc);
}
return rc;
}