Introduce initial support for Cavium's ThunderX networking interface

- The driver consists of three main components: PF, VF, BGX
- Requires appropriate entries in DTS and MDIO driver
- Supports only FDT configuration
- Multiple Tx queues and single Rx queue supported
- No RSS, HW checksum and TSO support
- No more than 8 queues per-IF (only one Queue Set per IF)
- HW statistics enabled
- Works in all available MAC modes (1,10,20,40G)
- Style converted to BSD according to style(9)
- The code brings lmac_if interface used by the BGX driver to
  update its logical MACs state.

Obtained from: Semihalf
Sponsored by:  The FreeBSD Foundation
This commit is contained in:
Zbigniew Bodek 2015-10-18 22:02:58 +00:00
parent 3c0086b813
commit 2306b72a5d
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=289551
13 changed files with 5046 additions and 4363 deletions

View File

@ -68,6 +68,12 @@ dev/psci/psci_arm64.S optional psci
dev/uart/uart_cpu_fdt.c optional uart fdt
dev/uart/uart_dev_pl011.c optional uart pl011
dev/usb/controller/dwc_otg_hisi.c optional dwcotg soc_hisi_hi6220
dev/vnic/nic_main.c optional vnic pci
dev/vnic/nicvf_main.c optional vnic pci pci_iov
dev/vnic/nicvf_queues.c optional vnic pci pci_iov
dev/vnic/thunder_bgx_fdt.c optional vnic fdt
dev/vnic/thunder_bgx.c optional vnic pci
dev/vnic/lmac_if.m optional vnic
kern/kern_clocksource.c standard
kern/subr_dummy_vdso_tc.c standard
libkern/bcmp.c standard

102
sys/dev/vnic/lmac_if.m Normal file
View File

@ -0,0 +1,102 @@
#-
# Copyright (c) 2015 The FreeBSD Foundation
# All rights reserved.
#
# This software was developed by Semihalf under
# the sponsorship of the FreeBSD Foundation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $FreeBSD$
# LMAC (BGX controller) interface description
#
INTERFACE lmac;
#
# Default method implementations. Each stub returns ENXIO so that a BGX
# consumer calling into a device that does not implement the method gets
# a well-defined "device not configured" error instead of a crash.
#
CODE {
static int null_lmac_media_status(device_t dev, int lmacid, int *link,
int *duplex, int *speed)
{
return (ENXIO);
}
static int null_lmac_media_change(device_t dev, int lmacid, int link,
int duplex, int speed)
{
return (ENXIO);
}
static int null_lmac_phy_connect(device_t dev, int lmacid, int phy)
{
return (ENXIO);
}
static int null_lmac_phy_disconnect(device_t dev, int lmacid, int phy)
{
return (ENXIO);
}
};
# Get link status
#
# dev    - BGX controller device
# lmacid - logical MAC index within the controller
# link, duplex, speed - output parameters filled in by the method
#
# 0 : Success
#
METHOD int media_status {
device_t dev;
int lmacid;
int * link;
int * duplex;
int * speed;
} DEFAULT null_lmac_media_status;
# Change link status
#
# dev    - BGX controller device
# lmacid - logical MAC index within the controller
# link, duplex, speed - requested link state (passed by value)
#
# 0 : Success
#
METHOD int media_change {
device_t dev;
int lmacid;
int link;
int duplex;
int speed;
} DEFAULT null_lmac_media_change;
# Connect PHY
#
# dev    - BGX controller device
# lmacid - logical MAC index within the controller
# phy    - PHY identifier to attach to this LMAC
#
# 0 : Success
#
METHOD int phy_connect {
device_t dev;
int lmacid;
int phy;
} DEFAULT null_lmac_phy_connect;
# Disconnect PHY
#
# dev    - BGX controller device
# lmacid - logical MAC index within the controller
# phy    - PHY identifier to detach from this LMAC
#
# 0 : Success
#
METHOD int phy_disconnect {
device_t dev;
int lmacid;
int phy;
} DEFAULT null_lmac_phy_disconnect;

View File

@ -30,11 +30,8 @@
#ifndef NIC_H
#define NIC_H
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include "thunder_bgx.h"
/* PCI vendor ID */
#define PCI_VENDOR_ID_CAVIUM 0x177D
/* PCI device IDs */
#define PCI_DEVICE_ID_THUNDER_NIC_PF 0xA01E
#define PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF 0x0011
@ -53,12 +50,15 @@
#define NIC_TNS_MODE 1
/* NIC priv flags */
#define NIC_SRIOV_ENABLED BIT(0)
#define NIC_TNS_ENABLED BIT(1)
#define NIC_SRIOV_ENABLED (1 << 0)
#define NIC_TNS_ENABLED (1 << 1)
/* ARM64TODO */
#if 0
/* VNIC HW optimization features */
#define VNIC_RSS_SUPPORT
#define VNIC_MULTI_QSET_SUPPORT
#endif
/* Min/Max packet size */
#define NIC_HW_MIN_FRS 64
@ -67,7 +67,8 @@
/* Max pkinds */
#define NIC_MAX_PKIND 16
/* Rx Channels */
/*
* Rx Channels */
/* Receive channel configuration in TNS bypass mode
* Below is configuration in TNS bypass mode
* BGX0-LMAC0-CHAN0 - VNIC CHAN0
@ -83,7 +84,7 @@
#define NIC_CPI_COUNT 2048 /* No of channel parse indices */
/* TNS bypass mode: 1-1 mapping between VNIC and BGX:LMAC */
#define NIC_MAX_BGX MAX_BGX_PER_CN88XX
#define NIC_MAX_BGX MAX_BGX_PER_CN88XX
#define NIC_CPI_PER_BGX (NIC_CPI_COUNT / NIC_MAX_BGX)
#define NIC_MAX_CPI_PER_LMAC 64 /* Max when CPI_ALG is IP diffserv */
#define NIC_RSSI_PER_BGX (NIC_RSSI_COUNT / NIC_MAX_BGX)
@ -122,27 +123,33 @@
#define NICVF_INTR_CQ_MASK (0xFF << NICVF_INTR_CQ_SHIFT)
#define NICVF_INTR_SQ_MASK (0xFF << NICVF_INTR_SQ_SHIFT)
#define NICVF_INTR_RBDR_MASK (0x03 << NICVF_INTR_RBDR_SHIFT)
#define NICVF_INTR_PKT_DROP_MASK BIT(NICVF_INTR_PKT_DROP_SHIFT)
#define NICVF_INTR_TCP_TIMER_MASK BIT(NICVF_INTR_TCP_TIMER_SHIFT)
#define NICVF_INTR_MBOX_MASK BIT(NICVF_INTR_MBOX_SHIFT)
#define NICVF_INTR_QS_ERR_MASK BIT(NICVF_INTR_QS_ERR_SHIFT)
#define NICVF_INTR_PKT_DROP_MASK (1 << NICVF_INTR_PKT_DROP_SHIFT)
#define NICVF_INTR_TCP_TIMER_MASK (1 << NICVF_INTR_TCP_TIMER_SHIFT)
#define NICVF_INTR_MBOX_MASK (1 << NICVF_INTR_MBOX_SHIFT)
#define NICVF_INTR_QS_ERR_MASK (1 << NICVF_INTR_QS_ERR_SHIFT)
/* MSI-X interrupts */
#define NIC_PF_MSIX_VECTORS 10
#define NIC_VF_MSIX_VECTORS 20
#define NIC_PF_INTR_ID_ECC0_SBE 0
#define NIC_PF_INTR_ID_ECC0_DBE 1
#define NIC_PF_INTR_ID_ECC1_SBE 2
#define NIC_PF_INTR_ID_ECC1_DBE 3
#define NIC_PF_INTR_ID_ECC2_SBE 4
#define NIC_PF_INTR_ID_ECC2_DBE 5
#define NIC_PF_INTR_ID_ECC3_SBE 6
#define NIC_PF_INTR_ID_ECC3_DBE 7
#define NIC_PF_INTR_ID_MBOX0 8
#define NIC_PF_INTR_ID_MBOX1 9
#define NIC_PF_INTR_ID_ECC0_SBE 0
#define NIC_PF_INTR_ID_ECC0_DBE 1
#define NIC_PF_INTR_ID_ECC1_SBE 2
#define NIC_PF_INTR_ID_ECC1_DBE 3
#define NIC_PF_INTR_ID_ECC2_SBE 4
#define NIC_PF_INTR_ID_ECC2_DBE 5
#define NIC_PF_INTR_ID_ECC3_SBE 6
#define NIC_PF_INTR_ID_ECC3_DBE 7
#define NIC_PF_INTR_ID_MBOX0 8
#define NIC_PF_INTR_ID_MBOX1 9
/* Global timer for CQ timer thresh interrupts
struct msix_entry {
struct resource * irq_res;
void * handle;
};
/*
* Global timer for CQ timer thresh interrupts
* Calculated for SCLK of 700Mhz
* value written should be a 1/16th of what is expected
*
@ -151,7 +158,8 @@
*/
#define NICPF_CLK_PER_INT_TICK 2
/* Time to wait before we decide that a SQ is stuck.
/*
* Time to wait before we decide that a SQ is stuck.
*
* Since both pkt rx and tx notifications are done with same CQ,
* when packets are being received at very high rate (eg: L2 forwarding)
@ -160,36 +168,10 @@
*/
#define NICVF_TX_TIMEOUT (50 * HZ)
struct nicvf_cq_poll {
struct nicvf *nicvf;
u8 cq_idx; /* Completion queue index */
struct napi_struct napi;
};
#define NIC_RSSI_COUNT 4096 /* Total no of RSS indices */
#define NIC_MAX_RSS_HASH_BITS 8
#define NIC_MAX_RSS_IDR_TBL_SIZE (1 << NIC_MAX_RSS_HASH_BITS)
#define RSS_HASH_KEY_SIZE 5 /* 320 bit key */
#ifdef VNIC_RSS_SUPPORT
struct nicvf_rss_info {
bool enable;
#define RSS_L2_EXTENDED_HASH_ENA BIT(0)
#define RSS_IP_HASH_ENA BIT(1)
#define RSS_TCP_HASH_ENA BIT(2)
#define RSS_TCP_SYN_DIS BIT(3)
#define RSS_UDP_HASH_ENA BIT(4)
#define RSS_L4_EXTENDED_HASH_ENA BIT(5)
#define RSS_ROCE_ENA BIT(6)
#define RSS_L3_BI_DIRECTION_ENA BIT(7)
#define RSS_L4_BI_DIRECTION_ENA BIT(8)
u64 cfg;
u8 hash_bits;
u16 rss_size;
u8 ind_tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
u64 key[RSS_HASH_KEY_SIZE];
} ____cacheline_aligned_in_smp;
#endif
#define NIC_MAX_RSS_HASH_BITS 8
#define NIC_MAX_RSS_IDR_TBL_SIZE (1 << NIC_MAX_RSS_HASH_BITS)
#define RSS_HASH_KEY_SIZE 5 /* 320 bit key */
enum rx_stats_reg_offset {
RX_OCTS = 0x0,
@ -219,132 +201,124 @@ enum tx_stats_reg_offset {
};
struct nicvf_hw_stats {
u64 rx_bytes;
u64 rx_ucast_frames;
u64 rx_bcast_frames;
u64 rx_mcast_frames;
u64 rx_fcs_errors;
u64 rx_l2_errors;
u64 rx_drop_red;
u64 rx_drop_red_bytes;
u64 rx_drop_overrun;
u64 rx_drop_overrun_bytes;
u64 rx_drop_bcast;
u64 rx_drop_mcast;
u64 rx_drop_l3_bcast;
u64 rx_drop_l3_mcast;
u64 rx_bgx_truncated_pkts;
u64 rx_jabber_errs;
u64 rx_fcs_errs;
u64 rx_bgx_errs;
u64 rx_prel2_errs;
u64 rx_l2_hdr_malformed;
u64 rx_oversize;
u64 rx_undersize;
u64 rx_l2_len_mismatch;
u64 rx_l2_pclp;
u64 rx_ip_ver_errs;
u64 rx_ip_csum_errs;
u64 rx_ip_hdr_malformed;
u64 rx_ip_payload_malformed;
u64 rx_ip_ttl_errs;
u64 rx_l3_pclp;
u64 rx_l4_malformed;
u64 rx_l4_csum_errs;
u64 rx_udp_len_errs;
u64 rx_l4_port_errs;
u64 rx_tcp_flag_errs;
u64 rx_tcp_offset_errs;
u64 rx_l4_pclp;
u64 rx_truncated_pkts;
uint64_t rx_bytes;
uint64_t rx_ucast_frames;
uint64_t rx_bcast_frames;
uint64_t rx_mcast_frames;
uint64_t rx_fcs_errors;
uint64_t rx_l2_errors;
uint64_t rx_drop_red;
uint64_t rx_drop_red_bytes;
uint64_t rx_drop_overrun;
uint64_t rx_drop_overrun_bytes;
uint64_t rx_drop_bcast;
uint64_t rx_drop_mcast;
uint64_t rx_drop_l3_bcast;
uint64_t rx_drop_l3_mcast;
uint64_t rx_bgx_truncated_pkts;
uint64_t rx_jabber_errs;
uint64_t rx_fcs_errs;
uint64_t rx_bgx_errs;
uint64_t rx_prel2_errs;
uint64_t rx_l2_hdr_malformed;
uint64_t rx_oversize;
uint64_t rx_undersize;
uint64_t rx_l2_len_mismatch;
uint64_t rx_l2_pclp;
uint64_t rx_ip_ver_errs;
uint64_t rx_ip_csum_errs;
uint64_t rx_ip_hdr_malformed;
uint64_t rx_ip_payload_malformed;
uint64_t rx_ip_ttl_errs;
uint64_t rx_l3_pclp;
uint64_t rx_l4_malformed;
uint64_t rx_l4_csum_errs;
uint64_t rx_udp_len_errs;
uint64_t rx_l4_port_errs;
uint64_t rx_tcp_flag_errs;
uint64_t rx_tcp_offset_errs;
uint64_t rx_l4_pclp;
uint64_t rx_truncated_pkts;
u64 tx_bytes_ok;
u64 tx_ucast_frames_ok;
u64 tx_bcast_frames_ok;
u64 tx_mcast_frames_ok;
u64 tx_drops;
uint64_t tx_bytes_ok;
uint64_t tx_ucast_frames_ok;
uint64_t tx_bcast_frames_ok;
uint64_t tx_mcast_frames_ok;
uint64_t tx_drops;
};
struct nicvf_drv_stats {
/* Rx */
u64 rx_frames_ok;
u64 rx_frames_64;
u64 rx_frames_127;
u64 rx_frames_255;
u64 rx_frames_511;
u64 rx_frames_1023;
u64 rx_frames_1518;
u64 rx_frames_jumbo;
u64 rx_drops;
uint64_t rx_frames_ok;
uint64_t rx_frames_64;
uint64_t rx_frames_127;
uint64_t rx_frames_255;
uint64_t rx_frames_511;
uint64_t rx_frames_1023;
uint64_t rx_frames_1518;
uint64_t rx_frames_jumbo;
uint64_t rx_drops;
/* Tx */
u64 tx_frames_ok;
u64 tx_drops;
u64 tx_tso;
u64 txq_stop;
u64 txq_wake;
uint64_t tx_frames_ok;
uint64_t tx_drops;
uint64_t tx_tso;
uint64_t txq_stop;
uint64_t txq_wake;
};
struct nicvf {
struct nicvf *pnicvf;
struct net_device *netdev;
struct pci_dev *pdev;
u8 vf_id;
u8 node;
bool tns_mode:1;
bool sqs_mode:1;
bool loopback_supported:1;
u16 mtu;
struct queue_set *qs;
#ifdef VNIC_MULTI_QSET_SUPPORT
#define MAX_SQS_PER_VF_SINGLE_NODE 5
#define MAX_SQS_PER_VF 11
u8 sqs_id;
u8 sqs_count; /* Secondary Qset count */
struct nicvf *snicvf[MAX_SQS_PER_VF];
#endif
u8 rx_queues;
u8 tx_queues;
u8 max_queues;
void __iomem *reg_base;
bool link_up;
u8 duplex;
u32 speed;
struct page *rb_page;
u32 rb_page_offset;
bool rb_alloc_fail;
bool rb_work_scheduled;
struct delayed_work rbdr_work;
struct tasklet_struct rbdr_task;
struct tasklet_struct qs_err_task;
struct tasklet_struct cq_task;
struct nicvf_cq_poll *napi[8];
#ifdef VNIC_RSS_SUPPORT
struct nicvf_rss_info rss_info;
#endif
u8 cpi_alg;
/* Interrupt coalescing settings */
u32 cq_coalesce_usecs;
device_t dev;
u32 msg_enable;
struct nicvf_hw_stats hw_stats;
struct nicvf_drv_stats drv_stats;
struct ifnet * ifp;
struct sx core_sx;
struct ifmedia if_media;
uint32_t if_flags;
uint8_t hwaddr[ETHER_ADDR_LEN];
uint8_t vf_id;
uint8_t node;
boolean_t tns_mode:1;
boolean_t sqs_mode:1;
bool loopback_supported:1;
uint16_t mtu;
struct queue_set *qs;
uint8_t rx_queues;
uint8_t tx_queues;
uint8_t max_queues;
struct resource *reg_base;
boolean_t link_up;
uint8_t duplex;
uint32_t speed;
uint8_t cpi_alg;
/* Interrupt coalescing settings */
uint32_t cq_coalesce_usecs;
uint32_t msg_enable;
struct nicvf_hw_stats hw_stats;
struct nicvf_drv_stats drv_stats;
struct bgx_stats bgx_stats;
struct work_struct reset_task;
/* Interface statistics */
struct callout stats_callout;
struct mtx stats_mtx;
/* MSI-X */
bool msix_enabled;
u8 num_vec;
boolean_t msix_enabled;
uint8_t num_vec;
struct msix_entry msix_entries[NIC_VF_MSIX_VECTORS];
struct resource * msix_table_res;
char irq_name[NIC_VF_MSIX_VECTORS][20];
bool irq_allocated[NIC_VF_MSIX_VECTORS];
boolean_t irq_allocated[NIC_VF_MSIX_VECTORS];
/* VF <-> PF mailbox communication */
bool pf_acked;
bool pf_nacked;
} ____cacheline_aligned_in_smp;
boolean_t pf_acked;
boolean_t pf_nacked;
} __aligned(CACHE_LINE_SIZE);
/* PF <--> VF Mailbox communication
/*
* PF <--> VF Mailbox communication
* Eight 64bit registers are shared between PF and VF.
* Separate set for each VF.
* Writing '1' into last register mbx7 means end of message.
@ -381,123 +355,108 @@ struct nicvf {
#define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */
struct nic_cfg_msg {
u8 msg;
u8 vf_id;
u8 node_id;
bool tns_mode:1;
bool sqs_mode:1;
bool loopback_supported:1;
u8 mac_addr[ETH_ALEN];
uint8_t msg;
uint8_t vf_id;
uint8_t node_id;
boolean_t tns_mode:1;
boolean_t sqs_mode:1;
boolean_t loopback_supported:1;
uint8_t mac_addr[ETHER_ADDR_LEN];
};
/* Qset configuration */
struct qs_cfg_msg {
u8 msg;
u8 num;
u8 sqs_count;
u64 cfg;
uint8_t msg;
uint8_t num;
uint8_t sqs_count;
uint64_t cfg;
};
/* Receive queue configuration */
struct rq_cfg_msg {
u8 msg;
u8 qs_num;
u8 rq_num;
u64 cfg;
uint8_t msg;
uint8_t qs_num;
uint8_t rq_num;
uint64_t cfg;
};
/* Send queue configuration */
struct sq_cfg_msg {
u8 msg;
u8 qs_num;
u8 sq_num;
bool sqs_mode;
u64 cfg;
uint8_t msg;
uint8_t qs_num;
uint8_t sq_num;
boolean_t sqs_mode;
uint64_t cfg;
};
/* Set VF's MAC address */
struct set_mac_msg {
u8 msg;
u8 vf_id;
u8 mac_addr[ETH_ALEN];
uint8_t msg;
uint8_t vf_id;
uint8_t mac_addr[ETHER_ADDR_LEN];
};
/* Set Maximum frame size */
struct set_frs_msg {
u8 msg;
u8 vf_id;
u16 max_frs;
uint8_t msg;
uint8_t vf_id;
uint16_t max_frs;
};
/* Set CPI algorithm type */
struct cpi_cfg_msg {
u8 msg;
u8 vf_id;
u8 rq_cnt;
u8 cpi_alg;
uint8_t msg;
uint8_t vf_id;
uint8_t rq_cnt;
uint8_t cpi_alg;
};
/* Get RSS table size */
struct rss_sz_msg {
u8 msg;
u8 vf_id;
u16 ind_tbl_size;
uint8_t msg;
uint8_t vf_id;
uint16_t ind_tbl_size;
};
/* Set RSS configuration */
struct rss_cfg_msg {
u8 msg;
u8 vf_id;
u8 hash_bits;
u8 tbl_len;
u8 tbl_offset;
#define RSS_IND_TBL_LEN_PER_MBX_MSG 8
u8 ind_tbl[RSS_IND_TBL_LEN_PER_MBX_MSG];
uint8_t msg;
uint8_t vf_id;
uint8_t hash_bits;
uint8_t tbl_len;
uint8_t tbl_offset;
#define RSS_IND_TBL_LEN_PER_MBX_MSG 8
uint8_t ind_tbl[RSS_IND_TBL_LEN_PER_MBX_MSG];
};
struct bgx_stats_msg {
u8 msg;
u8 vf_id;
u8 rx;
u8 idx;
u64 stats;
uint8_t msg;
uint8_t vf_id;
uint8_t rx;
uint8_t idx;
uint64_t stats;
};
/* Physical interface link status */
struct bgx_link_status {
u8 msg;
u8 link_up;
u8 duplex;
u32 speed;
uint8_t msg;
uint8_t link_up;
uint8_t duplex;
uint32_t speed;
};
#ifdef VNIC_MULTI_QSET_SUPPORT
/* Get Extra Qset IDs */
struct sqs_alloc {
u8 msg;
u8 vf_id;
u8 qs_count;
};
struct nicvf_ptr {
u8 msg;
u8 vf_id;
bool sqs_mode;
u8 sqs_id;
u64 nicvf;
};
#endif
/* Set interface in loopback mode */
struct set_loopback {
u8 msg;
u8 vf_id;
bool enable;
uint8_t msg;
uint8_t vf_id;
boolean_t enable;
};
/* 128 bit shared memory between PF and each VF */
union nic_mbx {
struct { u8 msg; } msg;
struct {
uint8_t msg;
} msg;
struct nic_cfg_msg nic_cfg;
struct qs_cfg_msg qs;
struct rq_cfg_msg rq;
@ -507,33 +466,23 @@ union nic_mbx {
struct cpi_cfg_msg cpi_cfg;
struct rss_sz_msg rss_size;
struct rss_cfg_msg rss_cfg;
struct bgx_stats_msg bgx_stats;
struct bgx_link_status link_status;
#ifdef VNIC_MULTI_QSET_SUPPORT
struct sqs_alloc sqs_alloc;
struct nicvf_ptr nicvf;
#endif
struct bgx_stats_msg bgx_stats;
struct bgx_link_status link_status;
struct set_loopback lbk;
};
#define NIC_NODE_ID_MASK 0x03
#define NIC_NODE_ID_SHIFT 44
#define NIC_NODE_ID_MASK 0x03
#define NIC_NODE_ID_SHIFT 44
static inline int nic_get_node_id(struct pci_dev *pdev)
static __inline int
nic_get_node_id(struct resource *res)
{
u64 addr = pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM);
pci_addr_t addr;
addr = rman_get_start(res);
return ((addr >> NIC_NODE_ID_SHIFT) & NIC_NODE_ID_MASK);
}
int nicvf_set_real_num_queues(struct net_device *netdev,
int tx_queues, int rx_queues);
int nicvf_open(struct net_device *netdev);
int nicvf_stop(struct net_device *netdev);
int nicvf_send_msg_to_pf(struct nicvf *vf, union nic_mbx *mbx);
void nicvf_config_rss(struct nicvf *nic);
void nicvf_set_rss_key(struct nicvf *nic);
void nicvf_set_ethtool_ops(struct net_device *netdev);
void nicvf_update_stats(struct nicvf *nic);
void nicvf_update_lmac_stats(struct nicvf *nic);
#endif /* NIC_H */

File diff suppressed because it is too large Load Diff

View File

@ -30,205 +30,195 @@
#ifndef NIC_REG_H
#define NIC_REG_H
#define NIC_PF_REG_COUNT 29573
#define NIC_VF_REG_COUNT 249
#define NIC_PF_REG_COUNT 29573
#define NIC_VF_REG_COUNT 249
/* Physical function register offsets */
#define NIC_PF_CFG (0x0000)
#define NIC_PF_STATUS (0x0010)
#define NIC_PF_INTR_TIMER_CFG (0x0030)
#define NIC_PF_BIST_STATUS (0x0040)
#define NIC_PF_SOFT_RESET (0x0050)
#define NIC_PF_TCP_TIMER (0x0060)
#define NIC_PF_BP_CFG (0x0080)
#define NIC_PF_RRM_CFG (0x0088)
#define NIC_PF_CQM_CF (0x00A0)
#define NIC_PF_CNM_CF (0x00A8)
#define NIC_PF_CNM_STATUS (0x00B0)
#define NIC_PF_CQ_AVG_CFG (0x00C0)
#define NIC_PF_RRM_AVG_CFG (0x00C8)
#define NIC_PF_INTF_0_1_SEND_CFG (0x0200)
#define NIC_PF_INTF_0_1_BP_CFG (0x0208)
#define NIC_PF_INTF_0_1_BP_DIS_0_1 (0x0210)
#define NIC_PF_INTF_0_1_BP_SW_0_1 (0x0220)
#define NIC_PF_RBDR_BP_STATE_0_3 (0x0240)
#define NIC_PF_MAILBOX_INT (0x0410)
#define NIC_PF_MAILBOX_INT_W1S (0x0430)
#define NIC_PF_MAILBOX_ENA_W1C (0x0450)
#define NIC_PF_MAILBOX_ENA_W1S (0x0470)
#define NIC_PF_RX_ETYPE_0_7 (0x0500)
#define NIC_PF_PKIND_0_15_CFG (0x0600)
#define NIC_PF_ECC0_FLIP0 (0x1000)
#define NIC_PF_ECC1_FLIP0 (0x1008)
#define NIC_PF_ECC2_FLIP0 (0x1010)
#define NIC_PF_ECC3_FLIP0 (0x1018)
#define NIC_PF_ECC0_FLIP1 (0x1080)
#define NIC_PF_ECC1_FLIP1 (0x1088)
#define NIC_PF_ECC2_FLIP1 (0x1090)
#define NIC_PF_ECC3_FLIP1 (0x1098)
#define NIC_PF_ECC0_CDIS (0x1100)
#define NIC_PF_ECC1_CDIS (0x1108)
#define NIC_PF_ECC2_CDIS (0x1110)
#define NIC_PF_ECC3_CDIS (0x1118)
#define NIC_PF_BIST0_STATUS (0x1280)
#define NIC_PF_BIST1_STATUS (0x1288)
#define NIC_PF_BIST2_STATUS (0x1290)
#define NIC_PF_BIST3_STATUS (0x1298)
#define NIC_PF_ECC0_SBE_INT (0x2000)
#define NIC_PF_ECC0_SBE_INT_W1S (0x2008)
#define NIC_PF_ECC0_SBE_ENA_W1C (0x2010)
#define NIC_PF_ECC0_SBE_ENA_W1S (0x2018)
#define NIC_PF_ECC0_DBE_INT (0x2100)
#define NIC_PF_ECC0_DBE_INT_W1S (0x2108)
#define NIC_PF_ECC0_DBE_ENA_W1C (0x2110)
#define NIC_PF_ECC0_DBE_ENA_W1S (0x2118)
#define NIC_PF_ECC1_SBE_INT (0x2200)
#define NIC_PF_ECC1_SBE_INT_W1S (0x2208)
#define NIC_PF_ECC1_SBE_ENA_W1C (0x2210)
#define NIC_PF_ECC1_SBE_ENA_W1S (0x2218)
#define NIC_PF_ECC1_DBE_INT (0x2300)
#define NIC_PF_ECC1_DBE_INT_W1S (0x2308)
#define NIC_PF_ECC1_DBE_ENA_W1C (0x2310)
#define NIC_PF_ECC1_DBE_ENA_W1S (0x2318)
#define NIC_PF_ECC2_SBE_INT (0x2400)
#define NIC_PF_ECC2_SBE_INT_W1S (0x2408)
#define NIC_PF_ECC2_SBE_ENA_W1C (0x2410)
#define NIC_PF_ECC2_SBE_ENA_W1S (0x2418)
#define NIC_PF_ECC2_DBE_INT (0x2500)
#define NIC_PF_ECC2_DBE_INT_W1S (0x2508)
#define NIC_PF_ECC2_DBE_ENA_W1C (0x2510)
#define NIC_PF_ECC2_DBE_ENA_W1S (0x2518)
#define NIC_PF_ECC3_SBE_INT (0x2600)
#define NIC_PF_ECC3_SBE_INT_W1S (0x2608)
#define NIC_PF_ECC3_SBE_ENA_W1C (0x2610)
#define NIC_PF_ECC3_SBE_ENA_W1S (0x2618)
#define NIC_PF_ECC3_DBE_INT (0x2700)
#define NIC_PF_ECC3_DBE_INT_W1S (0x2708)
#define NIC_PF_ECC3_DBE_ENA_W1C (0x2710)
#define NIC_PF_ECC3_DBE_ENA_W1S (0x2718)
#define NIC_PF_CPI_0_2047_CFG (0x200000)
#define NIC_PF_RSSI_0_4097_RQ (0x220000)
#define NIC_PF_LMAC_0_7_CFG (0x240000)
#define NIC_PF_LMAC_0_7_SW_XOFF (0x242000)
#define NIC_PF_LMAC_0_7_CREDIT (0x244000)
#define NIC_PF_CHAN_0_255_TX_CFG (0x400000)
#define NIC_PF_CHAN_0_255_RX_CFG (0x420000)
#define NIC_PF_CHAN_0_255_SW_XOFF (0x440000)
#define NIC_PF_CHAN_0_255_CREDIT (0x460000)
#define NIC_PF_CHAN_0_255_RX_BP_CFG (0x480000)
#define NIC_PF_SW_SYNC_RX (0x490000)
#define NIC_PF_SW_SYNC_RX_DONE (0x490008)
#define NIC_PF_TL2_0_63_CFG (0x500000)
#define NIC_PF_TL2_0_63_PRI (0x520000)
#define NIC_PF_TL2_0_63_SH_STATUS (0x580000)
#define NIC_PF_TL3A_0_63_CFG (0x5F0000)
#define NIC_PF_TL3_0_255_CFG (0x600000)
#define NIC_PF_TL3_0_255_CHAN (0x620000)
#define NIC_PF_TL3_0_255_PIR (0x640000)
#define NIC_PF_TL3_0_255_SW_XOFF (0x660000)
#define NIC_PF_TL3_0_255_CNM_RATE (0x680000)
#define NIC_PF_TL3_0_255_SH_STATUS (0x6A0000)
#define NIC_PF_TL4A_0_255_CFG (0x6F0000)
#define NIC_PF_TL4_0_1023_CFG (0x800000)
#define NIC_PF_TL4_0_1023_SW_XOFF (0x820000)
#define NIC_PF_TL4_0_1023_SH_STATUS (0x840000)
#define NIC_PF_TL4A_0_1023_CNM_RATE (0x880000)
#define NIC_PF_TL4A_0_1023_CNM_STATUS (0x8A0000)
#define NIC_PF_VF_0_127_MAILBOX_0_1 (0x20002030)
#define NIC_PF_VNIC_0_127_TX_STAT_0_4 (0x20004000)
#define NIC_PF_VNIC_0_127_RX_STAT_0_13 (0x20004100)
#define NIC_PF_QSET_0_127_LOCK_0_15 (0x20006000)
#define NIC_PF_QSET_0_127_CFG (0x20010000)
#define NIC_PF_QSET_0_127_RQ_0_7_CFG (0x20010400)
#define NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG (0x20010420)
#define NIC_PF_QSET_0_127_RQ_0_7_BP_CFG (0x20010500)
#define NIC_PF_QSET_0_127_RQ_0_7_STAT_0_1 (0x20010600)
#define NIC_PF_QSET_0_127_SQ_0_7_CFG (0x20010C00)
#define NIC_PF_QSET_0_127_SQ_0_7_CFG2 (0x20010C08)
#define NIC_PF_QSET_0_127_SQ_0_7_STAT_0_1 (0x20010D00)
#define NIC_PF_CFG (0x0000)
#define NIC_PF_STATUS (0x0010)
#define NIC_PF_INTR_TIMER_CFG (0x0030)
#define NIC_PF_BIST_STATUS (0x0040)
#define NIC_PF_SOFT_RESET (0x0050)
#define NIC_PF_TCP_TIMER (0x0060)
#define NIC_PF_BP_CFG (0x0080)
#define NIC_PF_RRM_CFG (0x0088)
#define NIC_PF_CQM_CF (0x00A0)
#define NIC_PF_CNM_CF (0x00A8)
#define NIC_PF_CNM_STATUS (0x00B0)
#define NIC_PF_CQ_AVG_CFG (0x00C0)
#define NIC_PF_RRM_AVG_CFG (0x00C8)
#define NIC_PF_INTF_0_1_SEND_CFG (0x0200)
#define NIC_PF_INTF_0_1_BP_CFG (0x0208)
#define NIC_PF_INTF_0_1_BP_DIS_0_1 (0x0210)
#define NIC_PF_INTF_0_1_BP_SW_0_1 (0x0220)
#define NIC_PF_RBDR_BP_STATE_0_3 (0x0240)
#define NIC_PF_MAILBOX_INT (0x0410)
#define NIC_PF_MAILBOX_INT_W1S (0x0430)
#define NIC_PF_MAILBOX_ENA_W1C (0x0450)
#define NIC_PF_MAILBOX_ENA_W1S (0x0470)
#define NIC_PF_RX_ETYPE_0_7 (0x0500)
#define NIC_PF_PKIND_0_15_CFG (0x0600)
#define NIC_PF_ECC0_FLIP0 (0x1000)
#define NIC_PF_ECC1_FLIP0 (0x1008)
#define NIC_PF_ECC2_FLIP0 (0x1010)
#define NIC_PF_ECC3_FLIP0 (0x1018)
#define NIC_PF_ECC0_FLIP1 (0x1080)
#define NIC_PF_ECC1_FLIP1 (0x1088)
#define NIC_PF_ECC2_FLIP1 (0x1090)
#define NIC_PF_ECC3_FLIP1 (0x1098)
#define NIC_PF_ECC0_CDIS (0x1100)
#define NIC_PF_ECC1_CDIS (0x1108)
#define NIC_PF_ECC2_CDIS (0x1110)
#define NIC_PF_ECC3_CDIS (0x1118)
#define NIC_PF_BIST0_STATUS (0x1280)
#define NIC_PF_BIST1_STATUS (0x1288)
#define NIC_PF_BIST2_STATUS (0x1290)
#define NIC_PF_BIST3_STATUS (0x1298)
#define NIC_PF_ECC0_SBE_INT (0x2000)
#define NIC_PF_ECC0_SBE_INT_W1S (0x2008)
#define NIC_PF_ECC0_SBE_ENA_W1C (0x2010)
#define NIC_PF_ECC0_SBE_ENA_W1S (0x2018)
#define NIC_PF_ECC0_DBE_INT (0x2100)
#define NIC_PF_ECC0_DBE_INT_W1S (0x2108)
#define NIC_PF_ECC0_DBE_ENA_W1C (0x2110)
#define NIC_PF_ECC0_DBE_ENA_W1S (0x2118)
#define NIC_PF_ECC1_SBE_INT (0x2200)
#define NIC_PF_ECC1_SBE_INT_W1S (0x2208)
#define NIC_PF_ECC1_SBE_ENA_W1C (0x2210)
#define NIC_PF_ECC1_SBE_ENA_W1S (0x2218)
#define NIC_PF_ECC1_DBE_INT (0x2300)
#define NIC_PF_ECC1_DBE_INT_W1S (0x2308)
#define NIC_PF_ECC1_DBE_ENA_W1C (0x2310)
#define NIC_PF_ECC1_DBE_ENA_W1S (0x2318)
#define NIC_PF_ECC2_SBE_INT (0x2400)
#define NIC_PF_ECC2_SBE_INT_W1S (0x2408)
#define NIC_PF_ECC2_SBE_ENA_W1C (0x2410)
#define NIC_PF_ECC2_SBE_ENA_W1S (0x2418)
#define NIC_PF_ECC2_DBE_INT (0x2500)
#define NIC_PF_ECC2_DBE_INT_W1S (0x2508)
#define NIC_PF_ECC2_DBE_ENA_W1C (0x2510)
#define NIC_PF_ECC2_DBE_ENA_W1S (0x2518)
#define NIC_PF_ECC3_SBE_INT (0x2600)
#define NIC_PF_ECC3_SBE_INT_W1S (0x2608)
#define NIC_PF_ECC3_SBE_ENA_W1C (0x2610)
#define NIC_PF_ECC3_SBE_ENA_W1S (0x2618)
#define NIC_PF_ECC3_DBE_INT (0x2700)
#define NIC_PF_ECC3_DBE_INT_W1S (0x2708)
#define NIC_PF_ECC3_DBE_ENA_W1C (0x2710)
#define NIC_PF_ECC3_DBE_ENA_W1S (0x2718)
#define NIC_PF_CPI_0_2047_CFG (0x200000)
#define NIC_PF_RSSI_0_4097_RQ (0x220000)
#define NIC_PF_LMAC_0_7_CFG (0x240000)
#define NIC_PF_LMAC_0_7_SW_XOFF (0x242000)
#define NIC_PF_LMAC_0_7_CREDIT (0x244000)
#define NIC_PF_CHAN_0_255_TX_CFG (0x400000)
#define NIC_PF_CHAN_0_255_RX_CFG (0x420000)
#define NIC_PF_CHAN_0_255_SW_XOFF (0x440000)
#define NIC_PF_CHAN_0_255_CREDIT (0x460000)
#define NIC_PF_CHAN_0_255_RX_BP_CFG (0x480000)
#define NIC_PF_SW_SYNC_RX (0x490000)
#define NIC_PF_SW_SYNC_RX_DONE (0x490008)
#define NIC_PF_TL2_0_63_CFG (0x500000)
#define NIC_PF_TL2_0_63_PRI (0x520000)
#define NIC_PF_TL2_0_63_SH_STATUS (0x580000)
#define NIC_PF_TL3A_0_63_CFG (0x5F0000)
#define NIC_PF_TL3_0_255_CFG (0x600000)
#define NIC_PF_TL3_0_255_CHAN (0x620000)
#define NIC_PF_TL3_0_255_PIR (0x640000)
#define NIC_PF_TL3_0_255_SW_XOFF (0x660000)
#define NIC_PF_TL3_0_255_CNM_RATE (0x680000)
#define NIC_PF_TL3_0_255_SH_STATUS (0x6A0000)
#define NIC_PF_TL4A_0_255_CFG (0x6F0000)
#define NIC_PF_TL4_0_1023_CFG (0x800000)
#define NIC_PF_TL4_0_1023_SW_XOFF (0x820000)
#define NIC_PF_TL4_0_1023_SH_STATUS (0x840000)
#define NIC_PF_TL4A_0_1023_CNM_RATE (0x880000)
#define NIC_PF_TL4A_0_1023_CNM_STATUS (0x8A0000)
#define NIC_PF_VF_0_127_MAILBOX_0_1 (0x20002030)
#define NIC_PF_VNIC_0_127_TX_STAT_0_4 (0x20004000)
#define NIC_PF_VNIC_0_127_RX_STAT_0_13 (0x20004100)
#define NIC_PF_QSET_0_127_LOCK_0_15 (0x20006000)
#define NIC_PF_QSET_0_127_CFG (0x20010000)
#define NIC_PF_QSET_0_127_RQ_0_7_CFG (0x20010400)
#define NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG (0x20010420)
#define NIC_PF_QSET_0_127_RQ_0_7_BP_CFG (0x20010500)
#define NIC_PF_QSET_0_127_RQ_0_7_STAT_0_1 (0x20010600)
#define NIC_PF_QSET_0_127_SQ_0_7_CFG (0x20010C00)
#define NIC_PF_QSET_0_127_SQ_0_7_CFG2 (0x20010C08)
#define NIC_PF_QSET_0_127_SQ_0_7_STAT_0_1 (0x20010D00)
#define NIC_PF_MSIX_VEC_0_18_ADDR (0x000000)
#define NIC_PF_MSIX_VEC_0_CTL (0x000008)
#define NIC_PF_MSIX_PBA_0 (0x0F0000)
#define NIC_PF_MSIX_VEC_0_18_ADDR (0x000000)
#define NIC_PF_MSIX_VEC_0_CTL (0x000008)
#define NIC_PF_MSIX_PBA_0 (0x0F0000)
/* Virtual function register offsets */
#define NIC_VNIC_CFG (0x000020)
#define NIC_VF_PF_MAILBOX_0_1 (0x000130)
#define NIC_VF_INT (0x000200)
#define NIC_VF_INT_W1S (0x000220)
#define NIC_VF_ENA_W1C (0x000240)
#define NIC_VF_ENA_W1S (0x000260)
#define NIC_VNIC_CFG (0x000020)
#define NIC_VF_PF_MAILBOX_0_1 (0x000130)
#define NIC_VF_INT (0x000200)
#define NIC_VF_INT_W1S (0x000220)
#define NIC_VF_ENA_W1C (0x000240)
#define NIC_VF_ENA_W1S (0x000260)
#define NIC_VNIC_RSS_CFG (0x0020E0)
#define NIC_VNIC_RSS_KEY_0_4 (0x002200)
#define NIC_VNIC_TX_STAT_0_4 (0x004000)
#define NIC_VNIC_RX_STAT_0_13 (0x004100)
#define NIC_QSET_RQ_GEN_CFG (0x010010)
#define NIC_VNIC_RSS_CFG (0x0020E0)
#define NIC_VNIC_RSS_KEY_0_4 (0x002200)
#define NIC_VNIC_TX_STAT_0_4 (0x004000)
#define NIC_VNIC_RX_STAT_0_13 (0x004100)
#define NIC_QSET_RQ_GEN_CFG (0x010010)
#define NIC_QSET_CQ_0_7_CFG (0x010400)
#define NIC_QSET_CQ_0_7_CFG2 (0x010408)
#define NIC_QSET_CQ_0_7_THRESH (0x010410)
#define NIC_QSET_CQ_0_7_BASE (0x010420)
#define NIC_QSET_CQ_0_7_HEAD (0x010428)
#define NIC_QSET_CQ_0_7_TAIL (0x010430)
#define NIC_QSET_CQ_0_7_DOOR (0x010438)
#define NIC_QSET_CQ_0_7_STATUS (0x010440)
#define NIC_QSET_CQ_0_7_STATUS2 (0x010448)
#define NIC_QSET_CQ_0_7_DEBUG (0x010450)
#define NIC_QSET_CQ_0_7_CFG (0x010400)
#define NIC_QSET_CQ_0_7_CFG2 (0x010408)
#define NIC_QSET_CQ_0_7_THRESH (0x010410)
#define NIC_QSET_CQ_0_7_BASE (0x010420)
#define NIC_QSET_CQ_0_7_HEAD (0x010428)
#define NIC_QSET_CQ_0_7_TAIL (0x010430)
#define NIC_QSET_CQ_0_7_DOOR (0x010438)
#define NIC_QSET_CQ_0_7_STATUS (0x010440)
#define NIC_QSET_CQ_0_7_STATUS2 (0x010448)
#define NIC_QSET_CQ_0_7_DEBUG (0x010450)
#define NIC_QSET_RQ_0_7_CFG (0x010600)
#define NIC_QSET_RQ_0_7_STAT_0_1 (0x010700)
#define NIC_QSET_RQ_0_7_CFG (0x010600)
#define NIC_QSET_RQ_0_7_STAT_0_1 (0x010700)
#define NIC_QSET_SQ_0_7_CFG (0x010800)
#define NIC_QSET_SQ_0_7_THRESH (0x010810)
#define NIC_QSET_SQ_0_7_BASE (0x010820)
#define NIC_QSET_SQ_0_7_HEAD (0x010828)
#define NIC_QSET_SQ_0_7_TAIL (0x010830)
#define NIC_QSET_SQ_0_7_DOOR (0x010838)
#define NIC_QSET_SQ_0_7_STATUS (0x010840)
#define NIC_QSET_SQ_0_7_DEBUG (0x010848)
#define NIC_QSET_SQ_0_7_CNM_CHG (0x010860)
#define NIC_QSET_SQ_0_7_STAT_0_1 (0x010900)
#define NIC_QSET_SQ_0_7_CFG (0x010800)
#define NIC_QSET_SQ_0_7_THRESH (0x010810)
#define NIC_QSET_SQ_0_7_BASE (0x010820)
#define NIC_QSET_SQ_0_7_HEAD (0x010828)
#define NIC_QSET_SQ_0_7_TAIL (0x010830)
#define NIC_QSET_SQ_0_7_DOOR (0x010838)
#define NIC_QSET_SQ_0_7_STATUS (0x010840)
#define NIC_QSET_SQ_0_7_DEBUG (0x010848)
#define NIC_QSET_SQ_0_7_CNM_CHG (0x010860)
#define NIC_QSET_SQ_0_7_STAT_0_1 (0x010900)
#define NIC_QSET_RBDR_0_1_CFG (0x010C00)
#define NIC_QSET_RBDR_0_1_THRESH (0x010C10)
#define NIC_QSET_RBDR_0_1_BASE (0x010C20)
#define NIC_QSET_RBDR_0_1_HEAD (0x010C28)
#define NIC_QSET_RBDR_0_1_TAIL (0x010C30)
#define NIC_QSET_RBDR_0_1_DOOR (0x010C38)
#define NIC_QSET_RBDR_0_1_STATUS0 (0x010C40)
#define NIC_QSET_RBDR_0_1_STATUS1 (0x010C48)
#define NIC_QSET_RBDR_0_1_PREFETCH_STATUS (0x010C50)
#define NIC_QSET_RBDR_0_1_CFG (0x010C00)
#define NIC_QSET_RBDR_0_1_THRESH (0x010C10)
#define NIC_QSET_RBDR_0_1_BASE (0x010C20)
#define NIC_QSET_RBDR_0_1_HEAD (0x010C28)
#define NIC_QSET_RBDR_0_1_TAIL (0x010C30)
#define NIC_QSET_RBDR_0_1_DOOR (0x010C38)
#define NIC_QSET_RBDR_0_1_STATUS0 (0x010C40)
#define NIC_QSET_RBDR_0_1_STATUS1 (0x010C48)
#define NIC_QSET_RBDR_0_1_PREFETCH_STATUS (0x010C50)
#define NIC_VF_MSIX_VECTOR_0_19_ADDR (0x000000)
#define NIC_VF_MSIX_VECTOR_0_19_CTL (0x000008)
#define NIC_VF_MSIX_PBA (0x0F0000)
#define NIC_VF_MSIX_VECTOR_0_19_ADDR (0x000000)
#define NIC_VF_MSIX_VECTOR_0_19_CTL (0x000008)
#define NIC_VF_MSIX_PBA (0x0F0000)
/* Offsets within registers */
#define NIC_MSIX_VEC_SHIFT 4
#define NIC_Q_NUM_SHIFT 18
#define NIC_QS_ID_SHIFT 21
#define NIC_VF_NUM_SHIFT 21
#define NIC_MSIX_VEC_SHIFT 4
#define NIC_Q_NUM_SHIFT 18
#define NIC_QS_ID_SHIFT 21
#define NIC_VF_NUM_SHIFT 21
/* Port kind configuration register */
struct pkind_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 reserved_42_63:22;
u64 hdr_sl:5; /* Header skip length */
u64 rx_hdr:3; /* TNS Receive header present */
u64 lenerr_en:1;/* L2 length error check enable */
u64 reserved_32_32:1;
u64 maxlen:16; /* Max frame size */
u64 minlen:16; /* Min frame size */
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 minlen:16;
u64 maxlen:16;
u64 reserved_32_32:1;
u64 lenerr_en:1;
u64 rx_hdr:3;
u64 hdr_sl:5;
u64 reserved_42_63:22;
#endif
uint64_t minlen:16;
uint64_t maxlen:16;
uint64_t reserved_32_32:1;
uint64_t lenerr_en:1;
uint64_t rx_hdr:3;
uint64_t hdr_sl:5;
uint64_t reserved_42_63:22;
};
#endif /* NIC_REG_H */

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -28,16 +28,15 @@
*/
#ifndef NICVF_QUEUES_H
#define NICVF_QUEUES_H
#define NICVF_QUEUES_H
#include <linux/netdevice.h>
#include "q_struct.h"
#define MAX_QUEUE_SET 128
#define MAX_RCV_QUEUES_PER_QS 8
#define MAX_RCV_BUF_DESC_RINGS_PER_QS 2
#define MAX_SND_QUEUES_PER_QS 8
#define MAX_CMP_QUEUES_PER_QS 8
#define MAX_QUEUE_SET 128
#define MAX_RCV_QUEUES_PER_QS 8
#define MAX_RCV_BUF_DESC_RINGS_PER_QS 2
#define MAX_SND_QUEUES_PER_QS 8
#define MAX_CMP_QUEUES_PER_QS 8
/* VF's queue interrupt ranges */
#define NICVF_INTR_ID_CQ 0
@ -47,100 +46,103 @@
#define NICVF_INTR_ID_QS_ERR 19
#define for_each_cq_irq(irq) \
for (irq = NICVF_INTR_ID_CQ; irq < NICVF_INTR_ID_SQ; irq++)
for ((irq) = NICVF_INTR_ID_CQ; (irq) < NICVF_INTR_ID_SQ; (irq)++)
#define for_each_sq_irq(irq) \
for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_RBDR; irq++)
for ((irq) = NICVF_INTR_ID_SQ; (irq) < NICVF_INTR_ID_RBDR; (irq)++)
#define for_each_rbdr_irq(irq) \
for (irq = NICVF_INTR_ID_RBDR; irq < NICVF_INTR_ID_MISC; irq++)
for ((irq) = NICVF_INTR_ID_RBDR; (irq) < NICVF_INTR_ID_MISC; (irq)++)
#define RBDR_SIZE0 0ULL /* 8K entries */
#define RBDR_SIZE1 1ULL /* 16K entries */
#define RBDR_SIZE2 2ULL /* 32K entries */
#define RBDR_SIZE3 3ULL /* 64K entries */
#define RBDR_SIZE4 4ULL /* 126K entries */
#define RBDR_SIZE5 5ULL /* 256K entries */
#define RBDR_SIZE6 6ULL /* 512K entries */
#define RBDR_SIZE0 0UL /* 8K entries */
#define RBDR_SIZE1 1UL /* 16K entries */
#define RBDR_SIZE2 2UL /* 32K entries */
#define RBDR_SIZE3 3UL /* 64K entries */
#define RBDR_SIZE4 4UL /* 126K entries */
#define RBDR_SIZE5 5UL /* 256K entries */
#define RBDR_SIZE6 6UL /* 512K entries */
#define SND_QUEUE_SIZE0 0ULL /* 1K entries */
#define SND_QUEUE_SIZE1 1ULL /* 2K entries */
#define SND_QUEUE_SIZE2 2ULL /* 4K entries */
#define SND_QUEUE_SIZE3 3ULL /* 8K entries */
#define SND_QUEUE_SIZE4 4ULL /* 16K entries */
#define SND_QUEUE_SIZE5 5ULL /* 32K entries */
#define SND_QUEUE_SIZE6 6ULL /* 64K entries */
#define SND_QUEUE_SIZE0 0UL /* 1K entries */
#define SND_QUEUE_SIZE1 1UL /* 2K entries */
#define SND_QUEUE_SIZE2 2UL /* 4K entries */
#define SND_QUEUE_SIZE3 3UL /* 8K entries */
#define SND_QUEUE_SIZE4 4UL /* 16K entries */
#define SND_QUEUE_SIZE5 5UL /* 32K entries */
#define SND_QUEUE_SIZE6 6UL /* 64K entries */
#define CMP_QUEUE_SIZE0 0ULL /* 1K entries */
#define CMP_QUEUE_SIZE1 1ULL /* 2K entries */
#define CMP_QUEUE_SIZE2 2ULL /* 4K entries */
#define CMP_QUEUE_SIZE3 3ULL /* 8K entries */
#define CMP_QUEUE_SIZE4 4ULL /* 16K entries */
#define CMP_QUEUE_SIZE5 5ULL /* 32K entries */
#define CMP_QUEUE_SIZE6 6ULL /* 64K entries */
#define CMP_QUEUE_SIZE0 0UL /* 1K entries */
#define CMP_QUEUE_SIZE1 1UL /* 2K entries */
#define CMP_QUEUE_SIZE2 2UL /* 4K entries */
#define CMP_QUEUE_SIZE3 3UL /* 8K entries */
#define CMP_QUEUE_SIZE4 4UL /* 16K entries */
#define CMP_QUEUE_SIZE5 5UL /* 32K entries */
#define CMP_QUEUE_SIZE6 6UL /* 64K entries */
/* Default queue count per QS, its lengths and threshold values */
#define RBDR_CNT 1
#define RCV_QUEUE_CNT 8
#define SND_QUEUE_CNT 8
#define CMP_QUEUE_CNT 8 /* Max of RCV and SND qcount */
#define RBDR_CNT 1
#define RCV_QUEUE_CNT 8
#define SND_QUEUE_CNT 8
#define CMP_QUEUE_CNT 8 /* Max of RCV and SND qcount */
#define SND_QSIZE SND_QUEUE_SIZE2
#define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10))
#define MAX_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE6 + 10))
#define SND_QUEUE_THRESH 2ULL
#define MIN_SQ_DESC_PER_PKT_XMIT 2
#define SND_QSIZE SND_QUEUE_SIZE2
#define SND_QUEUE_LEN (1UL << (SND_QSIZE + 10))
#define MAX_SND_QUEUE_LEN (1UL << (SND_QUEUE_SIZE6 + 10))
#define SND_QUEUE_THRESH 2UL
#define MIN_SQ_DESC_PER_PKT_XMIT 2
/* Since timestamp not enabled, otherwise 2 */
#define MAX_CQE_PER_PKT_XMIT 1
#define MAX_CQE_PER_PKT_XMIT 1
/* Keep CQ and SQ sizes same, if timestamping
/*
* Keep CQ and SQ sizes same, if timestamping
* is enabled this equation will change.
*/
#define CMP_QSIZE CMP_QUEUE_SIZE2
#define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10))
#define CMP_QUEUE_CQE_THRESH 0
#define CMP_QUEUE_TIMER_THRESH 220 /* 10usec */
#define CMP_QSIZE CMP_QUEUE_SIZE2
#define CMP_QUEUE_LEN (1UL << (CMP_QSIZE + 10))
#define CMP_QUEUE_CQE_THRESH 0
#define CMP_QUEUE_TIMER_THRESH 220 /* 10usec */
#define RBDR_SIZE RBDR_SIZE0
#define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13))
#define MAX_RCV_BUF_COUNT (1ULL << (RBDR_SIZE6 + 13))
#define RBDR_THRESH (RCV_BUF_COUNT / 2)
#define DMA_BUFFER_LEN 2048 /* In multiples of 128bytes */
#define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \
(NICVF_RCV_BUF_ALIGN_BYTES * 2))
#define RCV_DATA_OFFSET NICVF_RCV_BUF_ALIGN_BYTES
#define RBDR_SIZE RBDR_SIZE0
#define RCV_BUF_COUNT (1UL << (RBDR_SIZE + 13))
#define MAX_RCV_BUF_COUNT (1UL << (RBDR_SIZE6 + 13))
#define RBDR_THRESH (RCV_BUF_COUNT / 2)
#define DMA_BUFFER_LEN 2048 /* In multiples of 128bytes */
#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
MAX_CQE_PER_PKT_XMIT)
#define MAX_CQES_FOR_TX \
((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * MAX_CQE_PER_PKT_XMIT)
/* Calculate number of CQEs to reserve for all SQEs.
* Its 1/256th level of CQ size.
* '+ 1' to account for pipelining
*/
#define RQ_CQ_DROP ((256 / (CMP_QUEUE_LEN / \
(CMP_QUEUE_LEN - MAX_CQES_FOR_TX))) + 1)
#define RQ_CQ_DROP \
((256 / (CMP_QUEUE_LEN / (CMP_QUEUE_LEN - MAX_CQES_FOR_TX))) + 1)
/* Descriptor size in bytes */
#define SND_QUEUE_DESC_SIZE 16
#define CMP_QUEUE_DESC_SIZE 512
#define SND_QUEUE_DESC_SIZE 16
#define CMP_QUEUE_DESC_SIZE 512
/* Buffer / descriptor alignments */
#define NICVF_RCV_BUF_ALIGN 7
#define NICVF_RCV_BUF_ALIGN_BYTES (1ULL << NICVF_RCV_BUF_ALIGN)
#define NICVF_CQ_BASE_ALIGN_BYTES 512 /* 9 bits */
#define NICVF_SQ_BASE_ALIGN_BYTES 128 /* 7 bits */
#define NICVF_RCV_BUF_ALIGN 7
#define NICVF_RCV_BUF_ALIGN_BYTES (1UL << NICVF_RCV_BUF_ALIGN)
#define NICVF_CQ_BASE_ALIGN_BYTES 512 /* 9 bits */
#define NICVF_SQ_BASE_ALIGN_BYTES 128 /* 7 bits */
#define NICVF_ALIGNED_ADDR(addr, align_bytes) \
roundup2((addr), (align_bytes))
#define NICVF_ADDR_ALIGN_LEN(addr, bytes) \
(NICVF_ALIGNED_ADDR((addr), (bytes)) - (bytes))
#define NICVF_RCV_BUF_ALIGN_LEN(addr) \
(NICVF_ALIGNED_ADDR((addr), NICVF_RCV_BUF_ALIGN_BYTES) - (addr))
#define NICVF_TXBUF_MAXSIZE 9212 /* Total max payload without TSO */
#define NICVF_TXBUF_NSEGS 256 /* Single command is at most 256 buffers
(hdr + 255 subcmds) */
#define NICVF_ALIGNED_ADDR(ADDR, ALIGN_BYTES) ALIGN(ADDR, ALIGN_BYTES)
#define NICVF_ADDR_ALIGN_LEN(ADDR, BYTES)\
(NICVF_ALIGNED_ADDR(ADDR, BYTES) - BYTES)
#define NICVF_RCV_BUF_ALIGN_LEN(X)\
(NICVF_ALIGNED_ADDR(X, NICVF_RCV_BUF_ALIGN_BYTES) - X)
/* Queue enable/disable */
#define NICVF_SQ_EN BIT_ULL(19)
#define NICVF_SQ_EN (1UL << 19)
/* Queue reset */
#define NICVF_CQ_RESET BIT_ULL(41)
#define NICVF_SQ_RESET BIT_ULL(17)
#define NICVF_RBDR_RESET BIT_ULL(43)
#define NICVF_CQ_RESET (1UL << 41)
#define NICVF_SQ_RESET (1UL << 17)
#define NICVF_RBDR_RESET (1UL << 43)
enum CQ_RX_ERRLVL_E {
CQ_ERRLVL_MAC,
@ -203,22 +205,22 @@ enum CQ_TX_ERROP_E {
struct cmp_queue_stats {
struct tx_stats {
u64 good;
u64 desc_fault;
u64 hdr_cons_err;
u64 subdesc_err;
u64 imm_size_oflow;
u64 data_seq_err;
u64 mem_seq_err;
u64 lock_viol;
u64 data_fault;
u64 tstmp_conflict;
u64 tstmp_timeout;
u64 mem_fault;
u64 csum_overlap;
u64 csum_overflow;
uint64_t good;
uint64_t desc_fault;
uint64_t hdr_cons_err;
uint64_t subdesc_err;
uint64_t imm_size_oflow;
uint64_t data_seq_err;
uint64_t mem_seq_err;
uint64_t lock_viol;
uint64_t data_fault;
uint64_t tstmp_conflict;
uint64_t tstmp_timeout;
uint64_t mem_fault;
uint64_t csum_overlap;
uint64_t csum_overflow;
} tx;
} ____cacheline_aligned_in_smp;
} __aligned(CACHE_LINE_SIZE);
enum RQ_SQ_STATS {
RQ_SQ_STATS_OCTS,
@ -226,141 +228,173 @@ enum RQ_SQ_STATS {
};
struct rx_tx_queue_stats {
u64 bytes;
u64 pkts;
} ____cacheline_aligned_in_smp;
uint64_t bytes;
uint64_t pkts;
} __aligned(CACHE_LINE_SIZE);
struct q_desc_mem {
dma_addr_t dma;
u64 size;
u16 q_len;
dma_addr_t phys_base;
bus_dma_tag_t dmat;
bus_dmamap_t dmap;
void *base;
void *unalign_base;
bus_addr_t phys_base;
uint64_t size;
uint16_t q_len;
};
struct rbdr {
bool enable;
u32 dma_size;
u32 frag_len;
u32 thresh; /* Threshold level for interrupt */
void *desc;
u32 head;
u32 tail;
struct q_desc_mem dmem;
} ____cacheline_aligned_in_smp;
boolean_t enable;
uint32_t dma_size;
uint32_t frag_len;
uint32_t thresh; /* Threshold level for interrupt */
void *desc;
uint32_t head;
uint32_t tail;
struct q_desc_mem dmem;
struct nicvf *nic;
int idx;
struct task rbdr_task;
struct task rbdr_task_nowait;
struct taskqueue *rbdr_taskq;
bus_dma_tag_t rbdr_buff_dmat;
bus_dmamap_t *rbdr_buff_dmaps;
} __aligned(CACHE_LINE_SIZE);
struct rcv_queue {
bool enable;
boolean_t enable;
struct rbdr *rbdr_start;
struct rbdr *rbdr_cont;
bool en_tcp_reassembly;
u8 cq_qs; /* CQ's QS to which this RQ is assigned */
u8 cq_idx; /* CQ index (0 to 7) in the QS */
u8 cont_rbdr_qs; /* Continue buffer ptrs - QS num */
u8 cont_qs_rbdr_idx; /* RBDR idx in the cont QS */
u8 start_rbdr_qs; /* First buffer ptrs - QS num */
u8 start_qs_rbdr_idx; /* RBDR idx in the above QS */
u8 caching;
boolean_t en_tcp_reassembly;
uint8_t cq_qs; /* CQ's QS to which this RQ is assigned */
uint8_t cq_idx; /* CQ index (0 to 7) in the QS */
uint8_t cont_rbdr_qs; /* Continue buffer ptrs - QS num */
uint8_t cont_qs_rbdr_idx; /* RBDR idx in the cont QS */
uint8_t start_rbdr_qs; /* First buffer ptrs - QS num */
uint8_t start_qs_rbdr_idx; /* RBDR idx in the above QS */
uint8_t caching;
struct rx_tx_queue_stats stats;
} ____cacheline_aligned_in_smp;
} __aligned(CACHE_LINE_SIZE);
struct cmp_queue {
bool enable;
u16 thresh;
spinlock_t lock; /* lock to serialize processing CQEs */
void *desc;
struct q_desc_mem dmem;
boolean_t enable;
uint16_t thresh;
struct nicvf *nic;
int idx; /* This queue index */
struct buf_ring *rx_br; /* Reception buf ring */
struct mtx mtx; /* lock to serialize processing CQEs */
char mtx_name[32];
struct task cmp_task;
struct taskqueue *cmp_taskq;
void *desc;
struct q_desc_mem dmem;
struct cmp_queue_stats stats;
int irq;
} ____cacheline_aligned_in_smp;
int irq;
} __aligned(CACHE_LINE_SIZE);
struct snd_buff {
bus_dmamap_t dmap;
struct mbuf *mbuf;
};
struct snd_queue {
bool enable;
u8 cq_qs; /* CQ's QS to which this SQ is pointing */
u8 cq_idx; /* CQ index (0 to 7) in the above QS */
u16 thresh;
atomic_t free_cnt;
u32 head;
u32 tail;
u64 *skbuff;
void *desc;
boolean_t enable;
uint8_t cq_qs; /* CQ's QS to which this SQ is pointing */
uint8_t cq_idx; /* CQ index (0 to 7) in the above QS */
uint16_t thresh;
volatile int free_cnt;
uint32_t head;
uint32_t tail;
uint64_t *skbuff;
void *desc;
struct q_desc_mem dmem;
struct nicvf *nic;
int idx; /* This queue index */
bus_dma_tag_t snd_buff_dmat;
struct snd_buff *snd_buff;
struct buf_ring *br; /* Transmission buf ring */
struct mtx mtx;
char mtx_name[32];
struct task snd_task;
struct taskqueue *snd_taskq;
struct q_desc_mem dmem;
struct rx_tx_queue_stats stats;
} ____cacheline_aligned_in_smp;
} __aligned(CACHE_LINE_SIZE);
struct queue_set {
bool enable;
bool be_en;
u8 vnic_id;
u8 rq_cnt;
u8 cq_cnt;
u64 cq_len;
u8 sq_cnt;
u64 sq_len;
u8 rbdr_cnt;
u64 rbdr_len;
boolean_t enable;
boolean_t be_en;
uint8_t vnic_id;
uint8_t rq_cnt;
uint8_t cq_cnt;
uint64_t cq_len;
uint8_t sq_cnt;
uint64_t sq_len;
uint8_t rbdr_cnt;
uint64_t rbdr_len;
struct rcv_queue rq[MAX_RCV_QUEUES_PER_QS];
struct cmp_queue cq[MAX_CMP_QUEUES_PER_QS];
struct snd_queue sq[MAX_SND_QUEUES_PER_QS];
struct rbdr rbdr[MAX_RCV_BUF_DESC_RINGS_PER_QS];
} ____cacheline_aligned_in_smp;
#define GET_RBDR_DESC(RING, idx)\
(&(((struct rbdr_entry_t *)((RING)->desc))[idx]))
#define GET_SQ_DESC(RING, idx)\
(&(((struct sq_hdr_subdesc *)((RING)->desc))[idx]))
#define GET_CQ_DESC(RING, idx)\
(&(((union cq_desc_t *)((RING)->desc))[idx]))
struct task qs_err_task;
struct taskqueue *qs_err_taskq;
} __aligned(CACHE_LINE_SIZE);
#define GET_RBDR_DESC(RING, idx) \
(&(((struct rbdr_entry_t *)((RING)->desc))[(idx)]))
#define GET_SQ_DESC(RING, idx) \
(&(((struct sq_hdr_subdesc *)((RING)->desc))[(idx)]))
#define GET_CQ_DESC(RING, idx) \
(&(((union cq_desc_t *)((RING)->desc))[(idx)]))
/* CQ status bits */
#define CQ_WR_FULL BIT(26)
#define CQ_WR_DISABLE BIT(25)
#define CQ_WR_FAULT BIT(24)
#define CQ_WR_FUL (1UL << 26)
#define CQ_WR_DISABLE (1UL << 25)
#define CQ_WR_FAULT (1UL << 24)
#define CQ_CQE_COUNT (0xFFFF << 0)
#define CQ_ERR_MASK (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)
#define CQ_ERR_MASK (CQ_WR_FUL | CQ_WR_DISABLE | CQ_WR_FAULT)
void nicvf_config_vlan_stripping(struct nicvf *nic,
netdev_features_t features);
int nicvf_set_qset_resources(struct nicvf *nic);
int nicvf_config_data_transfer(struct nicvf *nic, bool enable);
void nicvf_qset_config(struct nicvf *nic, bool enable);
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
int qidx, bool enable);
#define NICVF_TX_LOCK(sq) mtx_lock(&(sq)->mtx)
#define NICVF_TX_TRYLOCK(sq) mtx_trylock(&(sq)->mtx)
#define NICVF_TX_UNLOCK(sq) mtx_unlock(&(sq)->mtx)
#define NICVF_TX_LOCK_ASSERT(sq) mtx_assert(&(sq)->mtx, MA_OWNED)
void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx);
void nicvf_sq_disable(struct nicvf *nic, int qidx);
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt);
void nicvf_sq_free_used_descs(struct net_device *netdev,
struct snd_queue *sq, int qidx);
int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb);
#define NICVF_CMP_LOCK(cq) mtx_lock(&(cq)->mtx)
#define NICVF_CMP_UNLOCK(cq) mtx_unlock(&(cq)->mtx)
struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
void nicvf_rbdr_task(unsigned long data);
void nicvf_rbdr_work(struct work_struct *work);
int nicvf_set_qset_resources(struct nicvf *);
int nicvf_config_data_transfer(struct nicvf *, boolean_t);
void nicvf_qset_config(struct nicvf *, boolean_t);
void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);
void nicvf_enable_intr(struct nicvf *, int, int);
void nicvf_disable_intr(struct nicvf *, int, int);
void nicvf_clear_intr(struct nicvf *, int, int);
int nicvf_is_intr_enabled(struct nicvf *, int, int);
/* Register access APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_reg_read(struct nicvf *nic, u64 offset);
void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset);
void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
u64 qidx, u64 val);
u64 nicvf_queue_reg_read(struct nicvf *nic,
u64 offset, u64 qidx);
void nicvf_reg_write(struct nicvf *, uint64_t, uint64_t);
uint64_t nicvf_reg_read(struct nicvf *, uint64_t);
void nicvf_qset_reg_write(struct nicvf *, uint64_t, uint64_t);
uint64_t nicvf_qset_reg_read(struct nicvf *, uint64_t);
void nicvf_queue_reg_write(struct nicvf *, uint64_t, uint64_t, uint64_t);
uint64_t nicvf_queue_reg_read(struct nicvf *, uint64_t, uint64_t);
/* Stats */
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
int nicvf_check_cqe_rx_errs(struct nicvf *nic,
struct cmp_queue *cq, struct cqe_rx_t *cqe_rx);
int nicvf_check_cqe_tx_errs(struct nicvf *nic,
struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
void nicvf_update_rq_stats(struct nicvf *, int);
void nicvf_update_sq_stats(struct nicvf *, int);
int nicvf_check_cqe_rx_errs(struct nicvf *, struct cmp_queue *,
struct cqe_rx_t *);
int nicvf_check_cqe_tx_errs(struct nicvf *,struct cmp_queue *,
struct cqe_send_t *);
#endif /* NICVF_QUEUES_H */

View File

@ -30,6 +30,8 @@
#ifndef Q_STRUCT_H
#define Q_STRUCT_H
#define __LITTLE_ENDIAN_BITFIELD
/* Load transaction types for reading segment bytes specified by
* NIC_SEND_GATHER_S[LD_TYPE].
*/
@ -191,185 +193,185 @@ enum cqe_rx_err_opcode {
struct cqe_rx_t {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 cqe_type:4; /* W0 */
u64 stdn_fault:1;
u64 rsvd0:1;
u64 rq_qs:7;
u64 rq_idx:3;
u64 rsvd1:12;
u64 rss_alg:4;
u64 rsvd2:4;
u64 rb_cnt:4;
u64 vlan_found:1;
u64 vlan_stripped:1;
u64 vlan2_found:1;
u64 vlan2_stripped:1;
u64 l4_type:4;
u64 l3_type:4;
u64 l2_present:1;
u64 err_level:3;
u64 err_opcode:8;
uint64_t cqe_type:4; /* W0 */
uint64_t stdn_fault:1;
uint64_t rsvd0:1;
uint64_t rq_qs:7;
uint64_t rq_idx:3;
uint64_t rsvd1:12;
uint64_t rss_alg:4;
uint64_t rsvd2:4;
uint64_t rb_cnt:4;
uint64_t vlan_found:1;
uint64_t vlan_stripped:1;
uint64_t vlan2_found:1;
uint64_t vlan2_stripped:1;
uint64_t l4_type:4;
uint64_t l3_type:4;
uint64_t l2_present:1;
uint64_t err_level:3;
uint64_t err_opcode:8;
u64 pkt_len:16; /* W1 */
u64 l2_ptr:8;
u64 l3_ptr:8;
u64 l4_ptr:8;
u64 cq_pkt_len:8;
u64 align_pad:3;
u64 rsvd3:1;
u64 chan:12;
uint64_t pkt_len:16; /* W1 */
uint64_t l2_ptr:8;
uint64_t l3_ptr:8;
uint64_t l4_ptr:8;
uint64_t cq_pkt_len:8;
uint64_t align_pad:3;
uint64_t rsvd3:1;
uint64_t chan:12;
u64 rss_tag:32; /* W2 */
u64 vlan_tci:16;
u64 vlan_ptr:8;
u64 vlan2_ptr:8;
uint64_t rss_tag:32; /* W2 */
uint64_t vlan_tci:16;
uint64_t vlan_ptr:8;
uint64_t vlan2_ptr:8;
u64 rb3_sz:16; /* W3 */
u64 rb2_sz:16;
u64 rb1_sz:16;
u64 rb0_sz:16;
uint64_t rb3_sz:16; /* W3 */
uint64_t rb2_sz:16;
uint64_t rb1_sz:16;
uint64_t rb0_sz:16;
u64 rb7_sz:16; /* W4 */
u64 rb6_sz:16;
u64 rb5_sz:16;
u64 rb4_sz:16;
uint64_t rb7_sz:16; /* W4 */
uint64_t rb6_sz:16;
uint64_t rb5_sz:16;
uint64_t rb4_sz:16;
u64 rb11_sz:16; /* W5 */
u64 rb10_sz:16;
u64 rb9_sz:16;
u64 rb8_sz:16;
uint64_t rb11_sz:16; /* W5 */
uint64_t rb10_sz:16;
uint64_t rb9_sz:16;
uint64_t rb8_sz:16;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 err_opcode:8;
u64 err_level:3;
u64 l2_present:1;
u64 l3_type:4;
u64 l4_type:4;
u64 vlan2_stripped:1;
u64 vlan2_found:1;
u64 vlan_stripped:1;
u64 vlan_found:1;
u64 rb_cnt:4;
u64 rsvd2:4;
u64 rss_alg:4;
u64 rsvd1:12;
u64 rq_idx:3;
u64 rq_qs:7;
u64 rsvd0:1;
u64 stdn_fault:1;
u64 cqe_type:4; /* W0 */
u64 chan:12;
u64 rsvd3:1;
u64 align_pad:3;
u64 cq_pkt_len:8;
u64 l4_ptr:8;
u64 l3_ptr:8;
u64 l2_ptr:8;
u64 pkt_len:16; /* W1 */
u64 vlan2_ptr:8;
u64 vlan_ptr:8;
u64 vlan_tci:16;
u64 rss_tag:32; /* W2 */
u64 rb0_sz:16;
u64 rb1_sz:16;
u64 rb2_sz:16;
u64 rb3_sz:16; /* W3 */
u64 rb4_sz:16;
u64 rb5_sz:16;
u64 rb6_sz:16;
u64 rb7_sz:16; /* W4 */
u64 rb8_sz:16;
u64 rb9_sz:16;
u64 rb10_sz:16;
u64 rb11_sz:16; /* W5 */
uint64_t err_opcode:8;
uint64_t err_level:3;
uint64_t l2_present:1;
uint64_t l3_type:4;
uint64_t l4_type:4;
uint64_t vlan2_stripped:1;
uint64_t vlan2_found:1;
uint64_t vlan_stripped:1;
uint64_t vlan_found:1;
uint64_t rb_cnt:4;
uint64_t rsvd2:4;
uint64_t rss_alg:4;
uint64_t rsvd1:12;
uint64_t rq_idx:3;
uint64_t rq_qs:7;
uint64_t rsvd0:1;
uint64_t stdn_fault:1;
uint64_t cqe_type:4; /* W0 */
uint64_t chan:12;
uint64_t rsvd3:1;
uint64_t align_pad:3;
uint64_t cq_pkt_len:8;
uint64_t l4_ptr:8;
uint64_t l3_ptr:8;
uint64_t l2_ptr:8;
uint64_t pkt_len:16; /* W1 */
uint64_t vlan2_ptr:8;
uint64_t vlan_ptr:8;
uint64_t vlan_tci:16;
uint64_t rss_tag:32; /* W2 */
uint64_t rb0_sz:16;
uint64_t rb1_sz:16;
uint64_t rb2_sz:16;
uint64_t rb3_sz:16; /* W3 */
uint64_t rb4_sz:16;
uint64_t rb5_sz:16;
uint64_t rb6_sz:16;
uint64_t rb7_sz:16; /* W4 */
uint64_t rb8_sz:16;
uint64_t rb9_sz:16;
uint64_t rb10_sz:16;
uint64_t rb11_sz:16; /* W5 */
#endif
u64 rb0_ptr:64;
u64 rb1_ptr:64;
u64 rb2_ptr:64;
u64 rb3_ptr:64;
u64 rb4_ptr:64;
u64 rb5_ptr:64;
u64 rb6_ptr:64;
u64 rb7_ptr:64;
u64 rb8_ptr:64;
u64 rb9_ptr:64;
u64 rb10_ptr:64;
u64 rb11_ptr:64;
uint64_t rb0_ptr:64;
uint64_t rb1_ptr:64;
uint64_t rb2_ptr:64;
uint64_t rb3_ptr:64;
uint64_t rb4_ptr:64;
uint64_t rb5_ptr:64;
uint64_t rb6_ptr:64;
uint64_t rb7_ptr:64;
uint64_t rb8_ptr:64;
uint64_t rb9_ptr:64;
uint64_t rb10_ptr:64;
uint64_t rb11_ptr:64;
};
struct cqe_rx_tcp_err_t {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 cqe_type:4; /* W0 */
u64 rsvd0:60;
uint64_t cqe_type:4; /* W0 */
uint64_t rsvd0:60;
u64 rsvd1:4; /* W1 */
u64 partial_first:1;
u64 rsvd2:27;
u64 rbdr_bytes:8;
u64 rsvd3:24;
uint64_t rsvd1:4; /* W1 */
uint64_t partial_first:1;
uint64_t rsvd2:27;
uint64_t rbdr_bytes:8;
uint64_t rsvd3:24;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 rsvd0:60;
u64 cqe_type:4;
uint64_t rsvd0:60;
uint64_t cqe_type:4;
u64 rsvd3:24;
u64 rbdr_bytes:8;
u64 rsvd2:27;
u64 partial_first:1;
u64 rsvd1:4;
uint64_t rsvd3:24;
uint64_t rbdr_bytes:8;
uint64_t rsvd2:27;
uint64_t partial_first:1;
uint64_t rsvd1:4;
#endif
};
struct cqe_rx_tcp_t {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 cqe_type:4; /* W0 */
u64 rsvd0:52;
u64 cq_tcp_status:8;
uint64_t cqe_type:4; /* W0 */
uint64_t rsvd0:52;
uint64_t cq_tcp_status:8;
u64 rsvd1:32; /* W1 */
u64 tcp_cntx_bytes:8;
u64 rsvd2:8;
u64 tcp_err_bytes:16;
uint64_t rsvd1:32; /* W1 */
uint64_t tcp_cntx_bytes:8;
uint64_t rsvd2:8;
uint64_t tcp_err_bytes:16;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 cq_tcp_status:8;
u64 rsvd0:52;
u64 cqe_type:4; /* W0 */
uint64_t cq_tcp_status:8;
uint64_t rsvd0:52;
uint64_t cqe_type:4; /* W0 */
u64 tcp_err_bytes:16;
u64 rsvd2:8;
u64 tcp_cntx_bytes:8;
u64 rsvd1:32; /* W1 */
uint64_t tcp_err_bytes:16;
uint64_t rsvd2:8;
uint64_t tcp_cntx_bytes:8;
uint64_t rsvd1:32; /* W1 */
#endif
};
struct cqe_send_t {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 cqe_type:4; /* W0 */
u64 rsvd0:4;
u64 sqe_ptr:16;
u64 rsvd1:4;
u64 rsvd2:10;
u64 sq_qs:7;
u64 sq_idx:3;
u64 rsvd3:8;
u64 send_status:8;
uint64_t cqe_type:4; /* W0 */
uint64_t rsvd0:4;
uint64_t sqe_ptr:16;
uint64_t rsvd1:4;
uint64_t rsvd2:10;
uint64_t sq_qs:7;
uint64_t sq_idx:3;
uint64_t rsvd3:8;
uint64_t send_status:8;
u64 ptp_timestamp:64; /* W1 */
uint64_t ptp_timestamp:64; /* W1 */
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 send_status:8;
u64 rsvd3:8;
u64 sq_idx:3;
u64 sq_qs:7;
u64 rsvd2:10;
u64 rsvd1:4;
u64 sqe_ptr:16;
u64 rsvd0:4;
u64 cqe_type:4; /* W0 */
uint64_t send_status:8;
uint64_t rsvd3:8;
uint64_t sq_idx:3;
uint64_t sq_qs:7;
uint64_t rsvd2:10;
uint64_t rsvd1:4;
uint64_t sqe_ptr:16;
uint64_t rsvd0:4;
uint64_t cqe_type:4; /* W0 */
u64 ptp_timestamp:64; /* W1 */
uint64_t ptp_timestamp:64; /* W1 */
#endif
};
union cq_desc_t {
u64 u[64];
uint64_t u[64];
struct cqe_send_t snd_hdr;
struct cqe_rx_t rx_hdr;
struct cqe_rx_tcp_t rx_tcp_hdr;
@ -378,54 +380,54 @@ union cq_desc_t {
struct rbdr_entry_t {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 rsvd0:15;
u64 buf_addr:42;
u64 cache_align:7;
uint64_t rsvd0:15;
uint64_t buf_addr:42;
uint64_t cache_align:7;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 cache_align:7;
u64 buf_addr:42;
u64 rsvd0:15;
uint64_t cache_align:7;
uint64_t buf_addr:42;
uint64_t rsvd0:15;
#endif
};
/* TCP reassembly context */
struct rbe_tcp_cnxt_t {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 tcp_pkt_cnt:12;
u64 rsvd1:4;
u64 align_hdr_bytes:4;
u64 align_ptr_bytes:4;
u64 ptr_bytes:16;
u64 rsvd2:24;
u64 cqe_type:4;
u64 rsvd0:54;
u64 tcp_end_reason:2;
u64 tcp_status:4;
uint64_t tcp_pkt_cnt:12;
uint64_t rsvd1:4;
uint64_t align_hdr_bytes:4;
uint64_t align_ptr_bytes:4;
uint64_t ptr_bytes:16;
uint64_t rsvd2:24;
uint64_t cqe_type:4;
uint64_t rsvd0:54;
uint64_t tcp_end_reason:2;
uint64_t tcp_status:4;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 tcp_status:4;
u64 tcp_end_reason:2;
u64 rsvd0:54;
u64 cqe_type:4;
u64 rsvd2:24;
u64 ptr_bytes:16;
u64 align_ptr_bytes:4;
u64 align_hdr_bytes:4;
u64 rsvd1:4;
u64 tcp_pkt_cnt:12;
uint64_t tcp_status:4;
uint64_t tcp_end_reason:2;
uint64_t rsvd0:54;
uint64_t cqe_type:4;
uint64_t rsvd2:24;
uint64_t ptr_bytes:16;
uint64_t align_ptr_bytes:4;
uint64_t align_hdr_bytes:4;
uint64_t rsvd1:4;
uint64_t tcp_pkt_cnt:12;
#endif
};
/* Always Big endian */
struct rx_hdr_t {
u64 opaque:32;
u64 rss_flow:8;
u64 skip_length:6;
u64 disable_rss:1;
u64 disable_tcp_reassembly:1;
u64 nodrop:1;
u64 dest_alg:2;
u64 rsvd0:2;
u64 dest_rq:11;
uint64_t opaque:32;
uint64_t rss_flow:8;
uint64_t skip_length:6;
uint64_t disable_rss:1;
uint64_t disable_tcp_reassembly:1;
uint64_t nodrop:1;
uint64_t dest_alg:2;
uint64_t rsvd0:2;
uint64_t dest_rq:11;
};
enum send_l4_csum_type {
@ -472,247 +474,247 @@ enum sq_subdesc_type {
struct sq_crc_subdesc {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 rsvd1:32;
u64 crc_ival:32;
u64 subdesc_type:4;
u64 crc_alg:2;
u64 rsvd0:10;
u64 crc_insert_pos:16;
u64 hdr_start:16;
u64 crc_len:16;
uint64_t rsvd1:32;
uint64_t crc_ival:32;
uint64_t subdesc_type:4;
uint64_t crc_alg:2;
uint64_t rsvd0:10;
uint64_t crc_insert_pos:16;
uint64_t hdr_start:16;
uint64_t crc_len:16;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 crc_len:16;
u64 hdr_start:16;
u64 crc_insert_pos:16;
u64 rsvd0:10;
u64 crc_alg:2;
u64 subdesc_type:4;
u64 crc_ival:32;
u64 rsvd1:32;
uint64_t crc_len:16;
uint64_t hdr_start:16;
uint64_t crc_insert_pos:16;
uint64_t rsvd0:10;
uint64_t crc_alg:2;
uint64_t subdesc_type:4;
uint64_t crc_ival:32;
uint64_t rsvd1:32;
#endif
};
struct sq_gather_subdesc {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 subdesc_type:4; /* W0 */
u64 ld_type:2;
u64 rsvd0:42;
u64 size:16;
uint64_t subdesc_type:4; /* W0 */
uint64_t ld_type:2;
uint64_t rsvd0:42;
uint64_t size:16;
u64 rsvd1:15; /* W1 */
u64 addr:49;
uint64_t rsvd1:15; /* W1 */
uint64_t addr:49;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 size:16;
u64 rsvd0:42;
u64 ld_type:2;
u64 subdesc_type:4; /* W0 */
uint64_t size:16;
uint64_t rsvd0:42;
uint64_t ld_type:2;
uint64_t subdesc_type:4; /* W0 */
u64 addr:49;
u64 rsvd1:15; /* W1 */
uint64_t addr:49;
uint64_t rsvd1:15; /* W1 */
#endif
};
/* SQ immediate subdescriptor */
struct sq_imm_subdesc {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 subdesc_type:4; /* W0 */
u64 rsvd0:46;
u64 len:14;
uint64_t subdesc_type:4; /* W0 */
uint64_t rsvd0:46;
uint64_t len:14;
u64 data:64; /* W1 */
uint64_t data:64; /* W1 */
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 len:14;
u64 rsvd0:46;
u64 subdesc_type:4; /* W0 */
uint64_t len:14;
uint64_t rsvd0:46;
uint64_t subdesc_type:4; /* W0 */
u64 data:64; /* W1 */
uint64_t data:64; /* W1 */
#endif
};
struct sq_mem_subdesc {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 subdesc_type:4; /* W0 */
u64 mem_alg:4;
u64 mem_dsz:2;
u64 wmem:1;
u64 rsvd0:21;
u64 offset:32;
uint64_t subdesc_type:4; /* W0 */
uint64_t mem_alg:4;
uint64_t mem_dsz:2;
uint64_t wmem:1;
uint64_t rsvd0:21;
uint64_t offset:32;
u64 rsvd1:15; /* W1 */
u64 addr:49;
uint64_t rsvd1:15; /* W1 */
uint64_t addr:49;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 offset:32;
u64 rsvd0:21;
u64 wmem:1;
u64 mem_dsz:2;
u64 mem_alg:4;
u64 subdesc_type:4; /* W0 */
uint64_t offset:32;
uint64_t rsvd0:21;
uint64_t wmem:1;
uint64_t mem_dsz:2;
uint64_t mem_alg:4;
uint64_t subdesc_type:4; /* W0 */
u64 addr:49;
u64 rsvd1:15; /* W1 */
uint64_t addr:49;
uint64_t rsvd1:15; /* W1 */
#endif
};
struct sq_hdr_subdesc {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 subdesc_type:4;
u64 tso:1;
u64 post_cqe:1; /* Post CQE on no error also */
u64 dont_send:1;
u64 tstmp:1;
u64 subdesc_cnt:8;
u64 csum_l4:2;
u64 csum_l3:1;
u64 rsvd0:5;
u64 l4_offset:8;
u64 l3_offset:8;
u64 rsvd1:4;
u64 tot_len:20; /* W0 */
uint64_t subdesc_type:4;
uint64_t tso:1;
uint64_t post_cqe:1; /* Post CQE on no error also */
uint64_t dont_send:1;
uint64_t tstmp:1;
uint64_t subdesc_cnt:8;
uint64_t csum_l4:2;
uint64_t csum_l3:1;
uint64_t rsvd0:5;
uint64_t l4_offset:8;
uint64_t l3_offset:8;
uint64_t rsvd1:4;
uint64_t tot_len:20; /* W0 */
u64 tso_sdc_cont:8;
u64 tso_sdc_first:8;
u64 tso_l4_offset:8;
u64 tso_flags_last:12;
u64 tso_flags_first:12;
u64 rsvd2:2;
u64 tso_max_paysize:14; /* W1 */
uint64_t tso_sdc_cont:8;
uint64_t tso_sdc_first:8;
uint64_t tso_l4_offset:8;
uint64_t tso_flags_last:12;
uint64_t tso_flags_first:12;
uint64_t rsvd2:2;
uint64_t tso_max_paysize:14; /* W1 */
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 tot_len:20;
u64 rsvd1:4;
u64 l3_offset:8;
u64 l4_offset:8;
u64 rsvd0:5;
u64 csum_l3:1;
u64 csum_l4:2;
u64 subdesc_cnt:8;
u64 tstmp:1;
u64 dont_send:1;
u64 post_cqe:1; /* Post CQE on no error also */
u64 tso:1;
u64 subdesc_type:4; /* W0 */
uint64_t tot_len:20;
uint64_t rsvd1:4;
uint64_t l3_offset:8;
uint64_t l4_offset:8;
uint64_t rsvd0:5;
uint64_t csum_l3:1;
uint64_t csum_l4:2;
uint64_t subdesc_cnt:8;
uint64_t tstmp:1;
uint64_t dont_send:1;
uint64_t post_cqe:1; /* Post CQE on no error also */
uint64_t tso:1;
uint64_t subdesc_type:4; /* W0 */
u64 tso_max_paysize:14;
u64 rsvd2:2;
u64 tso_flags_first:12;
u64 tso_flags_last:12;
u64 tso_l4_offset:8;
u64 tso_sdc_first:8;
u64 tso_sdc_cont:8; /* W1 */
uint64_t tso_max_paysize:14;
uint64_t rsvd2:2;
uint64_t tso_flags_first:12;
uint64_t tso_flags_last:12;
uint64_t tso_l4_offset:8;
uint64_t tso_sdc_first:8;
uint64_t tso_sdc_cont:8; /* W1 */
#endif
};
/* Queue config register formats */
struct rq_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 reserved_2_63:62;
u64 ena:1;
u64 tcp_ena:1;
uint64_t reserved_2_63:62;
uint64_t ena:1;
uint64_t tcp_ena:1;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 tcp_ena:1;
u64 ena:1;
u64 reserved_2_63:62;
uint64_t tcp_ena:1;
uint64_t ena:1;
uint64_t reserved_2_63:62;
#endif
};
struct cq_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 reserved_43_63:21;
u64 ena:1;
u64 reset:1;
u64 caching:1;
u64 reserved_35_39:5;
u64 qsize:3;
u64 reserved_25_31:7;
u64 avg_con:9;
u64 reserved_0_15:16;
uint64_t reserved_43_63:21;
uint64_t ena:1;
uint64_t reset:1;
uint64_t caching:1;
uint64_t reserved_35_39:5;
uint64_t qsize:3;
uint64_t reserved_25_31:7;
uint64_t avg_con:9;
uint64_t reserved_0_15:16;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 reserved_0_15:16;
u64 avg_con:9;
u64 reserved_25_31:7;
u64 qsize:3;
u64 reserved_35_39:5;
u64 caching:1;
u64 reset:1;
u64 ena:1;
u64 reserved_43_63:21;
uint64_t reserved_0_15:16;
uint64_t avg_con:9;
uint64_t reserved_25_31:7;
uint64_t qsize:3;
uint64_t reserved_35_39:5;
uint64_t caching:1;
uint64_t reset:1;
uint64_t ena:1;
uint64_t reserved_43_63:21;
#endif
};
struct sq_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 reserved_20_63:44;
u64 ena:1;
u64 reserved_18_18:1;
u64 reset:1;
u64 ldwb:1;
u64 reserved_11_15:5;
u64 qsize:3;
u64 reserved_3_7:5;
u64 tstmp_bgx_intf:3;
uint64_t reserved_20_63:44;
uint64_t ena:1;
uint64_t reserved_18_18:1;
uint64_t reset:1;
uint64_t ldwb:1;
uint64_t reserved_11_15:5;
uint64_t qsize:3;
uint64_t reserved_3_7:5;
uint64_t tstmp_bgx_intf:3;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 tstmp_bgx_intf:3;
u64 reserved_3_7:5;
u64 qsize:3;
u64 reserved_11_15:5;
u64 ldwb:1;
u64 reset:1;
u64 reserved_18_18:1;
u64 ena:1;
u64 reserved_20_63:44;
uint64_t tstmp_bgx_intf:3;
uint64_t reserved_3_7:5;
uint64_t qsize:3;
uint64_t reserved_11_15:5;
uint64_t ldwb:1;
uint64_t reset:1;
uint64_t reserved_18_18:1;
uint64_t ena:1;
uint64_t reserved_20_63:44;
#endif
};
struct rbdr_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 reserved_45_63:19;
u64 ena:1;
u64 reset:1;
u64 ldwb:1;
u64 reserved_36_41:6;
u64 qsize:4;
u64 reserved_25_31:7;
u64 avg_con:9;
u64 reserved_12_15:4;
u64 lines:12;
uint64_t reserved_45_63:19;
uint64_t ena:1;
uint64_t reset:1;
uint64_t ldwb:1;
uint64_t reserved_36_41:6;
uint64_t qsize:4;
uint64_t reserved_25_31:7;
uint64_t avg_con:9;
uint64_t reserved_12_15:4;
uint64_t lines:12;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 lines:12;
u64 reserved_12_15:4;
u64 avg_con:9;
u64 reserved_25_31:7;
u64 qsize:4;
u64 reserved_36_41:6;
u64 ldwb:1;
u64 reset:1;
u64 ena: 1;
u64 reserved_45_63:19;
uint64_t lines:12;
uint64_t reserved_12_15:4;
uint64_t avg_con:9;
uint64_t reserved_25_31:7;
uint64_t qsize:4;
uint64_t reserved_36_41:6;
uint64_t ldwb:1;
uint64_t reset:1;
uint64_t ena: 1;
uint64_t reserved_45_63:19;
#endif
};
struct qs_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 reserved_32_63:32;
u64 ena:1;
u64 reserved_27_30:4;
u64 sq_ins_ena:1;
u64 sq_ins_pos:6;
u64 lock_ena:1;
u64 lock_viol_cqe_ena:1;
u64 send_tstmp_ena:1;
u64 be:1;
u64 reserved_7_15:9;
u64 vnic:7;
uint64_t reserved_32_63:32;
uint64_t ena:1;
uint64_t reserved_27_30:4;
uint64_t sq_ins_ena:1;
uint64_t sq_ins_pos:6;
uint64_t lock_ena:1;
uint64_t lock_viol_cqe_ena:1;
uint64_t send_tstmp_ena:1;
uint64_t be:1;
uint64_t reserved_7_15:9;
uint64_t vnic:7;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 vnic:7;
u64 reserved_7_15:9;
u64 be:1;
u64 send_tstmp_ena:1;
u64 lock_viol_cqe_ena:1;
u64 lock_ena:1;
u64 sq_ins_pos:6;
u64 sq_ins_ena:1;
u64 reserved_27_30:4;
u64 ena:1;
u64 reserved_32_63:32;
uint64_t vnic:7;
uint64_t reserved_7_15:9;
uint64_t be:1;
uint64_t send_tstmp_ena:1;
uint64_t lock_viol_cqe_ena:1;
uint64_t lock_ena:1;
uint64_t sq_ins_pos:6;
uint64_t sq_ins_ena:1;
uint64_t reserved_27_30:4;
uint64_t ena:1;
uint64_t reserved_32_63:32;
#endif
};

File diff suppressed because it is too large Load Diff

View File

@ -28,169 +28,169 @@
*/
#ifndef THUNDER_BGX_H
#define THUNDER_BGX_H
#define THUNDER_BGX_H
#define MAX_BGX_THUNDER 8 /* Max 4 nodes, 2 per node */
#define MAX_BGX_PER_CN88XX 2
#define MAX_LMAC_PER_BGX 4
#define MAX_BGX_CHANS_PER_LMAC 16
#define MAX_DMAC_PER_LMAC 8
#define MAX_FRAME_SIZE 9216
#define MAX_BGX_THUNDER 8 /* Max 4 nodes, 2 per node */
#define MAX_BGX_PER_CN88XX 2
#define MAX_LMAC_PER_BGX 4
#define MAX_BGX_CHANS_PER_LMAC 16
#define MAX_DMAC_PER_LMAC 8
#define MAX_FRAME_SIZE 9216
#define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE 2
#define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE 2
#define MAX_LMAC (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX)
#define MAX_LMAC (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX)
/* Registers */
#define BGX_CMRX_CFG 0x00
#define CMR_PKT_TX_EN BIT_ULL(13)
#define CMR_PKT_RX_EN BIT_ULL(14)
#define CMR_EN BIT_ULL(15)
#define BGX_CMR_GLOBAL_CFG 0x08
#define CMR_GLOBAL_CFG_FCS_STRIP BIT_ULL(6)
#define BGX_CMRX_RX_ID_MAP 0x60
#define BGX_CMRX_RX_STAT0 0x70
#define BGX_CMRX_RX_STAT1 0x78
#define BGX_CMRX_RX_STAT2 0x80
#define BGX_CMRX_RX_STAT3 0x88
#define BGX_CMRX_RX_STAT4 0x90
#define BGX_CMRX_RX_STAT5 0x98
#define BGX_CMRX_RX_STAT6 0xA0
#define BGX_CMRX_RX_STAT7 0xA8
#define BGX_CMRX_RX_STAT8 0xB0
#define BGX_CMRX_RX_STAT9 0xB8
#define BGX_CMRX_RX_STAT10 0xC0
#define BGX_CMRX_RX_BP_DROP 0xC8
#define BGX_CMRX_RX_DMAC_CTL 0x0E8
#define BGX_CMR_RX_DMACX_CAM 0x200
#define RX_DMACX_CAM_EN BIT_ULL(48)
#define RX_DMACX_CAM_LMACID(x) (x << 49)
#define RX_DMAC_COUNT 32
#define BGX_CMR_RX_STREERING 0x300
#define RX_TRAFFIC_STEER_RULE_COUNT 8
#define BGX_CMR_CHAN_MSK_AND 0x450
#define BGX_CMR_BIST_STATUS 0x460
#define BGX_CMR_RX_LMACS 0x468
#define BGX_CMRX_TX_STAT0 0x600
#define BGX_CMRX_TX_STAT1 0x608
#define BGX_CMRX_TX_STAT2 0x610
#define BGX_CMRX_TX_STAT3 0x618
#define BGX_CMRX_TX_STAT4 0x620
#define BGX_CMRX_TX_STAT5 0x628
#define BGX_CMRX_TX_STAT6 0x630
#define BGX_CMRX_TX_STAT7 0x638
#define BGX_CMRX_TX_STAT8 0x640
#define BGX_CMRX_TX_STAT9 0x648
#define BGX_CMRX_TX_STAT10 0x650
#define BGX_CMRX_TX_STAT11 0x658
#define BGX_CMRX_TX_STAT12 0x660
#define BGX_CMRX_TX_STAT13 0x668
#define BGX_CMRX_TX_STAT14 0x670
#define BGX_CMRX_TX_STAT15 0x678
#define BGX_CMRX_TX_STAT16 0x680
#define BGX_CMRX_TX_STAT17 0x688
#define BGX_CMR_TX_LMACS 0x1000
#define BGX_CMRX_CFG 0x00
#define CMR_PKT_TX_EN (1UL << 13)
#define CMR_PKT_RX_EN (1UL << 14)
#define CMR_EN (1UL << 15)
#define BGX_CMR_GLOBAL_CFG 0x08
#define CMR_GLOBAL_CFG_FCS_STRIP (1UL << 6)
#define BGX_CMRX_RX_ID_MAP 0x60
#define BGX_CMRX_RX_STAT0 0x70
#define BGX_CMRX_RX_STAT1 0x78
#define BGX_CMRX_RX_STAT2 0x80
#define BGX_CMRX_RX_STAT3 0x88
#define BGX_CMRX_RX_STAT4 0x90
#define BGX_CMRX_RX_STAT5 0x98
#define BGX_CMRX_RX_STAT6 0xA0
#define BGX_CMRX_RX_STAT7 0xA8
#define BGX_CMRX_RX_STAT8 0xB0
#define BGX_CMRX_RX_STAT9 0xB8
#define BGX_CMRX_RX_STAT10 0xC0
#define BGX_CMRX_RX_BP_DROP 0xC8
#define BGX_CMRX_RX_DMAC_CTL 0x0E8
#define BGX_CMR_RX_DMACX_CAM 0x200
#define RX_DMACX_CAM_EN (1UL << 48)
#define RX_DMACX_CAM_LMACID(x) (x << 49)
#define RX_DMAC_COUNT 32
#define BGX_CMR_RX_STREERING 0x300
#define RX_TRAFFIC_STEER_RULE_COUNT 8
#define BGX_CMR_CHAN_MSK_AND 0x450
#define BGX_CMR_BIST_STATUS 0x460
#define BGX_CMR_RX_LMACS 0x468
#define BGX_CMRX_TX_STAT0 0x600
#define BGX_CMRX_TX_STAT1 0x608
#define BGX_CMRX_TX_STAT2 0x610
#define BGX_CMRX_TX_STAT3 0x618
#define BGX_CMRX_TX_STAT4 0x620
#define BGX_CMRX_TX_STAT5 0x628
#define BGX_CMRX_TX_STAT6 0x630
#define BGX_CMRX_TX_STAT7 0x638
#define BGX_CMRX_TX_STAT8 0x640
#define BGX_CMRX_TX_STAT9 0x648
#define BGX_CMRX_TX_STAT10 0x650
#define BGX_CMRX_TX_STAT11 0x658
#define BGX_CMRX_TX_STAT12 0x660
#define BGX_CMRX_TX_STAT13 0x668
#define BGX_CMRX_TX_STAT14 0x670
#define BGX_CMRX_TX_STAT15 0x678
#define BGX_CMRX_TX_STAT16 0x680
#define BGX_CMRX_TX_STAT17 0x688
#define BGX_CMR_TX_LMACS 0x1000
#define BGX_SPUX_CONTROL1 0x10000
#define SPU_CTL_LOW_POWER BIT_ULL(11)
#define SPU_CTL_LOOPBACK BIT_ULL(14)
#define SPU_CTL_RESET BIT_ULL(15)
#define BGX_SPUX_STATUS1 0x10008
#define SPU_STATUS1_RCV_LNK BIT_ULL(2)
#define BGX_SPUX_STATUS2 0x10020
#define SPU_STATUS2_RCVFLT BIT_ULL(10)
#define BGX_SPUX_BX_STATUS 0x10028
#define SPU_BX_STATUS_RX_ALIGN BIT_ULL(12)
#define BGX_SPUX_BR_STATUS1 0x10030
#define SPU_BR_STATUS_BLK_LOCK BIT_ULL(0)
#define SPU_BR_STATUS_RCV_LNK BIT_ULL(12)
#define BGX_SPUX_BR_PMD_CRTL 0x10068
#define SPU_PMD_CRTL_TRAIN_EN BIT_ULL(1)
#define BGX_SPUX_BR_PMD_LP_CUP 0x10078
#define BGX_SPUX_BR_PMD_LD_CUP 0x10088
#define BGX_SPUX_BR_PMD_LD_REP 0x10090
#define BGX_SPUX_FEC_CONTROL 0x100A0
#define SPU_FEC_CTL_FEC_EN BIT_ULL(0)
#define SPU_FEC_CTL_ERR_EN BIT_ULL(1)
#define BGX_SPUX_AN_CONTROL 0x100C8
#define SPU_AN_CTL_AN_EN BIT_ULL(12)
#define SPU_AN_CTL_XNP_EN BIT_ULL(13)
#define BGX_SPUX_AN_ADV 0x100D8
#define BGX_SPUX_MISC_CONTROL 0x10218
#define SPU_MISC_CTL_INTLV_RDISP BIT_ULL(10)
#define SPU_MISC_CTL_RX_DIS BIT_ULL(12)
#define BGX_SPUX_INT 0x10220 /* +(0..3) << 20 */
#define BGX_SPUX_INT_W1S 0x10228
#define BGX_SPUX_INT_ENA_W1C 0x10230
#define BGX_SPUX_INT_ENA_W1S 0x10238
#define BGX_SPU_DBG_CONTROL 0x10300
#define SPU_DBG_CTL_AN_ARB_LINK_CHK_EN BIT_ULL(18)
#define SPU_DBG_CTL_AN_NONCE_MCT_DIS BIT_ULL(29)
#define BGX_SPUX_CONTROL1 0x10000
#define SPU_CTL_LOW_POWER (1UL << 11)
#define SPU_CTL_LOOPBACK (1UL << 14)
#define SPU_CTL_RESET (1UL << 15)
#define BGX_SPUX_STATUS1 0x10008
#define SPU_STATUS1_RCV_LNK (1UL << 2)
#define BGX_SPUX_STATUS2 0x10020
#define SPU_STATUS2_RCVFLT (1UL << 10)
#define BGX_SPUX_BX_STATUS 0x10028
#define SPU_BX_STATUS_RX_ALIGN (1UL << 12)
#define BGX_SPUX_BR_STATUS1 0x10030
#define SPU_BR_STATUS_BLK_LOCK (1UL << 0)
#define SPU_BR_STATUS_RCV_LNK (1UL << 12)
#define BGX_SPUX_BR_PMD_CRTL 0x10068
#define SPU_PMD_CRTL_TRAIN_EN (1UL << 1)
#define BGX_SPUX_BR_PMD_LP_CUP 0x10078
#define BGX_SPUX_BR_PMD_LD_CUP 0x10088
#define BGX_SPUX_BR_PMD_LD_REP 0x10090
#define BGX_SPUX_FEC_CONTROL 0x100A0
#define SPU_FEC_CTL_FEC_EN (1UL << 0)
#define SPU_FEC_CTL_ERR_EN (1UL << 1)
#define BGX_SPUX_AN_CONTROL 0x100C8
#define SPU_AN_CTL_AN_EN (1UL << 12)
#define SPU_AN_CTL_XNP_EN (1UL << 13)
#define BGX_SPUX_AN_ADV 0x100D8
#define BGX_SPUX_MISC_CONTROL 0x10218
#define SPU_MISC_CTL_INTLV_RDISP (1UL << 10)
#define SPU_MISC_CTL_RX_DIS (1UL << 12)
#define BGX_SPUX_INT 0x10220 /* +(0..3) << 20 */
#define BGX_SPUX_INT_W1S 0x10228
#define BGX_SPUX_INT_ENA_W1C 0x10230
#define BGX_SPUX_INT_ENA_W1S 0x10238
#define BGX_SPU_DBG_CONTROL 0x10300
#define SPU_DBG_CTL_AN_ARB_LINK_CHK_EN (1UL << 18)
#define SPU_DBG_CTL_AN_NONCE_MCT_DIS (1UL << 29)
#define BGX_SMUX_RX_INT 0x20000
#define BGX_SMUX_RX_JABBER 0x20030
#define BGX_SMUX_RX_CTL 0x20048
#define SMU_RX_CTL_STATUS (3ull << 0)
#define BGX_SMUX_TX_APPEND 0x20100
#define SMU_TX_APPEND_FCS_D BIT_ULL(2)
#define BGX_SMUX_TX_MIN_PKT 0x20118
#define BGX_SMUX_TX_INT 0x20140
#define BGX_SMUX_TX_CTL 0x20178
#define SMU_TX_CTL_DIC_EN BIT_ULL(0)
#define SMU_TX_CTL_UNI_EN BIT_ULL(1)
#define SMU_TX_CTL_LNK_STATUS (3ull << 4)
#define BGX_SMUX_TX_THRESH 0x20180
#define BGX_SMUX_CTL 0x20200
#define SMU_CTL_RX_IDLE BIT_ULL(0)
#define SMU_CTL_TX_IDLE BIT_ULL(1)
#define BGX_SMUX_RX_INT 0x20000
#define BGX_SMUX_RX_JABBER 0x20030
#define BGX_SMUX_RX_CTL 0x20048
#define SMU_RX_CTL_STATUS (3UL << 0)
#define BGX_SMUX_TX_APPEND 0x20100
#define SMU_TX_APPEND_FCS_D (1UL << 2)
#define BGX_SMUX_TX_MIN_PKT 0x20118
#define BGX_SMUX_TX_INT 0x20140
#define BGX_SMUX_TX_CTL 0x20178
#define SMU_TX_CTL_DIC_EN (1UL << 0)
#define SMU_TX_CTL_UNI_EN (1UL << 1)
#define SMU_TX_CTL_LNK_STATUS (3UL << 4)
#define BGX_SMUX_TX_THRESH 0x20180
#define BGX_SMUX_CTL 0x20200
#define SMU_CTL_RX_IDLE (1UL << 0)
#define SMU_CTL_TX_IDLE (1UL << 1)
#define BGX_GMP_PCS_MRX_CTL 0x30000
#define PCS_MRX_CTL_RST_AN BIT_ULL(9)
#define PCS_MRX_CTL_PWR_DN BIT_ULL(11)
#define PCS_MRX_CTL_AN_EN BIT_ULL(12)
#define PCS_MRX_CTL_LOOPBACK1 BIT_ULL(14)
#define PCS_MRX_CTL_RESET BIT_ULL(15)
#define BGX_GMP_PCS_MRX_STATUS 0x30008
#define PCS_MRX_STATUS_AN_CPT BIT_ULL(5)
#define BGX_GMP_PCS_ANX_AN_RESULTS 0x30020
#define BGX_GMP_PCS_SGM_AN_ADV 0x30068
#define BGX_GMP_PCS_MISCX_CTL 0x30078
#define PCS_MISC_CTL_GMX_ENO BIT_ULL(11)
#define PCS_MISC_CTL_SAMP_PT_MASK 0x7Full
#define BGX_GMP_GMI_PRTX_CFG 0x38020
#define GMI_PORT_CFG_SPEED BIT_ULL(1)
#define GMI_PORT_CFG_DUPLEX BIT_ULL(2)
#define GMI_PORT_CFG_SLOT_TIME BIT_ULL(3)
#define GMI_PORT_CFG_SPEED_MSB BIT_ULL(8)
#define BGX_GMP_GMI_RXX_JABBER 0x38038
#define BGX_GMP_GMI_TXX_THRESH 0x38210
#define BGX_GMP_GMI_TXX_APPEND 0x38218
#define BGX_GMP_GMI_TXX_SLOT 0x38220
#define BGX_GMP_GMI_TXX_BURST 0x38228
#define BGX_GMP_GMI_TXX_MIN_PKT 0x38240
#define BGX_GMP_GMI_TXX_SGMII_CTL 0x38300
#define BGX_GMP_PCS_MRX_CTL 0x30000
#define PCS_MRX_CTL_RST_AN (1UL << 9)
#define PCS_MRX_CTL_PWR_DN (1UL << 11)
#define PCS_MRX_CTL_AN_EN (1UL << 12)
#define PCS_MRX_CTL_LOOPBACK1 (1UL << 14)
#define PCS_MRX_CTL_RESET (1UL << 15)
#define BGX_GMP_PCS_MRX_STATUS 0x30008
#define PCS_MRX_STATUS_AN_CPT (1UL << 5)
#define BGX_GMP_PCS_ANX_AN_RESULTS 0x30020
#define BGX_GMP_PCS_SGM_AN_ADV 0x30068
#define BGX_GMP_PCS_MISCX_CTL 0x30078
#define PCS_MISC_CTL_GMX_ENO (1UL << 11)
#define PCS_MISC_CTL_SAMP_PT_MASK 0x7FUL
#define BGX_GMP_GMI_PRTX_CFG 0x38020
#define GMI_PORT_CFG_SPEED (1UL << 1)
#define GMI_PORT_CFG_DUPLEX (1UL << 2)
#define GMI_PORT_CFG_SLOT_TIME (1UL << 3)
#define GMI_PORT_CFG_SPEED_MSB (1UL << 8)
#define BGX_GMP_GMI_RXX_JABBER 0x38038
#define BGX_GMP_GMI_TXX_THRESH 0x38210
#define BGX_GMP_GMI_TXX_APPEND 0x38218
#define BGX_GMP_GMI_TXX_SLOT 0x38220
#define BGX_GMP_GMI_TXX_BURST 0x38228
#define BGX_GMP_GMI_TXX_MIN_PKT 0x38240
#define BGX_GMP_GMI_TXX_SGMII_CTL 0x38300
#define BGX_MSIX_VEC_0_29_ADDR 0x400000 /* +(0..29) << 4 */
#define BGX_MSIX_VEC_0_29_CTL 0x400008
#define BGX_MSIX_PBA_0 0x4F0000
/* MSI-X interrupts */
#define BGX_MSIX_VECTORS 30
#define BGX_LMAC_VEC_OFFSET 7
#define BGX_MSIX_VEC_SHIFT 4
#define BGX_MSIX_VECTORS 30
#define BGX_LMAC_VEC_OFFSET 7
#define BGX_MSIX_VEC_SHIFT 4
#define CMRX_INT 0
#define SPUX_INT 1
#define SMUX_RX_INT 2
#define SMUX_TX_INT 3
#define GMPX_PCS_INT 4
#define GMPX_GMI_RX_INT 5
#define GMPX_GMI_TX_INT 6
#define CMR_MEM_INT 28
#define SPU_MEM_INT 29
#define CMRX_INT 0
#define SPUX_INT 1
#define SMUX_RX_INT 2
#define SMUX_TX_INT 3
#define GMPX_PCS_INT 4
#define GMPX_GMI_RX_INT 5
#define GMPX_GMI_TX_INT 6
#define CMR_MEM_INT 28
#define SPU_MEM_INT 29
#define LMAC_INTR_LINK_UP BIT(0)
#define LMAC_INTR_LINK_DOWN BIT(1)
#define LMAC_INTR_LINK_UP (1 << 0)
#define LMAC_INTR_LINK_DOWN (1 << 1)
/* RX_DMAC_CTL configuration*/
enum MCAST_MODE {
@ -200,29 +200,29 @@ enum MCAST_MODE {
RSVD
};
#define BCAST_ACCEPT 1
#define CAM_ACCEPT 1
#define BCAST_ACCEPT 1
#define CAM_ACCEPT 1
void octeon_mdiobus_force_mod_depencency(void);
void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac);
void bgx_add_dmac_addr(uint64_t dmac, int node, int bgx_idx, int lmac);
unsigned bgx_get_map(int node);
int bgx_get_lmac_count(int node, int bgx);
const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid);
void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac);
const uint8_t *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid);
void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const uint8_t *mac);
void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status);
void bgx_lmac_internal_loopback(int node, int bgx_idx,
int lmac_idx, bool enable);
u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx);
u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx);
#define BGX_RX_STATS_COUNT 11
#define BGX_TX_STATS_COUNT 18
int lmac_idx, boolean_t enable);
uint64_t bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx);
uint64_t bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx);
#define BGX_RX_STATS_COUNT 11
#define BGX_TX_STATS_COUNT 18
struct bgx_stats {
u64 rx_stats[BGX_RX_STATS_COUNT];
u64 tx_stats[BGX_TX_STATS_COUNT];
uint64_t rx_stats[BGX_RX_STATS_COUNT];
uint64_t tx_stats[BGX_TX_STATS_COUNT];
};
#define BGX_IN_PROMISCUOUS_MODE 1
#define BGX_IN_PROMISCUOUS_MODE 1
enum LMAC_TYPE {
BGX_MODE_SGMII = 0, /* 1 lane, 1.250 Gbaud */

View File

@ -0,0 +1,207 @@
/*-
* Copyright (c) 2015 The FreeBSD Foundation
* All rights reserved.
*
* This software was developed by Semihalf under
* the sponsorship of the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/bitstring.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/pciio.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>
#include <dev/ofw/openfirm.h>
#include <dev/mii/miivar.h>
#include "thunder_bgx.h"
#include "thunder_bgx_var.h"
#define CONN_TYPE_MAXLEN 16
#define CONN_TYPE_OFFSET 2
int bgx_fdt_init_phy(struct bgx *);
/*
 * Read the "local-mac-address" property of the given PHY node into hwaddr.
 * A missing or mis-sized property is signalled by clearing the address;
 * callers treat an all-zero MAC as "not configured in FDT".
 */
static void
bgx_fdt_get_macaddr(phandle_t phy, uint8_t *hwaddr)
{
	uint8_t addr[ETHER_ADDR_LEN];

	/*
	 * OF_getprop() returns the property length or -1 if it is absent.
	 * Require exactly ETHER_ADDR_LEN bytes so a truncated or oversized
	 * property is not silently used as a MAC address.
	 */
	if (OF_getprop(phy, "local-mac-address", addr, ETHER_ADDR_LEN) !=
	    ETHER_ADDR_LEN) {
		/* Missing MAC address should be marked by clearing it */
		memset(hwaddr, 0, ETHER_ADDR_LEN);
	} else
		memcpy(hwaddr, addr, ETHER_ADDR_LEN);
}
/*
 * Check whether the "qlm-mode" string from the FDT node describes the same
 * connection type as the mode this BGX instance was configured for.
 * The string carries a lane prefix (e.g. "N,") which is skipped via
 * CONN_TYPE_OFFSET before comparing the connection type name.
 */
static boolean_t
bgx_fdt_phy_mode_match(struct bgx *bgx, char *qlm_mode, size_t size)
{
	const char *conn;

	/* Skip the lane prefix preceding the connection type name. */
	conn = &qlm_mode[CONN_TYPE_OFFSET];
	size -= CONN_TYPE_OFFSET;

	switch (bgx->qlm_mode) {
	case QLM_MODE_SGMII:
		if (strncmp(conn, "sgmii", size) == 0)
			return (TRUE);
		return (FALSE);
	case QLM_MODE_XAUI_1X4:
		/* Both plain and DDR XAUI map to the same BGX mode. */
		if (strncmp(conn, "xaui", size) == 0 ||
		    strncmp(conn, "dxaui", size) == 0)
			return (TRUE);
		return (FALSE);
	case QLM_MODE_RXAUI_2X2:
		if (strncmp(conn, "raui", size) == 0)
			return (TRUE);
		return (FALSE);
	case QLM_MODE_XFI_4X1:
		if (strncmp(conn, "xfi", size) == 0)
			return (TRUE);
		return (FALSE);
	case QLM_MODE_XLAUI_1X4:
		if (strncmp(conn, "xlaui", size) == 0)
			return (TRUE);
		return (FALSE);
	case QLM_MODE_10G_KR_4X1:
		if (strncmp(conn, "xfi-10g-kr", size) == 0)
			return (TRUE);
		return (FALSE);
	case QLM_MODE_40G_KR4_1X4:
		if (strncmp(conn, "xlaui-40g-kr", size) == 0)
			return (TRUE);
		return (FALSE);
	default:
		return (FALSE);
	}
}
/*
 * Discover the LMACs of this BGX instance from the FDT.
 *
 * Finds the "/bgx<N>" node, then walks its children looking for entries
 * whose "qlm-mode" matches the mode this BGX was configured for.  For each
 * match the PHY address, the MDIO bus device driving the PHY and the MAC
 * address are recorded in bgx->lmac[].
 *
 * Returns 0 when at least one LMAC was found, ENXIO otherwise.
 */
int
bgx_fdt_init_phy(struct bgx *bgx)
{
	phandle_t node, child;
	phandle_t phy, mdio;
	uint8_t lmac;
	char bgx_sel[6];
	char qlm_mode[CONN_TYPE_MAXLEN];

	lmac = 0;
	/* Get BGX node from DT */
	snprintf(bgx_sel, sizeof(bgx_sel), "/bgx%d", bgx->bgx_id);
	node = OF_finddevice(bgx_sel);
	if (node == 0 || node == -1) {
		device_printf(bgx->dev,
		    "Could not find %s node in FDT\n", bgx_sel);
		return (ENXIO);
	}

	for (child = OF_child(node); child > 0; child = OF_peer(child)) {
		if (OF_getprop(child, "qlm-mode", qlm_mode,
		    sizeof(qlm_mode)) <= 0) {
			/* Missing qlm-mode, skipping */
			continue;
		}

		if (!bgx_fdt_phy_mode_match(bgx, qlm_mode, sizeof(qlm_mode))) {
			/*
			 * Connection type not match with BGX mode.
			 */
			continue;
		}

		if (OF_getencprop(child, "phy-handle", &phy,
		    sizeof(phy)) <= 0) {
			if (bootverbose) {
				device_printf(bgx->dev,
				    "No phy-handle in PHY node. Skipping...\n");
			}
			continue;
		}

		/* Acquire PHY address */
		phy = OF_node_from_xref(phy);
		if (OF_getencprop(phy, "reg", &bgx->lmac[lmac].phyaddr,
		    sizeof(bgx->lmac[lmac].phyaddr)) <= 0) {
			if (bootverbose) {
				device_printf(bgx->dev,
				    "Could not retrieve PHY address\n");
			}
			/* Let the MII layer probe for the address. */
			bgx->lmac[lmac].phyaddr = MII_PHY_ANY;
		}

		/*
		 * Get PHY interface (MDIO bus) device.
		 * Driver must be already attached.
		 */
		mdio = OF_parent(phy);
		bgx->lmac[lmac].phy_if_dev =
		    OF_device_from_xref(OF_xref_from_node(mdio));
		if (bgx->lmac[lmac].phy_if_dev == NULL) {
			if (bootverbose) {
				device_printf(bgx->dev,
				    "Could not find interface to PHY\n");
			}
			continue;
		}

		/* Get mac address from FDT */
		bgx_fdt_get_macaddr(phy, bgx->lmac[lmac].mac);

		bgx->lmac[lmac].lmacid = lmac;
		lmac++;
		if (lmac == MAX_LMAC_PER_BGX)
			break;
	}
	if (lmac == 0) {
		device_printf(bgx->dev, "Could not find matching PHY\n");
		return (ENXIO);
	}

	return (0);
}

View File

@ -0,0 +1,67 @@
/*
* Copyright (C) 2015 Cavium Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __THUNDER_BGX_VAR_H__
#define __THUNDER_BGX_VAR_H__
/*
 * Per-LMAC (logical MAC) software state kept by the BGX driver.
 */
struct lmac {
	struct bgx *bgx;		/* parent BGX controller */
	int dmac;			/* DMAC CAM entries in use -- TODO confirm */
	uint8_t mac[ETHER_ADDR_LEN];	/* MAC address read from the FDT */
	boolean_t link_up;		/* current link state */
	int lmacid; /* ID within BGX */
	int lmacid_bd; /* ID on board */
	device_t phy_if_dev;		/* MDIO bus device serving the PHY */
	int phyaddr;			/* PHY address on that MDIO bus */
	unsigned int last_duplex;	/* last duplex reported by the PHY */
	unsigned int last_link;		/* last link state reported by the PHY */
	unsigned int last_speed;	/* last speed reported by the PHY */
	boolean_t is_sgmii;		/* TRUE when the LMAC runs in SGMII mode */
	struct callout check_link;	/* periodic link-state poll callout */
	struct mtx check_link_mtx;	/* serializes the check_link callout */
};
/*
 * Per-BGX controller instance state.
 */
struct bgx {
	device_t dev;			/* BGX device handle */
	struct resource * reg_base;	/* mapped register resource */
	uint8_t bgx_id;			/* BGX index, used to find "/bgx<N>" in FDT */
	enum qlm_mode qlm_mode;		/* configured QLM (serdes lane) mode */
	struct lmac lmac[MAX_LMAC_PER_BGX];	/* per-LMAC state */
	int lmac_count;			/* number of LMACs discovered */
	int lmac_type;			/* hardware LMAC type code -- TODO confirm */
	int lane_to_sds;		/* lane-to-serdes mapping value */
	int use_training;		/* nonzero when link training is used (KR modes) */
};
#ifdef FDT
extern int bgx_fdt_init_phy(struct bgx *);
#endif
#endif