Raw import of ThunderX VNIC networking driver components

This import brings in the following components of the Linux driver:
- Thunder BGX (programmable MAC)
- Physical Function driver
- Virtual Function driver
- Headers

Revision:            1.0
Obtained from:       Cavium
License information: Cavium provided these files under BSD license
Author:              Zbigniew Bodek
Date:                2015-10-18 21:39:15 +00:00
Parent:              325151a32e
Commit:              3c0086b813
Notes:               svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=289550
9 changed files with 7644 additions and 0 deletions

sys/dev/vnic/nic.h (new file, 539 lines)

@@ -0,0 +1,539 @@
/*
* Copyright (C) 2015 Cavium Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef NIC_H
#define NIC_H
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include "thunder_bgx.h"
/* PCI device IDs */
#define PCI_DEVICE_ID_THUNDER_NIC_PF 0xA01E
#define PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF 0x0011
#define PCI_DEVICE_ID_THUNDER_NIC_VF 0xA034
#define PCI_DEVICE_ID_THUNDER_BGX 0xA026
/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM 0
#define PCI_MSIX_REG_BAR_NUM 4
/* NIC SRIOV VF count */
#define MAX_NUM_VFS_SUPPORTED 128
#define DEFAULT_NUM_VF_ENABLED 8
#define NIC_TNS_BYPASS_MODE 0
#define NIC_TNS_MODE 1
/* NIC priv flags */
#define NIC_SRIOV_ENABLED BIT(0)
#define NIC_TNS_ENABLED BIT(1)
/* VNIC HW optimization features */
#define VNIC_RSS_SUPPORT
#define VNIC_MULTI_QSET_SUPPORT
/* Min/Max packet size */
#define NIC_HW_MIN_FRS 64
#define NIC_HW_MAX_FRS 9200 /* 9216 max packet including FCS */
/* Max pkinds */
#define NIC_MAX_PKIND 16
/* Rx Channels */
/* Receive channel configuration in TNS bypass mode:
 * BGX0-LMAC0-CHAN0 - VNIC CHAN0
 * BGX0-LMAC1-CHAN0 - VNIC CHAN16
 * ...
 * BGX1-LMAC0-CHAN0 - VNIC CHAN128
 * ...
 * BGX1-LMAC3-CHAN0 - VNIC CHAN176
 */
#define NIC_INTF_COUNT 2 /* Interfaces between VNIC and TNS/BGX */
#define NIC_CHANS_PER_INF 128
#define NIC_MAX_CHANS (NIC_INTF_COUNT * NIC_CHANS_PER_INF)
#define NIC_CPI_COUNT 2048 /* No of channel parse indices */
/* TNS bypass mode: 1-1 mapping between VNIC and BGX:LMAC */
#define NIC_MAX_BGX MAX_BGX_PER_CN88XX
#define NIC_CPI_PER_BGX (NIC_CPI_COUNT / NIC_MAX_BGX)
#define NIC_MAX_CPI_PER_LMAC 64 /* Max when CPI_ALG is IP diffserv */
#define NIC_RSSI_PER_BGX (NIC_RSSI_COUNT / NIC_MAX_BGX)
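/* Illustrative sketch (not part of the Cavium sources): with the fixed
 * TNS-bypass layout above, the VNIC channel behind a given BGX, LMAC and
 * LMAC-local channel follows directly from the constants; `bgx`, `lmac`
 * and `chan` are hypothetical arguments.
 */
static inline int nic_example_vnic_chan(int bgx, int lmac, int chan)
{
	/* 128 channels per interface, 16 per LMAC; e.g. (1, 3, 0) -> 176. */
	return ((bgx * NIC_CHANS_PER_INF) +
	    (lmac * MAX_BGX_CHANS_PER_LMAC) + chan);
}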
/* Tx scheduling */
#define NIC_MAX_TL4 1024
#define NIC_MAX_TL4_SHAPERS 256 /* 1 shaper for 4 TL4s */
#define NIC_MAX_TL3 256
#define NIC_MAX_TL3_SHAPERS 64 /* 1 shaper for 4 TL3s */
#define NIC_MAX_TL2 64
#define NIC_MAX_TL2_SHAPERS 2 /* 1 shaper for 32 TL2s */
#define NIC_MAX_TL1 2
/* TNS bypass mode */
#define NIC_TL2_PER_BGX 32
#define NIC_TL4_PER_BGX (NIC_MAX_TL4 / NIC_MAX_BGX)
#define NIC_TL4_PER_LMAC (NIC_MAX_TL4 / NIC_CHANS_PER_INF)
/* NIC VF Interrupts */
#define NICVF_INTR_CQ 0
#define NICVF_INTR_SQ 1
#define NICVF_INTR_RBDR 2
#define NICVF_INTR_PKT_DROP 3
#define NICVF_INTR_TCP_TIMER 4
#define NICVF_INTR_MBOX 5
#define NICVF_INTR_QS_ERR 6
#define NICVF_INTR_CQ_SHIFT 0
#define NICVF_INTR_SQ_SHIFT 8
#define NICVF_INTR_RBDR_SHIFT 16
#define NICVF_INTR_PKT_DROP_SHIFT 20
#define NICVF_INTR_TCP_TIMER_SHIFT 21
#define NICVF_INTR_MBOX_SHIFT 22
#define NICVF_INTR_QS_ERR_SHIFT 23
#define NICVF_INTR_CQ_MASK (0xFF << NICVF_INTR_CQ_SHIFT)
#define NICVF_INTR_SQ_MASK (0xFF << NICVF_INTR_SQ_SHIFT)
#define NICVF_INTR_RBDR_MASK (0x03 << NICVF_INTR_RBDR_SHIFT)
#define NICVF_INTR_PKT_DROP_MASK BIT(NICVF_INTR_PKT_DROP_SHIFT)
#define NICVF_INTR_TCP_TIMER_MASK BIT(NICVF_INTR_TCP_TIMER_SHIFT)
#define NICVF_INTR_MBOX_MASK BIT(NICVF_INTR_MBOX_SHIFT)
#define NICVF_INTR_QS_ERR_MASK BIT(NICVF_INTR_QS_ERR_SHIFT)
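/* Illustrative sketch: the CQ, SQ and RBDR blocks carry one interrupt
 * bit per queue, so the bit for an individual queue is the block shift
 * plus the queue index, e.g. for completion queue `cq_idx` (hypothetical):
 *
 *	u64 bit = 1ULL << (NICVF_INTR_CQ_SHIFT + cq_idx);
 */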
/* MSI-X interrupts */
#define NIC_PF_MSIX_VECTORS 10
#define NIC_VF_MSIX_VECTORS 20
#define NIC_PF_INTR_ID_ECC0_SBE 0
#define NIC_PF_INTR_ID_ECC0_DBE 1
#define NIC_PF_INTR_ID_ECC1_SBE 2
#define NIC_PF_INTR_ID_ECC1_DBE 3
#define NIC_PF_INTR_ID_ECC2_SBE 4
#define NIC_PF_INTR_ID_ECC2_DBE 5
#define NIC_PF_INTR_ID_ECC3_SBE 6
#define NIC_PF_INTR_ID_ECC3_DBE 7
#define NIC_PF_INTR_ID_MBOX0 8
#define NIC_PF_INTR_ID_MBOX1 9
/* Global timer for CQ timer thresh interrupts.
 * Calculated for an SCLK of 700 MHz; the value written must be 1/16th
 * of the intended tick. One tick per 0.05 usec is 35 SCLK cycles, and
 * 35 / 16 = 2.2, rounded down to 2; the ~10% shortfall is absorbed by
 * the CQ timer thresh value.
 */
#define NICPF_CLK_PER_INT_TICK 2
/* Time to wait before we decide that a SQ is stuck.
 *
 * Since both packet Rx and Tx notifications are served by the same CQ,
 * freeing transmitted skbs is delayed when packets are received at a
 * very high rate (e.g. L2 forwarding), and the watchdog would kick in
 * and reset the interface. Hence this value is kept high.
 */
#define NICVF_TX_TIMEOUT (50 * HZ)
struct nicvf_cq_poll {
struct nicvf *nicvf;
u8 cq_idx; /* Completion queue index */
struct napi_struct napi;
};
#define NIC_RSSI_COUNT 4096 /* Total no of RSS indices */
#define NIC_MAX_RSS_HASH_BITS 8
#define NIC_MAX_RSS_IDR_TBL_SIZE (1 << NIC_MAX_RSS_HASH_BITS)
#define RSS_HASH_KEY_SIZE 5 /* 320 bit key */
#ifdef VNIC_RSS_SUPPORT
struct nicvf_rss_info {
bool enable;
#define RSS_L2_EXTENDED_HASH_ENA BIT(0)
#define RSS_IP_HASH_ENA BIT(1)
#define RSS_TCP_HASH_ENA BIT(2)
#define RSS_TCP_SYN_DIS BIT(3)
#define RSS_UDP_HASH_ENA BIT(4)
#define RSS_L4_EXTENDED_HASH_ENA BIT(5)
#define RSS_ROCE_ENA BIT(6)
#define RSS_L3_BI_DIRECTION_ENA BIT(7)
#define RSS_L4_BI_DIRECTION_ENA BIT(8)
u64 cfg;
u8 hash_bits;
u16 rss_size;
u8 ind_tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
u64 key[RSS_HASH_KEY_SIZE];
} ____cacheline_aligned_in_smp;
#endif
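/* Illustrative sketch: steering through the table above amounts to
 * masking the RSS hash down to `hash_bits` and indexing `ind_tbl`;
 * `rss` and `hash` are hypothetical:
 *
 *	u8 rq_idx = rss->ind_tbl[hash & (rss->rss_size - 1)];
 */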
enum rx_stats_reg_offset {
RX_OCTS = 0x0,
RX_UCAST = 0x1,
RX_BCAST = 0x2,
RX_MCAST = 0x3,
RX_RED = 0x4,
RX_RED_OCTS = 0x5,
RX_ORUN = 0x6,
RX_ORUN_OCTS = 0x7,
RX_FCS = 0x8,
RX_L2ERR = 0x9,
RX_DRP_BCAST = 0xa,
RX_DRP_MCAST = 0xb,
RX_DRP_L3BCAST = 0xc,
RX_DRP_L3MCAST = 0xd,
RX_STATS_ENUM_LAST,
};
enum tx_stats_reg_offset {
TX_OCTS = 0x0,
TX_UCAST = 0x1,
TX_BCAST = 0x2,
TX_MCAST = 0x3,
TX_DROP = 0x4,
TX_STATS_ENUM_LAST,
};
struct nicvf_hw_stats {
u64 rx_bytes;
u64 rx_ucast_frames;
u64 rx_bcast_frames;
u64 rx_mcast_frames;
u64 rx_fcs_errors;
u64 rx_l2_errors;
u64 rx_drop_red;
u64 rx_drop_red_bytes;
u64 rx_drop_overrun;
u64 rx_drop_overrun_bytes;
u64 rx_drop_bcast;
u64 rx_drop_mcast;
u64 rx_drop_l3_bcast;
u64 rx_drop_l3_mcast;
u64 rx_bgx_truncated_pkts;
u64 rx_jabber_errs;
u64 rx_fcs_errs;
u64 rx_bgx_errs;
u64 rx_prel2_errs;
u64 rx_l2_hdr_malformed;
u64 rx_oversize;
u64 rx_undersize;
u64 rx_l2_len_mismatch;
u64 rx_l2_pclp;
u64 rx_ip_ver_errs;
u64 rx_ip_csum_errs;
u64 rx_ip_hdr_malformed;
u64 rx_ip_payload_malformed;
u64 rx_ip_ttl_errs;
u64 rx_l3_pclp;
u64 rx_l4_malformed;
u64 rx_l4_csum_errs;
u64 rx_udp_len_errs;
u64 rx_l4_port_errs;
u64 rx_tcp_flag_errs;
u64 rx_tcp_offset_errs;
u64 rx_l4_pclp;
u64 rx_truncated_pkts;
u64 tx_bytes_ok;
u64 tx_ucast_frames_ok;
u64 tx_bcast_frames_ok;
u64 tx_mcast_frames_ok;
u64 tx_drops;
};
struct nicvf_drv_stats {
/* Rx */
u64 rx_frames_ok;
u64 rx_frames_64;
u64 rx_frames_127;
u64 rx_frames_255;
u64 rx_frames_511;
u64 rx_frames_1023;
u64 rx_frames_1518;
u64 rx_frames_jumbo;
u64 rx_drops;
/* Tx */
u64 tx_frames_ok;
u64 tx_drops;
u64 tx_tso;
u64 txq_stop;
u64 txq_wake;
};
struct nicvf {
struct nicvf *pnicvf;
struct net_device *netdev;
struct pci_dev *pdev;
u8 vf_id;
u8 node;
bool tns_mode:1;
bool sqs_mode:1;
bool loopback_supported:1;
u16 mtu;
struct queue_set *qs;
#ifdef VNIC_MULTI_QSET_SUPPORT
#define MAX_SQS_PER_VF_SINGLE_NODE 5
#define MAX_SQS_PER_VF 11
u8 sqs_id;
u8 sqs_count; /* Secondary Qset count */
struct nicvf *snicvf[MAX_SQS_PER_VF];
#endif
u8 rx_queues;
u8 tx_queues;
u8 max_queues;
void __iomem *reg_base;
bool link_up;
u8 duplex;
u32 speed;
struct page *rb_page;
u32 rb_page_offset;
bool rb_alloc_fail;
bool rb_work_scheduled;
struct delayed_work rbdr_work;
struct tasklet_struct rbdr_task;
struct tasklet_struct qs_err_task;
struct tasklet_struct cq_task;
struct nicvf_cq_poll *napi[8];
#ifdef VNIC_RSS_SUPPORT
struct nicvf_rss_info rss_info;
#endif
u8 cpi_alg;
/* Interrupt coalescing settings */
u32 cq_coalesce_usecs;
u32 msg_enable;
struct nicvf_hw_stats hw_stats;
struct nicvf_drv_stats drv_stats;
struct bgx_stats bgx_stats;
struct work_struct reset_task;
/* MSI-X */
bool msix_enabled;
u8 num_vec;
struct msix_entry msix_entries[NIC_VF_MSIX_VECTORS];
char irq_name[NIC_VF_MSIX_VECTORS][20];
bool irq_allocated[NIC_VF_MSIX_VECTORS];
/* VF <-> PF mailbox communication */
bool pf_acked;
bool pf_nacked;
} ____cacheline_aligned_in_smp;
/* PF <--> VF mailbox communication.
 * Two 64-bit registers (NIC_PF_VF_MAILBOX_SIZE of them, 128 bits in all)
 * are shared between the PF and each VF, with a separate set per VF;
 * a write to the last register marks the end of a message.
 */
#define NIC_PF_VF_MAILBOX_SIZE 2
#define NIC_MBOX_MSG_TIMEOUT 2000 /* ms */
/* Mailbox message types */
#define NIC_MBOX_MSG_READY 0x01 /* Is PF ready to rcv msgs */
#define NIC_MBOX_MSG_ACK 0x02 /* ACK the message received */
#define NIC_MBOX_MSG_NACK 0x03 /* NACK the message received */
#define NIC_MBOX_MSG_QS_CFG 0x04 /* Configure Qset */
#define NIC_MBOX_MSG_RQ_CFG 0x05 /* Configure receive queue */
#define NIC_MBOX_MSG_SQ_CFG 0x06 /* Configure Send queue */
#define NIC_MBOX_MSG_RQ_DROP_CFG 0x07 /* Configure receive queue */
#define NIC_MBOX_MSG_SET_MAC 0x08 /* Add MAC ID to DMAC filter */
#define NIC_MBOX_MSG_SET_MAX_FRS 0x09 /* Set max frame size */
#define NIC_MBOX_MSG_CPI_CFG 0x0A /* Config CPI, RSSI */
#define NIC_MBOX_MSG_RSS_SIZE 0x0B /* Get RSS indir_tbl size */
#define NIC_MBOX_MSG_RSS_CFG 0x0C /* Config RSS table */
#define NIC_MBOX_MSG_RSS_CFG_CONT 0x0D /* RSS config continuation */
#define NIC_MBOX_MSG_RQ_BP_CFG 0x0E /* RQ backpressure config */
#define NIC_MBOX_MSG_RQ_SW_SYNC 0x0F /* Flush inflight pkts to RQ */
#define NIC_MBOX_MSG_BGX_STATS 0x10 /* Get stats from BGX */
#define NIC_MBOX_MSG_BGX_LINK_CHANGE 0x11 /* BGX:LMAC link status */
#define NIC_MBOX_MSG_ALLOC_SQS 0x12 /* Allocate secondary Qset */
#define NIC_MBOX_MSG_NICVF_PTR 0x13 /* Send nicvf ptr to PF */
#define NIC_MBOX_MSG_PNICVF_PTR 0x14 /* Get primary qset nicvf ptr */
#define NIC_MBOX_MSG_SNICVF_PTR 0x15 /* Send secondary Qset's nicvf ptr to primary VF */
#define NIC_MBOX_MSG_LOOPBACK 0x16 /* Set interface in loopback */
#define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */
#define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */
struct nic_cfg_msg {
u8 msg;
u8 vf_id;
u8 node_id;
bool tns_mode:1;
bool sqs_mode:1;
bool loopback_supported:1;
u8 mac_addr[ETH_ALEN];
};
/* Qset configuration */
struct qs_cfg_msg {
u8 msg;
u8 num;
u8 sqs_count;
u64 cfg;
};
/* Receive queue configuration */
struct rq_cfg_msg {
u8 msg;
u8 qs_num;
u8 rq_num;
u64 cfg;
};
/* Send queue configuration */
struct sq_cfg_msg {
u8 msg;
u8 qs_num;
u8 sq_num;
bool sqs_mode;
u64 cfg;
};
/* Set VF's MAC address */
struct set_mac_msg {
u8 msg;
u8 vf_id;
u8 mac_addr[ETH_ALEN];
};
/* Set Maximum frame size */
struct set_frs_msg {
u8 msg;
u8 vf_id;
u16 max_frs;
};
/* Set CPI algorithm type */
struct cpi_cfg_msg {
u8 msg;
u8 vf_id;
u8 rq_cnt;
u8 cpi_alg;
};
/* Get RSS table size */
struct rss_sz_msg {
u8 msg;
u8 vf_id;
u16 ind_tbl_size;
};
/* Set RSS configuration */
struct rss_cfg_msg {
u8 msg;
u8 vf_id;
u8 hash_bits;
u8 tbl_len;
u8 tbl_offset;
#define RSS_IND_TBL_LEN_PER_MBX_MSG 8
u8 ind_tbl[RSS_IND_TBL_LEN_PER_MBX_MSG];
};
struct bgx_stats_msg {
u8 msg;
u8 vf_id;
u8 rx;
u8 idx;
u64 stats;
};
/* Physical interface link status */
struct bgx_link_status {
u8 msg;
u8 link_up;
u8 duplex;
u32 speed;
};
#ifdef VNIC_MULTI_QSET_SUPPORT
/* Get Extra Qset IDs */
struct sqs_alloc {
u8 msg;
u8 vf_id;
u8 qs_count;
};
struct nicvf_ptr {
u8 msg;
u8 vf_id;
bool sqs_mode;
u8 sqs_id;
u64 nicvf;
};
#endif
/* Set interface in loopback mode */
struct set_loopback {
u8 msg;
u8 vf_id;
bool enable;
};
/* 128 bit shared memory between PF and each VF */
union nic_mbx {
struct { u8 msg; } msg;
struct nic_cfg_msg nic_cfg;
struct qs_cfg_msg qs;
struct rq_cfg_msg rq;
struct sq_cfg_msg sq;
struct set_mac_msg mac;
struct set_frs_msg frs;
struct cpi_cfg_msg cpi_cfg;
struct rss_sz_msg rss_size;
struct rss_cfg_msg rss_cfg;
struct bgx_stats_msg bgx_stats;
struct bgx_link_status link_status;
#ifdef VNIC_MULTI_QSET_SUPPORT
struct sqs_alloc sqs_alloc;
struct nicvf_ptr nicvf;
#endif
struct set_loopback lbk;
};
#define NIC_NODE_ID_MASK 0x03
#define NIC_NODE_ID_SHIFT 44
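/* The node (socket) identifier is encoded in bits <45:44> of the VNIC's
 * BAR0 physical address; the helper below extracts it.
 */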
static inline int nic_get_node_id(struct pci_dev *pdev)
{
u64 addr = pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM);
return ((addr >> NIC_NODE_ID_SHIFT) & NIC_NODE_ID_MASK);
}
int nicvf_set_real_num_queues(struct net_device *netdev,
int tx_queues, int rx_queues);
int nicvf_open(struct net_device *netdev);
int nicvf_stop(struct net_device *netdev);
int nicvf_send_msg_to_pf(struct nicvf *vf, union nic_mbx *mbx);
void nicvf_config_rss(struct nicvf *nic);
void nicvf_set_rss_key(struct nicvf *nic);
void nicvf_set_ethtool_ops(struct net_device *netdev);
void nicvf_update_stats(struct nicvf *nic);
void nicvf_update_lmac_stats(struct nicvf *nic);
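/* Illustrative usage sketch (hypothetical helper, not in the sources):
 * a VF fills the shared union and hands it to nicvf_send_msg_to_pf()
 * declared above, e.g. to install its MAC address via the PF.
 */
static inline int nicvf_example_set_mac(struct nicvf *nic, const u8 *addr)
{
	union nic_mbx mbx = { .msg.msg = NIC_MBOX_MSG_SET_MAC };

	mbx.mac.vf_id = nic->vf_id;
	memcpy(mbx.mac.mac_addr, addr, ETH_ALEN);
	return nicvf_send_msg_to_pf(nic, &mbx);
}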
#endif /* NIC_H */

sys/dev/vnic/nic_main.c (new file, 1192 lines; diff suppressed because it is too large)

sys/dev/vnic/nic_reg.h (new file, 234 lines)

@@ -0,0 +1,234 @@
/*
* Copyright (C) 2015 Cavium Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef NIC_REG_H
#define NIC_REG_H
#define NIC_PF_REG_COUNT 29573
#define NIC_VF_REG_COUNT 249
/* Physical function register offsets */
#define NIC_PF_CFG (0x0000)
#define NIC_PF_STATUS (0x0010)
#define NIC_PF_INTR_TIMER_CFG (0x0030)
#define NIC_PF_BIST_STATUS (0x0040)
#define NIC_PF_SOFT_RESET (0x0050)
#define NIC_PF_TCP_TIMER (0x0060)
#define NIC_PF_BP_CFG (0x0080)
#define NIC_PF_RRM_CFG (0x0088)
#define NIC_PF_CQM_CF (0x00A0)
#define NIC_PF_CNM_CF (0x00A8)
#define NIC_PF_CNM_STATUS (0x00B0)
#define NIC_PF_CQ_AVG_CFG (0x00C0)
#define NIC_PF_RRM_AVG_CFG (0x00C8)
#define NIC_PF_INTF_0_1_SEND_CFG (0x0200)
#define NIC_PF_INTF_0_1_BP_CFG (0x0208)
#define NIC_PF_INTF_0_1_BP_DIS_0_1 (0x0210)
#define NIC_PF_INTF_0_1_BP_SW_0_1 (0x0220)
#define NIC_PF_RBDR_BP_STATE_0_3 (0x0240)
#define NIC_PF_MAILBOX_INT (0x0410)
#define NIC_PF_MAILBOX_INT_W1S (0x0430)
#define NIC_PF_MAILBOX_ENA_W1C (0x0450)
#define NIC_PF_MAILBOX_ENA_W1S (0x0470)
#define NIC_PF_RX_ETYPE_0_7 (0x0500)
#define NIC_PF_PKIND_0_15_CFG (0x0600)
#define NIC_PF_ECC0_FLIP0 (0x1000)
#define NIC_PF_ECC1_FLIP0 (0x1008)
#define NIC_PF_ECC2_FLIP0 (0x1010)
#define NIC_PF_ECC3_FLIP0 (0x1018)
#define NIC_PF_ECC0_FLIP1 (0x1080)
#define NIC_PF_ECC1_FLIP1 (0x1088)
#define NIC_PF_ECC2_FLIP1 (0x1090)
#define NIC_PF_ECC3_FLIP1 (0x1098)
#define NIC_PF_ECC0_CDIS (0x1100)
#define NIC_PF_ECC1_CDIS (0x1108)
#define NIC_PF_ECC2_CDIS (0x1110)
#define NIC_PF_ECC3_CDIS (0x1118)
#define NIC_PF_BIST0_STATUS (0x1280)
#define NIC_PF_BIST1_STATUS (0x1288)
#define NIC_PF_BIST2_STATUS (0x1290)
#define NIC_PF_BIST3_STATUS (0x1298)
#define NIC_PF_ECC0_SBE_INT (0x2000)
#define NIC_PF_ECC0_SBE_INT_W1S (0x2008)
#define NIC_PF_ECC0_SBE_ENA_W1C (0x2010)
#define NIC_PF_ECC0_SBE_ENA_W1S (0x2018)
#define NIC_PF_ECC0_DBE_INT (0x2100)
#define NIC_PF_ECC0_DBE_INT_W1S (0x2108)
#define NIC_PF_ECC0_DBE_ENA_W1C (0x2110)
#define NIC_PF_ECC0_DBE_ENA_W1S (0x2118)
#define NIC_PF_ECC1_SBE_INT (0x2200)
#define NIC_PF_ECC1_SBE_INT_W1S (0x2208)
#define NIC_PF_ECC1_SBE_ENA_W1C (0x2210)
#define NIC_PF_ECC1_SBE_ENA_W1S (0x2218)
#define NIC_PF_ECC1_DBE_INT (0x2300)
#define NIC_PF_ECC1_DBE_INT_W1S (0x2308)
#define NIC_PF_ECC1_DBE_ENA_W1C (0x2310)
#define NIC_PF_ECC1_DBE_ENA_W1S (0x2318)
#define NIC_PF_ECC2_SBE_INT (0x2400)
#define NIC_PF_ECC2_SBE_INT_W1S (0x2408)
#define NIC_PF_ECC2_SBE_ENA_W1C (0x2410)
#define NIC_PF_ECC2_SBE_ENA_W1S (0x2418)
#define NIC_PF_ECC2_DBE_INT (0x2500)
#define NIC_PF_ECC2_DBE_INT_W1S (0x2508)
#define NIC_PF_ECC2_DBE_ENA_W1C (0x2510)
#define NIC_PF_ECC2_DBE_ENA_W1S (0x2518)
#define NIC_PF_ECC3_SBE_INT (0x2600)
#define NIC_PF_ECC3_SBE_INT_W1S (0x2608)
#define NIC_PF_ECC3_SBE_ENA_W1C (0x2610)
#define NIC_PF_ECC3_SBE_ENA_W1S (0x2618)
#define NIC_PF_ECC3_DBE_INT (0x2700)
#define NIC_PF_ECC3_DBE_INT_W1S (0x2708)
#define NIC_PF_ECC3_DBE_ENA_W1C (0x2710)
#define NIC_PF_ECC3_DBE_ENA_W1S (0x2718)
#define NIC_PF_CPI_0_2047_CFG (0x200000)
#define NIC_PF_RSSI_0_4097_RQ (0x220000)
#define NIC_PF_LMAC_0_7_CFG (0x240000)
#define NIC_PF_LMAC_0_7_SW_XOFF (0x242000)
#define NIC_PF_LMAC_0_7_CREDIT (0x244000)
#define NIC_PF_CHAN_0_255_TX_CFG (0x400000)
#define NIC_PF_CHAN_0_255_RX_CFG (0x420000)
#define NIC_PF_CHAN_0_255_SW_XOFF (0x440000)
#define NIC_PF_CHAN_0_255_CREDIT (0x460000)
#define NIC_PF_CHAN_0_255_RX_BP_CFG (0x480000)
#define NIC_PF_SW_SYNC_RX (0x490000)
#define NIC_PF_SW_SYNC_RX_DONE (0x490008)
#define NIC_PF_TL2_0_63_CFG (0x500000)
#define NIC_PF_TL2_0_63_PRI (0x520000)
#define NIC_PF_TL2_0_63_SH_STATUS (0x580000)
#define NIC_PF_TL3A_0_63_CFG (0x5F0000)
#define NIC_PF_TL3_0_255_CFG (0x600000)
#define NIC_PF_TL3_0_255_CHAN (0x620000)
#define NIC_PF_TL3_0_255_PIR (0x640000)
#define NIC_PF_TL3_0_255_SW_XOFF (0x660000)
#define NIC_PF_TL3_0_255_CNM_RATE (0x680000)
#define NIC_PF_TL3_0_255_SH_STATUS (0x6A0000)
#define NIC_PF_TL4A_0_255_CFG (0x6F0000)
#define NIC_PF_TL4_0_1023_CFG (0x800000)
#define NIC_PF_TL4_0_1023_SW_XOFF (0x820000)
#define NIC_PF_TL4_0_1023_SH_STATUS (0x840000)
#define NIC_PF_TL4A_0_1023_CNM_RATE (0x880000)
#define NIC_PF_TL4A_0_1023_CNM_STATUS (0x8A0000)
#define NIC_PF_VF_0_127_MAILBOX_0_1 (0x20002030)
#define NIC_PF_VNIC_0_127_TX_STAT_0_4 (0x20004000)
#define NIC_PF_VNIC_0_127_RX_STAT_0_13 (0x20004100)
#define NIC_PF_QSET_0_127_LOCK_0_15 (0x20006000)
#define NIC_PF_QSET_0_127_CFG (0x20010000)
#define NIC_PF_QSET_0_127_RQ_0_7_CFG (0x20010400)
#define NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG (0x20010420)
#define NIC_PF_QSET_0_127_RQ_0_7_BP_CFG (0x20010500)
#define NIC_PF_QSET_0_127_RQ_0_7_STAT_0_1 (0x20010600)
#define NIC_PF_QSET_0_127_SQ_0_7_CFG (0x20010C00)
#define NIC_PF_QSET_0_127_SQ_0_7_CFG2 (0x20010C08)
#define NIC_PF_QSET_0_127_SQ_0_7_STAT_0_1 (0x20010D00)
#define NIC_PF_MSIX_VEC_0_18_ADDR (0x000000)
#define NIC_PF_MSIX_VEC_0_CTL (0x000008)
#define NIC_PF_MSIX_PBA_0 (0x0F0000)
/* Virtual function register offsets */
#define NIC_VNIC_CFG (0x000020)
#define NIC_VF_PF_MAILBOX_0_1 (0x000130)
#define NIC_VF_INT (0x000200)
#define NIC_VF_INT_W1S (0x000220)
#define NIC_VF_ENA_W1C (0x000240)
#define NIC_VF_ENA_W1S (0x000260)
#define NIC_VNIC_RSS_CFG (0x0020E0)
#define NIC_VNIC_RSS_KEY_0_4 (0x002200)
#define NIC_VNIC_TX_STAT_0_4 (0x004000)
#define NIC_VNIC_RX_STAT_0_13 (0x004100)
#define NIC_QSET_RQ_GEN_CFG (0x010010)
#define NIC_QSET_CQ_0_7_CFG (0x010400)
#define NIC_QSET_CQ_0_7_CFG2 (0x010408)
#define NIC_QSET_CQ_0_7_THRESH (0x010410)
#define NIC_QSET_CQ_0_7_BASE (0x010420)
#define NIC_QSET_CQ_0_7_HEAD (0x010428)
#define NIC_QSET_CQ_0_7_TAIL (0x010430)
#define NIC_QSET_CQ_0_7_DOOR (0x010438)
#define NIC_QSET_CQ_0_7_STATUS (0x010440)
#define NIC_QSET_CQ_0_7_STATUS2 (0x010448)
#define NIC_QSET_CQ_0_7_DEBUG (0x010450)
#define NIC_QSET_RQ_0_7_CFG (0x010600)
#define NIC_QSET_RQ_0_7_STAT_0_1 (0x010700)
#define NIC_QSET_SQ_0_7_CFG (0x010800)
#define NIC_QSET_SQ_0_7_THRESH (0x010810)
#define NIC_QSET_SQ_0_7_BASE (0x010820)
#define NIC_QSET_SQ_0_7_HEAD (0x010828)
#define NIC_QSET_SQ_0_7_TAIL (0x010830)
#define NIC_QSET_SQ_0_7_DOOR (0x010838)
#define NIC_QSET_SQ_0_7_STATUS (0x010840)
#define NIC_QSET_SQ_0_7_DEBUG (0x010848)
#define NIC_QSET_SQ_0_7_CNM_CHG (0x010860)
#define NIC_QSET_SQ_0_7_STAT_0_1 (0x010900)
#define NIC_QSET_RBDR_0_1_CFG (0x010C00)
#define NIC_QSET_RBDR_0_1_THRESH (0x010C10)
#define NIC_QSET_RBDR_0_1_BASE (0x010C20)
#define NIC_QSET_RBDR_0_1_HEAD (0x010C28)
#define NIC_QSET_RBDR_0_1_TAIL (0x010C30)
#define NIC_QSET_RBDR_0_1_DOOR (0x010C38)
#define NIC_QSET_RBDR_0_1_STATUS0 (0x010C40)
#define NIC_QSET_RBDR_0_1_STATUS1 (0x010C48)
#define NIC_QSET_RBDR_0_1_PREFETCH_STATUS (0x010C50)
#define NIC_VF_MSIX_VECTOR_0_19_ADDR (0x000000)
#define NIC_VF_MSIX_VECTOR_0_19_CTL (0x000008)
#define NIC_VF_MSIX_PBA (0x0F0000)
/* Offsets within registers */
#define NIC_MSIX_VEC_SHIFT 4
#define NIC_Q_NUM_SHIFT 18
#define NIC_QS_ID_SHIFT 21
#define NIC_VF_NUM_SHIFT 21
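/* Illustrative sketch: per-VF and per-queue register copies are reached
 * by folding the index into the offset with the shifts above, e.g. the
 * config register of Qset `qs` (hypothetical variable) lives at:
 *
 *	NIC_PF_QSET_0_127_CFG | ((u64)qs << NIC_QS_ID_SHIFT)
 */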
/* Port kind configuration register */
struct pkind_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 reserved_42_63:22;
u64 hdr_sl:5; /* Header skip length */
u64 rx_hdr:3; /* TNS Receive header present */
u64 lenerr_en:1;/* L2 length error check enable */
u64 reserved_32_32:1;
u64 maxlen:16; /* Max frame size */
u64 minlen:16; /* Min frame size */
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 minlen:16;
u64 maxlen:16;
u64 reserved_32_32:1;
u64 lenerr_en:1;
u64 rx_hdr:3;
u64 hdr_sl:5;
u64 reserved_42_63:22;
#endif
};
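/* Illustrative sketch: the struct is a bit-accurate view of the
 * NIC_PF_PKIND_0_15_CFG register, so a value can be composed through the
 * bitfields and copied out raw, e.g. (hypothetical frame limits):
 *
 *	struct pkind_cfg p = { .minlen = 64, .maxlen = 9216 };
 *	u64 raw;
 *	memcpy(&raw, &p, sizeof(raw));
 */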
#endif /* NIC_REG_H */

sys/dev/vnic/nicvf_main.c (new file, 1667 lines; diff suppressed because it is too large)

sys/dev/vnic/nicvf_queues.c (new file, 1467 lines; diff suppressed because it is too large)

sys/dev/vnic/nicvf_queues.h (new file, 366 lines)

@@ -0,0 +1,366 @@
/*
* Copyright (C) 2015 Cavium Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef NICVF_QUEUES_H
#define NICVF_QUEUES_H
#include <linux/netdevice.h>
#include "q_struct.h"
#define MAX_QUEUE_SET 128
#define MAX_RCV_QUEUES_PER_QS 8
#define MAX_RCV_BUF_DESC_RINGS_PER_QS 2
#define MAX_SND_QUEUES_PER_QS 8
#define MAX_CMP_QUEUES_PER_QS 8
/* VF's queue interrupt ranges */
#define NICVF_INTR_ID_CQ 0
#define NICVF_INTR_ID_SQ 8
#define NICVF_INTR_ID_RBDR 16
#define NICVF_INTR_ID_MISC 18
#define NICVF_INTR_ID_QS_ERR 19
#define for_each_cq_irq(irq) \
for (irq = NICVF_INTR_ID_CQ; irq < NICVF_INTR_ID_SQ; irq++)
#define for_each_sq_irq(irq) \
for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_RBDR; irq++)
#define for_each_rbdr_irq(irq) \
for (irq = NICVF_INTR_ID_RBDR; irq < NICVF_INTR_ID_MISC; irq++)
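/* Illustrative usage sketch: the iterators walk one block's IRQ numbers;
 * `irq` and enable_one() are hypothetical:
 *
 *	int irq;
 *	for_each_cq_irq(irq)
 *		enable_one(irq);
 */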
#define RBDR_SIZE0 0ULL /* 8K entries */
#define RBDR_SIZE1 1ULL /* 16K entries */
#define RBDR_SIZE2 2ULL /* 32K entries */
#define RBDR_SIZE3 3ULL /* 64K entries */
#define RBDR_SIZE4 4ULL /* 128K entries */
#define RBDR_SIZE5 5ULL /* 256K entries */
#define RBDR_SIZE6 6ULL /* 512K entries */
#define SND_QUEUE_SIZE0 0ULL /* 1K entries */
#define SND_QUEUE_SIZE1 1ULL /* 2K entries */
#define SND_QUEUE_SIZE2 2ULL /* 4K entries */
#define SND_QUEUE_SIZE3 3ULL /* 8K entries */
#define SND_QUEUE_SIZE4 4ULL /* 16K entries */
#define SND_QUEUE_SIZE5 5ULL /* 32K entries */
#define SND_QUEUE_SIZE6 6ULL /* 64K entries */
#define CMP_QUEUE_SIZE0 0ULL /* 1K entries */
#define CMP_QUEUE_SIZE1 1ULL /* 2K entries */
#define CMP_QUEUE_SIZE2 2ULL /* 4K entries */
#define CMP_QUEUE_SIZE3 3ULL /* 8K entries */
#define CMP_QUEUE_SIZE4 4ULL /* 16K entries */
#define CMP_QUEUE_SIZE5 5ULL /* 32K entries */
#define CMP_QUEUE_SIZE6 6ULL /* 64K entries */
/* Default queue count per QS, its lengths and threshold values */
#define RBDR_CNT 1
#define RCV_QUEUE_CNT 8
#define SND_QUEUE_CNT 8
#define CMP_QUEUE_CNT 8 /* Max of RCV and SND qcount */
#define SND_QSIZE SND_QUEUE_SIZE2
#define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10))
#define MAX_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE6 + 10))
#define SND_QUEUE_THRESH 2ULL
#define MIN_SQ_DESC_PER_PKT_XMIT 2
/* 1 since timestamping is not enabled; would be 2 otherwise */
#define MAX_CQE_PER_PKT_XMIT 1
/* Keep the CQ and SQ sizes the same; if timestamping is enabled,
 * this relation will change.
 */
#define CMP_QSIZE CMP_QUEUE_SIZE2
#define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10))
#define CMP_QUEUE_CQE_THRESH 0
#define CMP_QUEUE_TIMER_THRESH 220 /* 10usec */
#define RBDR_SIZE RBDR_SIZE0
#define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13))
#define MAX_RCV_BUF_COUNT (1ULL << (RBDR_SIZE6 + 13))
#define RBDR_THRESH (RCV_BUF_COUNT / 2)
#define DMA_BUFFER_LEN 2048 /* In multiples of 128 bytes */
#define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \
(NICVF_RCV_BUF_ALIGN_BYTES * 2))
#define RCV_DATA_OFFSET NICVF_RCV_BUF_ALIGN_BYTES
#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
MAX_CQE_PER_PKT_XMIT)
/* Calculate the number of CQEs to reserve for all SQEs.
 * The drop level is expressed in 1/256ths of the CQ size;
 * '+ 1' accounts for pipelining.
 */
#define RQ_CQ_DROP ((256 / (CMP_QUEUE_LEN / \
(CMP_QUEUE_LEN - MAX_CQES_FOR_TX))) + 1)
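/* Worked example with the defaults above: SND_QUEUE_LEN and CMP_QUEUE_LEN
 * are both 4096, so MAX_CQES_FOR_TX = (4096 / 2) * 1 = 2048 and
 * RQ_CQ_DROP = (256 / (4096 / (4096 - 2048))) + 1 = 129, i.e. RQ drop
 * kicks in once roughly half (129/256) of the CQ remains for Rx.
 */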
/* Descriptor size in bytes */
#define SND_QUEUE_DESC_SIZE 16
#define CMP_QUEUE_DESC_SIZE 512
/* Buffer / descriptor alignments */
#define NICVF_RCV_BUF_ALIGN 7
#define NICVF_RCV_BUF_ALIGN_BYTES (1ULL << NICVF_RCV_BUF_ALIGN)
#define NICVF_CQ_BASE_ALIGN_BYTES 512 /* 9 bits */
#define NICVF_SQ_BASE_ALIGN_BYTES 128 /* 7 bits */
#define NICVF_ALIGNED_ADDR(ADDR, ALIGN_BYTES) ALIGN(ADDR, ALIGN_BYTES)
#define NICVF_ADDR_ALIGN_LEN(ADDR, BYTES)\
(NICVF_ALIGNED_ADDR(ADDR, BYTES) - BYTES)
#define NICVF_RCV_BUF_ALIGN_LEN(X)\
(NICVF_ALIGNED_ADDR(X, NICVF_RCV_BUF_ALIGN_BYTES) - X)
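/* Illustrative worked example: NICVF_RCV_BUF_ALIGN_LEN() yields the
 * padding needed to reach the next 128-byte boundary, e.g. for
 * X = 0x1010 it returns ALIGN(0x1010, 128) - 0x1010 = 0x1080 - 0x1010
 * = 0x70.
 */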
/* Queue enable/disable */
#define NICVF_SQ_EN BIT_ULL(19)
/* Queue reset */
#define NICVF_CQ_RESET BIT_ULL(41)
#define NICVF_SQ_RESET BIT_ULL(17)
#define NICVF_RBDR_RESET BIT_ULL(43)
enum CQ_RX_ERRLVL_E {
CQ_ERRLVL_MAC,
CQ_ERRLVL_L2,
CQ_ERRLVL_L3,
CQ_ERRLVL_L4,
};
enum CQ_RX_ERROP_E {
CQ_RX_ERROP_RE_NONE = 0x0,
CQ_RX_ERROP_RE_PARTIAL = 0x1,
CQ_RX_ERROP_RE_JABBER = 0x2,
CQ_RX_ERROP_RE_FCS = 0x7,
CQ_RX_ERROP_RE_TERMINATE = 0x9,
CQ_RX_ERROP_RE_RX_CTL = 0xb,
CQ_RX_ERROP_PREL2_ERR = 0x1f,
CQ_RX_ERROP_L2_FRAGMENT = 0x20,
CQ_RX_ERROP_L2_OVERRUN = 0x21,
CQ_RX_ERROP_L2_PFCS = 0x22,
CQ_RX_ERROP_L2_PUNY = 0x23,
CQ_RX_ERROP_L2_MAL = 0x24,
CQ_RX_ERROP_L2_OVERSIZE = 0x25,
CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
CQ_RX_ERROP_L2_LENMISM = 0x27,
CQ_RX_ERROP_L2_PCLP = 0x28,
CQ_RX_ERROP_IP_NOT = 0x41,
CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
CQ_RX_ERROP_IP_MAL = 0x43,
CQ_RX_ERROP_IP_MALD = 0x44,
CQ_RX_ERROP_IP_HOP = 0x45,
CQ_RX_ERROP_L3_ICRC = 0x46,
CQ_RX_ERROP_L3_PCLP = 0x47,
CQ_RX_ERROP_L4_MAL = 0x61,
CQ_RX_ERROP_L4_CHK = 0x62,
CQ_RX_ERROP_UDP_LEN = 0x63,
CQ_RX_ERROP_L4_PORT = 0x64,
CQ_RX_ERROP_TCP_FLAG = 0x65,
CQ_RX_ERROP_TCP_OFFSET = 0x66,
CQ_RX_ERROP_L4_PCLP = 0x67,
CQ_RX_ERROP_RBDR_TRUNC = 0x70,
};
enum CQ_TX_ERROP_E {
CQ_TX_ERROP_GOOD = 0x0,
CQ_TX_ERROP_DESC_FAULT = 0x10,
CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
CQ_TX_ERROP_SUBDC_ERR = 0x12,
CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
CQ_TX_ERROP_LOCK_VIOL = 0x83,
CQ_TX_ERROP_DATA_FAULT = 0x84,
CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
CQ_TX_ERROP_MEM_FAULT = 0x87,
CQ_TX_ERROP_CK_OVERLAP = 0x88,
CQ_TX_ERROP_CK_OFLOW = 0x89,
CQ_TX_ERROP_ENUM_LAST = 0x8a,
};
struct cmp_queue_stats {
struct tx_stats {
u64 good;
u64 desc_fault;
u64 hdr_cons_err;
u64 subdesc_err;
u64 imm_size_oflow;
u64 data_seq_err;
u64 mem_seq_err;
u64 lock_viol;
u64 data_fault;
u64 tstmp_conflict;
u64 tstmp_timeout;
u64 mem_fault;
u64 csum_overlap;
u64 csum_overflow;
} tx;
} ____cacheline_aligned_in_smp;
enum RQ_SQ_STATS {
RQ_SQ_STATS_OCTS,
RQ_SQ_STATS_PKTS,
};
struct rx_tx_queue_stats {
u64 bytes;
u64 pkts;
} ____cacheline_aligned_in_smp;
struct q_desc_mem {
dma_addr_t dma;
u64 size;
u16 q_len;
dma_addr_t phys_base;
void *base;
void *unalign_base;
};
struct rbdr {
bool enable;
u32 dma_size;
u32 frag_len;
u32 thresh; /* Threshold level for interrupt */
void *desc;
u32 head;
u32 tail;
struct q_desc_mem dmem;
} ____cacheline_aligned_in_smp;
struct rcv_queue {
bool enable;
struct rbdr *rbdr_start;
struct rbdr *rbdr_cont;
bool en_tcp_reassembly;
u8 cq_qs; /* CQ's QS to which this RQ is assigned */
u8 cq_idx; /* CQ index (0 to 7) in the QS */
u8 cont_rbdr_qs; /* Continue buffer ptrs - QS num */
u8 cont_qs_rbdr_idx; /* RBDR idx in the cont QS */
u8 start_rbdr_qs; /* First buffer ptrs - QS num */
u8 start_qs_rbdr_idx; /* RBDR idx in the above QS */
u8 caching;
struct rx_tx_queue_stats stats;
} ____cacheline_aligned_in_smp;
struct cmp_queue {
bool enable;
u16 thresh;
spinlock_t lock; /* lock to serialize processing CQEs */
void *desc;
struct q_desc_mem dmem;
struct cmp_queue_stats stats;
int irq;
} ____cacheline_aligned_in_smp;
struct snd_queue {
bool enable;
u8 cq_qs; /* CQ's QS to which this SQ is pointing */
u8 cq_idx; /* CQ index (0 to 7) in the above QS */
u16 thresh;
atomic_t free_cnt;
u32 head;
u32 tail;
u64 *skbuff;
void *desc;
struct q_desc_mem dmem;
struct rx_tx_queue_stats stats;
} ____cacheline_aligned_in_smp;
struct queue_set {
bool enable;
bool be_en;
u8 vnic_id;
u8 rq_cnt;
u8 cq_cnt;
u64 cq_len;
u8 sq_cnt;
u64 sq_len;
u8 rbdr_cnt;
u64 rbdr_len;
struct rcv_queue rq[MAX_RCV_QUEUES_PER_QS];
struct cmp_queue cq[MAX_CMP_QUEUES_PER_QS];
struct snd_queue sq[MAX_SND_QUEUES_PER_QS];
struct rbdr rbdr[MAX_RCV_BUF_DESC_RINGS_PER_QS];
} ____cacheline_aligned_in_smp;
#define GET_RBDR_DESC(RING, idx)\
(&(((struct rbdr_entry_t *)((RING)->desc))[idx]))
#define GET_SQ_DESC(RING, idx)\
(&(((struct sq_hdr_subdesc *)((RING)->desc))[idx]))
#define GET_CQ_DESC(RING, idx)\
(&(((union cq_desc_t *)((RING)->desc))[idx]))
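/* Illustrative usage sketch (hypothetical ring state): the macros view
 * the flat descriptor memory as a typed array, e.g.:
 *
 *	struct rbdr_entry_t *ent = GET_RBDR_DESC(rbdr, rbdr->tail);
 *	union cq_desc_t *cqe = GET_CQ_DESC(cq, cq_head);
 */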
/* CQ status bits */
#define CQ_WR_FULL BIT(26)
#define CQ_WR_DISABLE BIT(25)
#define CQ_WR_FAULT BIT(24)
#define CQ_CQE_COUNT (0xFFFF << 0)
#define CQ_ERR_MASK (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)
void nicvf_config_vlan_stripping(struct nicvf *nic,
netdev_features_t features);
int nicvf_set_qset_resources(struct nicvf *nic);
int nicvf_config_data_transfer(struct nicvf *nic, bool enable);
void nicvf_qset_config(struct nicvf *nic, bool enable);
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
int qidx, bool enable);
void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx);
void nicvf_sq_disable(struct nicvf *nic, int qidx);
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt);
void nicvf_sq_free_used_descs(struct net_device *netdev,
struct snd_queue *sq, int qidx);
int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb);
struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
void nicvf_rbdr_task(unsigned long data);
void nicvf_rbdr_work(struct work_struct *work);
void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);
/* Register access APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_reg_read(struct nicvf *nic, u64 offset);
void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset);
void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
u64 qidx, u64 val);
u64 nicvf_queue_reg_read(struct nicvf *nic,
u64 offset, u64 qidx);
/* Stats */
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
int nicvf_check_cqe_rx_errs(struct nicvf *nic,
struct cmp_queue *cq, struct cqe_rx_t *cqe_rx);
int nicvf_check_cqe_tx_errs(struct nicvf *nic,
struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
#endif /* NICVF_QUEUES_H */

sys/dev/vnic/q_struct.h (new file, 719 lines)

@@ -0,0 +1,719 @@
/*
* Copyright (C) 2015 Cavium Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef Q_STRUCT_H
#define Q_STRUCT_H
/* Load transaction types for reading segment bytes specified by
* NIC_SEND_GATHER_S[LD_TYPE].
*/
enum nic_send_ld_type_e {
NIC_SEND_LD_TYPE_E_LDD = 0x0,
NIC_SEND_LD_TYPE_E_LDT = 0x1,
NIC_SEND_LD_TYPE_E_LDWB = 0x2,
NIC_SEND_LD_TYPE_E_ENUM_LAST = 0x3,
};
enum ether_type_algorithm {
ETYPE_ALG_NONE = 0x0,
ETYPE_ALG_SKIP = 0x1,
ETYPE_ALG_ENDPARSE = 0x2,
ETYPE_ALG_VLAN = 0x3,
ETYPE_ALG_VLAN_STRIP = 0x4,
};
enum layer3_type {
L3TYPE_NONE = 0x00,
L3TYPE_GRH = 0x01,
L3TYPE_IPV4 = 0x04,
L3TYPE_IPV4_OPTIONS = 0x05,
L3TYPE_IPV6 = 0x06,
L3TYPE_IPV6_OPTIONS = 0x07,
L3TYPE_ET_STOP = 0x0D,
L3TYPE_OTHER = 0x0E,
};
enum layer4_type {
L4TYPE_NONE = 0x00,
L4TYPE_IPSEC_ESP = 0x01,
L4TYPE_IPFRAG = 0x02,
L4TYPE_IPCOMP = 0x03,
L4TYPE_TCP = 0x04,
L4TYPE_UDP = 0x05,
L4TYPE_SCTP = 0x06,
L4TYPE_GRE = 0x07,
L4TYPE_ROCE_BTH = 0x08,
L4TYPE_OTHER = 0x0E,
};
/* CPI and RSSI configuration */
enum cpi_algorithm_type {
CPI_ALG_NONE = 0x0,
CPI_ALG_VLAN = 0x1,
CPI_ALG_VLAN16 = 0x2,
CPI_ALG_DIFF = 0x3,
};
enum rss_algorithm_type {
RSS_ALG_NONE = 0x00,
RSS_ALG_PORT = 0x01,
RSS_ALG_IP = 0x02,
RSS_ALG_TCP_IP = 0x03,
RSS_ALG_UDP_IP = 0x04,
RSS_ALG_SCTP_IP = 0x05,
RSS_ALG_GRE_IP = 0x06,
RSS_ALG_ROCE = 0x07,
};
enum rss_hash_cfg {
RSS_HASH_L2ETC = 0x00,
RSS_HASH_IP = 0x01,
RSS_HASH_TCP = 0x02,
RSS_HASH_TCP_SYN_DIS = 0x03,
RSS_HASH_UDP = 0x04,
RSS_HASH_L4ETC = 0x05,
RSS_HASH_ROCE = 0x06,
RSS_L3_BIDI = 0x07,
RSS_L4_BIDI = 0x08,
};
/* Completion queue entry types */
enum cqe_type {
CQE_TYPE_INVALID = 0x0,
CQE_TYPE_RX = 0x2,
CQE_TYPE_RX_SPLIT = 0x3,
CQE_TYPE_RX_TCP = 0x4,
CQE_TYPE_SEND = 0x8,
CQE_TYPE_SEND_PTP = 0x9,
};
enum cqe_rx_tcp_status {
CQE_RX_STATUS_VALID_TCP_CNXT = 0x00,
CQE_RX_STATUS_INVALID_TCP_CNXT = 0x0F,
};
enum cqe_send_status {
CQE_SEND_STATUS_GOOD = 0x00,
CQE_SEND_STATUS_DESC_FAULT = 0x01,
CQE_SEND_STATUS_HDR_CONS_ERR = 0x11,
CQE_SEND_STATUS_SUBDESC_ERR = 0x12,
CQE_SEND_STATUS_IMM_SIZE_OFLOW = 0x80,
CQE_SEND_STATUS_CRC_SEQ_ERR = 0x81,
CQE_SEND_STATUS_DATA_SEQ_ERR = 0x82,
CQE_SEND_STATUS_MEM_SEQ_ERR = 0x83,
CQE_SEND_STATUS_LOCK_VIOL = 0x84,
CQE_SEND_STATUS_LOCK_UFLOW = 0x85,
CQE_SEND_STATUS_DATA_FAULT = 0x86,
CQE_SEND_STATUS_TSTMP_CONFLICT = 0x87,
CQE_SEND_STATUS_TSTMP_TIMEOUT = 0x88,
CQE_SEND_STATUS_MEM_FAULT = 0x89,
CQE_SEND_STATUS_CSUM_OVERLAP = 0x8A,
CQE_SEND_STATUS_CSUM_OVERFLOW = 0x8B,
};
enum cqe_rx_tcp_end_reason {
CQE_RX_TCP_END_FIN_FLAG_DET = 0,
CQE_RX_TCP_END_INVALID_FLAG = 1,
CQE_RX_TCP_END_TIMEOUT = 2,
CQE_RX_TCP_END_OUT_OF_SEQ = 3,
CQE_RX_TCP_END_PKT_ERR = 4,
CQE_RX_TCP_END_QS_DISABLED = 0x0F,
};
/* Packet protocol level error enumeration */
enum cqe_rx_err_level {
CQE_RX_ERRLVL_RE = 0x0,
CQE_RX_ERRLVL_L2 = 0x1,
CQE_RX_ERRLVL_L3 = 0x2,
CQE_RX_ERRLVL_L4 = 0x3,
};
/* Packet protocol level error type enumeration */
enum cqe_rx_err_opcode {
CQE_RX_ERR_RE_NONE = 0x0,
CQE_RX_ERR_RE_PARTIAL = 0x1,
CQE_RX_ERR_RE_JABBER = 0x2,
CQE_RX_ERR_RE_FCS = 0x7,
CQE_RX_ERR_RE_TERMINATE = 0x9,
CQE_RX_ERR_RE_RX_CTL = 0xb,
CQE_RX_ERR_PREL2_ERR = 0x1f,
CQE_RX_ERR_L2_FRAGMENT = 0x20,
CQE_RX_ERR_L2_OVERRUN = 0x21,
CQE_RX_ERR_L2_PFCS = 0x22,
CQE_RX_ERR_L2_PUNY = 0x23,
CQE_RX_ERR_L2_MAL = 0x24,
CQE_RX_ERR_L2_OVERSIZE = 0x25,
CQE_RX_ERR_L2_UNDERSIZE = 0x26,
CQE_RX_ERR_L2_LENMISM = 0x27,
CQE_RX_ERR_L2_PCLP = 0x28,
CQE_RX_ERR_IP_NOT = 0x41,
CQE_RX_ERR_IP_CHK = 0x42,
CQE_RX_ERR_IP_MAL = 0x43,
CQE_RX_ERR_IP_MALD = 0x44,
CQE_RX_ERR_IP_HOP = 0x45,
CQE_RX_ERR_L3_ICRC = 0x46,
CQE_RX_ERR_L3_PCLP = 0x47,
CQE_RX_ERR_L4_MAL = 0x61,
CQE_RX_ERR_L4_CHK = 0x62,
CQE_RX_ERR_UDP_LEN = 0x63,
CQE_RX_ERR_L4_PORT = 0x64,
CQE_RX_ERR_TCP_FLAG = 0x65,
CQE_RX_ERR_TCP_OFFSET = 0x66,
CQE_RX_ERR_L4_PCLP = 0x67,
CQE_RX_ERR_RBDR_TRUNC = 0x70,
};
struct cqe_rx_t {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 cqe_type:4; /* W0 */
u64 stdn_fault:1;
u64 rsvd0:1;
u64 rq_qs:7;
u64 rq_idx:3;
u64 rsvd1:12;
u64 rss_alg:4;
u64 rsvd2:4;
u64 rb_cnt:4;
u64 vlan_found:1;
u64 vlan_stripped:1;
u64 vlan2_found:1;
u64 vlan2_stripped:1;
u64 l4_type:4;
u64 l3_type:4;
u64 l2_present:1;
u64 err_level:3;
u64 err_opcode:8;
u64 pkt_len:16; /* W1 */
u64 l2_ptr:8;
u64 l3_ptr:8;
u64 l4_ptr:8;
u64 cq_pkt_len:8;
u64 align_pad:3;
u64 rsvd3:1;
u64 chan:12;
u64 rss_tag:32; /* W2 */
u64 vlan_tci:16;
u64 vlan_ptr:8;
u64 vlan2_ptr:8;
u64 rb3_sz:16; /* W3 */
u64 rb2_sz:16;
u64 rb1_sz:16;
u64 rb0_sz:16;
u64 rb7_sz:16; /* W4 */
u64 rb6_sz:16;
u64 rb5_sz:16;
u64 rb4_sz:16;
u64 rb11_sz:16; /* W5 */
u64 rb10_sz:16;
u64 rb9_sz:16;
u64 rb8_sz:16;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 err_opcode:8;
u64 err_level:3;
u64 l2_present:1;
u64 l3_type:4;
u64 l4_type:4;
u64 vlan2_stripped:1;
u64 vlan2_found:1;
u64 vlan_stripped:1;
u64 vlan_found:1;
u64 rb_cnt:4;
u64 rsvd2:4;
u64 rss_alg:4;
u64 rsvd1:12;
u64 rq_idx:3;
u64 rq_qs:7;
u64 rsvd0:1;
u64 stdn_fault:1;
u64 cqe_type:4; /* W0 */
u64 chan:12;
u64 rsvd3:1;
u64 align_pad:3;
u64 cq_pkt_len:8;
u64 l4_ptr:8;
u64 l3_ptr:8;
u64 l2_ptr:8;
u64 pkt_len:16; /* W1 */
u64 vlan2_ptr:8;
u64 vlan_ptr:8;
u64 vlan_tci:16;
u64 rss_tag:32; /* W2 */
u64 rb0_sz:16;
u64 rb1_sz:16;
u64 rb2_sz:16;
u64 rb3_sz:16; /* W3 */
u64 rb4_sz:16;
u64 rb5_sz:16;
u64 rb6_sz:16;
u64 rb7_sz:16; /* W4 */
u64 rb8_sz:16;
u64 rb9_sz:16;
u64 rb10_sz:16;
u64 rb11_sz:16; /* W5 */
#endif
u64 rb0_ptr:64;
u64 rb1_ptr:64;
u64 rb2_ptr:64;
u64 rb3_ptr:64;
u64 rb4_ptr:64;
u64 rb5_ptr:64;
u64 rb6_ptr:64;
u64 rb7_ptr:64;
u64 rb8_ptr:64;
u64 rb9_ptr:64;
u64 rb10_ptr:64;
u64 rb11_ptr:64;
};
struct cqe_rx_tcp_err_t {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 cqe_type:4; /* W0 */
u64 rsvd0:60;
u64 rsvd1:4; /* W1 */
u64 partial_first:1;
u64 rsvd2:27;
u64 rbdr_bytes:8;
u64 rsvd3:24;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 rsvd0:60;
u64 cqe_type:4;
u64 rsvd3:24;
u64 rbdr_bytes:8;
u64 rsvd2:27;
u64 partial_first:1;
u64 rsvd1:4;
#endif
};
struct cqe_rx_tcp_t {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 cqe_type:4; /* W0 */
u64 rsvd0:52;
u64 cq_tcp_status:8;
u64 rsvd1:32; /* W1 */
u64 tcp_cntx_bytes:8;
u64 rsvd2:8;
u64 tcp_err_bytes:16;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 cq_tcp_status:8;
u64 rsvd0:52;
u64 cqe_type:4; /* W0 */
u64 tcp_err_bytes:16;
u64 rsvd2:8;
u64 tcp_cntx_bytes:8;
u64 rsvd1:32; /* W1 */
#endif
};
struct cqe_send_t {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 cqe_type:4; /* W0 */
u64 rsvd0:4;
u64 sqe_ptr:16;
u64 rsvd1:4;
u64 rsvd2:10;
u64 sq_qs:7;
u64 sq_idx:3;
u64 rsvd3:8;
u64 send_status:8;
u64 ptp_timestamp:64; /* W1 */
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 send_status:8;
u64 rsvd3:8;
u64 sq_idx:3;
u64 sq_qs:7;
u64 rsvd2:10;
u64 rsvd1:4;
u64 sqe_ptr:16;
u64 rsvd0:4;
u64 cqe_type:4; /* W0 */
u64 ptp_timestamp:64; /* W1 */
#endif
};
union cq_desc_t {
u64 u[64];
struct cqe_send_t snd_hdr;
struct cqe_rx_t rx_hdr;
struct cqe_rx_tcp_t rx_tcp_hdr;
struct cqe_rx_tcp_err_t rx_tcp_err_hdr;
};
struct rbdr_entry_t {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 rsvd0:15;
u64 buf_addr:42;
u64 cache_align:7;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 cache_align:7;
u64 buf_addr:42;
u64 rsvd0:15;
#endif
};
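/* Illustrative sketch: `cache_align` pins the seven low address bits to
 * zero, so a 128-byte-aligned DMA address is stored shifted down by the
 * alignment (NICVF_RCV_BUF_ALIGN from nicvf_queues.h); `entry` and
 * `dma_addr` are hypothetical:
 *
 *	entry->buf_addr = dma_addr >> 7;
 */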
/* TCP reassembly context */
struct rbe_tcp_cnxt_t {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 tcp_pkt_cnt:12;
u64 rsvd1:4;
u64 align_hdr_bytes:4;
u64 align_ptr_bytes:4;
u64 ptr_bytes:16;
u64 rsvd2:24;
u64 cqe_type:4;
u64 rsvd0:54;
u64 tcp_end_reason:2;
u64 tcp_status:4;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 tcp_status:4;
u64 tcp_end_reason:2;
u64 rsvd0:54;
u64 cqe_type:4;
u64 rsvd2:24;
u64 ptr_bytes:16;
u64 align_ptr_bytes:4;
u64 align_hdr_bytes:4;
u64 rsvd1:4;
u64 tcp_pkt_cnt:12;
#endif
};
/* Always Big endian */
struct rx_hdr_t {
u64 opaque:32;
u64 rss_flow:8;
u64 skip_length:6;
u64 disable_rss:1;
u64 disable_tcp_reassembly:1;
u64 nodrop:1;
u64 dest_alg:2;
u64 rsvd0:2;
u64 dest_rq:11;
};
enum send_l4_csum_type {
SEND_L4_CSUM_DISABLE = 0x00,
SEND_L4_CSUM_UDP = 0x01,
SEND_L4_CSUM_TCP = 0x02,
SEND_L4_CSUM_SCTP = 0x03,
};
enum send_crc_alg {
SEND_CRCALG_CRC32 = 0x00,
SEND_CRCALG_CRC32C = 0x01,
SEND_CRCALG_ICRC = 0x02,
};
enum send_load_type {
SEND_LD_TYPE_LDD = 0x00,
SEND_LD_TYPE_LDT = 0x01,
SEND_LD_TYPE_LDWB = 0x02,
};
enum send_mem_alg_type {
SEND_MEMALG_SET = 0x00,
SEND_MEMALG_ADD = 0x08,
SEND_MEMALG_SUB = 0x09,
SEND_MEMALG_ADDLEN = 0x0A,
SEND_MEMALG_SUBLEN = 0x0B,
};
enum send_mem_dsz_type {
SEND_MEMDSZ_B64 = 0x00,
SEND_MEMDSZ_B32 = 0x01,
SEND_MEMDSZ_B8 = 0x03,
};
enum sq_subdesc_type {
SQ_DESC_TYPE_INVALID = 0x00,
SQ_DESC_TYPE_HEADER = 0x01,
SQ_DESC_TYPE_CRC = 0x02,
SQ_DESC_TYPE_IMMEDIATE = 0x03,
SQ_DESC_TYPE_GATHER = 0x04,
SQ_DESC_TYPE_MEMORY = 0x05,
};
struct sq_crc_subdesc {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 rsvd1:32;
u64 crc_ival:32;
u64 subdesc_type:4;
u64 crc_alg:2;
u64 rsvd0:10;
u64 crc_insert_pos:16;
u64 hdr_start:16;
u64 crc_len:16;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 crc_len:16;
u64 hdr_start:16;
u64 crc_insert_pos:16;
u64 rsvd0:10;
u64 crc_alg:2;
u64 subdesc_type:4;
u64 crc_ival:32;
u64 rsvd1:32;
#endif
};
struct sq_gather_subdesc {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 subdesc_type:4; /* W0 */
u64 ld_type:2;
u64 rsvd0:42;
u64 size:16;
u64 rsvd1:15; /* W1 */
u64 addr:49;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 size:16;
u64 rsvd0:42;
u64 ld_type:2;
u64 subdesc_type:4; /* W0 */
u64 addr:49;
u64 rsvd1:15; /* W1 */
#endif
};
/* SQ immediate subdescriptor */
struct sq_imm_subdesc {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 subdesc_type:4; /* W0 */
u64 rsvd0:46;
u64 len:14;
u64 data:64; /* W1 */
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 len:14;
u64 rsvd0:46;
u64 subdesc_type:4; /* W0 */
u64 data:64; /* W1 */
#endif
};
struct sq_mem_subdesc {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 subdesc_type:4; /* W0 */
u64 mem_alg:4;
u64 mem_dsz:2;
u64 wmem:1;
u64 rsvd0:21;
u64 offset:32;
u64 rsvd1:15; /* W1 */
u64 addr:49;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 offset:32;
u64 rsvd0:21;
u64 wmem:1;
u64 mem_dsz:2;
u64 mem_alg:4;
u64 subdesc_type:4; /* W0 */
u64 addr:49;
u64 rsvd1:15; /* W1 */
#endif
};
struct sq_hdr_subdesc {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 subdesc_type:4;
u64 tso:1;
u64 post_cqe:1; /* Post CQE on no error also */
u64 dont_send:1;
u64 tstmp:1;
u64 subdesc_cnt:8;
u64 csum_l4:2;
u64 csum_l3:1;
u64 rsvd0:5;
u64 l4_offset:8;
u64 l3_offset:8;
u64 rsvd1:4;
u64 tot_len:20; /* W0 */
u64 tso_sdc_cont:8;
u64 tso_sdc_first:8;
u64 tso_l4_offset:8;
u64 tso_flags_last:12;
u64 tso_flags_first:12;
u64 rsvd2:2;
u64 tso_max_paysize:14; /* W1 */
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 tot_len:20;
u64 rsvd1:4;
u64 l3_offset:8;
u64 l4_offset:8;
u64 rsvd0:5;
u64 csum_l3:1;
u64 csum_l4:2;
u64 subdesc_cnt:8;
u64 tstmp:1;
u64 dont_send:1;
u64 post_cqe:1; /* Post CQE on no error also */
u64 tso:1;
u64 subdesc_type:4; /* W0 */
u64 tso_max_paysize:14;
u64 rsvd2:2;
u64 tso_flags_first:12;
u64 tso_flags_last:12;
u64 tso_l4_offset:8;
u64 tso_sdc_first:8;
u64 tso_sdc_cont:8; /* W1 */
#endif
};
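/* Illustrative sketch: a minimal non-TSO transmit uses one header
 * subdescriptor followed by one gather subdescriptor
 * (MIN_SQ_DESC_PER_PKT_XMIT in nicvf_queues.h is 2). The header entry
 * would be filled roughly as follows; `hdr` and `pkt_len` are
 * hypothetical:
 *
 *	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
 *	hdr->subdesc_cnt = 1;	(one gather entry follows)
 *	hdr->post_cqe = 1;	(request a CQE on completion)
 *	hdr->tot_len = pkt_len;
 */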
/* Queue config register formats */
struct rq_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 reserved_2_63:62;
u64 ena:1;
u64 tcp_ena:1;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 tcp_ena:1;
u64 ena:1;
u64 reserved_2_63:62;
#endif
};
struct cq_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 reserved_43_63:21;
u64 ena:1;
u64 reset:1;
u64 caching:1;
u64 reserved_35_39:5;
u64 qsize:3;
u64 reserved_25_31:7;
u64 avg_con:9;
u64 reserved_0_15:16;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 reserved_0_15:16;
u64 avg_con:9;
u64 reserved_25_31:7;
u64 qsize:3;
u64 reserved_35_39:5;
u64 caching:1;
u64 reset:1;
u64 ena:1;
u64 reserved_43_63:21;
#endif
};
struct sq_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 reserved_20_63:44;
u64 ena:1;
u64 reserved_18_18:1;
u64 reset:1;
u64 ldwb:1;
u64 reserved_11_15:5;
u64 qsize:3;
u64 reserved_3_7:5;
u64 tstmp_bgx_intf:3;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 tstmp_bgx_intf:3;
u64 reserved_3_7:5;
u64 qsize:3;
u64 reserved_11_15:5;
u64 ldwb:1;
u64 reset:1;
u64 reserved_18_18:1;
u64 ena:1;
u64 reserved_20_63:44;
#endif
};
struct rbdr_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 reserved_45_63:19;
u64 ena:1;
u64 reset:1;
u64 ldwb:1;
u64 reserved_36_41:6;
u64 qsize:4;
u64 reserved_25_31:7;
u64 avg_con:9;
u64 reserved_12_15:4;
u64 lines:12;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 lines:12;
u64 reserved_12_15:4;
u64 avg_con:9;
u64 reserved_25_31:7;
u64 qsize:4;
u64 reserved_36_41:6;
u64 ldwb:1;
u64 reset:1;
u64 ena:1;
u64 reserved_45_63:19;
#endif
};
struct qs_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 reserved_32_63:32;
u64 ena:1;
u64 reserved_27_30:4;
u64 sq_ins_ena:1;
u64 sq_ins_pos:6;
u64 lock_ena:1;
u64 lock_viol_cqe_ena:1;
u64 send_tstmp_ena:1;
u64 be:1;
u64 reserved_7_15:9;
u64 vnic:7;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 vnic:7;
u64 reserved_7_15:9;
u64 be:1;
u64 send_tstmp_ena:1;
u64 lock_viol_cqe_ena:1;
u64 lock_ena:1;
u64 sq_ins_pos:6;
u64 sq_ins_ena:1;
u64 reserved_27_30:4;
u64 ena:1;
u64 reserved_32_63:32;
#endif
};
#endif /* Q_STRUCT_H */

sys/dev/vnic/thunder_bgx.c (new file, 1212 lines; diff suppressed because it is too large)

sys/dev/vnic/thunder_bgx.h (new file, 248 lines)

@@ -0,0 +1,248 @@
/*
* Copyright (C) 2015 Cavium Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef THUNDER_BGX_H
#define THUNDER_BGX_H
#define MAX_BGX_THUNDER 8 /* Max 4 nodes, 2 per node */
#define MAX_BGX_PER_CN88XX 2
#define MAX_LMAC_PER_BGX 4
#define MAX_BGX_CHANS_PER_LMAC 16
#define MAX_DMAC_PER_LMAC 8
#define MAX_FRAME_SIZE 9216
#define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE 2
#define MAX_LMAC (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX)
/* Registers */
#define BGX_CMRX_CFG 0x00
#define CMR_PKT_TX_EN BIT_ULL(13)
#define CMR_PKT_RX_EN BIT_ULL(14)
#define CMR_EN BIT_ULL(15)
#define BGX_CMR_GLOBAL_CFG 0x08
#define CMR_GLOBAL_CFG_FCS_STRIP BIT_ULL(6)
#define BGX_CMRX_RX_ID_MAP 0x60
#define BGX_CMRX_RX_STAT0 0x70
#define BGX_CMRX_RX_STAT1 0x78
#define BGX_CMRX_RX_STAT2 0x80
#define BGX_CMRX_RX_STAT3 0x88
#define BGX_CMRX_RX_STAT4 0x90
#define BGX_CMRX_RX_STAT5 0x98
#define BGX_CMRX_RX_STAT6 0xA0
#define BGX_CMRX_RX_STAT7 0xA8
#define BGX_CMRX_RX_STAT8 0xB0
#define BGX_CMRX_RX_STAT9 0xB8
#define BGX_CMRX_RX_STAT10 0xC0
#define BGX_CMRX_RX_BP_DROP 0xC8
#define BGX_CMRX_RX_DMAC_CTL 0x0E8
#define BGX_CMR_RX_DMACX_CAM 0x200
#define RX_DMACX_CAM_EN BIT_ULL(48)
#define RX_DMACX_CAM_LMACID(x) (x << 49)
#define RX_DMAC_COUNT 32
#define BGX_CMR_RX_STREERING 0x300
#define RX_TRAFFIC_STEER_RULE_COUNT 8
#define BGX_CMR_CHAN_MSK_AND 0x450
#define BGX_CMR_BIST_STATUS 0x460
#define BGX_CMR_RX_LMACS 0x468
#define BGX_CMRX_TX_STAT0 0x600
#define BGX_CMRX_TX_STAT1 0x608
#define BGX_CMRX_TX_STAT2 0x610
#define BGX_CMRX_TX_STAT3 0x618
#define BGX_CMRX_TX_STAT4 0x620
#define BGX_CMRX_TX_STAT5 0x628
#define BGX_CMRX_TX_STAT6 0x630
#define BGX_CMRX_TX_STAT7 0x638
#define BGX_CMRX_TX_STAT8 0x640
#define BGX_CMRX_TX_STAT9 0x648
#define BGX_CMRX_TX_STAT10 0x650
#define BGX_CMRX_TX_STAT11 0x658
#define BGX_CMRX_TX_STAT12 0x660
#define BGX_CMRX_TX_STAT13 0x668
#define BGX_CMRX_TX_STAT14 0x670
#define BGX_CMRX_TX_STAT15 0x678
#define BGX_CMRX_TX_STAT16 0x680
#define BGX_CMRX_TX_STAT17 0x688
#define BGX_CMR_TX_LMACS 0x1000
#define BGX_SPUX_CONTROL1 0x10000
#define SPU_CTL_LOW_POWER BIT_ULL(11)
#define SPU_CTL_LOOPBACK BIT_ULL(14)
#define SPU_CTL_RESET BIT_ULL(15)
#define BGX_SPUX_STATUS1 0x10008
#define SPU_STATUS1_RCV_LNK BIT_ULL(2)
#define BGX_SPUX_STATUS2 0x10020
#define SPU_STATUS2_RCVFLT BIT_ULL(10)
#define BGX_SPUX_BX_STATUS 0x10028
#define SPU_BX_STATUS_RX_ALIGN BIT_ULL(12)
#define BGX_SPUX_BR_STATUS1 0x10030
#define SPU_BR_STATUS_BLK_LOCK BIT_ULL(0)
#define SPU_BR_STATUS_RCV_LNK BIT_ULL(12)
#define BGX_SPUX_BR_PMD_CRTL 0x10068
#define SPU_PMD_CRTL_TRAIN_EN BIT_ULL(1)
#define BGX_SPUX_BR_PMD_LP_CUP 0x10078
#define BGX_SPUX_BR_PMD_LD_CUP 0x10088
#define BGX_SPUX_BR_PMD_LD_REP 0x10090
#define BGX_SPUX_FEC_CONTROL 0x100A0
#define SPU_FEC_CTL_FEC_EN BIT_ULL(0)
#define SPU_FEC_CTL_ERR_EN BIT_ULL(1)
#define BGX_SPUX_AN_CONTROL 0x100C8
#define SPU_AN_CTL_AN_EN BIT_ULL(12)
#define SPU_AN_CTL_XNP_EN BIT_ULL(13)
#define BGX_SPUX_AN_ADV 0x100D8
#define BGX_SPUX_MISC_CONTROL 0x10218
#define SPU_MISC_CTL_INTLV_RDISP BIT_ULL(10)
#define SPU_MISC_CTL_RX_DIS BIT_ULL(12)
#define BGX_SPUX_INT 0x10220 /* +(0..3) << 20 */
#define BGX_SPUX_INT_W1S 0x10228
#define BGX_SPUX_INT_ENA_W1C 0x10230
#define BGX_SPUX_INT_ENA_W1S 0x10238
#define BGX_SPU_DBG_CONTROL 0x10300
#define SPU_DBG_CTL_AN_ARB_LINK_CHK_EN BIT_ULL(18)
#define SPU_DBG_CTL_AN_NONCE_MCT_DIS BIT_ULL(29)
#define BGX_SMUX_RX_INT 0x20000
#define BGX_SMUX_RX_JABBER 0x20030
#define BGX_SMUX_RX_CTL 0x20048
#define SMU_RX_CTL_STATUS (3ull << 0)
#define BGX_SMUX_TX_APPEND 0x20100
#define SMU_TX_APPEND_FCS_D BIT_ULL(2)
#define BGX_SMUX_TX_MIN_PKT 0x20118
#define BGX_SMUX_TX_INT 0x20140
#define BGX_SMUX_TX_CTL 0x20178
#define SMU_TX_CTL_DIC_EN BIT_ULL(0)
#define SMU_TX_CTL_UNI_EN BIT_ULL(1)
#define SMU_TX_CTL_LNK_STATUS (3ull << 4)
#define BGX_SMUX_TX_THRESH 0x20180
#define BGX_SMUX_CTL 0x20200
#define SMU_CTL_RX_IDLE BIT_ULL(0)
#define SMU_CTL_TX_IDLE BIT_ULL(1)
#define BGX_GMP_PCS_MRX_CTL 0x30000
#define PCS_MRX_CTL_RST_AN BIT_ULL(9)
#define PCS_MRX_CTL_PWR_DN BIT_ULL(11)
#define PCS_MRX_CTL_AN_EN BIT_ULL(12)
#define PCS_MRX_CTL_LOOPBACK1 BIT_ULL(14)
#define PCS_MRX_CTL_RESET BIT_ULL(15)
#define BGX_GMP_PCS_MRX_STATUS 0x30008
#define PCS_MRX_STATUS_AN_CPT BIT_ULL(5)
#define BGX_GMP_PCS_ANX_AN_RESULTS 0x30020
#define BGX_GMP_PCS_SGM_AN_ADV 0x30068
#define BGX_GMP_PCS_MISCX_CTL 0x30078
#define PCS_MISC_CTL_GMX_ENO BIT_ULL(11)
#define PCS_MISC_CTL_SAMP_PT_MASK 0x7Full
#define BGX_GMP_GMI_PRTX_CFG 0x38020
#define GMI_PORT_CFG_SPEED BIT_ULL(1)
#define GMI_PORT_CFG_DUPLEX BIT_ULL(2)
#define GMI_PORT_CFG_SLOT_TIME BIT_ULL(3)
#define GMI_PORT_CFG_SPEED_MSB BIT_ULL(8)
#define BGX_GMP_GMI_RXX_JABBER 0x38038
#define BGX_GMP_GMI_TXX_THRESH 0x38210
#define BGX_GMP_GMI_TXX_APPEND 0x38218
#define BGX_GMP_GMI_TXX_SLOT 0x38220
#define BGX_GMP_GMI_TXX_BURST 0x38228
#define BGX_GMP_GMI_TXX_MIN_PKT 0x38240
#define BGX_GMP_GMI_TXX_SGMII_CTL 0x38300
#define BGX_MSIX_VEC_0_29_ADDR 0x400000 /* +(0..29) << 4 */
#define BGX_MSIX_VEC_0_29_CTL 0x400008
#define BGX_MSIX_PBA_0 0x4F0000
/* MSI-X interrupts */
#define BGX_MSIX_VECTORS 30
#define BGX_LMAC_VEC_OFFSET 7
#define BGX_MSIX_VEC_SHIFT 4
#define CMRX_INT 0
#define SPUX_INT 1
#define SMUX_RX_INT 2
#define SMUX_TX_INT 3
#define GMPX_PCS_INT 4
#define GMPX_GMI_RX_INT 5
#define GMPX_GMI_TX_INT 6
#define CMR_MEM_INT 28
#define SPU_MEM_INT 29
#define LMAC_INTR_LINK_UP BIT(0)
#define LMAC_INTR_LINK_DOWN BIT(1)
/* RX_DMAC_CTL configuration */
enum MCAST_MODE {
MCAST_MODE_REJECT,
MCAST_MODE_ACCEPT,
MCAST_MODE_CAM_FILTER,
RSVD
};
#define BCAST_ACCEPT 1
#define CAM_ACCEPT 1
void octeon_mdiobus_force_mod_depencency(void);
void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac);
unsigned bgx_get_map(int node);
int bgx_get_lmac_count(int node, int bgx);
const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid);
void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac);
void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status);
void bgx_lmac_internal_loopback(int node, int bgx_idx,
int lmac_idx, bool enable);
u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx);
u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx);
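/* Illustrative usage sketch (hypothetical `node`, `bgx_idx`, `lmacid`):
 * a consumer such as the VNIC PF driver uses the API roughly as:
 *
 *	int lmacs = bgx_get_lmac_count(node, bgx_idx);
 *	const u8 *mac = bgx_get_lmac_mac(node, bgx_idx, lmacid);
 */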
#define BGX_RX_STATS_COUNT 11
#define BGX_TX_STATS_COUNT 18
struct bgx_stats {
u64 rx_stats[BGX_RX_STATS_COUNT];
u64 tx_stats[BGX_TX_STATS_COUNT];
};
#define BGX_IN_PROMISCUOUS_MODE 1
enum LMAC_TYPE {
BGX_MODE_SGMII = 0, /* 1 lane, 1.250 Gbaud */
BGX_MODE_XAUI = 1, /* 4 lanes, 3.125 Gbaud */
BGX_MODE_DXAUI = 1, /* 4 lanes, 6.250 Gbaud */
BGX_MODE_RXAUI = 2, /* 2 lanes, 6.250 Gbaud */
BGX_MODE_XFI = 3, /* 1 lane, 10.3125 Gbaud */
BGX_MODE_XLAUI = 4, /* 4 lanes, 10.3125 Gbaud */
BGX_MODE_10G_KR = 3,/* 1 lane, 10.3125 Gbaud */
BGX_MODE_40G_KR = 4,/* 4 lanes, 10.3125 Gbaud */
};
enum qlm_mode {
QLM_MODE_SGMII, /* SGMII, each lane independent */
QLM_MODE_XAUI_1X4, /* 1 XAUI or DXAUI, 4 lanes */
QLM_MODE_RXAUI_2X2, /* 2 RXAUI, 2 lanes each */
QLM_MODE_XFI_4X1, /* 4 XFI, 1 lane each */
QLM_MODE_XLAUI_1X4, /* 1 XLAUI, 4 lanes each */
QLM_MODE_10G_KR_4X1, /* 4 10GBASE-KR, 1 lane each */
QLM_MODE_40G_KR4_1X4, /* 1 40GBASE-KR4, 4 lanes each */
};
#endif /* THUNDER_BGX_H */