Delta D2489 - Add SRIOV support to the Intel 10G driver.

NOTE: This is a technology preview; while it has undergone
      development testing, Intel has not yet completed full
      validation of the feature. It is being integrated for
      early access and customer testing.
Jack F Vogel 2015-06-01 17:43:34 +00:00
parent 3012653750
commit 48056c88e1
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=283883
7 changed files with 1382 additions and 180 deletions

[File diff suppressed because it is too large]

(next file)

@@ -43,7 +43,7 @@
/*********************************************************************
* Driver version
*********************************************************************/
-char ixv_driver_version[] = "1.2.5";
+char ixv_driver_version[] = "1.4.0";
/*********************************************************************
* PCI Device ID Table
@@ -151,6 +151,10 @@ MODULE_DEPEND(ixv, ether, 1, 1, 1);
** TUNEABLE PARAMETERS:
*/
/* Number of Queues - do not exceed MSIX vectors - 1 */
static int ixv_num_queues = 1;
TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
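
[Editor's note: TUNABLE_INT exposes this as a boot-time kenv knob; a hypothetical /boot/loader.conf entry (the value is assumed, and it must leave one MSIX vector free for the mailbox):

	hw.ixv.num_queues="2"
]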
/*
** AIM: Adaptive Interrupt Moderation
** which means that the interrupt rate
@@ -338,6 +342,11 @@ ixv_attach(device_t dev)
ixgbe_reset_hw(hw);
/* Get the Mailbox API version */
device_printf(dev,"MBX API %d negotiation: %d\n",
ixgbe_mbox_api_11,
ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11));
error = ixgbe_init_hw(hw);
if (error) {
device_printf(dev,"Hardware Initialization Failure\n");
@@ -1313,10 +1322,13 @@ static int
ixv_setup_msix(struct adapter *adapter)
{
device_t dev = adapter->dev;
-int rid, want;
+int rid, want, msgs;
/* First try MSI/X */
/* Must have at least 2 MSIX vectors */
msgs = pci_msix_count(dev);
if (msgs < 2)
goto out;
rid = PCIR_BAR(3);
adapter->msix_mem = bus_alloc_resource_any(dev,
SYS_RES_MEMORY, &rid, RF_ACTIVE);
@@ -1327,11 +1339,16 @@ ixv_setup_msix(struct adapter *adapter)
}
/*
-** Want two vectors: one for a queue,
+** Want vectors for the queues,
** plus an additional for mailbox.
*/
-want = 2;
-if ((pci_alloc_msix(dev, &want) == 0) && (want == 2)) {
+want = adapter->num_queues + 1;
+if (want > msgs) {
+want = msgs;
+adapter->num_queues = msgs - 1;
+} else
+msgs = want;
+if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
device_printf(adapter->dev,
"Using MSIX interrupts with %d vectors\n", want);
return (want);
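
[Editor's note: to make the vector accounting concrete with assumed numbers: if pci_msix_count() reports 3 vectors and the tunable asked for 4 queues, want = 4 + 1 = 5 exceeds msgs = 3, so the driver clamps to 3 vectors and drops to num_queues = 2, i.e. queues plus one mailbox vector.]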
@@ -1370,7 +1387,9 @@ ixv_allocate_pci_resources(struct adapter *adapter)
rman_get_bushandle(adapter->pci_mem);
adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
-adapter->num_queues = 1;
+/* Pick up the tuneable queues */
+adapter->num_queues = ixv_num_queues;
adapter->hw.back = &adapter->osdep;
/*
@@ -1591,32 +1610,41 @@ ixv_initialize_receive_units(struct adapter *adapter)
{
struct rx_ring *rxr = adapter->rx_rings;
struct ixgbe_hw *hw = &adapter->hw;
-struct ifnet *ifp = adapter->ifp;
-u32 bufsz, fctrl, rxcsum, hlreg;
+struct ifnet *ifp = adapter->ifp;
+u32 bufsz, rxcsum, psrtype;
+int max_frame;
-/* Enable broadcasts */
-fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
-fctrl |= IXGBE_FCTRL_BAM;
-fctrl |= IXGBE_FCTRL_DPF;
-fctrl |= IXGBE_FCTRL_PMCF;
-IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
-/* Set for Jumbo Frames? */
-hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
-if (ifp->if_mtu > ETHERMTU) {
-hlreg |= IXGBE_HLREG0_JUMBOEN;
+if (ifp->if_mtu > ETHERMTU)
bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-} else {
-hlreg &= ~IXGBE_HLREG0_JUMBOEN;
+else
bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-}
-IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
+psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
+IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
+IXGBE_PSRTYPE_L2HDR;
+IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
+/* Tell PF our expected packet-size */
+max_frame = ifp->if_mtu + IXGBE_MTU_HDR;
+ixgbevf_rlpml_set_vf(hw, max_frame);
for (int i = 0; i < adapter->num_queues; i++, rxr++) {
u64 rdba = rxr->rxdma.dma_paddr;
u32 reg, rxdctl;
/* Disable the queue */
rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
rxdctl &= ~(IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME);
IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
for (int j = 0; j < 10; j++) {
if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
IXGBE_RXDCTL_ENABLE)
msec_delay(1);
else
break;
}
wmb();
/* Setup the Base and Length of the Rx Descriptor Ring */
IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
(rdba & 0x00000000ffffffffULL));
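
[Editor's note on the buffer-size math above: the SRRCTL BSIZEPKT field is programmed in 1 KB units (IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10 in the shared headers), so 4096- and 2048-byte buffers become field values 4 and 2.]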
@@ -1625,6 +1653,10 @@ ixv_initialize_receive_units(struct adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
/* Reset the ring indices */
IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
/* Set up the SRRCTL register */
reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
@@ -1633,14 +1665,14 @@ ixv_initialize_receive_units(struct adapter *adapter)
reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
-/* Setup the HW Rx Head and Tail Descriptor Pointers */
-IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
+/* Set the Tail Pointer */
IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
adapter->num_rx_desc - 1);
/* Set the processing limit */
rxr->process_limit = ixv_rx_process_limit;
-/* Set Rx Tail register */
+/* Capture Rx Tail index */
rxr->tail = IXGBE_VFRDT(rxr->me);
/* Do the queue enabling last */
@@ -2033,9 +2065,7 @@ ixv_add_stats_sysctls(struct adapter *adapter)
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
CTLFLAG_RD, &(txr->total_packets),
"TX Packets");
-SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_bytes",
-CTLFLAG_RD, &(txr->bytes), 0,
-"TX Bytes");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_no_desc",
CTLFLAG_RD, &(txr->no_desc_avail),
"# of times not enough descriptors were available during TX");

(next file)

@@ -578,7 +578,6 @@ ixgbe_setup_transmit_ring(struct tx_ring *txr)
{
struct adapter *adapter = txr->adapter;
struct ixgbe_tx_buf *txbuf;
-int i;
#ifdef DEV_NETMAP
struct netmap_adapter *na = NA(adapter->ifp);
struct netmap_slot *slot;
@@ -601,7 +600,7 @@ ixgbe_setup_transmit_ring(struct tx_ring *txr)
/* Free any existing tx buffers. */
txbuf = txr->tx_buffers;
-for (i = 0; i < txr->num_desc; i++, txbuf++) {
+for (int i = 0; i < txr->num_desc; i++, txbuf++) {
if (txbuf->m_head != NULL) {
bus_dmamap_sync(txr->txtag, txbuf->map,
BUS_DMASYNC_POSTWRITE);
@@ -622,7 +621,8 @@ ixgbe_setup_transmit_ring(struct tx_ring *txr)
*/
if (slot) {
int si = netmap_idx_n2k(&na->tx_rings[txr->me], i);
-netmap_load_map(na, txr->txtag, txbuf->map, NMB(na, slot + si));
+netmap_load_map(na, txr->txtag,
+txbuf->map, NMB(na, slot + si));
}
#endif /* DEV_NETMAP */
/* Clear the EOP descriptor pointer */
@@ -777,8 +777,7 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
if (mp->m_flags & M_VLANTAG) {
vtag = htole16(mp->m_pkthdr.ether_vtag);
vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
-}
-else if (!IXGBE_IS_X550VF(adapter) && (offload == FALSE))
+} else if (!IXGBE_IS_X550VF(adapter) && (offload == FALSE))
return (0);
/*
@@ -1379,7 +1378,7 @@ ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
struct adapter *adapter = rxr->adapter;
device_t dev = adapter->dev;
struct ixgbe_rx_buf *rxbuf;
-int i, bsize, error;
+int bsize, error;
bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc;
if (!(rxr->rx_buffers =
@@ -1406,7 +1405,7 @@ ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
goto fail;
}
-for (i = 0; i < rxr->num_desc; i++, rxbuf++) {
+for (int i = 0; i < rxr->num_desc; i++, rxbuf++) {
rxbuf = &rxr->rx_buffers[i];
error = bus_dmamap_create(rxr->ptag, 0, &rxbuf->pmap);
if (error) {
@@ -1428,9 +1427,8 @@ static void
ixgbe_free_receive_ring(struct rx_ring *rxr)
{
struct ixgbe_rx_buf *rxbuf;
-int i;
-for (i = 0; i < rxr->num_desc; i++) {
+for (int i = 0; i < rxr->num_desc; i++) {
rxbuf = &rxr->rx_buffers[i];
if (rxbuf->buf != NULL) {
bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
@@ -2140,6 +2138,9 @@ ixgbe_allocate_queues(struct adapter *adapter)
struct rx_ring *rxr;
int rsize, tsize, error = IXGBE_SUCCESS;
int txconf = 0, rxconf = 0;
#ifdef PCI_IOV
enum ixgbe_iov_mode iov_mode;
#endif
/* First allocate the top level queue structs */
if (!(adapter->queues =
@@ -2172,6 +2173,12 @@ ixgbe_allocate_queues(struct adapter *adapter)
tsize = roundup2(adapter->num_tx_desc *
sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
#ifdef PCI_IOV
iov_mode = ixgbe_get_iov_mode(adapter);
adapter->pool = ixgbe_max_vfs(iov_mode);
#else
adapter->pool = 0;
#endif
/*
* Now set up the TX queues, txconf is needed to handle the
* possibility that things fail midcourse and we need to
@@ -2181,7 +2188,11 @@ ixgbe_allocate_queues(struct adapter *adapter)
/* Set up some basics */
txr = &adapter->tx_rings[i];
txr->adapter = adapter;
#ifdef PCI_IOV
txr->me = ixgbe_pf_que_index(iov_mode, i);
#else
txr->me = i;
#endif
txr->num_desc = adapter->num_tx_desc;
/* Initialize the TX side lock */
@@ -2228,7 +2239,11 @@ ixgbe_allocate_queues(struct adapter *adapter)
rxr = &adapter->rx_rings[i];
/* Set up some basics */
rxr->adapter = adapter;
#ifdef PCI_IOV
rxr->me = ixgbe_pf_que_index(iov_mode, i);
#else
rxr->me = i;
#endif
rxr->num_desc = adapter->num_rx_desc;
/* Initialize the RX side lock */

(next file)

@@ -92,11 +92,21 @@
#include <machine/smp.h>
#include <sys/sbuf.h>
+#ifdef PCI_IOV
+#include <sys/nv.h>
+#include <sys/iov_schema.h>
+#endif
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
#include "ixgbe_vf.h"
+#ifdef PCI_IOV
+#include "ixgbe_common.h"
+#include "ixgbe_mbx.h"
+#endif
/* Tunables */
/*
@@ -244,6 +254,29 @@
(_adapter->hw.mac.type == ixgbe_mac_X540_vf) || \
(_adapter->hw.mac.type == ixgbe_mac_82599_vf))
#ifdef PCI_IOV
#define IXGBE_VF_INDEX(vmdq) ((vmdq) / 32)
#define IXGBE_VF_BIT(vmdq) (1 << ((vmdq) % 32))
#define IXGBE_VT_MSG_MASK 0xFFFF
#define IXGBE_VT_MSGINFO(msg) \
(((msg) & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT)
#define IXGBE_VF_GET_QUEUES_RESP_LEN 5
#define IXGBE_API_VER_1_0 0
#define IXGBE_API_VER_2_0 1 /* Solaris API. Not supported. */
#define IXGBE_API_VER_1_1 2
#define IXGBE_API_VER_UNKNOWN UINT16_MAX
enum ixgbe_iov_mode {
IXGBE_64_VM,
IXGBE_32_VM,
IXGBE_NO_VM
};
#endif /* PCI_IOV */
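
[Editor's note: as an illustration of the two bitmap macros (register array and pool number hypothetical), pool 37 lands in the second 32-bit word with bit 5 set:

	u32 vfre[2] = { 0, 0 };	/* e.g. a pair of per-VF enable words */
	vfre[IXGBE_VF_INDEX(37)] |= IXGBE_VF_BIT(37);	/* vfre[1] |= 1 << 5 */
]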
/*
*****************************************************************************
@@ -262,6 +295,7 @@ typedef struct _ixgbe_vendor_info_t {
unsigned int index;
} ixgbe_vendor_info_t;
struct ixgbe_tx_buf {
union ixgbe_adv_tx_desc *eop;
struct mbuf *m_head;
@@ -290,6 +324,11 @@ struct ixgbe_dma_alloc {
int dma_nseg;
};
struct ixgbe_mc_addr {
u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
u32 vmdq;
};
/*
** Driver queue struct: this is the interrupt container
** for the associated tx and rx ring.
@@ -387,6 +426,28 @@ struct rx_ring {
#endif
};
#ifdef PCI_IOV
#define IXGBE_VF_CTS (1 << 0) /* VF is clear to send. */
#define IXGBE_VF_CAP_MAC (1 << 1) /* VF is permitted to change MAC. */
#define IXGBE_VF_CAP_VLAN (1 << 2) /* VF is permitted to join vlans. */
#define IXGBE_VF_ACTIVE (1 << 3) /* VF is active. */
#define IXGBE_MAX_VF_MC 30 /* Max number of multicast entries */
struct ixgbe_vf {
u_int pool;
u_int rar_index;
u_int max_frame_size;
uint32_t flags;
uint8_t ether_addr[ETHER_ADDR_LEN];
uint16_t mc_hash[IXGBE_MAX_VF_MC];
uint16_t num_mc_hashes;
uint16_t default_vlan;
uint16_t vlan_tag;
uint16_t api_ver;
};
#endif /* PCI_IOV */
/* Our adapter structure */
struct adapter {
struct ifnet *ifp;
@@ -438,8 +499,8 @@ struct adapter {
bool link_up;
u32 vector;
u16 dmac;
-bool eee_support;
bool eee_enabled;
+u32 phy_layer;
/* Power management-related */
bool wol_support;
@@ -453,6 +514,9 @@ struct adapter {
struct task link_task; /* Link tasklet */
struct task mod_task; /* SFP tasklet */
struct task msf_task; /* Multispeed Fiber */
#ifdef PCI_IOV
struct task mbx_task; /* VF -> PF mailbox interrupt */
#endif /* PCI_IOV */
#ifdef IXGBE_FDIR
int fdir_reinit;
struct task fdir_task;
@@ -484,8 +548,12 @@ struct adapter {
u32 num_rx_desc;
/* Multicast array memory */
-u8 *mta;
+struct ixgbe_mc_addr *mta;
int num_vfs;
int pool;
#ifdef PCI_IOV
struct ixgbe_vf *vfs;
#endif
/* Misc stats maintained by the driver */
unsigned long dropped_pkts;
@@ -671,4 +739,150 @@ bool ixgbe_rxeof(struct ix_queue *);
int ixgbe_dma_malloc(struct adapter *,
bus_size_t, struct ixgbe_dma_alloc *, int);
void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
#ifdef PCI_IOV
static inline boolean_t
ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
{
return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
}
static inline void
ixgbe_send_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
if (vf->flags & IXGBE_VF_CTS)
msg |= IXGBE_VT_MSGTYPE_CTS;
ixgbe_write_mbx(&adapter->hw, &msg, 1, vf->pool);
}
static inline void
ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
msg &= IXGBE_VT_MSG_MASK;
ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_ACK);
}
static inline void
ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
msg &= IXGBE_VT_MSG_MASK;
ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_NACK);
}
static inline void
ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf)
{
if (!(vf->flags & IXGBE_VF_CTS))
ixgbe_send_vf_nack(adapter, vf, 0);
}
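
[Editor's note: a hypothetical PF-side use of these helpers, echoing a VF request back with an ACK or NACK depending on its capability bits (the opcode comes from the ixgbe_mbx.h definitions shown later):

	if (vf->flags & IXGBE_VF_CAP_MAC)
		ixgbe_send_vf_ack(adapter, vf, IXGBE_VF_SET_MAC_ADDR);
	else
		ixgbe_send_vf_nack(adapter, vf, IXGBE_VF_SET_MAC_ADDR);
]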
static inline enum ixgbe_iov_mode
ixgbe_get_iov_mode(struct adapter *adapter)
{
if (adapter->num_vfs == 0)
return (IXGBE_NO_VM);
if (adapter->num_queues <= 2)
return (IXGBE_64_VM);
else if (adapter->num_queues <= 4)
return (IXGBE_32_VM);
else
return (IXGBE_NO_VM);
}
static inline u16
ixgbe_max_vfs(enum ixgbe_iov_mode mode)
{
/*
* We return odd numbers below because we
* reserve 1 VM's worth of queues for the PF.
*/
switch (mode) {
case IXGBE_64_VM:
return (63);
case IXGBE_32_VM:
return (31);
case IXGBE_NO_VM:
default:
return (0);
}
}
static inline int
ixgbe_vf_queues(enum ixgbe_iov_mode mode)
{
switch (mode) {
case IXGBE_64_VM:
return (2);
case IXGBE_32_VM:
return (4);
case IXGBE_NO_VM:
default:
return (0);
}
}
static inline int
ixgbe_vf_que_index(enum ixgbe_iov_mode mode, u32 vfnum, int num)
{
return ((vfnum * ixgbe_vf_queues(mode)) + num);
}
static inline int
ixgbe_pf_que_index(enum ixgbe_iov_mode mode, int num)
{
return (ixgbe_vf_que_index(mode, ixgbe_max_vfs(mode), num));
}
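
[Editor's note: worked through for IXGBE_32_VM (an assumed configuration), ixgbe_vf_queues() gives 4 queues per pool and ixgbe_max_vfs() gives 31, the odd count reserving the last pool for the PF. So VF 3's queue 1 is ixgbe_vf_que_index(IXGBE_32_VM, 3, 1) = 3 * 4 + 1 = 13, and PF queue 0 sits past every VF pool at ixgbe_pf_que_index(IXGBE_32_VM, 0) = 31 * 4 + 0 = 124.]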
static inline void
ixgbe_update_max_frame(struct adapter * adapter, int max_frame)
{
if (adapter->max_frame_size < max_frame)
adapter->max_frame_size = max_frame;
}
static inline u32
ixgbe_get_mrqc(enum ixgbe_iov_mode mode)
{
u32 mrqc = 0;
switch (mode) {
case IXGBE_64_VM:
mrqc = IXGBE_MRQC_VMDQRSS64EN;
break;
case IXGBE_32_VM:
mrqc = IXGBE_MRQC_VMDQRSS32EN;
break;
case IXGBE_NO_VM:
mrqc = 0;
break;
default:
panic("Unexpected SR-IOV mode %d", mode);
}
return(mrqc);
}
static inline u32
ixgbe_get_mtqc(enum ixgbe_iov_mode mode)
{
uint32_t mtqc = 0;
switch (mode) {
case IXGBE_64_VM:
mtqc |= IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
break;
case IXGBE_32_VM:
mtqc |= IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
break;
case IXGBE_NO_VM:
mtqc = IXGBE_MTQC_64Q_1PB;
break;
default:
panic("Unexpected SR-IOV mode %d", mode);
}
return(mtqc);
}
#endif /* PCI_IOV */
#endif /* _IXGBE_H_ */

(next file)

@@ -80,6 +80,21 @@
/* bits 23:16 are used for extra info for certain messages */
#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
/* definitions to support mailbox API version negotiation */
/*
* each element denotes a version of the API; existing numbers may not
* change; any additions must go at the end
*/
enum ixgbe_pfvf_api_rev {
ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */
ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */
ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */
/* This value should always be last */
ixgbe_mbox_api_unknown, /* indicates that API version is not known */
};
/* mailbox API, legacy requests */
#define IXGBE_VF_RESET 0x01 /* VF requests reset */
#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
@@ -106,6 +121,18 @@
#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
/* mailbox API, version 2.0 VF requests */
#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */
#define IXGBE_VF_ENABLE_MACADDR 0x0A /* enable MAC address */
#define IXGBE_VF_DISABLE_MACADDR 0x0B /* disable MAC address */
#define IXGBE_VF_GET_MACADDRS 0x0C /* get all configured MAC addrs */
#define IXGBE_VF_SET_MCAST_PROMISC 0x0D /* enable multicast promiscuous */
#define IXGBE_VF_GET_MTU 0x0E /* get bounds on MTU */
#define IXGBE_VF_SET_MTU 0x0F /* set a specific MTU */
/* mailbox API, version 2.0 PF requests */
#define IXGBE_PF_TRANSPARENT_VLAN 0x0101 /* enable transparent vlan */
#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
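
[Editor's note: at these defaults the mailbox init loop retries up to 2000 times at 500 microsecond intervals, i.e. roughly one second before giving up on the PF.]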

(next file)

@@ -185,6 +185,8 @@ s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw)
/* Call adapter stop to disable tx/rx and clear interrupts */
hw->mac.ops.stop_adapter(hw);
/* reset the api version */
hw->api_version = ixgbe_mbox_api_10;
DEBUGOUT("Issuing a function level reset to MAC\n");
@@ -666,6 +668,57 @@ int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
unsigned int *default_tc)
{
-UNREFERENCED_3PARAMETER(hw, num_tcs, default_tc);
-return IXGBE_SUCCESS;
+int err;
+u32 msg[5];
/* do nothing if API doesn't support ixgbevf_get_queues */
switch (hw->api_version) {
case ixgbe_mbox_api_11:
break;
default:
return 0;
}
/* Fetch queue configuration from the PF */
msg[0] = IXGBE_VF_GET_QUEUES;
msg[1] = msg[2] = msg[3] = msg[4] = 0;
err = hw->mbx.ops.write_posted(hw, msg, 5, 0);
if (!err)
err = hw->mbx.ops.read_posted(hw, msg, 5, 0);
if (!err) {
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
/*
* if we didn't get an ACK there must have been
* some sort of mailbox error so we should treat it
* as such
*/
if (msg[0] != (IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK))
return IXGBE_ERR_MBX;
/* record and validate values from message */
hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
if (hw->mac.max_tx_queues == 0 ||
hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
if (hw->mac.max_rx_queues == 0 ||
hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
*num_tcs = msg[IXGBE_VF_TRANS_VLAN];
/* in case of unknown state assume we cannot tag frames */
if (*num_tcs > hw->mac.max_rx_queues)
*num_tcs = 1;
*default_tc = msg[IXGBE_VF_DEF_QUEUE];
/* default to queue 0 on out-of-bounds queue number */
if (*default_tc >= hw->mac.max_tx_queues)
*default_tc = 0;
}
return err;
}
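
[Editor's note: a minimal caller sketch (not part of this diff; locals assumed):

	unsigned int num_tcs, default_tc;

	if (ixgbevf_get_queues(hw, &num_tcs, &default_tc) == 0) {
		/* hw->mac.max_tx_queues / max_rx_queues now hold PF limits */
	}
]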

(next file)

@@ -7,9 +7,8 @@ SRCS = device_if.h bus_if.h pci_if.h
SRCS += opt_inet.h opt_inet6.h opt_rss.h
SRCS += if_ixv.c ix_txrx.c
# Shared source
-SRCS += ixgbe_common.c ixgbe_api.c ixgbe_phy.c ixgbe_mbx.c ixgbe_vf.c
-SRCS += ixgbe_dcb.c ixgbe_dcb_82598.c ixgbe_dcb_82599.c
-SRCS += ixgbe_82598.c ixgbe_82599.c ixgbe_x540.c ixgbe_x550.c
+SRCS += ixgbe_common.c ixgbe_api.c ixgbe_phy.c
+SRCS += ixgbe_dcb.c ixgbe_mbx.c ixgbe_vf.c
CFLAGS+= -I${.CURDIR}/../../dev/ixgbe -DSMP
.include <bsd.kmod.mk>
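
[Editor's note: assuming the usual FreeBSD kmod workflow (module directory and name assumed from this Makefile), the VF driver would be built and loaded with something like:

	make -C sys/modules/ixv
	kldload ./if_ixv.ko
]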