cxgbe(4): netmap support for Terminator 5 (T5) based 10G/40G cards.

Netmap gets its own hardware-assisted virtual interface and won't take
over or disrupt the "normal" interface in any way.  You can use both
simultaneously.

For kernels with DEV_NETMAP, cxgbe(4) carves out an ncxl<N> interface
(note the 'n' prefix) in the hardware to accompany each cxl<N>
interface.  These two ifnets per port share the same wire but really
are separate interfaces in the hardware and software.  Each gets its own
L2 MAC addresses (unicast and multicast), MTU, checksum capabilities,
etc.  You should run netmap on the 'n' interfaces only; that's what they
are for.
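
To give a sense of the userland side, here is a minimal, hypothetical
sketch of a netmap client of the new interface (not part of this commit;
it uses the nm_open()/nm_nextpkt() convenience API from
<net/netmap_user.h>, the same nm_open() that appears in the pkt-gen log
below):

#define NETMAP_WITH_LIBS
#include <net/netmap_user.h>
#include <sys/types.h>
#include <poll.h>
#include <stdio.h>

int
main(void)
{
	struct nm_desc *d;
	struct pollfd pfd;
	struct nm_pkthdr h;
	u_char *buf;

	d = nm_open("netmap:ncxl0", NULL, 0, NULL);	/* the 'n' ifnet */
	if (d == NULL)
		return (1);
	pfd.fd = d->fd;
	pfd.events = POLLIN;
	for (;;) {	/* rx until interrupted */
		poll(&pfd, 1, -1);
		while ((buf = nm_nextpkt(d, &h)) != NULL)
			printf("rx %u bytes at %p\n", h.len, (void *)buf);
	}
	/* not reached; nm_close(d) would release the rings */
	return (0);
}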

With this, pkt-gen is able to transmit > 45Mpps out of a single 40G port
of a T580 card.  Two-port tx is at ~56Mpps total (28M + 28M) as of now.
Single-port receive is at 33Mpps, but this is very much a work in
progress; I expect it to be closer to 40Mpps once done.  In any case,
the current effort can already saturate multiple 10G ports of a T5 card
at the smallest legal packet size.  T4 gear is totally untested.

trantor:~# ./pkt-gen -i ncxl0 -f tx -D 00:07:43:ab:cd:ef
881.952141 main [1621] interface is ncxl0
881.952250 extract_ip_range [275] range is 10.0.0.1:0 to 10.0.0.1:0
881.952253 extract_ip_range [275] range is 10.1.0.1:0 to 10.1.0.1:0
881.962540 main [1804] mapped 334980KB at 0x801dff000
Sending on netmap:ncxl0: 4 queues, 1 threads and 1 cpus.
10.0.0.1 -> 10.1.0.1 (00:00:00:00:00:00 -> 00:07:43:ab:cd:ef)
881.962562 main [1882] Sending 512 packets every  0.000000000 s
881.962563 main [1884] Wait 2 secs for phy reset
884.088516 main [1886] Ready...
884.088535 nm_open [457] overriding ifname ncxl0 ringid 0x0 flags 0x1
884.088607 sender_body [996] start
884.093246 sender_body [1064] drop copy
885.090435 main_thread [1418] 45206353 pps (45289533 pkts in 1001840 usec)
886.091600 main_thread [1418] 45322792 pps (45375593 pkts in 1001165 usec)
887.092435 main_thread [1418] 45313992 pps (45351784 pkts in 1000834 usec)
888.094434 main_thread [1418] 45315765 pps (45406397 pkts in 1002000 usec)
889.095434 main_thread [1418] 45333218 pps (45378551 pkts in 1001000 usec)
890.097434 main_thread [1418] 45315247 pps (45405877 pkts in 1002000 usec)
891.099434 main_thread [1418] 45326515 pps (45417168 pkts in 1002000 usec)
892.101434 main_thread [1418] 45333039 pps (45423705 pkts in 1002000 usec)
893.103434 main_thread [1418] 45324105 pps (45414708 pkts in 1001999 usec)
894.105434 main_thread [1418] 45318042 pps (45408723 pkts in 1002001 usec)
895.106434 main_thread [1418] 45332430 pps (45377762 pkts in 1001000 usec)
896.107434 main_thread [1418] 45338072 pps (45383410 pkts in 1001000 usec)
...

Relnotes:	Yes
Sponsored by:	Chelsio Communications.
Navdeep Parhar 2014-05-27 18:18:41 +00:00
parent e23dd3b2d2
commit 298d969c53
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=266757
8 changed files with 1905 additions and 261 deletions

sys/conf/files

@ -1127,6 +1127,8 @@ dev/cxgb/cxgb_t3fw.c optional cxgb cxgb_t3fw \
compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgbe/t4_main.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_netmap.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_sge.c optional cxgbe pci \
compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cxgbe/t4_l2t.c optional cxgbe pci \

sys/dev/cxgbe/adapter.h

@ -54,6 +54,7 @@
#include <netinet/tcp_lro.h>
#include "offload.h"
#include "common/t4_msg.h"
#include "firmware/t4fw_interface.h"
MALLOC_DECLARE(M_CXGBE);
@ -131,6 +132,7 @@ enum {
RX_IQ_ESIZE = 64, /* At least 64 so CPL_RX_PKT will fit */
EQ_ESIZE = 64, /* All egress queues use this entry size */
SGE_MAX_WR_NDESC = SGE_MAX_WR_LEN / EQ_ESIZE, /* max WR size in desc */
RX_FL_ESIZE = EQ_ESIZE, /* 8 64bit addresses */
#if MJUMPAGESIZE != MCLBYTES
@ -154,6 +156,17 @@ enum {
INTR_MSIX = (1 << 2)
};
enum {
XGMAC_MTU = (1 << 0),
XGMAC_PROMISC = (1 << 1),
XGMAC_ALLMULTI = (1 << 2),
XGMAC_VLANEX = (1 << 3),
XGMAC_UCADDR = (1 << 4),
XGMAC_MCADDRS = (1 << 5),
XGMAC_ALL = 0xffff
};
enum {
/* flags understood by begin_synchronized_op */
HOLD_LOCK = (1 << 0),
@ -168,7 +181,7 @@ enum {
/* adapter flags */
FULL_INIT_DONE = (1 << 0),
FW_OK = (1 << 1),
INTR_DIRECT = (1 << 2), /* direct interrupts for everything */
/* INTR_DIRECT = (1 << 2), No longer used. */
MASTER_PF = (1 << 3),
ADAP_SYSCTL_CTX = (1 << 4),
TOM_INIT_DONE = (1 << 5),
@ -181,6 +194,10 @@ enum {
PORT_INIT_DONE = (1 << 1),
PORT_SYSCTL_CTX = (1 << 2),
HAS_TRACEQ = (1 << 3),
INTR_RXQ = (1 << 4), /* All NIC rxq's take interrupts */
INTR_OFLD_RXQ = (1 << 5), /* All TOE rxq's take interrupts */
INTR_NM_RXQ = (1 << 6), /* All netmap rxq's take interrupts */
INTR_ALL = (INTR_RXQ | INTR_OFLD_RXQ | INTR_NM_RXQ),
};
#define IS_DOOMED(pi) ((pi)->flags & DOOMED)
@ -224,6 +241,19 @@ struct port_info {
int first_ofld_txq; /* index of first offload tx queue */
int nofldrxq; /* # of offload rx queues */
int first_ofld_rxq; /* index of first offload rx queue */
#endif
#ifdef DEV_NETMAP
int nnmtxq; /* # of netmap tx queues */
int first_nm_txq; /* index of first netmap tx queue */
int nnmrxq; /* # of netmap rx queues */
int first_nm_rxq; /* index of first netmap rx queue */
struct ifnet *nm_ifp;
struct ifmedia nm_media;
int nmif_flags;
uint16_t nm_viid;
int16_t nm_xact_addr_filt;
uint16_t nm_rss_size; /* size of netmap VI's RSS table slice */
#endif
int tmr_idx;
int pktc_idx;
@ -362,7 +392,7 @@ struct sge_eq {
struct tx_desc *desc; /* KVA of descriptor ring */
bus_addr_t ba; /* bus address of descriptor ring */
struct sge_qstat *spg; /* status page, for convenience */
int doorbells;
uint16_t doorbells;
volatile uint32_t *udb; /* KVA of doorbell (lies within BAR2) */
u_int udb_qid; /* relative qid within the doorbell page */
uint16_t cap; /* max # of desc, for convenience */
@ -538,6 +568,77 @@ struct sge_wrq {
uint32_t no_desc; /* out of hardware descriptors */
} __aligned(CACHE_LINE_SIZE);
#ifdef DEV_NETMAP
#define CPL_PAD (RX_IQ_ESIZE - sizeof(struct rsp_ctrl) - \
sizeof(struct rss_header))
struct nm_iq_desc {
struct rss_header rss;
union {
uint8_t cpl[CPL_PAD];
struct cpl_fw6_msg fw6_msg;
struct cpl_rx_pkt rx_pkt;
} u;
struct rsp_ctrl rsp;
};
CTASSERT(sizeof(struct nm_iq_desc) == RX_IQ_ESIZE);
struct sge_nm_rxq {
struct port_info *pi;
struct nm_iq_desc *iq_desc;
uint16_t iq_abs_id;
uint16_t iq_cntxt_id;
uint16_t iq_cidx;
uint16_t iq_sidx;
uint8_t iq_gen;
__be64 *fl_desc;
uint16_t fl_cntxt_id;
uint32_t fl_cidx;
uint32_t fl_pidx;
uint32_t fl_sidx;
uint32_t fl_db_val;
u_int fl_hwidx:4;
u_int nid; /* netmap ring # for this queue */
/* infrequently used items after this */
bus_dma_tag_t iq_desc_tag;
bus_dmamap_t iq_desc_map;
bus_addr_t iq_ba;
int intr_idx;
bus_dma_tag_t fl_desc_tag;
bus_dmamap_t fl_desc_map;
bus_addr_t fl_ba;
} __aligned(CACHE_LINE_SIZE);
struct sge_nm_txq {
struct tx_desc *desc;
uint16_t cidx;
uint16_t pidx;
uint16_t sidx;
uint16_t equiqidx; /* EQUIQ last requested at this pidx */
uint16_t equeqidx; /* EQUEQ last requested at this pidx */
uint16_t dbidx; /* pidx of the most recent doorbell */
uint16_t doorbells;
volatile uint32_t *udb;
u_int udb_qid;
u_int cntxt_id;
__be32 cpl_ctrl0; /* for convenience */
u_int nid; /* netmap ring # for this queue */
/* infrequently used items after this */
bus_dma_tag_t desc_tag;
bus_dmamap_t desc_map;
bus_addr_t ba;
int iqidx;
} __aligned(CACHE_LINE_SIZE);
#endif
struct sge {
int timer_val[SGE_NTIMERS];
int counter_val[SGE_NCOUNTERS];
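
All the tx bookkeeping above is index arithmetic on cidx/pidx modulo the
ring size sidx.  A hypothetical helper (illustration only, not in this
commit) that computes the number of free descriptors, wraparound
included:

static inline int
nm_txq_avail(const struct sge_nm_txq *nm_txq)
{
	int n = nm_txq->cidx - nm_txq->pidx - 1;	/* keep one slot open */

	if (n < 0)
		n += nm_txq->sidx;	/* pidx has wrapped past cidx */
	return (n);
}
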
@ -551,6 +652,10 @@ struct sge {
#ifdef TCP_OFFLOAD
int nofldrxq; /* total # of TOE rx queues */
int nofldtxq; /* total # of TOE tx queues */
#endif
#ifdef DEV_NETMAP
int nnmrxq; /* total # of netmap rx queues */
int nnmtxq; /* total # of netmap tx queues */
#endif
int niq; /* total # of ingress queues */
int neq; /* total # of egress queues */
@ -564,6 +669,10 @@ struct sge {
struct sge_wrq *ofld_txq; /* TOE tx queues */
struct sge_ofld_rxq *ofld_rxq; /* TOE rx queues */
#endif
#ifdef DEV_NETMAP
struct sge_nm_txq *nm_txq; /* netmap tx queues */
struct sge_nm_rxq *nm_rxq; /* netmap rx queues */
#endif
uint16_t iq_start;
int eq_start;
@ -629,7 +738,7 @@ struct adapter {
struct l2t_data *l2t; /* L2 table */
struct tid_info tids;
int doorbells;
uint16_t doorbells;
int open_device_map;
#ifdef TCP_OFFLOAD
int offload_map;
@ -730,6 +839,12 @@ struct adapter {
#define for_each_ofld_rxq(pi, iter, q) \
for (q = &pi->adapter->sge.ofld_rxq[pi->first_ofld_rxq], iter = 0; \
iter < pi->nofldrxq; ++iter, ++q)
#define for_each_nm_txq(pi, iter, q) \
for (q = &pi->adapter->sge.nm_txq[pi->first_nm_txq], iter = 0; \
iter < pi->nnmtxq; ++iter, ++q)
#define for_each_nm_rxq(pi, iter, q) \
for (q = &pi->adapter->sge.nm_rxq[pi->first_nm_rxq], iter = 0; \
iter < pi->nnmrxq; ++iter, ++q)
/* One for errors, one for firmware events */
#define T4_EXTRA_INTR 2
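
The new iterators follow the same pattern as for_each_rxq and
for_each_ofld_rxq.  A hypothetical use (illustration only, assuming the
usual adapter.h context):

#ifdef DEV_NETMAP
/* Hypothetical: dump a port's netmap rx queues. */
static void
dump_nm_rxqs(struct port_info *pi)
{
	struct sge_nm_rxq *nm_rxq;
	int i;

	for_each_nm_rxq(pi, i, nm_rxq) {
		/* nm_rxq == &pi->adapter->sge.nm_rxq[pi->first_nm_rxq + i] */
		printf("%s: netmap rxq %d, abs_id %u\n",
		    device_get_nameunit(pi->dev), i, nm_rxq->iq_abs_id);
	}
}
#endif
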
@ -854,6 +969,18 @@ int t4_register_fw_msg_handler(struct adapter *, int, fw_msg_handler_t);
int t4_filter_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);
int begin_synchronized_op(struct adapter *, struct port_info *, int, char *);
void end_synchronized_op(struct adapter *, int);
int update_mac_settings(struct ifnet *, int);
int adapter_full_init(struct adapter *);
int adapter_full_uninit(struct adapter *);
int port_full_init(struct port_info *);
int port_full_uninit(struct port_info *);
#ifdef DEV_NETMAP
/* t4_netmap.c */
int create_netmap_ifnet(struct port_info *);
int destroy_netmap_ifnet(struct port_info *);
void t4_nm_intr(void *);
#endif
/* t4_sge.c */
void t4_sge_modload(void);

sys/dev/cxgbe/common/common.h

@ -561,11 +561,11 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int exactf, unsigned int rcaps, unsigned int wxcaps);
int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
unsigned int port, unsigned int pf, unsigned int vf,
unsigned int nmac, u8 *mac, unsigned int *rss_size,
unsigned int nmac, u8 *mac, u16 *rss_size,
unsigned int portfunc, unsigned int idstype);
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
unsigned int *rss_size);
u16 *rss_size);
int t4_free_vi(struct adapter *adap, unsigned int mbox,
unsigned int pf, unsigned int vf,
unsigned int viid);
@ -581,6 +581,8 @@ int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
bool ucast, u64 vec, bool sleep_ok);
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
bool rx_en, bool tx_en);
int t4_enable_vi_ns(struct adapter *adap, unsigned int mbox, unsigned int viid,
bool rx_en, bool tx_en);
int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
unsigned int nblinks);
int t4_i2c_rd(struct adapter *adap, unsigned int mbox, unsigned int port_id,

sys/dev/cxgbe/common/t4_hw.c

@ -4826,7 +4826,7 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
*/
int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
unsigned int port, unsigned int pf, unsigned int vf,
unsigned int nmac, u8 *mac, unsigned int *rss_size,
unsigned int nmac, u8 *mac, u16 *rss_size,
unsigned int portfunc, unsigned int idstype)
{
int ret;
@ -4881,7 +4881,7 @@ int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
*/
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
unsigned int *rss_size)
u16 *rss_size)
{
return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
FW_VI_FUNC_ETH, 0);
@ -5155,6 +5155,19 @@ int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
int t4_enable_vi_ns(struct adapter *adap, unsigned int mbox, unsigned int viid,
bool rx_en, bool tx_en)
{
struct fw_vi_enable_cmd c;
memset(&c, 0, sizeof(c));
c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
c.ien_to_len16 = htonl(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
V_FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
}
/**
* t4_identify_port - identify a VI's port by blinking its LED
* @adap: the adapter
@ -5623,7 +5636,7 @@ int __devinit t4_port_init(struct port_info *p, int mbox, int pf, int vf)
u8 addr[6];
int ret, i, j;
struct fw_port_cmd c;
unsigned int rss_size;
u16 rss_size;
adapter_t *adap = p->adapter;
memset(&c, 0, sizeof(c));
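
t4_enable_vi_ns() sends the same FW_VI_ENABLE_CMD as t4_enable_vi() but
through t4_wr_mbox_ns(), the non-sleeping mailbox path, so it should be
usable where sleeping is not allowed (netmap's ifnet on/off path, for
instance).  A hypothetical caller, following the driver's rc = -t4_xxx()
convention:

/* Hypothetical illustration: enable the netmap VI without sleeping. */
static int
nm_vi_enable(struct adapter *sc, struct port_info *pi)
{

	return (-t4_enable_vi_ns(sc, sc->mbox, pi->nm_viid, true, true));
}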

sys/dev/cxgbe/t4_main.c

@ -218,6 +218,24 @@ static int t4_nofldrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
#endif
#ifdef DEV_NETMAP
#define NNMTXQ_10G 2
static int t4_nnmtxq10g = -1;
TUNABLE_INT("hw.cxgbe.nnmtxq10g", &t4_nnmtxq10g);
#define NNMRXQ_10G 2
static int t4_nnmrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nnmrxq10g", &t4_nnmrxq10g);
#define NNMTXQ_1G 1
static int t4_nnmtxq1g = -1;
TUNABLE_INT("hw.cxgbe.nnmtxq1g", &t4_nnmtxq1g);
#define NNMRXQ_1G 1
static int t4_nnmrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nnmrxq1g", &t4_nnmrxq1g);
#endif
/*
* Holdoff parameters for 10G and 1G ports.
*/
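
(These are ordinary loader tunables, so a hypothetical /boot/loader.conf
entry such as hw.cxgbe.nnmrxq10g="4" would override the -1 default;
otherwise tweak_tunables(), further down, picks an automatic value.)
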
@ -295,19 +313,26 @@ static int t5_write_combine = 0;
TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);
struct intrs_and_queues {
int intr_type; /* INTx, MSI, or MSI-X */
int nirq; /* Number of vectors */
int intr_flags;
int ntxq10g; /* # of NIC txq's for each 10G port */
int nrxq10g; /* # of NIC rxq's for each 10G port */
int ntxq1g; /* # of NIC txq's for each 1G port */
int nrxq1g; /* # of NIC rxq's for each 1G port */
int rsrv_noflowq; /* Flag whether to reserve queue 0 */
uint16_t intr_type; /* INTx, MSI, or MSI-X */
uint16_t nirq; /* Total # of vectors */
uint16_t intr_flags_10g;/* Interrupt flags for each 10G port */
uint16_t intr_flags_1g; /* Interrupt flags for each 1G port */
uint16_t ntxq10g; /* # of NIC txq's for each 10G port */
uint16_t nrxq10g; /* # of NIC rxq's for each 10G port */
uint16_t ntxq1g; /* # of NIC txq's for each 1G port */
uint16_t nrxq1g; /* # of NIC rxq's for each 1G port */
uint16_t rsrv_noflowq; /* Flag whether to reserve queue 0 */
#ifdef TCP_OFFLOAD
int nofldtxq10g; /* # of TOE txq's for each 10G port */
int nofldrxq10g; /* # of TOE rxq's for each 10G port */
int nofldtxq1g; /* # of TOE txq's for each 1G port */
int nofldrxq1g; /* # of TOE rxq's for each 1G port */
uint16_t nofldtxq10g; /* # of TOE txq's for each 10G port */
uint16_t nofldrxq10g; /* # of TOE rxq's for each 10G port */
uint16_t nofldtxq1g; /* # of TOE txq's for each 1G port */
uint16_t nofldrxq1g; /* # of TOE rxq's for each 1G port */
#endif
#ifdef DEV_NETMAP
uint16_t nnmtxq10g; /* # of netmap txq's for each 10G port */
uint16_t nnmrxq10g; /* # of netmap rxq's for each 10G port */
uint16_t nnmtxq1g; /* # of netmap txq's for each 1G port */
uint16_t nnmrxq1g; /* # of netmap rxq's for each 1G port */
#endif
};
@ -321,17 +346,6 @@ struct filter_entry {
struct t4_filter_specification fs;
};
enum {
XGMAC_MTU = (1 << 0),
XGMAC_PROMISC = (1 << 1),
XGMAC_ALLMULTI = (1 << 2),
XGMAC_VLANEX = (1 << 3),
XGMAC_UCADDR = (1 << 4),
XGMAC_MCADDRS = (1 << 5),
XGMAC_ALL = 0xffff
};
static int map_bars_0_and_4(struct adapter *);
static int map_bar_2(struct adapter *);
static void setup_memwin(struct adapter *);
@ -350,15 +364,10 @@ static int get_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
static void build_medialist(struct port_info *);
static int update_mac_settings(struct port_info *, int);
static void build_medialist(struct port_info *, struct ifmedia *);
static int cxgbe_init_synchronized(struct port_info *);
static int cxgbe_uninit_synchronized(struct port_info *);
static int setup_intr_handlers(struct adapter *);
static int adapter_full_init(struct adapter *);
static int adapter_full_uninit(struct adapter *);
static int port_full_init(struct port_info *);
static int port_full_uninit(struct port_info *);
static void quiesce_eq(struct adapter *, struct sge_eq *);
static void quiesce_iq(struct adapter *, struct sge_iq *);
static void quiesce_fl(struct adapter *, struct sge_fl *);
@ -555,6 +564,9 @@ t4_attach(device_t dev)
struct sge *s;
#ifdef TCP_OFFLOAD
int ofld_rqidx, ofld_tqidx;
#endif
#ifdef DEV_NETMAP
int nm_rqidx, nm_tqidx;
#endif
const char *pcie_ts;
@ -685,6 +697,13 @@ t4_attach(device_t dev)
sc->port[i] = NULL;
goto done;
}
rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
if (rc != 0) {
device_printf(dev, "port %d l1cfg failed: %d\n", i, rc);
free(pi, M_CXGBE);
sc->port[i] = NULL;
goto done;
}
snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
device_get_nameunit(dev), i);
@ -726,7 +745,6 @@ t4_attach(device_t dev)
sc->intr_type = iaq.intr_type;
sc->intr_count = iaq.nirq;
sc->flags |= iaq.intr_flags;
s = &sc->sge;
s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
@ -734,10 +752,8 @@ t4_attach(device_t dev)
s->neq = s->ntxq + s->nrxq; /* the free list in an rxq is an eq */
s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */
#ifdef TCP_OFFLOAD
if (is_offload(sc)) {
s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
s->neq += s->nofldtxq + s->nofldrxq;
@ -749,6 +765,17 @@ t4_attach(device_t dev)
M_CXGBE, M_ZERO | M_WAITOK);
}
#endif
#ifdef DEV_NETMAP
s->nnmrxq = n10g * iaq.nnmrxq10g + n1g * iaq.nnmrxq1g;
s->nnmtxq = n10g * iaq.nnmtxq10g + n1g * iaq.nnmtxq1g;
s->neq += s->nnmtxq + s->nnmrxq;
s->niq += s->nnmrxq;
s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
M_CXGBE, M_ZERO | M_WAITOK);
s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
M_CXGBE, M_ZERO | M_WAITOK);
#endif
s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
M_ZERO | M_WAITOK);
@ -773,6 +800,9 @@ t4_attach(device_t dev)
rqidx = tqidx = 0;
#ifdef TCP_OFFLOAD
ofld_rqidx = ofld_tqidx = 0;
#endif
#ifdef DEV_NETMAP
nm_rqidx = nm_tqidx = 0;
#endif
for_each_port(sc, i) {
struct port_info *pi = sc->port[i];
@ -783,9 +813,11 @@ t4_attach(device_t dev)
pi->first_rxq = rqidx;
pi->first_txq = tqidx;
if (is_10G_port(pi) || is_40G_port(pi)) {
pi->flags |= iaq.intr_flags_10g;
pi->nrxq = iaq.nrxq10g;
pi->ntxq = iaq.ntxq10g;
} else {
pi->flags |= iaq.intr_flags_1g;
pi->nrxq = iaq.nrxq1g;
pi->ntxq = iaq.ntxq1g;
}
@ -797,7 +829,6 @@ t4_attach(device_t dev)
rqidx += pi->nrxq;
tqidx += pi->ntxq;
#ifdef TCP_OFFLOAD
if (is_offload(sc)) {
pi->first_ofld_rxq = ofld_rqidx;
@ -812,6 +843,19 @@ t4_attach(device_t dev)
ofld_rqidx += pi->nofldrxq;
ofld_tqidx += pi->nofldtxq;
}
#endif
#ifdef DEV_NETMAP
pi->first_nm_rxq = nm_rqidx;
pi->first_nm_txq = nm_tqidx;
if (is_10G_port(pi) || is_40G_port(pi)) {
pi->nnmrxq = iaq.nnmrxq10g;
pi->nnmtxq = iaq.nnmtxq10g;
} else {
pi->nnmrxq = iaq.nnmrxq1g;
pi->nnmtxq = iaq.nnmtxq1g;
}
nm_rqidx += pi->nnmrxq;
nm_tqidx += pi->nnmtxq;
#endif
}
@ -902,7 +946,7 @@ t4_detach(device_t dev)
for (i = 0; i < MAX_NPORTS; i++) {
pi = sc->port[i];
if (pi) {
t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->viid);
if (pi->dev)
device_delete_child(dev, pi->dev);
@ -938,6 +982,10 @@ t4_detach(device_t dev)
#ifdef TCP_OFFLOAD
free(sc->sge.ofld_rxq, M_CXGBE);
free(sc->sge.ofld_txq, M_CXGBE);
#endif
#ifdef DEV_NETMAP
free(sc->sge.nm_rxq, M_CXGBE);
free(sc->sge.nm_txq, M_CXGBE);
#endif
free(sc->irq, M_CXGBE);
free(sc->sge.rxq, M_CXGBE);
@ -966,7 +1014,6 @@ t4_detach(device_t dev)
return (0);
}
static int
cxgbe_probe(device_t dev)
{
@ -989,6 +1036,8 @@ cxgbe_attach(device_t dev)
{
struct port_info *pi = device_get_softc(dev);
struct ifnet *ifp;
char *s;
int n, o;
/* Allocate an ifnet and set it up */
ifp = if_alloc(IFT_ETHER);
@ -1021,22 +1070,39 @@ cxgbe_attach(device_t dev)
/* Initialize ifmedia for this port */
ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
cxgbe_media_status);
build_medialist(pi);
build_medialist(pi, &pi->media);
pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
EVENTHANDLER_PRI_ANY);
ether_ifattach(ifp, pi->hw_addr);
n = 128;
s = malloc(n, M_CXGBE, M_WAITOK);
o = snprintf(s, n, "%d txq, %d rxq (NIC)", pi->ntxq, pi->nrxq);
MPASS(n > o);
#ifdef TCP_OFFLOAD
if (is_offload(pi->adapter)) {
device_printf(dev,
"%d txq, %d rxq (NIC); %d txq, %d rxq (TOE)\n",
pi->ntxq, pi->nrxq, pi->nofldtxq, pi->nofldrxq);
} else
o += snprintf(s + o, n - o, "; %d txq, %d rxq (TOE)",
pi->nofldtxq, pi->nofldrxq);
MPASS(n > o);
}
#endif
device_printf(dev, "%d txq, %d rxq\n", pi->ntxq, pi->nrxq);
#ifdef DEV_NETMAP
o += snprintf(s + o, n - o, "; %d txq, %d rxq (netmap)", pi->nnmtxq,
pi->nnmrxq);
MPASS(n > o);
#endif
device_printf(dev, "%s\n", s);
free(s, M_CXGBE);
#ifdef DEV_NETMAP
/* nm_media handled here to keep implementation private to this file */
ifmedia_init(&pi->nm_media, IFM_IMASK, cxgbe_media_change,
cxgbe_media_status);
build_medialist(pi, &pi->nm_media);
create_netmap_ifnet(pi); /* logs errors if something fails */
#endif
cxgbe_sysctls(pi);
return (0);
@ -1084,6 +1150,11 @@ cxgbe_detach(device_t dev)
ether_ifdetach(pi->ifp);
if_free(pi->ifp);
#ifdef DEV_NETMAP
/* XXXNM: equivalent of cxgbe_uninit_synchronized to ifdown nm_ifp */
destroy_netmap_ifnet(pi);
#endif
ADAPTER_LOCK(sc);
CLR_BUSY(sc);
wakeup(&sc->flags);
@ -1126,7 +1197,7 @@ cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
if (pi->flags & PORT_INIT_DONE) {
t4_update_fl_bufsize(ifp);
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
rc = update_mac_settings(pi, XGMAC_MTU);
rc = update_mac_settings(ifp, XGMAC_MTU);
}
end_synchronized_op(sc, 0);
break;
@ -1141,7 +1212,7 @@ cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
flags = pi->if_flags;
if ((ifp->if_flags ^ flags) &
(IFF_PROMISC | IFF_ALLMULTI)) {
rc = update_mac_settings(pi,
rc = update_mac_settings(ifp,
XGMAC_PROMISC | XGMAC_ALLMULTI);
}
} else
@ -1152,13 +1223,13 @@ cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
end_synchronized_op(sc, 0);
break;
case SIOCADDMULTI:
case SIOCADDMULTI:
case SIOCDELMULTI: /* these two are called with a mutex held :-( */
rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
if (rc)
return (rc);
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
rc = update_mac_settings(pi, XGMAC_MCADDRS);
rc = update_mac_settings(ifp, XGMAC_MCADDRS);
end_synchronized_op(sc, LOCK_HELD);
break;
@ -1247,7 +1318,7 @@ cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
if (mask & IFCAP_VLAN_HWTAGGING) {
ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
rc = update_mac_settings(pi, XGMAC_VLANEX);
rc = update_mac_settings(ifp, XGMAC_VLANEX);
}
if (mask & IFCAP_VLAN_MTU) {
ifp->if_capenable ^= IFCAP_VLAN_MTU;
@ -1382,13 +1453,23 @@ static void
cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
struct port_info *pi = ifp->if_softc;
struct ifmedia_entry *cur = pi->media.ifm_cur;
struct ifmedia *media = NULL;
struct ifmedia_entry *cur;
int speed = pi->link_cfg.speed;
int data = (pi->port_type << 8) | pi->mod_type;
if (ifp == pi->ifp)
media = &pi->media;
#ifdef DEV_NETMAP
else if (ifp == pi->nm_ifp)
media = &pi->nm_media;
#endif
MPASS(media != NULL);
cur = media->ifm_cur;
if (cur->ifm_data != data) {
build_medialist(pi);
cur = pi->media.ifm_cur;
build_medialist(pi, media);
cur = media->ifm_cur;
}
ifmr->ifm_status = IFM_AVALID;
@ -1741,6 +1822,7 @@ cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
{
int rc, itype, navail, nrxq10g, nrxq1g, n;
int nofldrxq10g = 0, nofldrxq1g = 0;
int nnmrxq10g = 0, nnmrxq1g = 0;
bzero(iaq, sizeof(*iaq));
@ -1757,6 +1839,12 @@ cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
}
#endif
#ifdef DEV_NETMAP
iaq->nnmtxq10g = t4_nnmtxq10g;
iaq->nnmtxq1g = t4_nnmtxq1g;
iaq->nnmrxq10g = nnmrxq10g = t4_nnmrxq10g;
iaq->nnmrxq1g = nnmrxq1g = t4_nnmrxq1g;
#endif
for (itype = INTR_MSIX; itype; itype >>= 1) {
@ -1774,30 +1862,60 @@ cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
continue;
iaq->intr_type = itype;
iaq->intr_flags = 0;
iaq->intr_flags_10g = 0;
iaq->intr_flags_1g = 0;
/*
* Best option: an interrupt vector for errors, one for the
* firmware event queue, and one each for each rxq (NIC as well
* as offload).
* firmware event queue, and one for every rxq (NIC, TOE, and
* netmap).
*/
iaq->nirq = T4_EXTRA_INTR;
iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
iaq->nirq += n10g * (nrxq10g + nofldrxq10g + nnmrxq10g);
iaq->nirq += n1g * (nrxq1g + nofldrxq1g + nnmrxq1g);
if (iaq->nirq <= navail &&
(itype != INTR_MSI || powerof2(iaq->nirq))) {
iaq->intr_flags |= INTR_DIRECT;
iaq->intr_flags_10g = INTR_ALL;
iaq->intr_flags_1g = INTR_ALL;
goto allocate;
}
/*
* Second best option: an interrupt vector for errors, one for
* the firmware event queue, and one each for either NIC or
* offload rxq's.
* Second best option: a vector for errors, one for the firmware
* event queue, and vectors for either all the NIC rx queues or
* all the TOE rx queues. The queues that don't get vectors
* will forward their interrupts to those that do.
*
* Note: netmap rx queues cannot be created early and so they
* can't be setup to receive forwarded interrupts for others.
*/
iaq->nirq = T4_EXTRA_INTR;
iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
if (nrxq10g >= nofldrxq10g) {
iaq->intr_flags_10g = INTR_RXQ;
iaq->nirq += n10g * nrxq10g;
#ifdef DEV_NETMAP
iaq->nnmrxq10g = min(nnmrxq10g, nrxq10g);
#endif
} else {
iaq->intr_flags_10g = INTR_OFLD_RXQ;
iaq->nirq += n10g * nofldrxq10g;
#ifdef DEV_NETMAP
iaq->nnmrxq10g = min(nnmrxq10g, nofldrxq10g);
#endif
}
if (nrxq1g >= nofldrxq1g) {
iaq->intr_flags_1g = INTR_RXQ;
iaq->nirq += n1g * nrxq1g;
#ifdef DEV_NETMAP
iaq->nnmrxq1g = min(nnmrxq1g, nrxq1g);
#endif
} else {
iaq->intr_flags_1g = INTR_OFLD_RXQ;
iaq->nirq += n1g * nofldrxq1g;
#ifdef DEV_NETMAP
iaq->nnmrxq1g = min(nnmrxq1g, nofldrxq1g);
#endif
}
if (iaq->nirq <= navail &&
(itype != INTR_MSI || powerof2(iaq->nirq)))
goto allocate;
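
A worked example with hypothetical counts: a 2-port 10G card with
nrxq10g = 4, nofldrxq10g = 2, and nnmrxq10g = 2 would need
T4_EXTRA_INTR + 2 * (4 + 2 + 2) = 18 vectors for the best option.  If
that doesn't fit, the second-best option asks for 2 + 2 * 4 = 10 vectors
(one per NIC rxq), clamps nnmrxq10g to min(2, 4) = 2, and leaves the TOE
rxq's to forward their interrupts.
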
@ -1805,8 +1923,8 @@ cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
/*
* Next best option: an interrupt vector for errors, one for the
* firmware event queue, and at least one per port. At this
* point we know we'll have to downsize nrxq or nofldrxq to fit
* what's available to us.
* point we know we'll have to downsize nrxq and/or nofldrxq
* and/or nnmrxq to fit what's available to us.
*/
iaq->nirq = T4_EXTRA_INTR;
iaq->nirq += n10g + n1g;
@ -1816,6 +1934,9 @@ cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
if (n10g > 0) {
int target = max(nrxq10g, nofldrxq10g);
iaq->intr_flags_10g = nrxq10g >= nofldrxq10g ?
INTR_RXQ : INTR_OFLD_RXQ;
n = 1;
while (n < target && leftover >= n10g) {
leftover -= n10g;
@ -1824,14 +1945,19 @@ cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
}
iaq->nrxq10g = min(n, nrxq10g);
#ifdef TCP_OFFLOAD
if (is_offload(sc))
iaq->nofldrxq10g = min(n, nofldrxq10g);
iaq->nofldrxq10g = min(n, nofldrxq10g);
#endif
#ifdef DEV_NETMAP
iaq->nnmrxq10g = min(n, nnmrxq10g);
#endif
}
if (n1g > 0) {
int target = max(nrxq1g, nofldrxq1g);
iaq->intr_flags_1g = nrxq1g >= nofldrxq1g ?
INTR_RXQ : INTR_OFLD_RXQ;
n = 1;
while (n < target && leftover >= n1g) {
leftover -= n1g;
@ -1840,8 +1966,10 @@ cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
}
iaq->nrxq1g = min(n, nrxq1g);
#ifdef TCP_OFFLOAD
if (is_offload(sc))
iaq->nofldrxq1g = min(n, nofldrxq1g);
iaq->nofldrxq1g = min(n, nofldrxq1g);
#endif
#ifdef DEV_NETMAP
iaq->nnmrxq1g = min(n, nnmrxq1g);
#endif
}
@ -1853,10 +1981,14 @@ cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
* Least desirable option: one interrupt vector for everything.
*/
iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
iaq->intr_flags_10g = iaq->intr_flags_1g = 0;
#ifdef TCP_OFFLOAD
if (is_offload(sc))
iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
#endif
#ifdef DEV_NETMAP
iaq->nnmrxq10g = iaq->nnmrxq1g = 1;
#endif
allocate:
navail = iaq->nirq;
@ -2636,9 +2768,8 @@ t4_set_desc(struct adapter *sc)
}
static void
build_medialist(struct port_info *pi)
build_medialist(struct port_info *pi, struct ifmedia *media)
{
struct ifmedia *media = &pi->media;
int data, m;
PORT_LOCK(pi);
@ -2767,17 +2898,29 @@ build_medialist(struct port_info *pi)
* Program the port's XGMAC based on parameters in ifnet. The caller also
* indicates which parameters should be programmed (the rest are left alone).
*/
static int
update_mac_settings(struct port_info *pi, int flags)
int
update_mac_settings(struct ifnet *ifp, int flags)
{
int rc;
struct ifnet *ifp = pi->ifp;
int rc = 0;
struct port_info *pi = ifp->if_softc;
struct adapter *sc = pi->adapter;
int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
uint16_t viid = 0xffff;
int16_t *xact_addr_filt = NULL;
ASSERT_SYNCHRONIZED_OP(sc);
KASSERT(flags, ("%s: not told what to update.", __func__));
if (ifp == pi->ifp) {
viid = pi->viid;
xact_addr_filt = &pi->xact_addr_filt;
}
#ifdef DEV_NETMAP
else if (ifp == pi->nm_ifp) {
viid = pi->nm_viid;
xact_addr_filt = &pi->nm_xact_addr_filt;
}
#endif
if (flags & XGMAC_MTU)
mtu = ifp->if_mtu;
@ -2790,25 +2933,28 @@ update_mac_settings(struct port_info *pi, int flags)
if (flags & XGMAC_VLANEX)
vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
vlanex, false);
if (rc) {
if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
return (rc);
if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) {
rc = -t4_set_rxmode(sc, sc->mbox, viid, mtu, promisc, allmulti,
1, vlanex, false);
if (rc) {
if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags,
rc);
return (rc);
}
}
if (flags & XGMAC_UCADDR) {
uint8_t ucaddr[ETHER_ADDR_LEN];
bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
ucaddr, true, true);
rc = t4_change_mac(sc, sc->mbox, viid, *xact_addr_filt, ucaddr,
true, true);
if (rc < 0) {
rc = -rc;
if_printf(ifp, "change_mac failed: %d\n", rc);
return (rc);
} else {
pi->xact_addr_filt = rc;
*xact_addr_filt = rc;
rc = 0;
}
}
@ -2828,8 +2974,8 @@ update_mac_settings(struct port_info *pi, int flags)
LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
if (i == FW_MAC_EXACT_CHUNK) {
rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
del, i, mcaddr, NULL, &hash, 0);
rc = t4_alloc_mac_filt(sc, sc->mbox, viid, del,
i, mcaddr, NULL, &hash, 0);
if (rc < 0) {
rc = -rc;
for (j = 0; j < i; j++) {
@ -2849,8 +2995,8 @@ update_mac_settings(struct port_info *pi, int flags)
}
}
if (i > 0) {
rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
del, i, mcaddr, NULL, &hash, 0);
rc = t4_alloc_mac_filt(sc, sc->mbox, viid, del, i,
mcaddr, NULL, &hash, 0);
if (rc < 0) {
rc = -rc;
for (j = 0; j < i; j++) {
@ -2867,7 +3013,7 @@ update_mac_settings(struct port_info *pi, int flags)
}
}
rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
rc = -t4_set_addr_hash(sc, sc->mbox, viid, 0, hash, 0);
if (rc != 0)
if_printf(ifp, "failed to set mc address hash: %d", rc);
mcfail:
@ -2970,16 +3116,10 @@ cxgbe_init_synchronized(struct port_info *pi)
((rc = port_full_init(pi)) != 0))
return (rc); /* error message displayed already */
rc = update_mac_settings(pi, XGMAC_ALL);
rc = update_mac_settings(ifp, XGMAC_ALL);
if (rc)
goto done; /* error message displayed already */
rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
if (rc != 0) {
if_printf(ifp, "start_link failed: %d\n", rc);
goto done;
}
rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
if (rc != 0) {
if_printf(ifp, "enable_vi failed: %d\n", rc);
@ -3064,61 +3204,41 @@ setup_intr_handlers(struct adapter *sc)
#ifdef TCP_OFFLOAD
struct sge_ofld_rxq *ofld_rxq;
#endif
#ifdef DEV_NETMAP
struct sge_nm_rxq *nm_rxq;
#endif
/*
* Setup interrupts.
*/
irq = &sc->irq[0];
rid = sc->intr_type == INTR_INTX ? 0 : 1;
if (sc->intr_count == 1) {
KASSERT(!(sc->flags & INTR_DIRECT),
("%s: single interrupt && INTR_DIRECT?", __func__));
if (sc->intr_count == 1)
return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all"));
rc = t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all");
if (rc != 0)
return (rc);
} else {
/* Multiple interrupts. */
KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
("%s: too few intr.", __func__));
/* Multiple interrupts. */
KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
("%s: too few intr.", __func__));
/* The first one is always error intr */
rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
if (rc != 0)
return (rc);
irq++;
rid++;
/* The first one is always error intr */
rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
if (rc != 0)
return (rc);
irq++;
rid++;
/* The second one is always the firmware event queue */
rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq,
"evt");
if (rc != 0)
return (rc);
irq++;
rid++;
/* The second one is always the firmware event queue */
rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq, "evt");
if (rc != 0)
return (rc);
irq++;
rid++;
/*
* Note that if INTR_DIRECT is not set then either the NIC rx
* queues or (exclusive or) the TOE rx queueus will be taking
* direct interrupts.
*
* There is no need to check for is_offload(sc) as nofldrxq
* will be 0 if offload is disabled.
*/
for_each_port(sc, p) {
pi = sc->port[p];
for_each_port(sc, p) {
pi = sc->port[p];
#ifdef TCP_OFFLOAD
/*
* Skip over the NIC queues if they aren't taking direct
* interrupts.
*/
if (!(sc->flags & INTR_DIRECT) &&
pi->nofldrxq > pi->nrxq)
goto ofld_queues;
#endif
rxq = &sc->sge.rxq[pi->first_rxq];
for (q = 0; q < pi->nrxq; q++, rxq++) {
if (pi->flags & INTR_RXQ) {
for_each_rxq(pi, q, rxq) {
snprintf(s, sizeof(s), "%d.%d", p, q);
rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq,
s);
@ -3127,17 +3247,10 @@ setup_intr_handlers(struct adapter *sc)
irq++;
rid++;
}
}
#ifdef TCP_OFFLOAD
/*
* Skip over the offload queues if they aren't taking
* direct interrupts.
*/
if (!(sc->flags & INTR_DIRECT))
continue;
ofld_queues:
ofld_rxq = &sc->sge.ofld_rxq[pi->first_ofld_rxq];
for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
if (pi->flags & INTR_OFLD_RXQ) {
for_each_ofld_rxq(pi, q, ofld_rxq) {
snprintf(s, sizeof(s), "%d,%d", p, q);
rc = t4_alloc_irq(sc, irq, rid, t4_intr,
ofld_rxq, s);
@ -3146,14 +3259,28 @@ setup_intr_handlers(struct adapter *sc)
irq++;
rid++;
}
#endif
}
#endif
#ifdef DEV_NETMAP
if (pi->flags & INTR_NM_RXQ) {
for_each_nm_rxq(pi, q, nm_rxq) {
snprintf(s, sizeof(s), "%d-%d", p, q);
rc = t4_alloc_irq(sc, irq, rid, t4_nm_intr,
nm_rxq, s);
if (rc != 0)
return (rc);
irq++;
rid++;
}
}
#endif
}
MPASS(irq == &sc->irq[sc->intr_count]);
return (0);
}
static int
int
adapter_full_init(struct adapter *sc)
{
int rc, i;
@ -3191,7 +3318,7 @@ adapter_full_init(struct adapter *sc)
return (rc);
}
static int
int
adapter_full_uninit(struct adapter *sc)
{
int i;
@ -3210,7 +3337,7 @@ adapter_full_uninit(struct adapter *sc)
return (0);
}
static int
int
port_full_init(struct port_info *pi)
{
struct adapter *sc = pi->adapter;
@ -3264,7 +3391,7 @@ port_full_init(struct port_info *pi)
/*
* Idempotent.
*/
static int
int
port_full_uninit(struct port_info *pi)
{
struct adapter *sc = pi->adapter;
@ -4597,6 +4724,18 @@ cxgbe_sysctls(struct port_info *pi)
"index of first TOE tx queue");
}
#endif
#ifdef DEV_NETMAP
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD,
&pi->nnmrxq, 0, "# of rx queues for netmap");
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD,
&pi->nnmtxq, 0, "# of tx queues for netmap");
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq",
CTLFLAG_RD, &pi->first_nm_rxq, 0,
"index of first netmap rx queue");
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq",
CTLFLAG_RD, &pi->first_nm_txq, 0,
"index of first netmap tx queue");
#endif
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
@ -8084,6 +8223,20 @@ tweak_tunables(void)
t4_toecaps_allowed = 0;
#endif
#ifdef DEV_NETMAP
if (t4_nnmtxq10g < 1)
t4_nnmtxq10g = min(nc, NNMTXQ_10G);
if (t4_nnmtxq1g < 1)
t4_nnmtxq1g = min(nc, NNMTXQ_1G);
if (t4_nnmrxq10g < 1)
t4_nnmrxq10g = min(nc, NNMRXQ_10G);
if (t4_nnmrxq1g < 1)
t4_nnmrxq1g = min(nc, NNMRXQ_1G);
#endif
if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
t4_tmr_idx_10g = TMR_IDX_10G;
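
(nc here is presumably the CPU count, so a 16-core machine would get
min(16, 2) = 2 netmap txq's and rxq's per 10G port by default.)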

sys/dev/cxgbe/t4_netmap.c (new file, 1148 lines)

File diff suppressed because it is too large.

sys/dev/cxgbe/t4_sge.c

@ -55,6 +55,13 @@ __FBSDID("$FreeBSD$");
#include <machine/md_var.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#ifdef DEV_NETMAP
#include <machine/bus.h>
#include <sys/selinfo.h>
#include <net/if_var.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#endif
#include "common/common.h"
#include "common/t4_regs.h"
@ -71,7 +78,7 @@ __FBSDID("$FreeBSD$");
* Ethernet frames are DMA'd at this byte offset into the freelist buffer.
* 0-7 are valid values.
*/
static int fl_pktshift = 2;
int fl_pktshift = 2;
TUNABLE_INT("hw.cxgbe.fl_pktshift", &fl_pktshift);
/*
@ -80,7 +87,7 @@ TUNABLE_INT("hw.cxgbe.fl_pktshift", &fl_pktshift);
* 0: disable padding.
* Any power of 2 from 32 to 4096 (both inclusive) is also a valid value.
*/
static int fl_pad = -1;
int fl_pad = -1;
TUNABLE_INT("hw.cxgbe.fl_pad", &fl_pad);
/*
@ -88,7 +95,7 @@ TUNABLE_INT("hw.cxgbe.fl_pad", &fl_pad);
* -1: driver should figure out a good value.
* 64 or 128 are the only other valid values.
*/
static int spg_len = -1;
int spg_len = -1;
TUNABLE_INT("hw.cxgbe.spg_len", &spg_len);
/*
@ -194,6 +201,14 @@ static int alloc_ofld_rxq(struct port_info *, struct sge_ofld_rxq *, int, int,
struct sysctl_oid *);
static int free_ofld_rxq(struct port_info *, struct sge_ofld_rxq *);
#endif
#ifdef DEV_NETMAP
static int alloc_nm_rxq(struct port_info *, struct sge_nm_rxq *, int, int,
struct sysctl_oid *);
static int free_nm_rxq(struct port_info *, struct sge_nm_rxq *);
static int alloc_nm_txq(struct port_info *, struct sge_nm_txq *, int, int,
struct sysctl_oid *);
static int free_nm_txq(struct port_info *, struct sge_nm_txq *);
#endif
static int ctrl_eq_alloc(struct adapter *, struct sge_eq *);
static int eth_eq_alloc(struct adapter *, struct port_info *, struct sge_eq *);
#ifdef TCP_OFFLOAD
@ -808,6 +823,24 @@ t4_teardown_adapter_queues(struct adapter *sc)
return (0);
}
static inline int
port_intr_count(struct port_info *pi)
{
int rc = 0;
if (pi->flags & INTR_RXQ)
rc += pi->nrxq;
#ifdef TCP_OFFLOAD
if (pi->flags & INTR_OFLD_RXQ)
rc += pi->nofldrxq;
#endif
#ifdef DEV_NETMAP
if (pi->flags & INTR_NM_RXQ)
rc += pi->nnmrxq;
#endif
return (rc);
}
static inline int
first_vector(struct port_info *pi)
{
@ -818,28 +851,10 @@ first_vector(struct port_info *pi)
return (0);
for_each_port(sc, i) {
struct port_info *p = sc->port[i];
if (i == pi->port_id)
break;
#ifdef TCP_OFFLOAD
if (sc->flags & INTR_DIRECT)
rc += p->nrxq + p->nofldrxq;
else
rc += max(p->nrxq, p->nofldrxq);
#else
/*
* Not compiled with offload support and intr_count > 1. Only
* NIC queues exist and they'd better be taking direct
* interrupts.
*/
KASSERT(sc->flags & INTR_DIRECT,
("%s: intr_count %d, !INTR_DIRECT", __func__,
sc->intr_count));
rc += p->nrxq;
#endif
rc += port_intr_count(sc->port[i]);
}
return (rc);
@ -856,41 +871,48 @@ port_intr_iq(struct port_info *pi, int idx)
struct adapter *sc = pi->adapter;
struct sge *s = &sc->sge;
struct sge_iq *iq = NULL;
int nintr, i;
if (sc->intr_count == 1)
return (&sc->sge.fwq);
#ifdef TCP_OFFLOAD
if (sc->flags & INTR_DIRECT) {
idx %= pi->nrxq + pi->nofldrxq;
if (idx >= pi->nrxq) {
idx -= pi->nrxq;
iq = &s->ofld_rxq[pi->first_ofld_rxq + idx].iq;
} else
iq = &s->rxq[pi->first_rxq + idx].iq;
} else {
idx %= max(pi->nrxq, pi->nofldrxq);
if (pi->nrxq >= pi->nofldrxq)
iq = &s->rxq[pi->first_rxq + idx].iq;
else
iq = &s->ofld_rxq[pi->first_ofld_rxq + idx].iq;
}
#else
/*
* Not compiled with offload support and intr_count > 1. Only NIC
* queues exist and they'd better be taking direct interrupts.
*/
KASSERT(sc->flags & INTR_DIRECT,
("%s: intr_count %d, !INTR_DIRECT", __func__, sc->intr_count));
idx %= pi->nrxq;
iq = &s->rxq[pi->first_rxq + idx].iq;
nintr = port_intr_count(pi);
KASSERT(nintr != 0,
("%s: pi %p has no exclusive interrupts, total interrupts = %d",
__func__, pi, sc->intr_count));
#ifdef DEV_NETMAP
/* Exclude netmap queues as they can't take anyone else's interrupts */
if (pi->flags & INTR_NM_RXQ)
nintr -= pi->nnmrxq;
KASSERT(nintr > 0,
("%s: pi %p has nintr %d after netmap adjustment of %d", __func__,
pi, nintr, pi->nnmrxq));
#endif
i = idx % nintr;
KASSERT(iq->flags & IQ_INTR, ("%s: EDOOFUS", __func__));
if (pi->flags & INTR_RXQ) {
if (i < pi->nrxq) {
iq = &s->rxq[pi->first_rxq + i].iq;
goto done;
}
i -= pi->nrxq;
}
#ifdef TCP_OFFLOAD
if (pi->flags & INTR_OFLD_RXQ) {
if (i < pi->nofldrxq) {
iq = &s->ofld_rxq[pi->first_ofld_rxq + i].iq;
goto done;
}
i -= pi->nofldrxq;
}
#endif
panic("%s: pi %p, intr_flags 0x%lx, idx %d, total intr %d\n", __func__,
pi, pi->flags & INTR_ALL, idx, nintr);
done:
MPASS(iq != NULL);
KASSERT(iq->flags & IQ_INTR,
("%s: iq %p (port %p, intr_flags 0x%lx, idx %d)", __func__, iq, pi,
pi->flags & INTR_ALL, idx));
return (iq);
}
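
For example, a port with INTR_RXQ and INTR_NM_RXQ set, nrxq = 4, and
nnmrxq = 2 has port_intr_count() = 6; the netmap adjustment brings nintr
down to 4, so idx = 5 yields i = 5 % 4 = 1 and the function returns
&s->rxq[pi->first_rxq + 1].iq.
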
@ -927,7 +949,10 @@ t4_setup_port_queues(struct port_info *pi)
#ifdef TCP_OFFLOAD
struct sge_ofld_rxq *ofld_rxq;
struct sge_wrq *ofld_txq;
struct sysctl_oid *oid2 = NULL;
#endif
#ifdef DEV_NETMAP
struct sge_nm_rxq *nm_rxq;
struct sge_nm_txq *nm_txq;
#endif
char name[16];
struct adapter *sc = pi->adapter;
@ -936,27 +961,20 @@ t4_setup_port_queues(struct port_info *pi)
struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
int maxp, pack, mtu = ifp->if_mtu;
oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "rxq", CTLFLAG_RD,
NULL, "rx queues");
#ifdef TCP_OFFLOAD
if (is_offload(sc)) {
oid2 = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ofld_rxq",
CTLFLAG_RD, NULL,
"rx queues for offloaded TCP connections");
}
#endif
/* Interrupt vector to start from (when using multiple vectors) */
intr_idx = first_vector(pi);
/*
* First pass over all rx queues (NIC and TOE):
* First pass over all NIC and TOE rx queues:
* a) initialize iq and fl
* b) allocate queue iff it will take direct interrupts.
*/
maxp = mtu_to_max_payload(sc, mtu, 0);
pack = enable_buffer_packing(sc);
if (pi->flags & INTR_RXQ) {
oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "rxq",
CTLFLAG_RD, NULL, "rx queues");
}
for_each_rxq(pi, i, rxq) {
init_iq(&rxq->iq, sc, pi->tmr_idx, pi->pktc_idx, pi->qsize_rxq,
@ -966,11 +984,7 @@ t4_setup_port_queues(struct port_info *pi)
device_get_nameunit(pi->dev), i);
init_fl(sc, &rxq->fl, pi->qsize_rxq / 8, maxp, pack, name);
if (sc->flags & INTR_DIRECT
#ifdef TCP_OFFLOAD
|| (sc->intr_count > 1 && pi->nrxq >= pi->nofldrxq)
#endif
) {
if (pi->flags & INTR_RXQ) {
rxq->iq.flags |= IQ_INTR;
rc = alloc_rxq(pi, rxq, intr_idx, i, oid);
if (rc != 0)
@ -978,9 +992,13 @@ t4_setup_port_queues(struct port_info *pi)
intr_idx++;
}
}
#ifdef TCP_OFFLOAD
maxp = mtu_to_max_payload(sc, mtu, 1);
if (is_offload(sc) && pi->flags & INTR_OFLD_RXQ) {
oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ofld_rxq",
CTLFLAG_RD, NULL,
"rx queues for offloaded TCP connections");
}
for_each_ofld_rxq(pi, i, ofld_rxq) {
init_iq(&ofld_rxq->iq, sc, pi->tmr_idx, pi->pktc_idx,
@ -990,10 +1008,26 @@ t4_setup_port_queues(struct port_info *pi)
device_get_nameunit(pi->dev), i);
init_fl(sc, &ofld_rxq->fl, pi->qsize_rxq / 8, maxp, pack, name);
if (sc->flags & INTR_DIRECT ||
(sc->intr_count > 1 && pi->nofldrxq > pi->nrxq)) {
if (pi->flags & INTR_OFLD_RXQ) {
ofld_rxq->iq.flags |= IQ_INTR;
rc = alloc_ofld_rxq(pi, ofld_rxq, intr_idx, i, oid2);
rc = alloc_ofld_rxq(pi, ofld_rxq, intr_idx, i, oid);
if (rc != 0)
goto done;
intr_idx++;
}
}
#endif
#ifdef DEV_NETMAP
/*
* We don't have buffers to back the netmap rx queues right now so we
* create the queues in a way that doesn't set off any congestion signal
* in the chip.
*/
if (pi->flags & INTR_NM_RXQ) {
oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "nm_rxq",
CTLFLAG_RD, NULL, "rx queues for netmap");
for_each_nm_rxq(pi, i, nm_rxq) {
rc = alloc_nm_rxq(pi, nm_rxq, intr_idx, i, oid);
if (rc != 0)
goto done;
intr_idx++;
@ -1002,35 +1036,45 @@ t4_setup_port_queues(struct port_info *pi)
#endif
/*
* Second pass over all rx queues (NIC and TOE). The queues forwarding
* Second pass over all NIC and TOE rx queues. The queues forwarding
* their interrupts are allocated now.
*/
j = 0;
for_each_rxq(pi, i, rxq) {
if (rxq->iq.flags & IQ_INTR)
continue;
if (!(pi->flags & INTR_RXQ)) {
oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "rxq",
CTLFLAG_RD, NULL, "rx queues");
for_each_rxq(pi, i, rxq) {
MPASS(!(rxq->iq.flags & IQ_INTR));
intr_idx = port_intr_iq(pi, j)->abs_id;
intr_idx = port_intr_iq(pi, j)->abs_id;
rc = alloc_rxq(pi, rxq, intr_idx, i, oid);
if (rc != 0)
goto done;
j++;
rc = alloc_rxq(pi, rxq, intr_idx, i, oid);
if (rc != 0)
goto done;
j++;
}
}
#ifdef TCP_OFFLOAD
for_each_ofld_rxq(pi, i, ofld_rxq) {
if (ofld_rxq->iq.flags & IQ_INTR)
continue;
if (is_offload(sc) && !(pi->flags & INTR_OFLD_RXQ)) {
oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ofld_rxq",
CTLFLAG_RD, NULL,
"rx queues for offloaded TCP connections");
for_each_ofld_rxq(pi, i, ofld_rxq) {
MPASS(!(ofld_rxq->iq.flags & IQ_INTR));
intr_idx = port_intr_iq(pi, j)->abs_id;
intr_idx = port_intr_iq(pi, j)->abs_id;
rc = alloc_ofld_rxq(pi, ofld_rxq, intr_idx, i, oid2);
if (rc != 0)
goto done;
j++;
rc = alloc_ofld_rxq(pi, ofld_rxq, intr_idx, i, oid);
if (rc != 0)
goto done;
j++;
}
}
#endif
#ifdef DEV_NETMAP
if (!(pi->flags & INTR_NM_RXQ))
CXGBE_UNIMPLEMENTED(__func__);
#endif
/*
* Now the tx queues. Only one pass needed.
@ -1039,10 +1083,7 @@ t4_setup_port_queues(struct port_info *pi)
NULL, "tx queues");
j = 0;
for_each_txq(pi, i, txq) {
uint16_t iqid;
iqid = port_intr_iq(pi, j)->cntxt_id;
snprintf(name, sizeof(name), "%s txq%d",
device_get_nameunit(pi->dev), i);
init_eq(&txq->eq, EQ_ETH, pi->qsize_txq, pi->tx_chan, iqid,
@ -1053,15 +1094,13 @@ t4_setup_port_queues(struct port_info *pi)
goto done;
j++;
}
#ifdef TCP_OFFLOAD
oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ofld_txq",
CTLFLAG_RD, NULL, "tx queues for offloaded TCP connections");
for_each_ofld_txq(pi, i, ofld_txq) {
uint16_t iqid;
struct sysctl_oid *oid2;
iqid = port_intr_iq(pi, j)->cntxt_id;
snprintf(name, sizeof(name), "%s ofld_txq%d",
device_get_nameunit(pi->dev), i);
init_eq(&ofld_txq->eq, EQ_OFLD, pi->qsize_txq, pi->tx_chan,
@ -1077,6 +1116,17 @@ t4_setup_port_queues(struct port_info *pi)
j++;
}
#endif
#ifdef DEV_NETMAP
oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "nm_txq",
CTLFLAG_RD, NULL, "tx queues for netmap use");
for_each_nm_txq(pi, i, nm_txq) {
iqid = pi->first_nm_rxq + (j % pi->nnmrxq);
rc = alloc_nm_txq(pi, nm_txq, iqid, i, oid);
if (rc != 0)
goto done;
j++;
}
#endif
/*
* Finally, the control queue.
@ -1110,6 +1160,10 @@ t4_teardown_port_queues(struct port_info *pi)
struct sge_ofld_rxq *ofld_rxq;
struct sge_wrq *ofld_txq;
#endif
#ifdef DEV_NETMAP
struct sge_nm_rxq *nm_rxq;
struct sge_nm_txq *nm_txq;
#endif
/* Do this before freeing the queues */
if (pi->flags & PORT_SYSCTL_CTX) {
@ -1127,12 +1181,15 @@ t4_teardown_port_queues(struct port_info *pi)
for_each_txq(pi, i, txq) {
free_txq(pi, txq);
}
#ifdef TCP_OFFLOAD
for_each_ofld_txq(pi, i, ofld_txq) {
free_wrq(sc, ofld_txq);
}
#endif
#ifdef DEV_NETMAP
for_each_nm_txq(pi, i, nm_txq)
free_nm_txq(pi, nm_txq);
#endif
/*
* Then take down the rx queues that forward their interrupts, as they
@ -1143,13 +1200,16 @@ t4_teardown_port_queues(struct port_info *pi)
if ((rxq->iq.flags & IQ_INTR) == 0)
free_rxq(pi, rxq);
}
#ifdef TCP_OFFLOAD
for_each_ofld_rxq(pi, i, ofld_rxq) {
if ((ofld_rxq->iq.flags & IQ_INTR) == 0)
free_ofld_rxq(pi, ofld_rxq);
}
#endif
#ifdef DEV_NETMAP
for_each_nm_rxq(pi, i, nm_rxq)
free_nm_rxq(pi, nm_rxq);
#endif
/*
* Then take down the rx queues that take direct interrupts.
@ -1159,13 +1219,15 @@ t4_teardown_port_queues(struct port_info *pi)
if (rxq->iq.flags & IQ_INTR)
free_rxq(pi, rxq);
}
#ifdef TCP_OFFLOAD
for_each_ofld_rxq(pi, i, ofld_rxq) {
if (ofld_rxq->iq.flags & IQ_INTR)
free_ofld_rxq(pi, ofld_rxq);
}
#endif
#ifdef DEV_NETMAP
CXGBE_UNIMPLEMENTED(__func__);
#endif
return (0);
}
@ -2558,6 +2620,143 @@ free_ofld_rxq(struct port_info *pi, struct sge_ofld_rxq *ofld_rxq)
}
#endif
#ifdef DEV_NETMAP
static int
alloc_nm_rxq(struct port_info *pi, struct sge_nm_rxq *nm_rxq, int intr_idx,
int idx, struct sysctl_oid *oid)
{
int rc;
struct sysctl_oid_list *children;
struct sysctl_ctx_list *ctx;
char name[16];
size_t len;
struct adapter *sc = pi->adapter;
struct netmap_adapter *na = NA(pi->nm_ifp);
MPASS(na != NULL);
len = pi->qsize_rxq * RX_IQ_ESIZE;
rc = alloc_ring(sc, len, &nm_rxq->iq_desc_tag, &nm_rxq->iq_desc_map,
&nm_rxq->iq_ba, (void **)&nm_rxq->iq_desc);
if (rc != 0)
return (rc);
len = na->num_rx_desc * RX_FL_ESIZE + spg_len;
rc = alloc_ring(sc, len, &nm_rxq->fl_desc_tag, &nm_rxq->fl_desc_map,
&nm_rxq->fl_ba, (void **)&nm_rxq->fl_desc);
if (rc != 0)
return (rc);
nm_rxq->pi = pi;
nm_rxq->nid = idx;
nm_rxq->iq_cidx = 0;
nm_rxq->iq_sidx = pi->qsize_rxq - spg_len / RX_IQ_ESIZE;
nm_rxq->iq_gen = F_RSPD_GEN;
nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0;
nm_rxq->fl_sidx = na->num_rx_desc;
nm_rxq->intr_idx = intr_idx;
ctx = &pi->ctx;
children = SYSCTL_CHILDREN(oid);
snprintf(name, sizeof(name), "%d", idx);
oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name, CTLFLAG_RD, NULL,
"rx queue");
children = SYSCTL_CHILDREN(oid);
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "abs_id",
CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_abs_id, 0, sysctl_uint16,
"I", "absolute id of the queue");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id",
CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_cntxt_id, 0, sysctl_uint16,
"I", "SGE context id of the queue");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx",
CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_cidx, 0, sysctl_uint16, "I",
"consumer index");
children = SYSCTL_CHILDREN(oid);
oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl", CTLFLAG_RD, NULL,
"freelist");
children = SYSCTL_CHILDREN(oid);
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id",
CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->fl_cntxt_id, 0, sysctl_uint16,
"I", "SGE context id of the freelist");
SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD,
&nm_rxq->fl_cidx, 0, "consumer index");
SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD,
&nm_rxq->fl_pidx, 0, "producer index");
return (rc);
}
static int
free_nm_rxq(struct port_info *pi, struct sge_nm_rxq *nm_rxq)
{
struct adapter *sc = pi->adapter;
free_ring(sc, nm_rxq->iq_desc_tag, nm_rxq->iq_desc_map, nm_rxq->iq_ba,
nm_rxq->iq_desc);
free_ring(sc, nm_rxq->fl_desc_tag, nm_rxq->fl_desc_map, nm_rxq->fl_ba,
nm_rxq->fl_desc);
return (0);
}
static int
alloc_nm_txq(struct port_info *pi, struct sge_nm_txq *nm_txq, int iqidx, int idx,
struct sysctl_oid *oid)
{
int rc;
size_t len;
struct adapter *sc = pi->adapter;
struct netmap_adapter *na = NA(pi->nm_ifp);
char name[16];
struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
len = na->num_tx_desc * EQ_ESIZE + spg_len;
rc = alloc_ring(sc, len, &nm_txq->desc_tag, &nm_txq->desc_map,
&nm_txq->ba, (void **)&nm_txq->desc);
if (rc)
return (rc);
nm_txq->pidx = nm_txq->cidx = 0;
nm_txq->sidx = na->num_tx_desc;
nm_txq->nid = idx;
nm_txq->iqidx = iqidx;
nm_txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf));
snprintf(name, sizeof(name), "%d", idx);
oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
NULL, "netmap tx queue");
children = SYSCTL_CHILDREN(oid);
SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
&nm_txq->cntxt_id, 0, "SGE context id of the queue");
SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx",
CTLTYPE_INT | CTLFLAG_RD, &nm_txq->cidx, 0, sysctl_uint16, "I",
"consumer index");
SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "pidx",
CTLTYPE_INT | CTLFLAG_RD, &nm_txq->pidx, 0, sysctl_uint16, "I",
"producer index");
return (rc);
}
static int
free_nm_txq(struct port_info *pi, struct sge_nm_txq *nm_txq)
{
struct adapter *sc = pi->adapter;
free_ring(sc, nm_txq->desc_tag, nm_txq->desc_map, nm_txq->ba,
nm_txq->desc);
return (0);
}
#endif
static int
ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq)
{
@ -2986,7 +3185,7 @@ static inline void
iq_next(struct sge_iq *iq)
{
iq->cdesc = (void *) ((uintptr_t)iq->cdesc + iq->esize);
if (__predict_false(++iq->cidx == iq->qsize - 1)) {
if (__predict_false(++iq->cidx == iq->qsize - spg_len / iq->esize)) {
iq->cidx = 0;
iq->gen ^= 1;
iq->cdesc = iq->desc;
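
With 64-byte entries the two expressions agree when spg_len is 64
(qsize - 64/64 == qsize - 1), but when spg_len is 128 the status page
occupies two entries and the wrap must happen at qsize - 2; the old
hard-coded qsize - 1 would have walked onto the status page.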

sys/modules/cxgbe/if_cxgbe/Makefile

@ -8,7 +8,7 @@ CXGBE = ${.CURDIR}/../../../dev/cxgbe
.PATH: ${CXGBE} ${CXGBE}/common
KMOD = if_cxgbe
SRCS = t4_main.c t4_sge.c t4_l2t.c t4_tracer.c
SRCS = t4_main.c t4_sge.c t4_l2t.c t4_tracer.c t4_netmap.c
SRCS+= t4_hw.c
SRCS+= device_if.h bus_if.h pci_if.h
SRCS+= opt_inet.h opt_inet6.h