cxgbe(4): Merge netmap support from the ncxgbe/ncxl interfaces to the
vcxgbe/vcxl interfaces and retire the 'n' interfaces.  The main
cxgbe/cxl interfaces and the tunables related to them are not affected
by any of this and will continue to operate as usual.

The driver used to create an additional 'n' interface for every
cxgbe/cxl interface if "device netmap" was in the kernel.  The 'n'
interface shared the wire with the main interface but was otherwise
autonomous (with its own MAC address, etc.).  It did not have normal
tx/rx but had a specialized netmap-only data path.  r291665 added
another set of virtual interfaces (the 'v' interfaces) to the driver.
These had normal tx/rx but no netmap support.

This revision consolidates the features of both interfaces into the
'v' interface, which now has a normal data path, TOE support, and
native netmap support.  The 'v' interfaces must be created explicitly
with the hw.cxgbe.num_vis tunable (for example, hw.cxgbe.num_vis=2
creates one 'v' interface per port).  This means "device netmap" alone
will no longer result in the automatic creation of any virtual
interfaces.

The following tunables can be used to override the default number of
queues allocated for each 'v' interface.  Setting nofld* = 0 will
disable TOE on the virtual interface, and setting nnm* = 0 will
disable native netmap support.  (A sample loader.conf follows the
list.)

# number of normal NIC queues
hw.cxgbe.ntxq_vi
hw.cxgbe.nrxq_vi

# number of TOE queues
hw.cxgbe.nofldtxq_vi
hw.cxgbe.nofldrxq_vi

# number of netmap queues
hw.cxgbe.nnmtxq_vi
hw.cxgbe.nnmrxq_vi

The hw.cxgbe.nnm{t,r}xq{10,1}g tunables have been removed.
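
As an illustrative loader.conf example (the values below are
assumptions for the sake of the example, not recommendations), the
following would create one extra 'v' interface per port with two NIC
queue pairs, TOE disabled, and two netmap queue pairs:

hw.cxgbe.num_vis=2       # main VI plus one 'v' VI per port
hw.cxgbe.ntxq_vi=2
hw.cxgbe.nrxq_vi=2
hw.cxgbe.nofldtxq_vi=0   # nofld* = 0: no TOE on the 'v' interfaces
hw.cxgbe.nofldrxq_vi=0
hw.cxgbe.nnmtxq_vi=2     # nnm* = 0 would disable native netmap instead
hw.cxgbe.nnmrxq_vi=2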

--- tl;dr version ---
The workflow for netmap on cxgbe starting with FreeBSD 11 is:
1) "device netmap" in the kernel config.
2) "hw.cxgbe.num_vis=2" in loader.conf.  num_vis > 2 is ok too, you'll
end up with multiple autonomous netmap-capable interfaces for every
port.
3) "dmesg | grep vcxl | grep netmap" to verify that the interface has
netmap queues.
4) Use any of the 'v' interfaces for netmap, e.g. pkt-gen -i vcxl<n> ... .
One major improvement is that the netmap interface now has a normal
data path, as expected.
5) Just ignore the cxl interfaces if you want to use netmap only.  No
need to bring them up.  The vcxl interfaces are completely independent
and everything should just work.
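
A hypothetical session putting the steps together (the device name,
queue counts, and pkt-gen arguments are illustrative only; the dmesg
line follows the attach-message format added in this commit):

# dmesg | grep vcxl | grep netmap
vcxl0: 2 txq, 2 rxq (NIC); 2 txq, 2 rxq (netmap)
# sysctl dev.vcxl.0.nnmrxq
dev.vcxl.0.nnmrxq: 2
# pkt-gen -f tx -i vcxl0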
---------------------

Approved by:	re@ (gjb@)
Relnotes:	Yes
Sponsored by:	Chelsio Communications
Navdeep Parhar, 2016-06-23 02:53:00 +00:00
commit 62291463de (parent 9134b0870a)
4 changed files with 285 additions and 528 deletions

sys/dev/cxgbe/adapter.h

@ -208,7 +208,6 @@ enum {
INTR_RXQ = (1 << 4), /* All NIC rxq's take interrupts */
INTR_OFLD_RXQ = (1 << 5), /* All TOE rxq's take interrupts */
INTR_ALL = (INTR_RXQ | INTR_OFLD_RXQ),
VI_NETMAP = (1 << 6),
/* adapter debug_flags */
DF_DUMP_MBOX = (1 << 0),
@ -230,7 +229,7 @@ struct vi_info {
unsigned long flags;
int if_flags;
uint16_t *rss;
uint16_t *rss, *nm_rss;
uint16_t viid;
int16_t xact_addr_filt;/* index of exact MAC address filter */
uint16_t rss_size; /* size of VI's RSS table slice */
@ -251,6 +250,10 @@ struct vi_info {
int first_ofld_txq; /* index of first offload tx queue */
int nofldrxq; /* # of offload rx queues */
int first_ofld_rxq; /* index of first offload rx queue */
int nnmtxq;
int first_nm_txq;
int nnmrxq;
int first_nm_rxq;
int tmr_idx;
int pktc_idx;
int qsize_rxq;
@ -362,6 +365,11 @@ enum {
IQS_DISABLED = 0,
IQS_BUSY = 1,
IQS_IDLE = 2,
/* netmap related flags */
NM_OFF = 0,
NM_ON = 1,
NM_BUSY = 2,
};
/*
@ -765,8 +773,11 @@ struct adapter {
struct irq {
struct resource *res;
int rid;
volatile int nm_state; /* NM_OFF, NM_ON, or NM_BUSY */
void *tag;
} *irq;
struct sge_rxq *rxq;
struct sge_nm_rxq *nm_rxq;
} __aligned(CACHE_LINE_SIZE) *irq;
bus_dma_tag_t dmat; /* Parent DMA tag */
@ -911,11 +922,11 @@ struct adapter {
for (q = &vi->pi->adapter->sge.ofld_rxq[vi->first_ofld_rxq], iter = 0; \
iter < vi->nofldrxq; ++iter, ++q)
#define for_each_nm_txq(vi, iter, q) \
for (q = &vi->pi->adapter->sge.nm_txq[vi->first_txq], iter = 0; \
iter < vi->ntxq; ++iter, ++q)
for (q = &vi->pi->adapter->sge.nm_txq[vi->first_nm_txq], iter = 0; \
iter < vi->nnmtxq; ++iter, ++q)
#define for_each_nm_rxq(vi, iter, q) \
for (q = &vi->pi->adapter->sge.nm_rxq[vi->first_rxq], iter = 0; \
iter < vi->nrxq; ++iter, ++q)
for (q = &vi->pi->adapter->sge.nm_rxq[vi->first_nm_rxq], iter = 0; \
iter < vi->nnmrxq; ++iter, ++q)
#define for_each_vi(_pi, _iter, _vi) \
for ((_vi) = (_pi)->vi, (_iter) = 0; (_iter) < (_pi)->nvi; \
++(_iter), ++(_vi))
@ -1087,8 +1098,8 @@ void vi_tick(void *);
#ifdef DEV_NETMAP
/* t4_netmap.c */
int create_netmap_ifnet(struct port_info *);
int destroy_netmap_ifnet(struct port_info *);
void cxgbe_nm_attach(struct vi_info *);
void cxgbe_nm_detach(struct vi_info *);
void t4_nm_intr(void *);
#endif
@ -1109,6 +1120,7 @@ int t4_setup_vi_queues(struct vi_info *);
int t4_teardown_vi_queues(struct vi_info *);
void t4_intr_all(void *);
void t4_intr(void *);
void t4_vi_intr(void *);
void t4_intr_err(void *);
void t4_intr_evt(void *);
void t4_wrq_tx_locked(struct adapter *, struct sge_wrq *, struct wrqe *);

sys/dev/cxgbe/t4_main.c

@ -230,6 +230,14 @@ TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);
static int t4_nrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);
#define NTXQ_VI 1
static int t4_ntxq_vi = -1;
TUNABLE_INT("hw.cxgbe.ntxq_vi", &t4_ntxq_vi);
#define NRXQ_VI 1
static int t4_nrxq_vi = -1;
TUNABLE_INT("hw.cxgbe.nrxq_vi", &t4_nrxq_vi);
static int t4_rsrv_noflowq = 0;
TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);
@ -249,24 +257,24 @@ TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);
#define NOFLDRXQ_1G 1
static int t4_nofldrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
#define NOFLDTXQ_VI 1
static int t4_nofldtxq_vi = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq_vi", &t4_nofldtxq_vi);
#define NOFLDRXQ_VI 1
static int t4_nofldrxq_vi = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq_vi", &t4_nofldrxq_vi);
#endif
#ifdef DEV_NETMAP
#define NNMTXQ_10G 2
static int t4_nnmtxq10g = -1;
TUNABLE_INT("hw.cxgbe.nnmtxq10g", &t4_nnmtxq10g);
#define NNMTXQ_VI 2
static int t4_nnmtxq_vi = -1;
TUNABLE_INT("hw.cxgbe.nnmtxq_vi", &t4_nnmtxq_vi);
#define NNMRXQ_10G 2
static int t4_nnmrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nnmrxq10g", &t4_nnmrxq10g);
#define NNMTXQ_1G 1
static int t4_nnmtxq1g = -1;
TUNABLE_INT("hw.cxgbe.nnmtxq1g", &t4_nnmtxq1g);
#define NNMRXQ_1G 1
static int t4_nnmrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nnmrxq1g", &t4_nnmrxq1g);
#define NNMRXQ_VI 2
static int t4_nnmrxq_vi = -1;
TUNABLE_INT("hw.cxgbe.nnmrxq_vi", &t4_nnmrxq_vi);
#endif
/*
@ -387,18 +395,18 @@ struct intrs_and_queues {
uint16_t ntxq1g; /* # of NIC txq's for each 1G port */
uint16_t nrxq1g; /* # of NIC rxq's for each 1G port */
uint16_t rsrv_noflowq; /* Flag whether to reserve queue 0 */
#ifdef TCP_OFFLOAD
uint16_t nofldtxq10g; /* # of TOE txq's for each 10G port */
uint16_t nofldrxq10g; /* # of TOE rxq's for each 10G port */
uint16_t nofldtxq1g; /* # of TOE txq's for each 1G port */
uint16_t nofldrxq1g; /* # of TOE rxq's for each 1G port */
#endif
#ifdef DEV_NETMAP
uint16_t nnmtxq10g; /* # of netmap txq's for each 10G port */
uint16_t nnmrxq10g; /* # of netmap rxq's for each 10G port */
uint16_t nnmtxq1g; /* # of netmap txq's for each 1G port */
uint16_t nnmrxq1g; /* # of netmap rxq's for each 1G port */
#endif
/* The vcxgbe/vcxl interfaces use these and not the ones above. */
uint16_t ntxq_vi; /* # of NIC txq's */
uint16_t nrxq_vi; /* # of NIC rxq's */
uint16_t nofldtxq_vi; /* # of TOE txq's */
uint16_t nofldrxq_vi; /* # of TOE rxq's */
uint16_t nnmtxq_vi; /* # of netmap txq's */
uint16_t nnmrxq_vi; /* # of netmap rxq's */
};
struct filter_entry {
@ -802,10 +810,10 @@ t4_attach(device_t dev)
goto done; /* error message displayed already */
/*
* Number of VIs to create per-port. The first VI is the
* "main" regular VI for the port. The second VI is used for
* netmap if present, and any remaining VIs are used for
* additional virtual interfaces.
* Number of VIs to create per-port. The first VI is the "main" regular
* VI for the port. The rest are additional virtual interfaces on the
* same physical port. Note that the main VI does not have native
* netmap support but the extra VIs do.
*
* Limit the number of VIs per port to the number of available
* MAC addresses per port.
@ -814,9 +822,6 @@ t4_attach(device_t dev)
num_vis = t4_num_vis;
else
num_vis = 1;
#ifdef DEV_NETMAP
num_vis++;
#endif
if (num_vis > nitems(vi_mac_funcs)) {
num_vis = nitems(vi_mac_funcs);
device_printf(dev, "Number of VIs limited to %d\n", num_vis);
@ -831,7 +836,6 @@ t4_attach(device_t dev)
n10g = n1g = 0;
for_each_port(sc, i) {
struct port_info *pi;
struct vi_info *vi;
pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
sc->port[i] = pi;
@ -839,7 +843,10 @@ t4_attach(device_t dev)
/* These must be set before t4_port_init */
pi->adapter = sc;
pi->port_id = i;
pi->nvi = num_vis;
/*
* XXX: vi[0] is special so we can't delay this allocation until
* pi->nvi's final value is known.
*/
pi->vi = malloc(sizeof(struct vi_info) * num_vis, M_CXGBE,
M_ZERO | M_WAITOK);
@ -881,26 +888,12 @@ t4_attach(device_t dev)
if (is_10G_port(pi) || is_40G_port(pi)) {
n10g++;
for_each_vi(pi, j, vi) {
vi->tmr_idx = t4_tmr_idx_10g;
vi->pktc_idx = t4_pktc_idx_10g;
}
} else {
n1g++;
for_each_vi(pi, j, vi) {
vi->tmr_idx = t4_tmr_idx_1g;
vi->pktc_idx = t4_pktc_idx_1g;
}
}
pi->linkdnrc = -1;
for_each_vi(pi, j, vi) {
vi->qsize_rxq = t4_qsize_rxq;
vi->qsize_txq = t4_qsize_txq;
vi->pi = pi;
}
pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
if (pi->dev == NULL) {
device_printf(dev,
@ -915,12 +908,11 @@ t4_attach(device_t dev)
/*
* Interrupt type, # of interrupts, # of rx/tx queues, etc.
*/
#ifdef DEV_NETMAP
num_vis--;
#endif
rc = cfg_itype_and_nqueues(sc, n10g, n1g, num_vis, &iaq);
if (rc != 0)
goto done; /* error message displayed already */
if (iaq.nrxq_vi + iaq.nofldrxq_vi + iaq.nnmrxq_vi == 0)
num_vis = 1;
sc->intr_type = iaq.intr_type;
sc->intr_count = iaq.nirq;
@ -929,8 +921,8 @@ t4_attach(device_t dev)
s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
if (num_vis > 1) {
s->nrxq += (n10g + n1g) * (num_vis - 1);
s->ntxq += (n10g + n1g) * (num_vis - 1);
s->nrxq += (n10g + n1g) * (num_vis - 1) * iaq.nrxq_vi;
s->ntxq += (n10g + n1g) * (num_vis - 1) * iaq.ntxq_vi;
}
s->neq = s->ntxq + s->nrxq; /* the free list in an rxq is an eq */
s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
@ -940,8 +932,10 @@ t4_attach(device_t dev)
s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
if (num_vis > 1) {
s->nofldrxq += (n10g + n1g) * (num_vis - 1);
s->nofldtxq += (n10g + n1g) * (num_vis - 1);
s->nofldrxq += (n10g + n1g) * (num_vis - 1) *
iaq.nofldrxq_vi;
s->nofldtxq += (n10g + n1g) * (num_vis - 1) *
iaq.nofldtxq_vi;
}
s->neq += s->nofldtxq + s->nofldrxq;
s->niq += s->nofldrxq;
@ -953,8 +947,10 @@ t4_attach(device_t dev)
}
#endif
#ifdef DEV_NETMAP
s->nnmrxq = n10g * iaq.nnmrxq10g + n1g * iaq.nnmrxq1g;
s->nnmtxq = n10g * iaq.nnmtxq10g + n1g * iaq.nnmtxq1g;
if (num_vis > 1) {
s->nnmrxq = (n10g + n1g) * (num_vis - 1) * iaq.nnmrxq_vi;
s->nnmtxq = (n10g + n1g) * (num_vis - 1) * iaq.nnmtxq_vi;
}
s->neq += s->nnmtxq + s->nnmrxq;
s->niq += s->nnmrxq;
@ -998,61 +994,63 @@ t4_attach(device_t dev)
if (pi == NULL)
continue;
pi->nvi = num_vis;
for_each_vi(pi, j, vi) {
#ifdef DEV_NETMAP
if (j == 1) {
vi->flags |= VI_NETMAP | INTR_RXQ;
vi->first_rxq = nm_rqidx;
vi->first_txq = nm_tqidx;
if (is_10G_port(pi) || is_40G_port(pi)) {
vi->nrxq = iaq.nnmrxq10g;
vi->ntxq = iaq.nnmtxq10g;
} else {
vi->nrxq = iaq.nnmrxq1g;
vi->ntxq = iaq.nnmtxq1g;
}
nm_rqidx += vi->nrxq;
nm_tqidx += vi->ntxq;
continue;
}
#endif
vi->pi = pi;
vi->qsize_rxq = t4_qsize_rxq;
vi->qsize_txq = t4_qsize_txq;
vi->first_rxq = rqidx;
vi->first_txq = tqidx;
if (is_10G_port(pi) || is_40G_port(pi)) {
vi->tmr_idx = t4_tmr_idx_10g;
vi->pktc_idx = t4_pktc_idx_10g;
vi->flags |= iaq.intr_flags_10g & INTR_RXQ;
vi->nrxq = j == 0 ? iaq.nrxq10g : 1;
vi->ntxq = j == 0 ? iaq.ntxq10g : 1;
vi->nrxq = j == 0 ? iaq.nrxq10g : iaq.nrxq_vi;
vi->ntxq = j == 0 ? iaq.ntxq10g : iaq.ntxq_vi;
} else {
vi->tmr_idx = t4_tmr_idx_1g;
vi->pktc_idx = t4_pktc_idx_1g;
vi->flags |= iaq.intr_flags_1g & INTR_RXQ;
vi->nrxq = j == 0 ? iaq.nrxq1g : 1;
vi->ntxq = j == 0 ? iaq.ntxq1g : 1;
vi->nrxq = j == 0 ? iaq.nrxq1g : iaq.nrxq_vi;
vi->ntxq = j == 0 ? iaq.ntxq1g : iaq.ntxq_vi;
}
rqidx += vi->nrxq;
tqidx += vi->ntxq;
if (vi->ntxq > 1)
if (j == 0 && vi->ntxq > 1)
vi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0;
else
vi->rsrv_noflowq = 0;
rqidx += vi->nrxq;
tqidx += vi->ntxq;
#ifdef TCP_OFFLOAD
if (!is_offload(sc))
continue;
vi->first_ofld_rxq = ofld_rqidx;
vi->first_ofld_txq = ofld_tqidx;
if (is_10G_port(pi) || is_40G_port(pi)) {
vi->flags |= iaq.intr_flags_10g & INTR_OFLD_RXQ;
vi->nofldrxq = j == 0 ? iaq.nofldrxq10g : 1;
vi->nofldtxq = j == 0 ? iaq.nofldtxq10g : 1;
vi->nofldrxq = j == 0 ? iaq.nofldrxq10g :
iaq.nofldrxq_vi;
vi->nofldtxq = j == 0 ? iaq.nofldtxq10g :
iaq.nofldtxq_vi;
} else {
vi->flags |= iaq.intr_flags_1g & INTR_OFLD_RXQ;
vi->nofldrxq = j == 0 ? iaq.nofldrxq1g : 1;
vi->nofldtxq = j == 0 ? iaq.nofldtxq1g : 1;
vi->nofldrxq = j == 0 ? iaq.nofldrxq1g :
iaq.nofldrxq_vi;
vi->nofldtxq = j == 0 ? iaq.nofldtxq1g :
iaq.nofldtxq_vi;
}
ofld_rqidx += vi->nofldrxq;
ofld_tqidx += vi->nofldtxq;
#endif
#ifdef DEV_NETMAP
if (j > 0) {
vi->first_nm_rxq = nm_rqidx;
vi->first_nm_txq = nm_tqidx;
vi->nnmrxq = iaq.nnmrxq_vi;
vi->nnmtxq = iaq.nnmtxq_vi;
nm_rqidx += vi->nnmrxq;
nm_tqidx += vi->nnmtxq;
}
#endif
}
}
@ -1275,13 +1273,21 @@ cxgbe_vi_attach(device_t dev, struct vi_info *vi)
EVENTHANDLER_PRI_ANY);
ether_ifattach(ifp, vi->hw_addr);
#ifdef DEV_NETMAP
if (vi->nnmrxq != 0)
cxgbe_nm_attach(vi);
#endif
sb = sbuf_new_auto();
sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq);
#ifdef TCP_OFFLOAD
if (ifp->if_capabilities & IFCAP_TOE)
sbuf_printf(sb, "; %d txq, %d rxq (TOE)",
vi->nofldtxq, vi->nofldrxq);
#endif
#ifdef DEV_NETMAP
if (ifp->if_capabilities & IFCAP_NETMAP)
sbuf_printf(sb, "; %d txq, %d rxq (netmap)",
vi->nnmtxq, vi->nnmrxq);
#endif
sbuf_finish(sb);
device_printf(dev, "%s\n", sbuf_data(sb));
@ -1308,21 +1314,8 @@ cxgbe_attach(device_t dev)
for_each_vi(pi, i, vi) {
if (i == 0)
continue;
#ifdef DEV_NETMAP
if (vi->flags & VI_NETMAP) {
/*
* media handled here to keep
* implementation private to this file
*/
ifmedia_init(&vi->media, IFM_IMASK, cxgbe_media_change,
cxgbe_media_status);
build_medialist(pi, &vi->media);
vi->dev = device_add_child(dev, is_t4(pi->adapter) ?
"ncxgbe" : "ncxl", device_get_unit(dev));
} else
#endif
vi->dev = device_add_child(dev, is_t4(pi->adapter) ?
"vcxgbe" : "vcxl", -1);
vi->dev = device_add_child(dev, is_t4(pi->adapter) ?
"vcxgbe" : "vcxl", -1);
if (vi->dev == NULL) {
device_printf(dev, "failed to add VI %d\n", i);
continue;
@ -1348,6 +1341,10 @@ cxgbe_vi_detach(struct vi_info *vi)
EVENTHANDLER_DEREGISTER(vlan_config, vi->vlan_c);
/* Let detach proceed even if these fail. */
#ifdef DEV_NETMAP
if (ifp->if_capabilities & IFCAP_NETMAP)
cxgbe_nm_detach(vi);
#endif
cxgbe_uninit_synchronized(vi);
callout_drain(&vi->tick);
vi_full_uninit(vi);
@ -1710,7 +1707,7 @@ vi_get_counter(struct ifnet *ifp, ift_counter c)
uint64_t drops;
drops = 0;
if ((vi->flags & (VI_INIT_DONE | VI_NETMAP)) == VI_INIT_DONE) {
if (vi->flags & VI_INIT_DONE) {
int i;
struct sge_txq *txq;
@ -2379,28 +2376,29 @@ cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, int num_vis,
{
int rc, itype, navail, nrxq10g, nrxq1g, n;
int nofldrxq10g = 0, nofldrxq1g = 0;
int nnmrxq10g = 0, nnmrxq1g = 0;
bzero(iaq, sizeof(*iaq));
iaq->ntxq10g = t4_ntxq10g;
iaq->ntxq1g = t4_ntxq1g;
iaq->ntxq_vi = t4_ntxq_vi;
iaq->nrxq10g = nrxq10g = t4_nrxq10g;
iaq->nrxq1g = nrxq1g = t4_nrxq1g;
iaq->nrxq_vi = t4_nrxq_vi;
iaq->rsrv_noflowq = t4_rsrv_noflowq;
#ifdef TCP_OFFLOAD
if (is_offload(sc)) {
iaq->nofldtxq10g = t4_nofldtxq10g;
iaq->nofldtxq1g = t4_nofldtxq1g;
iaq->nofldtxq_vi = t4_nofldtxq_vi;
iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
iaq->nofldrxq_vi = t4_nofldrxq_vi;
}
#endif
#ifdef DEV_NETMAP
iaq->nnmtxq10g = t4_nnmtxq10g;
iaq->nnmtxq1g = t4_nnmtxq1g;
iaq->nnmrxq10g = nnmrxq10g = t4_nnmrxq10g;
iaq->nnmrxq1g = nnmrxq1g = t4_nnmrxq1g;
iaq->nnmtxq_vi = t4_nnmtxq_vi;
iaq->nnmrxq_vi = t4_nnmrxq_vi;
#endif
for (itype = INTR_MSIX; itype; itype >>= 1) {
@ -2424,14 +2422,17 @@ cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, int num_vis,
/*
* Best option: an interrupt vector for errors, one for the
* firmware event queue, and one for every rxq (NIC, TOE, and
* netmap).
* firmware event queue, and one for every rxq (NIC and TOE) of
* every VI. The VIs that support netmap use the same
* interrupts for the NIC rx queues and the netmap rx queues
* because only one set of queues is active at a time.
*/
iaq->nirq = T4_EXTRA_INTR;
iaq->nirq += n10g * (nrxq10g + nofldrxq10g + nnmrxq10g);
iaq->nirq += n10g * 2 * (num_vis - 1);
iaq->nirq += n1g * (nrxq1g + nofldrxq1g + nnmrxq1g);
iaq->nirq += n1g * 2 * (num_vis - 1);
iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
iaq->nirq += (n10g + n1g) * (num_vis - 1) *
max(iaq->nrxq_vi, iaq->nnmrxq_vi); /* See comment above. */
iaq->nirq += (n10g + n1g) * (num_vis - 1) * iaq->nofldrxq_vi;
if (iaq->nirq <= navail &&
(itype != INTR_MSI || powerof2(iaq->nirq))) {
iaq->intr_flags_10g = INTR_ALL;
@ -2439,43 +2440,44 @@ cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, int num_vis,
goto allocate;
}
/* Disable the VIs (and netmap) if there aren't enough intrs */
if (num_vis > 1) {
device_printf(sc->dev, "virtual interfaces disabled "
"because num_vis=%u with current settings "
"(nrxq10g=%u, nrxq1g=%u, nofldrxq10g=%u, "
"nofldrxq1g=%u, nrxq_vi=%u nofldrxq_vi=%u, "
"nnmrxq_vi=%u) would need %u interrupts but "
"only %u are available.\n", num_vis, nrxq10g,
nrxq1g, nofldrxq10g, nofldrxq1g, iaq->nrxq_vi,
iaq->nofldrxq_vi, iaq->nnmrxq_vi, iaq->nirq,
navail);
num_vis = 1;
iaq->ntxq_vi = iaq->nrxq_vi = 0;
iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0;
iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0;
goto restart;
}
/*
* Second best option: a vector for errors, one for the firmware
* event queue, and vectors for either all the NIC rx queues or
* all the TOE rx queues. The queues that don't get vectors
* will forward their interrupts to those that do.
*
* Note: netmap rx queues cannot be created early and so they
* can't be setup to receive forwarded interrupts for others.
*/
iaq->nirq = T4_EXTRA_INTR;
if (nrxq10g >= nofldrxq10g) {
iaq->intr_flags_10g = INTR_RXQ;
iaq->nirq += n10g * nrxq10g;
iaq->nirq += n10g * (num_vis - 1);
#ifdef DEV_NETMAP
iaq->nnmrxq10g = min(nnmrxq10g, nrxq10g);
#endif
} else {
iaq->intr_flags_10g = INTR_OFLD_RXQ;
iaq->nirq += n10g * nofldrxq10g;
#ifdef DEV_NETMAP
iaq->nnmrxq10g = min(nnmrxq10g, nofldrxq10g);
#endif
}
if (nrxq1g >= nofldrxq1g) {
iaq->intr_flags_1g = INTR_RXQ;
iaq->nirq += n1g * nrxq1g;
iaq->nirq += n1g * (num_vis - 1);
#ifdef DEV_NETMAP
iaq->nnmrxq1g = min(nnmrxq1g, nrxq1g);
#endif
} else {
iaq->intr_flags_1g = INTR_OFLD_RXQ;
iaq->nirq += n1g * nofldrxq1g;
#ifdef DEV_NETMAP
iaq->nnmrxq1g = min(nnmrxq1g, nofldrxq1g);
#endif
}
if (iaq->nirq <= navail &&
(itype != INTR_MSI || powerof2(iaq->nirq)))
@ -2483,12 +2485,12 @@ cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, int num_vis,
/*
* Next best option: an interrupt vector for errors, one for the
* firmware event queue, and at least one per VI. At this
* point we know we'll have to downsize nrxq and/or nofldrxq
* and/or nnmrxq to fit what's available to us.
* firmware event queue, and at least one per main-VI. At this
* point we know we'll have to downsize nrxq and/or nofldrxq to
* fit what's available to us.
*/
iaq->nirq = T4_EXTRA_INTR;
iaq->nirq += (n10g + n1g) * num_vis;
iaq->nirq += n10g + n1g;
if (iaq->nirq <= navail) {
int leftover = navail - iaq->nirq;
@ -2507,9 +2509,6 @@ cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, int num_vis,
iaq->nrxq10g = min(n, nrxq10g);
#ifdef TCP_OFFLOAD
iaq->nofldrxq10g = min(n, nofldrxq10g);
#endif
#ifdef DEV_NETMAP
iaq->nnmrxq10g = min(n, nnmrxq10g);
#endif
}
@ -2528,9 +2527,6 @@ cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, int num_vis,
iaq->nrxq1g = min(n, nrxq1g);
#ifdef TCP_OFFLOAD
iaq->nofldrxq1g = min(n, nofldrxq1g);
#endif
#ifdef DEV_NETMAP
iaq->nnmrxq1g = min(n, nnmrxq1g);
#endif
}
@ -2547,10 +2543,6 @@ cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, int num_vis,
if (is_offload(sc))
iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
#endif
#ifdef DEV_NETMAP
iaq->nnmrxq10g = iaq->nnmrxq1g = 1;
#endif
allocate:
navail = iaq->nirq;
rc = 0;
@ -3823,6 +3815,7 @@ setup_intr_handlers(struct adapter *sc)
struct irq *irq;
struct port_info *pi;
struct vi_info *vi;
struct sge *sge = &sc->sge;
struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
struct sge_ofld_rxq *ofld_rxq;
@ -3854,7 +3847,7 @@ setup_intr_handlers(struct adapter *sc)
rid++;
/* The second one is always the firmware event queue */
rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq, "evt");
rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sge->fwq, "evt");
if (rc != 0)
return (rc);
irq++;
@ -3864,29 +3857,37 @@ setup_intr_handlers(struct adapter *sc)
pi = sc->port[p];
for_each_vi(pi, v, vi) {
vi->first_intr = rid - 1;
if (vi->nnmrxq > 0) {
int n = max(vi->nrxq, vi->nnmrxq);
MPASS(vi->flags & INTR_RXQ);
rxq = &sge->rxq[vi->first_rxq];
#ifdef DEV_NETMAP
if (vi->flags & VI_NETMAP) {
for_each_nm_rxq(vi, q, nm_rxq) {
snprintf(s, sizeof(s), "%d-%d", p, q);
nm_rxq = &sge->nm_rxq[vi->first_nm_rxq];
#endif
for (q = 0; q < n; q++) {
snprintf(s, sizeof(s), "%x%c%x", p,
'a' + v, q);
if (q < vi->nrxq)
irq->rxq = rxq++;
#ifdef DEV_NETMAP
if (q < vi->nnmrxq)
irq->nm_rxq = nm_rxq++;
#endif
rc = t4_alloc_irq(sc, irq, rid,
t4_nm_intr, nm_rxq, s);
t4_vi_intr, irq, s);
if (rc != 0)
return (rc);
irq++;
rid++;
vi->nintr++;
}
continue;
}
#endif
if (vi->flags & INTR_RXQ) {
} else if (vi->flags & INTR_RXQ) {
for_each_rxq(vi, q, rxq) {
if (v == 0)
snprintf(s, sizeof(s), "%d.%d",
p, q);
else
snprintf(s, sizeof(s),
"%d(%d).%d", p, v, q);
snprintf(s, sizeof(s), "%x%c%x", p,
'a' + v, q);
rc = t4_alloc_irq(sc, irq, rid,
t4_intr, rxq, s);
if (rc != 0)
@ -3903,7 +3904,8 @@ setup_intr_handlers(struct adapter *sc)
#ifdef TCP_OFFLOAD
if (vi->flags & INTR_OFLD_RXQ) {
for_each_ofld_rxq(vi, q, ofld_rxq) {
snprintf(s, sizeof(s), "%d,%d", p, q);
snprintf(s, sizeof(s), "%x%c%x", p,
'A' + v, q);
rc = t4_alloc_irq(sc, irq, rid,
t4_intr, ofld_rxq, s);
if (rc != 0)
@ -4074,14 +4076,6 @@ vi_full_init(struct vi_info *vi)
if (rc != 0)
goto done; /* error message displayed already */
#ifdef DEV_NETMAP
/* Netmap VIs configure RSS when netmap is enabled. */
if (vi->flags & VI_NETMAP) {
vi->flags |= VI_INIT_DONE;
return (0);
}
#endif
/*
* Setup RSS for this VI. Save a copy of the RSS table for later use.
*/
@ -4206,10 +4200,6 @@ vi_full_uninit(struct vi_info *vi)
if (vi->flags & VI_INIT_DONE) {
/* Need to quiesce queues. */
#ifdef DEV_NETMAP
if (vi->flags & VI_NETMAP)
goto skip;
#endif
/* XXX: Only for the first VI? */
if (IS_MAIN_VI(vi))
@ -4237,10 +4227,8 @@ vi_full_uninit(struct vi_info *vi)
}
#endif
free(vi->rss, M_CXGBE);
free(vi->nm_rss, M_CXGBE);
}
#ifdef DEV_NETMAP
skip:
#endif
t4_teardown_vi_queues(vi);
vi->flags &= ~VI_INIT_DONE;
@ -4975,7 +4963,7 @@ vi_sysctls(struct vi_info *vi)
ctx = device_get_sysctl_ctx(vi->dev);
/*
* dev.[nv](cxgbe|cxl).X.
* dev.v?(cxgbe|cxl).X.
*/
oid = device_get_sysctl_tree(vi->dev);
children = SYSCTL_CHILDREN(oid);
@ -4991,12 +4979,11 @@ vi_sysctls(struct vi_info *vi)
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
&vi->first_txq, 0, "index of first tx queue");
if (vi->flags & VI_NETMAP)
return;
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq", CTLTYPE_INT |
CTLFLAG_RW, vi, 0, sysctl_noflowq, "IU",
"Reserve queue 0 for non-flowid packets");
if (IS_MAIN_VI(vi)) {
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq",
CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_noflowq, "IU",
"Reserve queue 0 for non-flowid packets");
}
#ifdef TCP_OFFLOAD
if (vi->nofldrxq != 0) {
@ -5014,6 +5001,20 @@ vi_sysctls(struct vi_info *vi)
"index of first TOE tx queue");
}
#endif
#ifdef DEV_NETMAP
if (vi->nnmrxq != 0) {
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD,
&vi->nnmrxq, 0, "# of netmap rx queues");
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD,
&vi->nnmtxq, 0, "# of netmap tx queues");
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq",
CTLFLAG_RD, &vi->first_nm_rxq, 0,
"index of first netmap rx queue");
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq",
CTLFLAG_RD, &vi->first_nm_txq, 0,
"index of first netmap tx queue");
}
#endif
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_tmr_idx, "I",
@ -8871,9 +8872,6 @@ t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
struct sge_txq *txq;
struct sge_wrq *wrq;
if (vi->flags & VI_NETMAP)
continue;
for_each_rxq(vi, i, rxq) {
#if defined(INET) || defined(INET6)
rxq->lro.lro_queued = 0;
@ -9187,6 +9185,9 @@ tweak_tunables(void)
#endif
}
if (t4_ntxq_vi < 1)
t4_ntxq_vi = min(nc, NTXQ_VI);
if (t4_nrxq10g < 1) {
#ifdef RSS
t4_nrxq10g = rss_getnumbuckets();
@ -9204,6 +9205,9 @@ tweak_tunables(void)
#endif
}
if (t4_nrxq_vi < 1)
t4_nrxq_vi = min(nc, NRXQ_VI);
#ifdef TCP_OFFLOAD
if (t4_nofldtxq10g < 1)
t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);
@ -9211,12 +9215,18 @@ tweak_tunables(void)
if (t4_nofldtxq1g < 1)
t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);
if (t4_nofldtxq_vi < 1)
t4_nofldtxq_vi = min(nc, NOFLDTXQ_VI);
if (t4_nofldrxq10g < 1)
t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);
if (t4_nofldrxq1g < 1)
t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);
if (t4_nofldrxq_vi < 1)
t4_nofldrxq_vi = min(nc, NOFLDRXQ_VI);
if (t4_toecaps_allowed == -1)
t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
@ -9242,17 +9252,11 @@ tweak_tunables(void)
#endif
#ifdef DEV_NETMAP
if (t4_nnmtxq10g < 1)
t4_nnmtxq10g = min(nc, NNMTXQ_10G);
if (t4_nnmtxq_vi < 1)
t4_nnmtxq_vi = min(nc, NNMTXQ_VI);
if (t4_nnmtxq1g < 1)
t4_nnmtxq1g = min(nc, NNMTXQ_1G);
if (t4_nnmrxq10g < 1)
t4_nnmrxq10g = min(nc, NNMRXQ_10G);
if (t4_nnmrxq1g < 1)
t4_nnmrxq1g = min(nc, NNMRXQ_1G);
if (t4_nnmrxq_vi < 1)
t4_nnmrxq_vi = min(nc, NNMRXQ_VI);
#endif
if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)

sys/dev/cxgbe/t4_netmap.c

@ -85,198 +85,6 @@ SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_holdoff_tmr_idx, CTLFLAG_RWTUN,
static int nm_cong_drop = 1;
TUNABLE_INT("hw.cxgbe.nm_cong_drop", &nm_cong_drop);
/* netmap ifnet routines */
static void cxgbe_nm_init(void *);
static int cxgbe_nm_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_nm_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_nm_qflush(struct ifnet *);
static int cxgbe_nm_init_synchronized(struct vi_info *);
static int cxgbe_nm_uninit_synchronized(struct vi_info *);
/* T4 netmap VI (ncxgbe) interface */
static int ncxgbe_probe(device_t);
static int ncxgbe_attach(device_t);
static int ncxgbe_detach(device_t);
static device_method_t ncxgbe_methods[] = {
DEVMETHOD(device_probe, ncxgbe_probe),
DEVMETHOD(device_attach, ncxgbe_attach),
DEVMETHOD(device_detach, ncxgbe_detach),
{ 0, 0 }
};
static driver_t ncxgbe_driver = {
"ncxgbe",
ncxgbe_methods,
sizeof(struct vi_info)
};
/* T5 netmap VI (ncxl) interface */
static driver_t ncxl_driver = {
"ncxl",
ncxgbe_methods,
sizeof(struct vi_info)
};
static void
cxgbe_nm_init(void *arg)
{
struct vi_info *vi = arg;
struct adapter *sc = vi->pi->adapter;
if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4nminit") != 0)
return;
cxgbe_nm_init_synchronized(vi);
end_synchronized_op(sc, 0);
return;
}
static int
cxgbe_nm_init_synchronized(struct vi_info *vi)
{
struct adapter *sc = vi->pi->adapter;
struct ifnet *ifp = vi->ifp;
int rc = 0;
ASSERT_SYNCHRONIZED_OP(sc);
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
return (0); /* already running */
if (!(sc->flags & FULL_INIT_DONE) &&
((rc = adapter_full_init(sc)) != 0))
return (rc); /* error message displayed already */
if (!(vi->flags & VI_INIT_DONE) &&
((rc = vi_full_init(vi)) != 0))
return (rc); /* error message displayed already */
rc = update_mac_settings(ifp, XGMAC_ALL);
if (rc)
return (rc); /* error message displayed already */
ifp->if_drv_flags |= IFF_DRV_RUNNING;
callout_reset(&vi->tick, hz, vi_tick, vi);
return (rc);
}
static int
cxgbe_nm_uninit_synchronized(struct vi_info *vi)
{
#ifdef INVARIANTS
struct adapter *sc = vi->pi->adapter;
#endif
struct ifnet *ifp = vi->ifp;
ASSERT_SYNCHRONIZED_OP(sc);
callout_stop(&vi->tick);
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
return (0);
}
static int
cxgbe_nm_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
int rc = 0, mtu, flags;
struct vi_info *vi = ifp->if_softc;
struct adapter *sc = vi->pi->adapter;
struct ifreq *ifr = (struct ifreq *)data;
uint32_t mask;
MPASS(vi->ifp == ifp);
switch (cmd) {
case SIOCSIFMTU:
mtu = ifr->ifr_mtu;
if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
return (EINVAL);
rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4nmtu");
if (rc)
return (rc);
ifp->if_mtu = mtu;
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
rc = update_mac_settings(ifp, XGMAC_MTU);
end_synchronized_op(sc, 0);
break;
case SIOCSIFFLAGS:
rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4nflg");
if (rc)
return (rc);
if (ifp->if_flags & IFF_UP) {
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
flags = vi->if_flags;
if ((ifp->if_flags ^ flags) &
(IFF_PROMISC | IFF_ALLMULTI)) {
rc = update_mac_settings(ifp,
XGMAC_PROMISC | XGMAC_ALLMULTI);
}
} else
rc = cxgbe_nm_init_synchronized(vi);
vi->if_flags = ifp->if_flags;
} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
rc = cxgbe_nm_uninit_synchronized(vi);
end_synchronized_op(sc, 0);
break;
case SIOCADDMULTI:
case SIOCDELMULTI: /* these two are called with a mutex held :-( */
rc = begin_synchronized_op(sc, vi, HOLD_LOCK, "t4nmulti");
if (rc)
return (rc);
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
rc = update_mac_settings(ifp, XGMAC_MCADDRS);
end_synchronized_op(sc, LOCK_HELD);
break;
case SIOCSIFCAP:
mask = ifr->ifr_reqcap ^ ifp->if_capenable;
if (mask & IFCAP_TXCSUM) {
ifp->if_capenable ^= IFCAP_TXCSUM;
ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
}
if (mask & IFCAP_TXCSUM_IPV6) {
ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
}
if (mask & IFCAP_RXCSUM)
ifp->if_capenable ^= IFCAP_RXCSUM;
if (mask & IFCAP_RXCSUM_IPV6)
ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
break;
case SIOCSIFMEDIA:
case SIOCGIFMEDIA:
ifmedia_ioctl(ifp, ifr, &vi->media, cmd);
break;
default:
rc = ether_ioctl(ifp, cmd, data);
}
return (rc);
}
static int
cxgbe_nm_transmit(struct ifnet *ifp, struct mbuf *m)
{
m_freem(m);
return (0);
}
static void
cxgbe_nm_qflush(struct ifnet *ifp)
{
return;
}
static int
alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int cong)
{
@ -512,7 +320,6 @@ cxgbe_netmap_on(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp,
struct sge_nm_txq *nm_txq;
int rc, i, j, hwidx;
struct hw_buf_info *hwb;
uint16_t *rss;
ASSERT_SYNCHRONIZED_OP(sc);
@ -536,6 +343,8 @@ cxgbe_netmap_on(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp,
nm_set_native_flags(na);
for_each_nm_rxq(vi, i, nm_rxq) {
struct irq *irq = &sc->irq[vi->first_intr + i];
alloc_nm_rxq_hwq(vi, nm_rxq, tnl_cong(vi->pi, nm_cong_drop));
nm_rxq->fl_hwidx = hwidx;
slot = netmap_reset(na, NR_RX, i, 0);
@ -557,6 +366,8 @@ cxgbe_netmap_on(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp,
wmb();
t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
nm_rxq->fl_db_val | V_PIDX(j));
atomic_cmpset_int(&irq->nm_state, NM_OFF, NM_ON);
}
for_each_nm_txq(vi, i, nm_txq) {
@ -565,24 +376,21 @@ cxgbe_netmap_on(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp,
MPASS(slot != NULL); /* XXXNM: error check, not assert */
}
rss = malloc(vi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO |
M_WAITOK);
if (vi->nm_rss == NULL) {
vi->nm_rss = malloc(vi->rss_size * sizeof(uint16_t), M_CXGBE,
M_ZERO | M_WAITOK);
}
for (i = 0; i < vi->rss_size;) {
for_each_nm_rxq(vi, j, nm_rxq) {
rss[i++] = nm_rxq->iq_abs_id;
vi->nm_rss[i++] = nm_rxq->iq_abs_id;
if (i == vi->rss_size)
break;
}
}
rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size,
rss, vi->rss_size);
vi->nm_rss, vi->rss_size);
if (rc != 0)
if_printf(ifp, "netmap rss_config failed: %d\n", rc);
free(rss, M_CXGBE);
rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true);
if (rc != 0)
if_printf(ifp, "netmap enable_vi failed: %d\n", rc);
return (rc);
}
@ -600,9 +408,10 @@ cxgbe_netmap_off(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp,
if ((vi->flags & VI_INIT_DONE) == 0)
return (0);
rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false);
rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size,
vi->rss, vi->rss_size);
if (rc != 0)
if_printf(ifp, "netmap disable_vi failed: %d\n", rc);
if_printf(ifp, "failed to restore RSS config: %d\n", rc);
nm_clear_native_flags(na);
for_each_nm_txq(vi, i, nm_txq) {
@ -619,6 +428,11 @@ cxgbe_netmap_off(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp,
free_nm_txq_hwq(vi, nm_txq);
}
for_each_nm_rxq(vi, i, nm_rxq) {
struct irq *irq = &sc->irq[vi->first_intr + i];
while (!atomic_cmpset_int(&irq->nm_state, NM_ON, NM_OFF))
pause("nmst", 1);
free_nm_rxq_hwq(vi, nm_rxq);
}
@ -890,7 +704,7 @@ cxgbe_netmap_txsync(struct netmap_kring *kring, int flags)
struct ifnet *ifp = na->ifp;
struct vi_info *vi = ifp->if_softc;
struct adapter *sc = vi->pi->adapter;
struct sge_nm_txq *nm_txq = &sc->sge.nm_txq[vi->first_txq + kring->ring_id];
struct sge_nm_txq *nm_txq = &sc->sge.nm_txq[vi->first_nm_txq + kring->ring_id];
const u_int head = kring->rhead;
u_int reclaimed = 0;
int n, d, npkt_remaining, ndesc_remaining, txcsum;
@ -955,7 +769,7 @@ cxgbe_netmap_rxsync(struct netmap_kring *kring, int flags)
struct ifnet *ifp = na->ifp;
struct vi_info *vi = ifp->if_softc;
struct adapter *sc = vi->pi->adapter;
struct sge_nm_rxq *nm_rxq = &sc->sge.nm_rxq[vi->first_rxq + kring->ring_id];
struct sge_nm_rxq *nm_rxq = &sc->sge.nm_rxq[vi->first_nm_rxq + kring->ring_id];
u_int const head = kring->rhead;
u_int n;
int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
@ -1021,93 +835,22 @@ cxgbe_netmap_rxsync(struct netmap_kring *kring, int flags)
return (0);
}
static int
ncxgbe_probe(device_t dev)
void
cxgbe_nm_attach(struct vi_info *vi)
{
char buf[128];
struct vi_info *vi = device_get_softc(dev);
snprintf(buf, sizeof(buf), "port %d netmap vi", vi->pi->port_id);
device_set_desc_copy(dev, buf);
return (BUS_PROBE_DEFAULT);
}
static int
ncxgbe_attach(device_t dev)
{
struct vi_info *vi;
struct port_info *pi;
struct adapter *sc;
struct netmap_adapter na;
struct ifnet *ifp;
int rc;
vi = device_get_softc(dev);
MPASS(vi->nnmrxq > 0);
MPASS(vi->ifp != NULL);
pi = vi->pi;
sc = pi->adapter;
/*
* Allocate a virtual interface exclusively for netmap use. Give it the
* MAC address normally reserved for use by a TOE interface. (The TOE
* driver on FreeBSD doesn't use it).
*/
rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1,
vi->hw_addr, &vi->rss_size, FW_VI_FUNC_OFLD, 0);
if (rc < 0) {
device_printf(dev, "unable to allocate netmap virtual "
"interface for port %d: %d\n", pi->port_id, -rc);
return (-rc);
}
vi->viid = rc;
vi->xact_addr_filt = -1;
callout_init(&vi->tick, 1);
ifp = if_alloc(IFT_ETHER);
if (ifp == NULL) {
device_printf(dev, "Cannot allocate netmap ifnet\n");
return (ENOMEM);
}
vi->ifp = ifp;
ifp->if_softc = vi;
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_init = cxgbe_nm_init;
ifp->if_ioctl = cxgbe_nm_ioctl;
ifp->if_transmit = cxgbe_nm_transmit;
ifp->if_qflush = cxgbe_nm_qflush;
ifp->if_get_counter = cxgbe_get_counter;
/*
* netmap(4) says "netmap does not use features such as checksum
* offloading, TCP segmentation offloading, encryption, VLAN
* encapsulation/decapsulation, etc."
*
* By default we comply with the statement above. But we do declare the
* ifnet capable of L3/L4 checksumming so that a user can override
* netmap and have the hardware do the L3/L4 checksums.
*/
ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_JUMBO_MTU |
IFCAP_HWCSUM_IPV6;
ifp->if_capenable = 0;
ifp->if_hwassist = 0;
/* vi->media has already been setup by the caller */
ether_ifattach(ifp, vi->hw_addr);
device_printf(dev, "%d txq, %d rxq (netmap)\n", vi->ntxq, vi->nrxq);
vi_sysctls(vi);
/*
* Register with netmap in the kernel.
*/
bzero(&na, sizeof(na));
na.ifp = ifp;
na.ifp = vi->ifp;
na.na_flags = NAF_BDG_MAYSLEEP;
/* Netmap doesn't know about the space reserved for the status page. */
@ -1123,37 +866,19 @@ ncxgbe_attach(device_t dev)
na.nm_txsync = cxgbe_netmap_txsync;
na.nm_rxsync = cxgbe_netmap_rxsync;
na.nm_register = cxgbe_netmap_reg;
na.num_tx_rings = vi->ntxq;
na.num_rx_rings = vi->nrxq;
na.num_tx_rings = vi->nnmtxq;
na.num_rx_rings = vi->nnmrxq;
netmap_attach(&na); /* This adds IFCAP_NETMAP to if_capabilities */
return (0);
}
static int
ncxgbe_detach(device_t dev)
void
cxgbe_nm_detach(struct vi_info *vi)
{
struct vi_info *vi;
struct adapter *sc;
vi = device_get_softc(dev);
sc = vi->pi->adapter;
doom_vi(sc, vi);
MPASS(vi->nnmrxq > 0);
MPASS(vi->ifp != NULL);
netmap_detach(vi->ifp);
ether_ifdetach(vi->ifp);
cxgbe_nm_uninit_synchronized(vi);
callout_drain(&vi->tick);
vi_full_uninit(vi);
ifmedia_removeall(&vi->media);
if_free(vi->ifp);
vi->ifp = NULL;
t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
end_synchronized_op(sc, 0);
return (0);
}
static void
@ -1283,12 +1008,4 @@ t4_nm_intr(void *arg)
V_INGRESSQID((u32)nm_rxq->iq_cntxt_id) |
V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx)));
}
static devclass_t ncxgbe_devclass, ncxl_devclass;
DRIVER_MODULE(ncxgbe, cxgbe, ncxgbe_driver, ncxgbe_devclass, 0, 0);
MODULE_VERSION(ncxgbe, 1);
DRIVER_MODULE(ncxl, cxl, ncxl_driver, ncxl_devclass, 0, 0);
MODULE_VERSION(ncxl, 1);
#endif

sys/dev/cxgbe/t4_sge.c

@ -813,8 +813,6 @@ vi_intr_iq(struct vi_info *vi, int idx)
if (sc->intr_count == 1)
return (&sc->sge.fwq);
KASSERT(!(vi->flags & VI_NETMAP),
("%s: called on netmap VI", __func__));
nintr = vi->nintr;
KASSERT(nintr != 0,
("%s: vi %p has no exclusive interrupts, total interrupts = %d",
@ -881,6 +879,7 @@ t4_setup_vi_queues(struct vi_info *vi)
struct sge_wrq *ofld_txq;
#endif
#ifdef DEV_NETMAP
int saved_idx;
struct sge_nm_rxq *nm_rxq;
struct sge_nm_txq *nm_txq;
#endif
@ -896,13 +895,18 @@ t4_setup_vi_queues(struct vi_info *vi)
intr_idx = first_vector(vi);
#ifdef DEV_NETMAP
if (vi->flags & VI_NETMAP) {
saved_idx = intr_idx;
if (ifp->if_capabilities & IFCAP_NETMAP) {
/* netmap is supported with direct interrupts only. */
MPASS(vi->flags & INTR_RXQ);
/*
* We don't have buffers to back the netmap rx queues
* right now so we create the queues in a way that
* doesn't set off any congestion signal in the chip.
*/
oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "rxq",
oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "nm_rxq",
CTLFLAG_RD, NULL, "rx queues");
for_each_nm_rxq(vi, i, nm_rxq) {
rc = alloc_nm_rxq(vi, nm_rxq, intr_idx, i, oid);
@ -911,16 +915,18 @@ t4_setup_vi_queues(struct vi_info *vi)
intr_idx++;
}
oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "txq",
oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "nm_txq",
CTLFLAG_RD, NULL, "tx queues");
for_each_nm_txq(vi, i, nm_txq) {
iqid = vi->first_rxq + (i % vi->nrxq);
iqid = vi->first_nm_rxq + (i % vi->nnmrxq);
rc = alloc_nm_txq(vi, nm_txq, iqid, i, oid);
if (rc != 0)
goto done;
}
goto done;
}
/* Normal rx queues and netmap rx queues share the same interrupts. */
intr_idx = saved_idx;
#endif
/*
@ -949,6 +955,10 @@ t4_setup_vi_queues(struct vi_info *vi)
intr_idx++;
}
}
#ifdef DEV_NETMAP
if (ifp->if_capabilities & IFCAP_NETMAP)
intr_idx = saved_idx + max(vi->nrxq, vi->nnmrxq);
#endif
#ifdef TCP_OFFLOAD
maxp = mtu_to_max_payload(sc, mtu, 1);
if (vi->flags & INTR_OFLD_RXQ) {
@ -1101,7 +1111,7 @@ t4_teardown_vi_queues(struct vi_info *vi)
}
#ifdef DEV_NETMAP
if (vi->flags & VI_NETMAP) {
if (vi->ifp->if_capabilities & IFCAP_NETMAP) {
for_each_nm_txq(vi, i, nm_txq) {
free_nm_txq(vi, nm_txq);
}
@ -1109,7 +1119,6 @@ t4_teardown_vi_queues(struct vi_info *vi)
for_each_nm_rxq(vi, i, nm_rxq) {
free_nm_rxq(vi, nm_rxq);
}
return (0);
}
#endif
@ -1213,6 +1222,21 @@ t4_intr(void *arg)
}
}
void
t4_vi_intr(void *arg)
{
struct irq *irq = arg;
#ifdef DEV_NETMAP
if (atomic_cmpset_int(&irq->nm_state, NM_ON, NM_BUSY)) {
t4_nm_intr(irq->nm_rxq);
atomic_cmpset_int(&irq->nm_state, NM_BUSY, NM_ON);
}
#endif
if (irq->rxq != NULL)
t4_intr(irq->rxq);
}
/*
* Deals with anything and everything on the given ingress queue.
*/