cxgbe(4): Add a knob to split the rx queues for a netmap-enabled

interface into two groups.  Filters with a hashmask can then be used to
match traffic and steer it to a particular group of queues.

hw.cxgbe.nm_split_rss

Sponsored by:	Chelsio Communications
This commit is contained in:
Navdeep Parhar 2018-10-25 22:55:18 +00:00
parent 4f77f48884
commit f02c9e69cb
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=339749

View File

@ -101,6 +101,15 @@ int lazy_tx_credit_flush = 1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, lazy_tx_credit_flush, CTLFLAG_RWTUN,
&lazy_tx_credit_flush, 0, "lazy credit flush for netmap tx queues.");
/*
 * Split the netmap rx queues into two groups that populate separate halves of
 * the RSS indirection table. This allows filters with hashmask to steer to a
 * particular group of queues.
 *
 * Loader/runtime tunable hw.cxgbe.nm_split_rss: 0 (default) keeps a single
 * group; any non-zero value requests the split.  The split only takes effect
 * when the interface has more than one netmap rx queue — with a single queue
 * the driver falls back to the unsplit layout.
 */
static int nm_split_rss = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_split_rss, CTLFLAG_RWTUN,
&nm_split_rss, 0, "Split the netmap rx queues into two groups.");
static int
alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int cong)
{
@ -333,7 +342,7 @@ cxgbe_netmap_on(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp,
struct netmap_kring *kring;
struct sge_nm_rxq *nm_rxq;
struct sge_nm_txq *nm_txq;
int rc, i, j, hwidx;
int rc, i, j, hwidx, defq, nrssq;
struct hw_buf_info *hwb;
ASSERT_SYNCHRONIZED_OP(sc);
@ -403,11 +412,61 @@ cxgbe_netmap_on(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp,
vi->nm_rss = malloc(vi->rss_size * sizeof(uint16_t), M_CXGBE,
M_ZERO | M_WAITOK);
}
for (i = 0; i < vi->rss_size;) {
for_each_nm_rxq(vi, j, nm_rxq) {
vi->nm_rss[i++] = nm_rxq->iq_abs_id;
if (i == vi->rss_size)
break;
MPASS(vi->nnmrxq > 0);
if (nm_split_rss == 0 || vi->nnmrxq == 1) {
for (i = 0; i < vi->rss_size;) {
for_each_nm_rxq(vi, j, nm_rxq) {
vi->nm_rss[i++] = nm_rxq->iq_abs_id;
if (i == vi->rss_size)
break;
}
}
defq = vi->nm_rss[0];
} else {
/* We have multiple queues and we want to split the table. */
MPASS(nm_split_rss != 0);
MPASS(vi->nnmrxq > 1);
nm_rxq = &sc->sge.nm_rxq[vi->first_nm_rxq];
nrssq = vi->nnmrxq;
if (vi->nnmrxq & 1) {
/*
* Odd number of queues. The first rxq is designated the
* default queue, the rest are split evenly.
*/
defq = nm_rxq->iq_abs_id;
nm_rxq++;
nrssq--;
} else {
/*
* Even number of queues split into two halves. The
* first rxq in one of the halves is designated the
* default queue.
*/
#if 1
/* First rxq in the first half. */
defq = nm_rxq->iq_abs_id;
#else
/* First rxq in the second half. */
defq = nm_rxq[vi->nnmrxq / 2].iq_abs_id;
#endif
}
i = 0;
while (i < vi->rss_size / 2) {
for (j = 0; j < nrssq / 2; j++) {
vi->nm_rss[i++] = nm_rxq[j].iq_abs_id;
if (i == vi->rss_size / 2)
break;
}
}
while (i < vi->rss_size) {
for (j = nrssq / 2; j < nrssq; j++) {
vi->nm_rss[i++] = nm_rxq[j].iq_abs_id;
if (i == vi->rss_size)
break;
}
}
}
rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size,
@ -415,8 +474,7 @@ cxgbe_netmap_on(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp,
if (rc != 0)
if_printf(ifp, "netmap rss_config failed: %d\n", rc);
rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, vi->hashen,
vi->nm_rss[0], 0, 0);
rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, vi->hashen, defq, 0, 0);
if (rc != 0)
if_printf(ifp, "netmap rss hash/defaultq config failed: %d\n", rc);