cxgbe(4): Implement ifnet callbacks that deal with send tags.

An etid (ethoffload tid) is allocated for a send tag and it acquires a
reference on the traffic class that matches the send parameters
associated with the tag.

Sponsored by:	Chelsio Communications
This commit is contained in:
Navdeep Parhar 2018-05-18 06:09:15 +00:00
parent 6e36248f79
commit 67e071128d
4 changed files with 254 additions and 4 deletions

View File

@ -1236,6 +1236,15 @@ int t4_free_tx_sched(struct adapter *);
void t4_update_tx_sched(struct adapter *);
int t4_reserve_cl_rl_kbps(struct adapter *, int, u_int, int *);
void t4_release_cl_rl_kbps(struct adapter *, int, int);
#ifdef RATELIMIT
void t4_init_etid_table(struct adapter *);
void t4_free_etid_table(struct adapter *);
int cxgbe_snd_tag_alloc(struct ifnet *, union if_snd_tag_alloc_params *,
struct m_snd_tag **);
int cxgbe_snd_tag_modify(struct m_snd_tag *, union if_snd_tag_modify_params *);
int cxgbe_snd_tag_query(struct m_snd_tag *, union if_snd_tag_query_params *);
void cxgbe_snd_tag_free(struct m_snd_tag *);
#endif
/* t4_filter.c */
int get_filter_mode(struct adapter *, uint32_t *);

View File

@ -79,6 +79,38 @@ union aopen_entry {
union aopen_entry *next;
};
/*
 * Driver-private state backing a rate-limit send tag.  Created by
 * cxgbe_snd_tag_alloc() and destroyed by cxgbe_snd_tag_free().
 */
struct cxgbe_snd_tag {
struct m_snd_tag com;	/* generic send tag; recovered via mst_to_cst() */
struct adapter *adapter;	/* adapter this tag belongs to */
u_int flags;	/* NOTE(review): flag values not visible in this chunk */
struct mtx lock;	/* "cst_lock"; presumably protects the tx state below */
int port_id;	/* pi->port_id of the port the tag was allocated on */
int etid;	/* ethoffload tid from alloc_etid(), or -1 */
struct sge_wrq *eo_txq;	/* tx queue; presumably set once the flowid is known */
uint16_t iqid;	/* iq id; presumably set once the flowid is known */
int8_t schedcl;	/* traffic class from t4_reserve_cl_rl_kbps(), or -1 */
uint64_t max_rate; /* in bytes/s */
int8_t next_credits; /* need these many tx credits next */
uint8_t next_nsegs; /* next WR will have these many GL segs total */
uint8_t next_msegs; /* max segs for a single mbuf in next chain */
uint8_t tx_total; /* total tx WR credits (in 16B units) */
uint8_t tx_credits; /* tx WR credits (in 16B units) available */
uint8_t tx_nocompl; /* tx WR credits since last compl request */
uint8_t ncompl; /* # of completions outstanding. */
};

/* Map the generic send tag back to the cxgbe tag that embeds it. */
static inline struct cxgbe_snd_tag *
mst_to_cst(struct m_snd_tag *t)
{
return (__containerof(t, struct cxgbe_snd_tag, com));
}

/*
 * One slot in the etid table.  A free slot holds the next free-list
 * link; an allocated slot holds the owning tag (the two overlap).
 */
union etid_entry {
struct cxgbe_snd_tag *cst;
union etid_entry *next;
};
/*
* Holds the size, base address, free list start, etc of the TID, server TID,
* and active-open TID tables. The tables themselves are allocated dynamically.
@ -98,8 +130,8 @@ struct tid_info {
struct mtx atid_lock __aligned(CACHE_LINE_SIZE);
union aopen_entry *atid_tab;
u_int natids;
union aopen_entry *afree;
u_int natids;
u_int atids_in_use;
struct mtx ftid_lock __aligned(CACHE_LINE_SIZE);
@ -115,9 +147,11 @@ struct tid_info {
/* ntids, tids_in_use */
struct mtx etid_lock __aligned(CACHE_LINE_SIZE);
struct etid_entry *etid_tab;
union etid_entry *etid_tab;
union etid_entry *efree;
u_int netids;
u_int etid_base;
u_int etids_in_use;
};
struct t4_range {

View File

@ -1105,6 +1105,9 @@ t4_attach(device_t dev)
t4_init_l2t(sc, M_WAITOK);
t4_init_tx_sched(sc);
#ifdef RATELIMIT
t4_init_etid_table(sc);
#endif
/*
* Second pass over the ports. This time we know the number of rx and
@ -1375,6 +1378,9 @@ t4_detach_common(device_t dev)
if (sc->l2t)
t4_free_l2t(sc->l2t);
#ifdef RATELIMIT
t4_free_etid_table(sc);
#endif
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
free(sc->sge.ofld_txq, M_CXGBE);
@ -1486,6 +1492,12 @@ cxgbe_vi_attach(device_t dev, struct vi_info *vi)
ifp->if_transmit = cxgbe_transmit;
ifp->if_qflush = cxgbe_qflush;
ifp->if_get_counter = cxgbe_get_counter;
#ifdef RATELIMIT
ifp->if_snd_tag_alloc = cxgbe_snd_tag_alloc;
ifp->if_snd_tag_modify = cxgbe_snd_tag_modify;
ifp->if_snd_tag_query = cxgbe_snd_tag_query;
ifp->if_snd_tag_free = cxgbe_snd_tag_free;
#endif
ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
@ -7928,8 +7940,8 @@ sysctl_tids(SYSCTL_HANDLER_ARGS)
}
if (t->netids) {
sbuf_printf(sb, "ETID range: %u-%u\n", t->etid_base,
t->etid_base + t->netids - 1);
sbuf_printf(sb, "ETID range: %u-%u, in use: %u\n", t->etid_base,
t->etid_base + t->netids - 1, t->etids_in_use);
}
sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",

View File

@ -30,6 +30,7 @@ __FBSDID("$FreeBSD$");
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ratelimit.h"
#include <sys/types.h>
#include <sys/malloc.h>
@ -463,3 +464,197 @@ t4_release_cl_rl_kbps(struct adapter *sc, int port_id, int tc_idx)
tc->refcount--;
mtx_unlock(&sc->tc_lock);
}
#ifdef RATELIMIT
/*
 * Set up the ethoffload tid table: the etid lock, the entry array, and a
 * singly linked free list threaded through that array.  No-op unless the
 * adapter supports ethoffload.
 */
void
t4_init_etid_table(struct adapter *sc)
{
	struct tid_info *t;
	int i;

	if (!is_ethoffload(sc))
		return;

	t = &sc->tids;
	MPASS(t->netids > 0);

	mtx_init(&t->etid_lock, "etid lock", NULL, MTX_DEF);
	t->etid_tab = malloc(sizeof(*t->etid_tab) * t->netids, M_CXGBE,
	    M_ZERO | M_WAITOK);
	t->efree = t->etid_tab;
	t->etids_in_use = 0;

	/* Chain each entry to its successor; the last one ends the list. */
	for (i = 0; i < t->netids - 1; i++)
		t->etid_tab[i].next = &t->etid_tab[i + 1];
	t->etid_tab[t->netids - 1].next = NULL;
}
/*
 * Tear down the ethoffload tid table created by t4_init_etid_table().
 * Safe to call even if initialization never ran (the mutex is only
 * destroyed when it was actually initialized).
 */
void
t4_free_etid_table(struct adapter *sc)
{
	struct tid_info *t = &sc->tids;

	if (!is_ethoffload(sc))
		return;

	MPASS(t->netids > 0);
	free(t->etid_tab, M_CXGBE);
	t->etid_tab = NULL;

	if (mtx_initialized(&t->etid_lock))
		mtx_destroy(&t->etid_lock);
}
/* etid services */
static int alloc_etid(struct adapter *, struct cxgbe_snd_tag *);
static void free_etid(struct adapter *, int);
static int
alloc_etid(struct adapter *sc, struct cxgbe_snd_tag *cst)
{
struct tid_info *t = &sc->tids;
int etid = -1;
mtx_lock(&t->etid_lock);
if (t->efree) {
union etid_entry *p = t->efree;
etid = p - t->etid_tab + t->etid_base;
t->efree = p->next;
p->cst = cst;
t->etids_in_use++;
}
mtx_unlock(&t->etid_lock);
return (etid);
}
#ifdef notyet
/*
 * Return the send tag bound to etid.  No range check and no locking:
 * the caller must pass an etid previously handed out by alloc_etid().
 */
struct cxgbe_snd_tag *
lookup_etid(struct adapter *sc, int etid)
{
struct tid_info *t = &sc->tids;

return (t->etid_tab[etid - t->etid_base].cst);
}
#endif
/*
 * Push an etid back onto the free list.  Writing the free-list link
 * clobbers the entry's cst pointer (they share a union).
 */
static void
free_etid(struct adapter *sc, int etid)
{
	struct tid_info *t = &sc->tids;
	union etid_entry *e = &t->etid_tab[etid - t->etid_base];

	mtx_lock(&t->etid_lock);
	e->next = t->efree;
	t->efree = e;
	t->etids_in_use--;
	mtx_unlock(&t->etid_lock);
}
/*
 * if_snd_tag_alloc method.  Allocates and initializes a rate-limit send
 * tag: reserves a traffic class that matches the requested rate, grabs
 * an etid, and fills in the tag.  On success *pt points at the embedded
 * m_snd_tag.  Tx/rx queues are not chosen here; that happens later,
 * once the connection's flowid is available.
 */
int
cxgbe_snd_tag_alloc(struct ifnet *ifp, union if_snd_tag_alloc_params *params,
    struct m_snd_tag **pt)
{
	struct vi_info *vi = ifp->if_softc;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct cxgbe_snd_tag *cst;
	int rc, schedcl;

	if (params->hdr.type != IF_SND_TAG_TYPE_RATE_LIMIT)
		return (ENOTSUP);

	/* max_rate is in bytes/s; the scheduler interface wants kbps. */
	rc = t4_reserve_cl_rl_kbps(sc, pi->port_id,
	    (params->rate_limit.max_rate * 8ULL / 1000), &schedcl);
	if (rc != 0)
		return (rc);
	MPASS(schedcl >= 0 && schedcl < sc->chip_params->nsched_cls);

	cst = malloc(sizeof(*cst), M_CXGBE, M_ZERO | M_NOWAIT);
	if (cst == NULL) {
		t4_release_cl_rl_kbps(sc, pi->port_id, schedcl);
		return (ENOMEM);
	}

	cst->etid = alloc_etid(sc, cst);
	if (cst->etid < 0) {
		free(cst, M_CXGBE);
		t4_release_cl_rl_kbps(sc, pi->port_id, schedcl);
		return (ENOMEM);
	}

	mtx_init(&cst->lock, "cst_lock", NULL, MTX_DEF);
	cst->com.ifp = ifp;
	cst->adapter = sc;
	cst->port_id = pi->port_id;
	cst->schedcl = schedcl;
	cst->max_rate = params->rate_limit.max_rate;
	cst->next_credits = -1;
	cst->tx_credits = sc->params.ofldq_wr_cred;
	cst->tx_total = cst->tx_credits;

	/*
	 * Queues will be selected later when the connection flowid is
	 * available.
	 */

	*pt = &cst->com;
	return (0);
}
/*
 * if_snd_tag_modify method.  Updates the tag's rate limit; the ifp does
 * not change.  A class for the new rate is reserved before the old one
 * is released, so a failure leaves the tag untouched.
 */
int
cxgbe_snd_tag_modify(struct m_snd_tag *mst,
    union if_snd_tag_modify_params *params)
{
	struct cxgbe_snd_tag *cst = mst_to_cst(mst);
	struct adapter *sc = cst->adapter;
	int rc, newcl;

	/* XXX: is schedcl -1 ok here? */
	MPASS(cst->schedcl >= 0 && cst->schedcl < sc->chip_params->nsched_cls);

	/* max_rate is in bytes/s; the scheduler interface wants kbps. */
	rc = t4_reserve_cl_rl_kbps(sc, cst->port_id,
	    (params->rate_limit.max_rate * 8ULL / 1000), &newcl);
	if (rc != 0)
		return (rc);
	MPASS(newcl >= 0 && newcl < sc->chip_params->nsched_cls);
	t4_release_cl_rl_kbps(sc, cst->port_id, cst->schedcl);

	cst->schedcl = newcl;
	cst->max_rate = params->rate_limit.max_rate;

	return (0);
}
/*
 * if_snd_tag_query method.  Reports the tag's current max rate and how
 * many of its tx credits are consumed, scaled to the generic
 * [0, IF_SND_QUEUE_LEVEL_MAX] queue-level range.
 */
int
cxgbe_snd_tag_query(struct m_snd_tag *mst,
    union if_snd_tag_query_params *params)
{
	struct cxgbe_snd_tag *cst = mst_to_cst(mst);

	params->rate_limit.max_rate = cst->max_rate;

	/* Credits in use, scaled by (max level / total credits). */
	params->rate_limit.queue_level = (cst->tx_total - cst->tx_credits) *
	    (IF_SND_QUEUE_LEVEL_MAX / cst->tx_total);

	return (0);
}
/*
 * if_snd_tag_free method.  Undoes cxgbe_snd_tag_alloc(): returns the
 * etid, releases the traffic class, destroys the mutex, and frees the
 * tag.  Each step is guarded so a partially constructed tag is handled.
 */
void
cxgbe_snd_tag_free(struct m_snd_tag *mst)
{
struct cxgbe_snd_tag *cst = mst_to_cst(mst);
struct adapter *sc = cst->adapter;

if (cst->etid >= 0)
free_etid(sc, cst->etid);
if (cst->schedcl != -1)
t4_release_cl_rl_kbps(sc, cst->port_id, cst->schedcl);
if (mtx_initialized(&cst->lock))
mtx_destroy(&cst->lock);
free(cst, M_CXGBE);
}
#endif