cxgbe(4): Move all control queues to the adapter.
There used to be one control queue per adapter (the mgmtq) that was
initialized during adapter init, and one per port that was initialized
later during port init.  This change moves all the control queues (one
per port/channel) to the adapter so that they are initialized during
adapter init and are available before any port is up.  This allows the
driver to issue ctrlq work requests over any channel without having to
bring up any port.

MFH:		2 weeks
Sponsored by:	Chelsio Communications
commit 37310a98a8
parent a079a34fd5
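To illustrate the new model before the diff: with sc->sge.ctrlq[] populated at adapter init, a work request can be placed on the control queue of any channel. The sketch below mirrors the set_tcb_field() hunk in this diff; the helper name send_ctrlq_tcb_update() and the chan parameter are hypothetical, and INIT_TP_WR_MIT_CPL() is assumed to be the driver's usual CPL-header initializer.

/*
 * Hypothetical sketch, not part of this commit: issue a CPL_SET_TCB_FIELD
 * work request on the control queue of channel "chan".  After this change,
 * sc->sge.ctrlq[] is allocated during adapter init, so this works before
 * any port has been brought up.
 */
static int
send_ctrlq_tcb_update(struct adapter *sc, u_int chan, u_int tid,
    uint16_t word, uint64_t mask, uint64_t val)
{
	struct wrq_cookie cookie;
	struct cpl_set_tcb_field *req;

	MPASS(chan < sc->params.nports);	/* one ctrlq per port/channel */

	/* Reserve space on the chosen control queue. */
	req = start_wrq_wr(&sc->sge.ctrlq[chan], howmany(sizeof(*req), 16),
	    &cookie);
	if (req == NULL)
		return (ENOMEM);
	bzero(req, sizeof(*req));
	INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, tid);	/* assumed helper */
	req->reply_ctrl = htobe16(V_NO_REPLY(1));	/* no completion wanted */
	req->word_cookie = htobe16(V_WORD(word));
	req->mask = htobe64(mask);
	req->val = htobe64(val);

	/* Hand the work request to the chip. */
	commit_wrq_wr(&sc->sge.ctrlq[chan], req, &cookie);

	return (0);
}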
@@ -100,7 +100,7 @@ enum {
 	EQ_ESIZE = 64,
 
 	/* Default queue sizes for all kinds of egress queues */
-	CTRL_EQ_QSIZE = 128,
+	CTRL_EQ_QSIZE = 1024,
 	TX_EQ_QSIZE = 1024,
 
 #if MJUMPAGESIZE != MCLBYTES
@@ -738,7 +738,6 @@ struct sge {
 	int neq;	/* total # of egress queues */
 
 	struct sge_iq fwq;	/* Firmware event queue */
-	struct sge_wrq mgmtq;	/* Management queue (control queue) */
 	struct sge_wrq *ctrlq;	/* Control queues */
 	struct sge_txq *txq;	/* NIC tx queues */
 	struct sge_rxq *rxq;	/* NIC rx queues */
@@ -63,7 +63,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 	struct wrqe *wr;
 
 	wr_len = sizeof *res_wr + sizeof *res;
-	wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
+	wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
 	if (wr == NULL)
 		return (0);
 	res_wr = wrtod(wr);
@@ -133,7 +133,7 @@ create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 	/* build fw_ri_res_wr */
 	wr_len = sizeof *res_wr + sizeof *res;
 
-	wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
+	wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
 	if (wr == NULL)
 		return (0);
 	res_wr = wrtod(wr);
@@ -82,7 +82,7 @@ write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
 		wr_len = roundup(sizeof *ulpmc + sizeof *ulpsc +
 			 roundup(copy_len, T4_ULPTX_MIN_IO), 16);
 
-		wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
+		wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
 		if (wr == NULL)
 			return (0);
 		ulpmc = wrtod(wr);
@@ -236,7 +236,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 	/* build fw_ri_res_wr */
 	wr_len = sizeof *res_wr + 2 * sizeof *res;
 
-	wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
+	wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
 	if (wr == NULL) {
 		ret = -ENOMEM;
 		goto free_rq_dma;
@@ -400,7 +400,7 @@ set_tcamfilter(struct adapter *sc, struct t4_filter *t, struct l2t_entry *l2te,
 		len16 = howmany(sizeof(struct fw_filter2_wr), 16);
 	else
 		len16 = howmany(sizeof(struct fw_filter_wr), 16);
-	fwr = start_wrq_wr(&sc->sge.mgmtq, len16, &cookie);
+	fwr = start_wrq_wr(&sc->sge.ctrlq[0], len16, &cookie);
 	if (__predict_false(fwr == NULL))
 		rc = ENOMEM;
 	else {
@@ -519,7 +519,7 @@ set_tcamfilter(struct adapter *sc, struct t4_filter *t, struct l2t_entry *l2te,
 		fwr->newfport = htobe16(f->fs.nat_sport);
 		fwr->natseqcheck = htobe32(f->fs.nat_seq_chk);
 	}
-	commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
+	commit_wrq_wr(&sc->sge.ctrlq[0], fwr, &cookie);
 
 	/* Wait for response. */
 	mtx_lock(&sc->tids.ftid_lock);
@@ -824,7 +824,7 @@ del_tcamfilter(struct adapter *sc, struct t4_filter *t)
 		goto done;
 	}
 	MPASS(f->tid == tid_base + t->idx);
-	fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie);
+	fwr = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*fwr), 16), &cookie);
 	if (fwr == NULL) {
 		rc = ENOMEM;
 		goto done;
@@ -833,7 +833,7 @@ del_tcamfilter(struct adapter *sc, struct t4_filter *t)
 	bzero(fwr, sizeof (*fwr));
 	t4_mk_filtdelwr(f->tid, fwr, sc->sge.fwq.abs_id);
 	f->pending = 1;
-	commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
+	commit_wrq_wr(&sc->sge.ctrlq[0], fwr, &cookie);
 	t->fs = f->fs;	/* extra info for the caller */
 
 	for (;;) {
@@ -901,7 +901,7 @@ set_tcb_field(struct adapter *sc, u_int tid, uint16_t word, uint64_t mask,
 	struct wrq_cookie cookie;
 	struct cpl_set_tcb_field *req;
 
-	req = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*req), 16), &cookie);
+	req = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*req), 16), &cookie);
 	if (req == NULL)
 		return (ENOMEM);
 	bzero(req, sizeof(*req));
@@ -914,7 +914,7 @@ set_tcb_field(struct adapter *sc, u_int tid, uint16_t word, uint64_t mask,
 	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(CPL_COOKIE_HASHFILTER));
 	req->mask = htobe64(mask);
 	req->val = htobe64(val);
-	commit_wrq_wr(&sc->sge.mgmtq, req, &cookie);
+	commit_wrq_wr(&sc->sge.ctrlq[0], req, &cookie);
 
 	return (0);
 }
@@ -1044,7 +1044,7 @@ t4_hashfilter_ao_rpl(struct sge_iq *iq, const struct rss_header *rss,
 		f->tid = act_open_rpl_status_to_errno(status);
 		f->valid = 0;
 		if (act_open_has_tid(status))
-			release_tid(sc, GET_TID(cpl), &sc->sge.mgmtq);
+			release_tid(sc, GET_TID(cpl), &sc->sge.ctrlq[0]);
 		free_filter_resources(f);
 		if (f->locked == 0)
 			free(f, M_CXGBE);
@@ -1081,7 +1081,7 @@ t4_hashfilter_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss,
 		f->valid = 0;
 		free_filter_resources(f);
 		remove_hftid(sc, tid, f->fs.type ? 2 : 1);
-		release_tid(sc, tid, &sc->sge.mgmtq);
+		release_tid(sc, tid, &sc->sge.ctrlq[0]);
 		if (f->locked == 0)
 			free(f, M_CXGBE);
 	}
@@ -1112,7 +1112,7 @@ t4_del_hashfilter_rpl(struct sge_iq *iq, const struct rss_header *rss,
 		f->valid = 0;
 		free_filter_resources(f);
 		remove_hftid(sc, tid, f->fs.type ? 2 : 1);
-		release_tid(sc, tid, &sc->sge.mgmtq);
+		release_tid(sc, tid, &sc->sge.ctrlq[0]);
 		if (f->locked == 0)
 			free(f, M_CXGBE);
 	}
@@ -1374,7 +1374,7 @@ set_hashfilter(struct adapter *sc, struct t4_filter *t, uint64_t ftuple,
 	}
 	MPASS(atid >= 0);
 
-	wr = start_wrq_wr(&sc->sge.mgmtq, act_open_cpl_len16(sc, f->fs.type),
+	wr = start_wrq_wr(&sc->sge.ctrlq[0], act_open_cpl_len16(sc, f->fs.type),
 	    &cookie);
 	if (wr == NULL) {
 		free_atid(sc, atid);
@@ -1394,7 +1394,7 @@ set_hashfilter(struct adapter *sc, struct t4_filter *t, uint64_t ftuple,
 	f->locked = 1; /* ithread mustn't free f if ioctl is still around. */
 	f->pending = 1;
 	f->tid = -1;
-	commit_wrq_wr(&sc->sge.mgmtq, wr, &cookie);
+	commit_wrq_wr(&sc->sge.ctrlq[0], wr, &cookie);
 
 	for (;;) {
 		MPASS(f->locked);
@@ -1571,7 +1571,7 @@ del_hashfilter(struct adapter *sc, struct t4_filter *t)
 		rc = EBUSY;
 		goto done;
 	}
-	wr = start_wrq_wr(&sc->sge.mgmtq, howmany(wrlen, 16), &cookie);
+	wr = start_wrq_wr(&sc->sge.ctrlq[0], howmany(wrlen, 16), &cookie);
 	if (wr == NULL) {
 		rc = ENOMEM;
 		goto done;
@@ -1580,7 +1580,7 @@ del_hashfilter(struct adapter *sc, struct t4_filter *t)
 	mk_del_hashfilter_wr(t->idx, wr, wrlen, sc->sge.fwq.abs_id);
 	f->locked = 1;
 	f->pending = 1;
-	commit_wrq_wr(&sc->sge.mgmtq, wr, &cookie);
+	commit_wrq_wr(&sc->sge.ctrlq[0], wr, &cookie);
 	t->fs = f->fs;	/* extra info for the caller */
 
 	for (;;) {
@@ -182,7 +182,7 @@ t4_l2t_set_switching(struct adapter *sc, struct l2t_entry *e, uint16_t vlan,
 
 	e->vlan = vlan;
 	e->lport = port;
-	e->wrq = &sc->sge.mgmtq;
+	e->wrq = &sc->sge.ctrlq[0];
 	e->iqid = sc->sge.fwq.abs_id;
 	memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);
 	mtx_lock(&e->lock);
@@ -1052,7 +1052,7 @@ t4_attach(device_t dev)
 		s->ntxq += nports * (num_vis - 1) * iaq.ntxq_vi;
 	}
 	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
-	s->neq += nports + 1;		/* ctrl queues: 1 per port + 1 mgmt */
+	s->neq += nports;		/* ctrl queues: 1 per port */
 	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
 	if (is_offload(sc) || is_ethoffload(sc)) {
@@ -224,8 +224,8 @@ static void add_fl_sysctls(struct adapter *, struct sysctl_ctx_list *,
     struct sysctl_oid *, struct sge_fl *);
 static int alloc_fwq(struct adapter *);
 static int free_fwq(struct adapter *);
-static int alloc_mgmtq(struct adapter *);
-static int free_mgmtq(struct adapter *);
+static int alloc_ctrlq(struct adapter *, struct sge_wrq *, int,
+    struct sysctl_oid *);
 static int alloc_rxq(struct vi_info *, struct sge_rxq *, int, int,
     struct sysctl_oid *);
 static int free_rxq(struct vi_info *, struct sge_rxq *);
@@ -1009,7 +1009,8 @@ t4_destroy_dma_tag(struct adapter *sc)
 }
 
 /*
- * Allocate and initialize the firmware event queue and the management queue.
+ * Allocate and initialize the firmware event queue, control queues, and special
+ * purpose rx queues owned by the adapter.
  *
  * Returns errno on failure.  Resources allocated up to that point may still be
  * allocated.  Caller is responsible for cleanup in case this function fails.
@@ -1017,7 +1018,9 @@ t4_destroy_dma_tag(struct adapter *sc)
 int
 t4_setup_adapter_queues(struct adapter *sc)
 {
-	int rc;
+	struct sysctl_oid *oid;
+	struct sysctl_oid_list *children;
+	int rc, i;
 
 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
 
@@ -1032,11 +1035,30 @@ t4_setup_adapter_queues(struct adapter *sc)
 		return (rc);
 
 	/*
-	 * Management queue.  This is just a control queue that uses the fwq as
-	 * its associated iq.
+	 * That's all for the VF driver.
 	 */
-	if (!(sc->flags & IS_VF))
-		rc = alloc_mgmtq(sc);
+	if (sc->flags & IS_VF)
+		return (rc);
+
+	oid = device_get_sysctl_tree(sc->dev);
+	children = SYSCTL_CHILDREN(oid);
+
+	/*
+	 * XXX: General purpose rx queues, one per port.
+	 */
+
+	/*
+	 * Control queues, one per port.
+	 */
+	oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "ctrlq",
+	    CTLFLAG_RD, NULL, "control queues");
+	for_each_port(sc, i) {
+		struct sge_wrq *ctrlq = &sc->sge.ctrlq[i];
+
+		rc = alloc_ctrlq(sc, ctrlq, i, oid);
+		if (rc != 0)
+			return (rc);
+	}
 
 	return (rc);
 }
@@ -1047,6 +1069,7 @@ t4_setup_adapter_queues(struct adapter *sc)
 int
 t4_teardown_adapter_queues(struct adapter *sc)
 {
+	int i;
 
 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
 
@@ -1056,7 +1079,8 @@ t4_teardown_adapter_queues(struct adapter *sc)
 		sc->flags &= ~ADAP_SYSCTL_CTX;
 	}
 
-	free_mgmtq(sc);
+	for_each_port(sc, i)
+		free_wrq(sc, &sc->sge.ctrlq[i]);
 	free_fwq(sc);
 
 	return (0);
@@ -1092,7 +1116,6 @@ t4_setup_vi_queues(struct vi_info *vi)
 	int rc = 0, i, intr_idx, iqidx;
 	struct sge_rxq *rxq;
 	struct sge_txq *txq;
-	struct sge_wrq *ctrlq;
 #ifdef TCP_OFFLOAD
 	struct sge_ofld_rxq *ofld_rxq;
 #endif
@@ -1239,20 +1262,6 @@ t4_setup_vi_queues(struct vi_info *vi)
 		goto done;
 	}
 #endif
-
-	/*
-	 * Finally, the control queue.
-	 */
-	if (!IS_MAIN_VI(vi) || sc->flags & IS_VF)
-		goto done;
-	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ctrlq", CTLFLAG_RD,
-	    NULL, "ctrl queue");
-	ctrlq = &sc->sge.ctrlq[pi->port_id];
-	snprintf(name, sizeof(name), "%s ctrlq", device_get_nameunit(vi->dev));
-	init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, pi->tx_chan,
-	    sc->sge.rxq[vi->first_rxq].iq.cntxt_id, name);
-	rc = alloc_wrq(sc, vi, ctrlq, oid);
-
 done:
 	if (rc)
 		t4_teardown_vi_queues(vi);
@@ -1267,16 +1276,16 @@ int
 t4_teardown_vi_queues(struct vi_info *vi)
 {
 	int i;
-	struct port_info *pi = vi->pi;
-	struct adapter *sc = pi->adapter;
 	struct sge_rxq *rxq;
 	struct sge_txq *txq;
+#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
+	struct port_info *pi = vi->pi;
+	struct adapter *sc = pi->adapter;
+	struct sge_wrq *ofld_txq;
+#endif
 #ifdef TCP_OFFLOAD
 	struct sge_ofld_rxq *ofld_rxq;
 #endif
-#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
-	struct sge_wrq *ofld_txq;
-#endif
 #ifdef DEV_NETMAP
 	struct sge_nm_rxq *nm_rxq;
 	struct sge_nm_txq *nm_txq;
@@ -1305,9 +1314,6 @@ t4_teardown_vi_queues(struct vi_info *vi)
 	 * (for egress updates, etc.).
 	 */
 
-	if (IS_MAIN_VI(vi) && !(sc->flags & IS_VF))
-		free_wrq(sc, &sc->sge.ctrlq[pi->port_id]);
-
 	for_each_txq(vi, i, txq) {
 		free_txq(vi, txq);
 	}
@@ -3257,35 +3263,25 @@ free_fwq(struct adapter *sc)
 }
 
 static int
-alloc_mgmtq(struct adapter *sc)
+alloc_ctrlq(struct adapter *sc, struct sge_wrq *ctrlq, int idx,
+    struct sysctl_oid *oid)
 {
 	int rc;
-	struct sge_wrq *mgmtq = &sc->sge.mgmtq;
 	char name[16];
-	struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev);
-	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
+	struct sysctl_oid_list *children;
 
-	oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "mgmtq", CTLFLAG_RD,
-	    NULL, "management queue");
-
-	snprintf(name, sizeof(name), "%s mgmtq", device_get_nameunit(sc->dev));
-	init_eq(sc, &mgmtq->eq, EQ_CTRL, CTRL_EQ_QSIZE, sc->port[0]->tx_chan,
+	snprintf(name, sizeof(name), "%s ctrlq%d", device_get_nameunit(sc->dev),
+	    idx);
+	init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, sc->port[idx]->tx_chan,
 	    sc->sge.fwq.cntxt_id, name);
-	rc = alloc_wrq(sc, NULL, mgmtq, oid);
-	if (rc != 0) {
-		device_printf(sc->dev,
-		    "failed to create management queue: %d\n", rc);
-		return (rc);
-	}
 
-	return (0);
-}
-
-static int
-free_mgmtq(struct adapter *sc)
-{
-
-	return free_wrq(sc, &sc->sge.mgmtq);
+	children = SYSCTL_CHILDREN(oid);
+	snprintf(name, sizeof(name), "%d", idx);
+	oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, name, CTLFLAG_RD,
+	    NULL, "ctrl queue");
+	rc = alloc_wrq(sc, NULL, ctrlq, oid);
+
+	return (rc);
 }
 
 int
@@ -210,7 +210,7 @@ t4_smt_set_switching(struct adapter *sc, struct smt_entry *e, uint16_t pfvf,
 	if (atomic_load_acq_int(&e->refcnt) == 1) {
 		/* Setup the entry for the first time */
 		mtx_lock(&e->lock);
-		e->wrq = &sc->sge.mgmtq;
+		e->wrq = &sc->sge.ctrlq[0];
 		e->iqid = sc->sge.fwq.abs_id;
 		e->pfvf = pfvf;
 		e->state = SMT_STATE_SWITCHING;
@@ -662,7 +662,7 @@ t4vf_attach(device_t dev)
 	s->nrxq = sc->params.nports * iaq.nrxq;
 	s->ntxq = sc->params.nports * iaq.ntxq;
 	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
-	s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
+	s->neq += sc->params.nports;	/* ctrl queues: 1 per port */
 	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
 
 	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,