net/cxgbe: add control queue to communicate filter requests
Add control queue to communicate filter creation/deletion requests
with firmware. This API will be used by subsequent patches.

Signed-off-by: Shagun Agrawal <shaguna@chelsio.com>
Signed-off-by: Kumar Sanghvi <kumaras@chelsio.com>
Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
commit 3a3aaabc7c
parent ee61f5113b
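For orientation, a minimal hypothetical sketch of how a follow-up patch might drive the API added here (it assumes the driver's internal headers; the helper name send_filter_wr and the opaque wr buffer are illustrative, not part of this patch): build a work request in an mbuf drawn from the per-port pool created by setup_sge_ctrl_txq(), then hand it to t4_mgmt_tx(), which inlines it into the control queue as immediate data.

/* Hypothetical usage sketch, not part of this commit. */
static int send_filter_wr(struct adapter *adap, unsigned int port_id,
                          const void *wr, uint16_t wr_len)
{
        struct sge_ctrl_txq *ctrlq = &adap->sge.ctrlq[port_id];
        struct rte_mbuf *mbuf;
        char *dst;

        /* mbufs for control messages come from the pool created in
         * setup_sge_ctrl_txq(); the WR must fit as immediate data
         * (wr_len <= MAX_CTRL_WR_LEN).
         */
        mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
        if (!mbuf)
                return -ENOMEM;

        dst = rte_pktmbuf_append(mbuf, wr_len);
        if (!dst) {
                rte_pktmbuf_free(mbuf);
                return -ENOMEM;
        }
        memcpy(dst, wr, wr_len);

        /* t4_mgmt_tx() copies the WR into the ring and frees the mbuf
         * on success; -1 means the WR could not be sent.
         */
        return t4_mgmt_tx(ctrlq, mbuf);
}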
@@ -19,6 +19,7 @@
 enum {
         MAX_ETH_QSETS = 64, /* # of Ethernet Tx/Rx queue sets */
+        MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */
 };
 
 struct adapter;
@@ -256,10 +257,20 @@ struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */
         unsigned int flags; /* flags for state of the queue */
 } __rte_cache_aligned;
 
+struct sge_ctrl_txq { /* State for an SGE control Tx queue */
+        struct sge_txq q; /* txq */
+        struct adapter *adapter; /* adapter associated with this queue */
+        rte_spinlock_t ctrlq_lock; /* control queue lock */
+        u8 full; /* the Tx ring is full */
+        u64 txp; /* number of transmits */
+        struct rte_mempool *mb_pool; /* mempool to generate ctrl pkts */
+} __rte_cache_aligned;
+
 struct sge {
         struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
         struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
         struct sge_rspq fw_evtq __rte_cache_aligned;
+        struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];
 
         u16 max_ethqsets; /* # of available Ethernet queue sets */
         u32 stat_len; /* length of status page at ring end */
@@ -720,6 +731,7 @@ void t4_sge_tx_monitor_start(struct adapter *adap);
 void t4_sge_tx_monitor_stop(struct adapter *adap);
 int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
                 uint16_t nb_pkts);
+int t4_mgmt_tx(struct sge_ctrl_txq *txq, struct rte_mbuf *mbuf);
 int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
                      const struct pkt_gl *gl);
 int t4_sge_init(struct adapter *adap);
@@ -727,6 +739,9 @@ int t4vf_sge_init(struct adapter *adap);
 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
                          struct rte_eth_dev *eth_dev, uint16_t queue_id,
                          unsigned int iqid, int socket_id);
+int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
+                          struct rte_eth_dev *eth_dev, uint16_t queue_id,
+                          unsigned int iqid, int socket_id);
 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *rspq, bool fwevtq,
                      struct rte_eth_dev *eth_dev, int intr_idx,
                      struct sge_fl *fl, rspq_handler_t handler,
@@ -378,6 +378,8 @@ int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
                unsigned int fl0id, unsigned int fl1id);
 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
                    unsigned int vf, unsigned int eqid);
+int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+                    unsigned int vf, unsigned int eqid);
 
 static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
 {
@@ -4490,6 +4490,31 @@ static void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
         }
 }
 
+/**
+ * t4_ctrl_eq_free - free a control egress queue
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queue
+ * @vf: the VF owning the queue
+ * @eqid: egress queue id
+ *
+ * Frees a control egress queue.
+ */
+int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+                    unsigned int vf, unsigned int eqid)
+{
+        struct fw_eq_ctrl_cmd c;
+
+        memset(&c, 0, sizeof(c));
+        c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
+                                  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
+                                  V_FW_EQ_CTRL_CMD_PFN(pf) |
+                                  V_FW_EQ_CTRL_CMD_VFN(vf));
+        c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
+        c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
+        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
 /**
  * t4_handle_fw_rpl - process a FW reply message
  * @adap: the adapter
@@ -178,6 +178,7 @@ enum fw_cmd_opcodes {
         FW_PFVF_CMD = 0x09,
         FW_IQ_CMD = 0x10,
         FW_EQ_ETH_CMD = 0x12,
+        FW_EQ_CTRL_CMD = 0x13,
         FW_VI_CMD = 0x14,
         FW_VI_MAC_CMD = 0x15,
         FW_VI_RXMODE_CMD = 0x16,
@@ -960,6 +961,75 @@ struct fw_eq_eth_cmd {
 #define G_FW_EQ_ETH_CMD_VIID(x) \
         (((x) >> S_FW_EQ_ETH_CMD_VIID) & M_FW_EQ_ETH_CMD_VIID)
 
+struct fw_eq_ctrl_cmd {
+        __be32 op_to_vfn;
+        __be32 alloc_to_len16;
+        __be32 cmpliqid_eqid;
+        __be32 physeqid_pkd;
+        __be32 fetchszm_to_iqid;
+        __be32 dcaen_to_eqsize;
+        __be64 eqaddr;
+};
+
+#define S_FW_EQ_CTRL_CMD_PFN 8
+#define V_FW_EQ_CTRL_CMD_PFN(x) ((x) << S_FW_EQ_CTRL_CMD_PFN)
+
+#define S_FW_EQ_CTRL_CMD_VFN 0
+#define V_FW_EQ_CTRL_CMD_VFN(x) ((x) << S_FW_EQ_CTRL_CMD_VFN)
+
+#define S_FW_EQ_CTRL_CMD_ALLOC 31
+#define V_FW_EQ_CTRL_CMD_ALLOC(x) ((x) << S_FW_EQ_CTRL_CMD_ALLOC)
+#define F_FW_EQ_CTRL_CMD_ALLOC V_FW_EQ_CTRL_CMD_ALLOC(1U)
+
+#define S_FW_EQ_CTRL_CMD_FREE 30
+#define V_FW_EQ_CTRL_CMD_FREE(x) ((x) << S_FW_EQ_CTRL_CMD_FREE)
+#define F_FW_EQ_CTRL_CMD_FREE V_FW_EQ_CTRL_CMD_FREE(1U)
+
+#define S_FW_EQ_CTRL_CMD_EQSTART 28
+#define V_FW_EQ_CTRL_CMD_EQSTART(x) ((x) << S_FW_EQ_CTRL_CMD_EQSTART)
+#define F_FW_EQ_CTRL_CMD_EQSTART V_FW_EQ_CTRL_CMD_EQSTART(1U)
+
+#define S_FW_EQ_CTRL_CMD_CMPLIQID 20
+#define V_FW_EQ_CTRL_CMD_CMPLIQID(x) ((x) << S_FW_EQ_CTRL_CMD_CMPLIQID)
+
+#define S_FW_EQ_CTRL_CMD_EQID 0
+#define M_FW_EQ_CTRL_CMD_EQID 0xfffff
+#define V_FW_EQ_CTRL_CMD_EQID(x) ((x) << S_FW_EQ_CTRL_CMD_EQID)
+#define G_FW_EQ_CTRL_CMD_EQID(x) \
+        (((x) >> S_FW_EQ_CTRL_CMD_EQID) & M_FW_EQ_CTRL_CMD_EQID)
+
+#define S_FW_EQ_CTRL_CMD_PHYSEQID 0
+#define M_FW_EQ_CTRL_CMD_PHYSEQID 0xfffff
+#define V_FW_EQ_CTRL_CMD_PHYSEQID(x) ((x) << S_FW_EQ_CTRL_CMD_PHYSEQID)
+#define G_FW_EQ_CTRL_CMD_PHYSEQID(x) \
+        (((x) >> S_FW_EQ_CTRL_CMD_PHYSEQID) & M_FW_EQ_CTRL_CMD_PHYSEQID)
+
+#define S_FW_EQ_CTRL_CMD_FETCHRO 22
+#define V_FW_EQ_CTRL_CMD_FETCHRO(x) ((x) << S_FW_EQ_CTRL_CMD_FETCHRO)
+#define F_FW_EQ_CTRL_CMD_FETCHRO V_FW_EQ_CTRL_CMD_FETCHRO(1U)
+
+#define S_FW_EQ_CTRL_CMD_HOSTFCMODE 20
+#define M_FW_EQ_CTRL_CMD_HOSTFCMODE 0x3
+#define V_FW_EQ_CTRL_CMD_HOSTFCMODE(x) ((x) << S_FW_EQ_CTRL_CMD_HOSTFCMODE)
+
+#define S_FW_EQ_CTRL_CMD_PCIECHN 16
+#define V_FW_EQ_CTRL_CMD_PCIECHN(x) ((x) << S_FW_EQ_CTRL_CMD_PCIECHN)
+
+#define S_FW_EQ_CTRL_CMD_IQID 0
+#define V_FW_EQ_CTRL_CMD_IQID(x) ((x) << S_FW_EQ_CTRL_CMD_IQID)
+
+#define S_FW_EQ_CTRL_CMD_FBMIN 23
+#define V_FW_EQ_CTRL_CMD_FBMIN(x) ((x) << S_FW_EQ_CTRL_CMD_FBMIN)
+
+#define S_FW_EQ_CTRL_CMD_FBMAX 20
+#define V_FW_EQ_CTRL_CMD_FBMAX(x) ((x) << S_FW_EQ_CTRL_CMD_FBMAX)
+
+#define S_FW_EQ_CTRL_CMD_CIDXFTHRESH 16
+#define V_FW_EQ_CTRL_CMD_CIDXFTHRESH(x) ((x) << S_FW_EQ_CTRL_CMD_CIDXFTHRESH)
+
+#define S_FW_EQ_CTRL_CMD_EQSIZE 0
+#define V_FW_EQ_CTRL_CMD_EQSIZE(x) ((x) << S_FW_EQ_CTRL_CMD_EQSIZE)
+
 enum fw_vi_func {
         FW_VI_FUNC_ETH,
 };
@@ -42,6 +42,7 @@ int link_start(struct port_info *pi);
 void init_rspq(struct adapter *adap, struct sge_rspq *q, unsigned int us,
                unsigned int cnt, unsigned int size, unsigned int iqe_size);
 int setup_sge_fwevtq(struct adapter *adapter);
+int setup_sge_ctrl_txq(struct adapter *adapter);
 void cfg_queues(struct rte_eth_dev *eth_dev);
 int cfg_queue_count(struct rte_eth_dev *eth_dev);
 int init_rss(struct adapter *adap);
@@ -365,6 +365,9 @@ int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
                 if (err)
                         return err;
                 adapter->flags |= FW_QUEUE_BOUND;
+                err = setup_sge_ctrl_txq(adapter);
+                if (err)
+                        return err;
         }
 
         err = cfg_queue_count(eth_dev);
@@ -94,6 +94,47 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
         return 0;
 }
 
+/**
+ * Setup sge control queues to pass control information.
+ */
+int setup_sge_ctrl_txq(struct adapter *adapter)
+{
+        struct sge *s = &adapter->sge;
+        int err = 0, i = 0;
+
+        for_each_port(adapter, i) {
+                char name[RTE_ETH_NAME_MAX_LEN];
+                struct sge_ctrl_txq *q = &s->ctrlq[i];
+
+                q->q.size = 1024;
+                err = t4_sge_alloc_ctrl_txq(adapter, q,
+                                            adapter->eth_dev, i,
+                                            s->fw_evtq.cntxt_id,
+                                            rte_socket_id());
+                if (err) {
+                        dev_err(adapter, "Failed to alloc ctrl txq. Err: %d",
+                                err);
+                        goto out;
+                }
+                snprintf(name, sizeof(name), "cxgbe_ctrl_pool_%d", i);
+                q->mb_pool = rte_pktmbuf_pool_create(name, s->ctrlq[i].q.size,
+                                                     RTE_CACHE_LINE_SIZE,
+                                                     RTE_MBUF_PRIV_ALIGN,
+                                                     RTE_MBUF_DEFAULT_BUF_SIZE,
+                                                     SOCKET_ID_ANY);
+                if (!q->mb_pool) {
+                        dev_err(adapter, "Can't create ctrl pool for port: %d",
+                                i);
+                        err = -ENOMEM;
+                        goto out;
+                }
+        }
+        return 0;
+out:
+        t4_free_sge_resources(adapter);
+        return err;
+}
+
 int setup_sge_fwevtq(struct adapter *adapter)
 {
         struct sge *s = &adapter->sge;
@@ -54,6 +54,11 @@ static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
  */
 #define MAX_IMM_TX_PKT_LEN 256
 
+/*
+ * Max size of a WR sent through a control Tx queue.
+ */
+#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
+
 /*
  * Rx buffer sizes for "usembufs" Free List buffers (one ingress packet
  * per mbuf buffer). We currently only support two sizes for 1500- and
@@ -1299,6 +1304,126 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
         return 0;
 }
 
+/**
+ * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
+ * @q: the SGE control Tx queue
+ *
+ * This is a variant of reclaim_completed_tx() that is used for Tx queues
+ * that send only immediate data (presently just the control queues) and
+ * thus do not have any mbufs to release.
+ */
+static inline void reclaim_completed_tx_imm(struct sge_txq *q)
+{
+        int hw_cidx = ntohs(q->stat->cidx);
+        int reclaim = hw_cidx - q->cidx;
+
+        if (reclaim < 0)
+                reclaim += q->size;
+
+        q->in_use -= reclaim;
+        q->cidx = hw_cidx;
+}
+
+/**
+ * is_imm - check whether a packet can be sent as immediate data
+ * @mbuf: the packet
+ *
+ * Returns true if a packet can be sent as a WR with immediate data.
+ */
+static inline int is_imm(const struct rte_mbuf *mbuf)
+{
+        return mbuf->pkt_len <= MAX_CTRL_WR_LEN;
+}
+
+/**
+ * inline_tx_mbuf: inline a packet's data into TX descriptors
+ * @q: the TX queue where the packet will be inlined
+ * @from: pointer to data portion of packet
+ * @to: pointer after cpl where data has to be inlined
+ * @len: length of data to inline
+ *
+ * Inline a packet's contents directly to TX descriptors, starting at
+ * the given position within the TX DMA ring.
+ * Most of the complexity of this operation is dealing with wrap arounds
+ * in the middle of the packet we want to inline.
+ */
+static void inline_tx_mbuf(const struct sge_txq *q, caddr_t from, caddr_t *to,
+                           int len)
+{
+        int left = RTE_PTR_DIFF(q->stat, *to);
+
+        if (likely((uintptr_t)*to + len <= (uintptr_t)q->stat)) {
+                rte_memcpy(*to, from, len);
+                *to = RTE_PTR_ADD(*to, len);
+        } else {
+                rte_memcpy(*to, from, left);
+                from = RTE_PTR_ADD(from, left);
+                left = len - left;
+                rte_memcpy((void *)q->desc, from, left);
+                *to = RTE_PTR_ADD((void *)q->desc, left);
+        }
+}
+
+/**
+ * ctrl_xmit - send a packet through an SGE control Tx queue
+ * @q: the control queue
+ * @mbuf: the packet
+ *
+ * Send a packet through an SGE control Tx queue. Packets sent through
+ * a control queue must fit entirely as immediate data.
+ */
+static int ctrl_xmit(struct sge_ctrl_txq *q, struct rte_mbuf *mbuf)
+{
+        unsigned int ndesc;
+        struct fw_wr_hdr *wr;
+        caddr_t dst;
+
+        if (unlikely(!is_imm(mbuf))) {
+                WARN_ON(1);
+                rte_pktmbuf_free(mbuf);
+                return -1;
+        }
+
+        reclaim_completed_tx_imm(&q->q);
+        ndesc = DIV_ROUND_UP(mbuf->pkt_len, sizeof(struct tx_desc));
+        t4_os_lock(&q->ctrlq_lock);
+
+        q->full = txq_avail(&q->q) < ndesc ? 1 : 0;
+        if (unlikely(q->full)) {
+                t4_os_unlock(&q->ctrlq_lock);
+                return -1;
+        }
+
+        wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
+        dst = (void *)wr;
+        inline_tx_mbuf(&q->q, rte_pktmbuf_mtod(mbuf, caddr_t),
+                       &dst, mbuf->data_len);
+
+        txq_advance(&q->q, ndesc);
+        if (unlikely(txq_avail(&q->q) < 64))
+                wr->lo |= htonl(F_FW_WR_EQUEQ);
+
+        q->txp++;
+
+        ring_tx_db(q->adapter, &q->q);
+        t4_os_unlock(&q->ctrlq_lock);
+
+        rte_pktmbuf_free(mbuf);
+        return 0;
+}
+
+/**
+ * t4_mgmt_tx - send a management message
+ * @q: the control queue
+ * @mbuf: the packet containing the management message
+ *
+ * Send a management message through control queue.
+ */
+int t4_mgmt_tx(struct sge_ctrl_txq *q, struct rte_mbuf *mbuf)
+{
+        return ctrl_xmit(q, mbuf);
+}
+
 /**
  * alloc_ring - allocate resources for an SGE descriptor ring
  * @dev: the PCI device's core device
@@ -2080,6 +2205,64 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
         return 0;
 }
 
+int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
+                          struct rte_eth_dev *eth_dev, uint16_t queue_id,
+                          unsigned int iqid, int socket_id)
+{
+        int ret, nentries;
+        struct fw_eq_ctrl_cmd c;
+        struct sge *s = &adap->sge;
+        struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+        char z_name[RTE_MEMZONE_NAMESIZE];
+        char z_name_sw[RTE_MEMZONE_NAMESIZE];
+
+        /* Add status entries */
+        nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
+
+        snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+                 eth_dev->device->driver->name, "ctrl_tx_ring",
+                 eth_dev->data->port_id, queue_id);
+        snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
+
+        txq->q.desc = alloc_ring(txq->q.size, sizeof(struct tx_desc),
+                                 0, &txq->q.phys_addr,
+                                 NULL, 0, queue_id,
+                                 socket_id, z_name, z_name_sw);
+        if (!txq->q.desc)
+                return -ENOMEM;
+
+        memset(&c, 0, sizeof(c));
+        c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
+                            F_FW_CMD_WRITE | F_FW_CMD_EXEC |
+                            V_FW_EQ_CTRL_CMD_PFN(adap->pf) |
+                            V_FW_EQ_CTRL_CMD_VFN(0));
+        c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_ALLOC |
+                                 F_FW_EQ_CTRL_CMD_EQSTART | (sizeof(c) / 16));
+        c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(0));
+        c.physeqid_pkd = htonl(0);
+        c.fetchszm_to_iqid =
+                htonl(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
+                      V_FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |
+                      F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(iqid));
+        c.dcaen_to_eqsize =
+                htonl(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
+                      V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
+                      V_FW_EQ_CTRL_CMD_EQSIZE(nentries));
+        c.eqaddr = cpu_to_be64(txq->q.phys_addr);
+
+        ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+        if (ret) {
+                txq->q.desc = NULL;
+                return ret;
+        }
+
+        init_txq(adap, &txq->q, G_FW_EQ_CTRL_CMD_EQID(ntohl(c.cmpliqid_eqid)),
+                 G_FW_EQ_CTRL_CMD_EQID(ntohl(c.physeqid_pkd)));
+        txq->adapter = adap;
+        txq->full = 0;
+        return 0;
+}
+
 static void free_txq(struct sge_txq *q)
 {
         q->cntxt_id = 0;
@@ -2174,7 +2357,7 @@ void t4_sge_tx_monitor_stop(struct adapter *adap)
  */
 void t4_free_sge_resources(struct adapter *adap)
 {
-        int i;
+        unsigned int i;
         struct sge_eth_rxq *rxq = &adap->sge.ethrxq[0];
         struct sge_eth_txq *txq = &adap->sge.ethtxq[0];
 
@@ -2191,6 +2374,18 @@ void t4_free_sge_resources(struct adapter *adap)
                 }
         }
 
+        /* clean up control Tx queues */
+        for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
+                struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
+
+                if (cq->q.desc) {
+                        reclaim_completed_tx_imm(&cq->q);
+                        t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
+                                        cq->q.cntxt_id);
+                        free_txq(&cq->q);
+                }
+        }
+
         if (adap->sge.fw_evtq.desc)
                 free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
 }
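A note on inline_tx_mbuf() above: the only subtlety is a WR that crosses the end of the descriptor ring (q->stat, the status page, marks the ring end), in which case the copy is split and resumes at q->desc. The same two-branch copy can be checked in isolation with a standalone sketch on a plain byte ring; all names here are hypothetical and not part of the patch.

/* Standalone sketch of the wrap-around copy; compiles and runs on its own. */
#include <assert.h>
#include <stdint.h>
#include <string.h>

static void inline_copy(uint8_t *ring, size_t ring_len, size_t pos,
                        const uint8_t *src, size_t len)
{
        size_t left = ring_len - pos;   /* bytes until the end of the ring */

        if (pos + len <= ring_len) {    /* fits without wrapping */
                memcpy(ring + pos, src, len);
        } else {                        /* copy the tail, wrap to the start */
                memcpy(ring + pos, src, left);
                memcpy(ring, src + left, len - left);
        }
}

int main(void)
{
        uint8_t ring[8] = {0};
        const uint8_t msg[4] = {1, 2, 3, 4};

        inline_copy(ring, sizeof(ring), 6, msg, sizeof(msg)); /* wraps by 2 */
        assert(ring[6] == 1 && ring[7] == 2 && ring[0] == 3 && ring[1] == 4);
        return 0;
}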