net/cxgbe: support to delete flows in HASH region

Add an interface to delete offloaded flows in the HASH region. Use the
hash index saved during insertion to delete the corresponding flow.

Signed-off-by: Shagun Agrawal <shaguna@chelsio.com>
Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
Author: Shagun Agrawal, 2018-06-29 23:42:20 +05:30; committed by Ferruh Yigit
parent af44a57798
commit 41dc98b082
7 changed files with 351 additions and 0 deletions
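In outline, a hash-region flow is deleted by its tid (the hash index returned at insertion): the driver builds a single FW_ULPTX_WR carrying three CPLs and completes asynchronously through the caller's filter_ctx. A minimal caller-side sketch, assuming completion helpers such as t4_init_completion() and a poll helper like cxgbe_poll_for_completion() are available (helper names and timeout values here are illustrative, not part of this commit):

struct filter_ctx ctx;
int err;

t4_init_completion(&ctx.completion);
/* fs->cap set => dispatches to cxgbe_del_hash_filter() */
err = cxgbe_del_filter(dev, tid, fs, &ctx);
if (err)
	return err;
/* hash_del_filter_rpl() fills ctx.result and completes the context */
err = cxgbe_poll_for_completion(&adapter->sge.fw_evtq, 10, 2000,
				&ctx.completion);
if (!err)
	err = ctx.result;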

drivers/net/cxgbe/base/t4_msg.h

@@ -8,7 +8,12 @@
enum {
CPL_ACT_OPEN_REQ = 0x3,
CPL_SET_TCB_FIELD = 0x5,
CPL_ABORT_REQ = 0xA,
CPL_ABORT_RPL = 0xB,
CPL_TID_RELEASE = 0x1A,
CPL_ACT_OPEN_RPL = 0x25,
CPL_ABORT_RPL_RSS = 0x2D,
CPL_SET_TCB_RPL = 0x3A,
CPL_ACT_OPEN_REQ6 = 0x83,
CPL_SGE_EGR_UPDATE = 0xA5,
@@ -27,6 +32,11 @@ enum {
ULP_MODE_NONE = 0,
};
enum {
CPL_ABORT_SEND_RST = 0,
CPL_ABORT_NO_RST,
};
enum { /* TX_PKT_XT checksum types */
TX_CSUM_TCPIP = 8,
TX_CSUM_UDPIP = 9,
@@ -189,6 +199,29 @@ struct cpl_act_open_rpl {
#define M_AOPEN_ATID 0xFFFFFF
#define G_AOPEN_ATID(x) (((x) >> S_AOPEN_ATID) & M_AOPEN_ATID)
struct cpl_set_tcb_field {
WR_HDR;
union opcode_tid ot;
__be16 reply_ctrl;
__be16 word_cookie;
__be64 mask;
__be64 val;
};
/* cpl_set_tcb_field.word_cookie fields */
#define S_WORD 0
#define V_WORD(x) ((x) << S_WORD)
/* cpl_set_tcb_field.reply_ctrl fields */
#define S_QUEUENO 0
#define V_QUEUENO(x) ((x) << S_QUEUENO)
#define S_REPLY_CHAN 14
#define V_REPLY_CHAN(x) ((x) << S_REPLY_CHAN)
#define S_NO_REPLY 15
#define V_NO_REPLY(x) ((x) << S_NO_REPLY)
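As an illustration, the delete path below programs TCB word 3 with cookie 0 and asks for no reply, composing the two 16-bit fields roughly as follows (a sketch mirroring mk_set_tcb_field_ulp() in cxgbe_filter.c; V_COOKIE() is assumed to be defined alongside these macros with the same shift/value pattern):

req->word_cookie = cpu_to_be16(V_WORD(3) | V_COOKIE(0));
req->reply_ctrl = cpu_to_be16(V_NO_REPLY(1) | V_REPLY_CHAN(0) |
			      V_QUEUENO(0));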
struct cpl_set_tcb_rpl {
RSS_HDR
union opcode_tid ot;
@@ -198,6 +231,39 @@ struct cpl_set_tcb_rpl {
__be64 oldval;
};
/* cpl_abort_req status command code
*/
struct cpl_abort_req {
WR_HDR;
union opcode_tid ot;
__be32 rsvd0;
__u8 rsvd1;
__u8 cmd;
__u8 rsvd2[6];
};
struct cpl_abort_rpl_rss {
RSS_HDR
union opcode_tid ot;
__u8 rsvd[3];
__u8 status;
};
struct cpl_abort_rpl {
WR_HDR;
union opcode_tid ot;
__be32 rsvd0;
__u8 rsvd1;
__u8 cmd;
__u8 rsvd2[6];
};
struct cpl_tid_release {
WR_HDR;
union opcode_tid ot;
__be32 rsvd;
};
struct cpl_tx_data {
union opcode_tid ot;
__be32 len;
@@ -403,7 +469,13 @@ struct cpl_fw6_msg {
__be64 data[4];
};
/* ULP_TX opcodes */
enum {
ULP_TX_PKT = 4
};
enum {
ULP_TX_SC_NOOP = 0x80,
ULP_TX_SC_IMM = 0x81,
ULP_TX_SC_DSGL = 0x82,
ULP_TX_SC_ISGL = 0x83

drivers/net/cxgbe/base/t4_tcb.h

@@ -0,0 +1,15 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2018 Chelsio Communications.
* All rights reserved.
*/
#ifndef _T4_TCB_DEFS_H
#define _T4_TCB_DEFS_H
/* 105:96 */
#define W_TCB_RSS_INFO 3
#define S_TCB_RSS_INFO 0
#define M_TCB_RSS_INFO 0x3ffULL
#define V_TCB_RSS_INFO(x) ((x) << S_TCB_RSS_INFO)
#endif /* _T4_TCB_DEFS_H */
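These definitions select bits 105:96 of the TCB, i.e. the low 10 bits of 32-bit TCB word 3. The delete path in cxgbe_filter.c uses them to form the mask/value pair that re-points the filter's RSS_INFO at the firmware event queue; schematically:

/* rewrite all 10 bits of RSS_INFO in TCB word W_TCB_RSS_INFO */
mask = V_TCB_RSS_INFO(M_TCB_RSS_INFO);             /* 0x3ff at bit 0 */
val = V_TCB_RSS_INFO(adapter->sge.fw_evtq.abs_id); /* new reply queue */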

drivers/net/cxgbe/base/t4fw_interface.h

@@ -55,6 +55,7 @@ enum fw_memtype {
enum fw_wr_opcodes {
FW_FILTER_WR = 0x02,
FW_ULPTX_WR = 0x04,
FW_TP_WR = 0x05,
FW_ETH_TX_PKT_WR = 0x08,
FW_ETH_TX_PKTS_WR = 0x09,
@@ -78,6 +79,11 @@ struct fw_wr_hdr {
#define V_FW_WR_OP(x) ((x) << S_FW_WR_OP)
#define G_FW_WR_OP(x) (((x) >> S_FW_WR_OP) & M_FW_WR_OP)
/* atomic flag (hi) - firmware encapsulates CPLs in CPL_BARRIER
*/
#define S_FW_WR_ATOMIC 23
#define V_FW_WR_ATOMIC(x) ((x) << S_FW_WR_ATOMIC)
/* work request immediate data length (hi)
*/
#define S_FW_WR_IMMDLEN 0

drivers/net/cxgbe/cxgbe_filter.c

@@ -4,6 +4,7 @@
*/
#include <rte_net.h>
#include "common.h"
#include "t4_tcb.h"
#include "t4_regs.h"
#include "cxgbe_filter.h"
#include "clip_tbl.h"
@@ -116,6 +117,34 @@ int writable_filter(struct filter_entry *f)
return 0;
}
/**
* Build a CPL_SET_TCB_FIELD message as payload of a ULP_TX_PKT command.
*/
static inline void mk_set_tcb_field_ulp(struct filter_entry *f,
struct cpl_set_tcb_field *req,
unsigned int word,
u64 mask, u64 val, u8 cookie,
int no_reply)
{
struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
V_ULP_TXPKT_DEST(0));
txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*req), 16));
sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
sc->len = cpu_to_be32(sizeof(*req) - sizeof(struct work_request_hdr));
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
req->reply_ctrl = cpu_to_be16(V_NO_REPLY(no_reply) | V_REPLY_CHAN(0) |
V_QUEUENO(0));
req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(cookie));
req->mask = cpu_to_be64(mask);
req->val = cpu_to_be64(val);
sc = (struct ulptx_idata *)(req + 1);
sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
sc->len = cpu_to_be32(0);
}
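A quick check of the lengths this helper emits, assuming the 16-byte work_request_hdr and the cpl_set_tcb_field layout above (4 + 2 + 2 + 8 + 8 bytes of payload after the header):

/* sizeof(struct cpl_set_tcb_field) = 16 + 24 = 40 bytes */
txpkt->len = cpu_to_be32(DIV_ROUND_UP(40, 16)); /* 3 units of 16 bytes */
sc->len = cpu_to_be32(40 - 16); /* 24 bytes of immediate CPL data */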
/**
* Check if entry already filled.
*/
@@ -185,6 +214,132 @@ static u64 hash_filter_ntuple(const struct filter_entry *f)
return ntuple;
}
/**
* Build a CPL_ABORT_REQ message as payload of a ULP_TX_PKT command.
*/
static void mk_abort_req_ulp(struct cpl_abort_req *abort_req,
unsigned int tid)
{
struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
V_ULP_TXPKT_DEST(0));
txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_req), 16));
sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
sc->len = cpu_to_be32(sizeof(*abort_req) -
sizeof(struct work_request_hdr));
OPCODE_TID(abort_req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
abort_req->rsvd0 = cpu_to_be32(0);
abort_req->rsvd1 = 0;
abort_req->cmd = CPL_ABORT_NO_RST;
sc = (struct ulptx_idata *)(abort_req + 1);
sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
sc->len = cpu_to_be32(0);
}
/**
* Build a CPL_ABORT_RPL message as payload of a ULP_TX_PKT command.
*/
static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl,
unsigned int tid)
{
struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
V_ULP_TXPKT_DEST(0));
txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
sc->len = cpu_to_be32(sizeof(*abort_rpl) -
sizeof(struct work_request_hdr));
OPCODE_TID(abort_rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
abort_rpl->rsvd0 = cpu_to_be32(0);
abort_rpl->rsvd1 = 0;
abort_rpl->cmd = CPL_ABORT_NO_RST;
sc = (struct ulptx_idata *)(abort_rpl + 1);
sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
sc->len = cpu_to_be32(0);
}
/**
* Delete the specified hash filter.
*/
static int cxgbe_del_hash_filter(struct rte_eth_dev *dev,
unsigned int filter_id,
struct filter_ctx *ctx)
{
struct adapter *adapter = ethdev2adap(dev);
struct tid_info *t = &adapter->tids;
struct filter_entry *f;
struct sge_ctrl_txq *ctrlq;
unsigned int port_id = ethdev2pinfo(dev)->port_id;
int ret;
if (filter_id > adapter->tids.ntids)
return -E2BIG;
f = lookup_tid(t, filter_id);
if (!f) {
dev_err(adapter, "%s: no filter entry for filter_id = %d\n",
__func__, filter_id);
return -EINVAL;
}
ret = writable_filter(f);
if (ret)
return ret;
if (f->valid) {
unsigned int wrlen;
struct rte_mbuf *mbuf;
struct work_request_hdr *wr;
struct ulptx_idata *aligner;
struct cpl_set_tcb_field *req;
struct cpl_abort_req *abort_req;
struct cpl_abort_rpl *abort_rpl;
f->ctx = ctx;
f->pending = 1;
wrlen = cxgbe_roundup(sizeof(*wr) +
(sizeof(*req) + sizeof(*aligner)) +
sizeof(*abort_req) + sizeof(*abort_rpl),
16);
ctrlq = &adapter->sge.ctrlq[port_id];
mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
if (!mbuf) {
dev_err(adapter, "%s: could not allocate mbuf\n",
__func__);
goto out_err;
}
mbuf->data_len = wrlen;
mbuf->pkt_len = mbuf->data_len;
req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
INIT_ULPTX_WR(req, wrlen, 0, 0);
wr = (struct work_request_hdr *)req;
wr++;
req = (struct cpl_set_tcb_field *)wr;
mk_set_tcb_field_ulp(f, req, W_TCB_RSS_INFO,
V_TCB_RSS_INFO(M_TCB_RSS_INFO),
V_TCB_RSS_INFO(adapter->sge.fw_evtq.abs_id),
0, 1);
aligner = (struct ulptx_idata *)(req + 1);
abort_req = (struct cpl_abort_req *)(aligner + 1);
mk_abort_req_ulp(abort_req, f->tid);
abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
mk_abort_rpl_ulp(abort_rpl, f->tid);
t4_mgmt_tx(ctrlq, mbuf);
}
return 0;
out_err:
return -ENOMEM;
}
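The mbuf dispatched above thus carries a single FW_ULPTX_WR whose payload chains three immediate CPLs. A sketch of the layout implied by the wrlen arithmetic (each CPL's WR_HDR space is overlaid by a ULP_TX_PKT plus ULP_TX_SC_IMM sub-header):

/*
 * work_request_hdr            FW_ULPTX_WR, LEN16 = wrlen / 16
 * ULP_TX_PKT + ULP_TX_SC_IMM  cpl_set_tcb_field: RSS_INFO := fw_evtq.abs_id
 * ULP_TX_SC_NOOP              pad to 16 bytes (the "aligner")
 * ULP_TX_PKT + ULP_TX_SC_IMM  cpl_abort_req: CPL_ABORT_NO_RST
 * ULP_TX_PKT + ULP_TX_SC_IMM  cpl_abort_rpl: CPL_ABORT_NO_RST
 * total rounded up to a multiple of 16 bytes
 */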
/**
* Build an ACT_OPEN_REQ6 message for setting an IPv6 hash filter.
*/
@@ -560,6 +715,9 @@ int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
unsigned int chip_ver;
int ret;
if (is_hashfilter(adapter) && fs->cap)
return cxgbe_del_hash_filter(dev, filter_id, ctx);
if (filter_id >= adapter->tids.nftids)
return -ERANGE;
@@ -967,3 +1125,38 @@ int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
}
return 0;
}
/**
* Handle a Hash filter delete reply.
*/
void hash_del_filter_rpl(struct adapter *adap,
const struct cpl_abort_rpl_rss *rpl)
{
struct tid_info *t = &adap->tids;
struct filter_entry *f;
struct filter_ctx *ctx = NULL;
unsigned int tid = GET_TID(rpl);
f = lookup_tid(t, tid);
if (!f) {
dev_warn(adap, "%s: could not find filter entry: %u\n",
__func__, tid);
return;
}
ctx = f->ctx;
f->ctx = NULL;
f->valid = 0;
if (f->clipt)
cxgbe_clip_release(f->dev, f->clipt);
cxgbe_remove_tid(t, 0, tid, 0);
t4_os_free(f);
if (ctx) {
ctx->result = 0;
t4_complete(&ctx->completion);
}
}

drivers/net/cxgbe/cxgbe_filter.h

@@ -224,6 +224,8 @@ int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
int cxgbe_alloc_ftid(struct adapter *adap, unsigned int family);
int init_hash_filter(struct adapter *adap);
void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl);
void hash_del_filter_rpl(struct adapter *adap,
const struct cpl_abort_rpl_rss *rpl);
int validate_filter(struct adapter *adap, struct ch_filter_specification *fs);
int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
u64 *c, bool get_byte);

drivers/net/cxgbe/cxgbe_main.c

@@ -87,6 +87,10 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
const struct cpl_fw6_msg *msg = (const void *)rsp;
t4_handle_fw_rpl(q->adapter, msg->data);
} else if (opcode == CPL_ABORT_RPL_RSS) {
const struct cpl_abort_rpl_rss *p = (const void *)rsp;
hash_del_filter_rpl(q->adapter, p);
} else if (opcode == CPL_SET_TCB_RPL) {
const struct cpl_set_tcb_rpl *p = (const void *)rsp;
@@ -301,6 +305,50 @@ void cxgbe_free_atid(struct tid_info *t, unsigned int atid)
t4_os_unlock(&t->atid_lock);
}
/**
* Populate a TID_RELEASE WR. Caller must properly size the mbuf.
*/
static void mk_tid_release(struct rte_mbuf *mbuf, unsigned int tid)
{
struct cpl_tid_release *req;
req = rte_pktmbuf_mtod(mbuf, struct cpl_tid_release *);
INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid);
}
/**
* Release a TID and inform HW. If the release message cannot be
* allocated, the hardware notification is simply skipped.
*/
void cxgbe_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
unsigned short family)
{
struct rte_mbuf *mbuf;
struct adapter *adap = container_of(t, struct adapter, tids);
WARN_ON(tid >= t->ntids);
if (t->tid_tab[tid]) {
t->tid_tab[tid] = NULL;
rte_atomic32_dec(&t->conns_in_use);
if (t->hash_base && tid >= t->hash_base) {
if (family == FILTER_TYPE_IPV4)
rte_atomic32_dec(&t->hash_tids_in_use);
} else {
if (family == FILTER_TYPE_IPV4)
rte_atomic32_dec(&t->tids_in_use);
}
}
mbuf = rte_pktmbuf_alloc((&adap->sge.ctrlq[chan])->mb_pool);
if (mbuf) {
mbuf->data_len = sizeof(struct cpl_tid_release);
mbuf->pkt_len = mbuf->data_len;
mk_tid_release(mbuf, tid);
t4_mgmt_tx(&adap->sge.ctrlq[chan], mbuf);
}
}
/**
* Insert a TID.
*/

drivers/net/cxgbe/cxgbe_ofld.h

@@ -19,6 +19,19 @@
(w)->wr.wr_lo = cpu_to_be64(0); \
} while (0)
#define INIT_TP_WR_MIT_CPL(w, cpl, tid) do { \
INIT_TP_WR(w, tid); \
OPCODE_TID(w) = cpu_to_be32(MK_OPCODE_TID(cpl, tid)); \
} while (0)
#define INIT_ULPTX_WR(w, wrlen, atomic, tid) do { \
(w)->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) | \
V_FW_WR_ATOMIC(atomic)); \
(w)->wr.wr_mid = cpu_to_be32(V_FW_WR_LEN16(DIV_ROUND_UP(wrlen, 16)) | \
V_FW_WR_FLOWID(tid)); \
(w)->wr.wr_lo = cpu_to_be64(0); \
} while (0)
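For reference, the delete path in cxgbe_filter.c invokes this as INIT_ULPTX_WR(req, wrlen, 0, 0), which expands to roughly the following (req being the cpl_set_tcb_field at the head of the work request):

/* non-atomic FW_ULPTX_WR, flowid 0 */
req->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) | V_FW_WR_ATOMIC(0));
req->wr.wr_mid = cpu_to_be32(V_FW_WR_LEN16(DIV_ROUND_UP(wrlen, 16)) |
			     V_FW_WR_FLOWID(0));
req->wr.wr_lo = cpu_to_be64(0);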
/*
* Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
*/
@@ -68,6 +81,8 @@ static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
int cxgbe_alloc_atid(struct tid_info *t, void *data);
void cxgbe_free_atid(struct tid_info *t, unsigned int atid);
void cxgbe_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid,
unsigned short family);
void cxgbe_insert_tid(struct tid_info *t, void *data, unsigned int tid,
unsigned short family);