net/cxgbe: support to redirect packets to egress port

Add an action to redirect matched packets to the specified egress physical
port without sending them to the host.

Signed-off-by: Shagun Agrawal <shaguna@chelsio.com>
Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
This commit is contained in:
Shagun Agrawal 2018-06-29 23:42:24 +05:30 committed by Ferruh Yigit
parent 6b805d23a5
commit 281c6e7baf
4 changed files with 63 additions and 3 deletions

View File

@ -113,6 +113,9 @@ struct work_request_hdr {
#define G_COOKIE(x) (((x) >> S_COOKIE) & M_COOKIE) #define G_COOKIE(x) (((x) >> S_COOKIE) & M_COOKIE)
/* option 0 fields */ /* option 0 fields */
#define S_TX_CHAN 2
#define V_TX_CHAN(x) ((x) << S_TX_CHAN)
#define S_DELACK 5 #define S_DELACK 5
#define V_DELACK(x) ((x) << S_DELACK) #define V_DELACK(x) ((x) << S_DELACK)
@ -145,6 +148,9 @@ struct work_request_hdr {
#define V_RX_CHANNEL(x) ((x) << S_RX_CHANNEL) #define V_RX_CHANNEL(x) ((x) << S_RX_CHANNEL)
#define F_RX_CHANNEL V_RX_CHANNEL(1U) #define F_RX_CHANNEL V_RX_CHANNEL(1U)
#define S_CCTRL_ECN 27
#define V_CCTRL_ECN(x) ((x) << S_CCTRL_ECN)
#define S_T5_OPT_2_VALID 31 #define S_T5_OPT_2_VALID 31
#define V_T5_OPT_2_VALID(x) ((x) << S_T5_OPT_2_VALID) #define V_T5_OPT_2_VALID(x) ((x) << S_T5_OPT_2_VALID)
#define F_T5_OPT_2_VALID V_T5_OPT_2_VALID(1U) #define F_T5_OPT_2_VALID V_T5_OPT_2_VALID(1U)

View File

@ -71,6 +71,15 @@ int validate_filter(struct adapter *adapter, struct ch_filter_specification *fs)
#undef S #undef S
#undef U #undef U
/*
* If the user is requesting that the filter action loop
* matching packets back out one of our ports, make sure that
* the egress port is in range.
*/
if (fs->action == FILTER_SWITCH &&
fs->eport >= adapter->params.nports)
return -ERANGE;
/* /*
* Don't allow various trivially obvious bogus out-of-range * Don't allow various trivially obvious bogus out-of-range
* values ... * values ...
@ -419,6 +428,7 @@ static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf,
req->opt0 = cpu_to_be64(V_DELACK(f->fs.hitcnts) | req->opt0 = cpu_to_be64(V_DELACK(f->fs.hitcnts) |
V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F) V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
<< 1) | << 1) |
V_TX_CHAN(f->fs.eport) |
V_ULP_MODE(ULP_MODE_NONE) | V_ULP_MODE(ULP_MODE_NONE) |
F_TCAM_BYPASS | F_NON_OFFLOAD); F_TCAM_BYPASS | F_NON_OFFLOAD);
req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f))); req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
@ -427,7 +437,8 @@ static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf,
F_T5_OPT_2_VALID | F_T5_OPT_2_VALID |
F_RX_CHANNEL | F_RX_CHANNEL |
V_CONG_CNTRL((f->fs.action == FILTER_DROP) | V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
(f->fs.dirsteer << 1))); (f->fs.dirsteer << 1)) |
V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
} }
/** /**
@ -460,6 +471,7 @@ static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf,
req->opt0 = cpu_to_be64(V_DELACK(f->fs.hitcnts) | req->opt0 = cpu_to_be64(V_DELACK(f->fs.hitcnts) |
V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F) V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
<< 1) | << 1) |
V_TX_CHAN(f->fs.eport) |
V_ULP_MODE(ULP_MODE_NONE) | V_ULP_MODE(ULP_MODE_NONE) |
F_TCAM_BYPASS | F_NON_OFFLOAD); F_TCAM_BYPASS | F_NON_OFFLOAD);
req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f))); req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
@ -468,7 +480,8 @@ static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf,
F_T5_OPT_2_VALID | F_T5_OPT_2_VALID |
F_RX_CHANNEL | F_RX_CHANNEL |
V_CONG_CNTRL((f->fs.action == FILTER_DROP) | V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
(f->fs.dirsteer << 1))); (f->fs.dirsteer << 1)) |
V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
} }
/** /**
@ -666,7 +679,9 @@ int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
fwr->del_filter_to_l2tix = fwr->del_filter_to_l2tix =
cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) | cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) | V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) | V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
V_FW_FILTER_WR_PRIO(f->fs.prio)); V_FW_FILTER_WR_PRIO(f->fs.prio));
fwr->ethtype = cpu_to_be16(f->fs.val.ethtype); fwr->ethtype = cpu_to_be16(f->fs.val.ethtype);
fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype); fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype);

View File

@ -98,6 +98,8 @@ struct ch_filter_specification {
uint32_t dirsteer:1; /* 0 => RSS, 1 => steer to iq */ uint32_t dirsteer:1; /* 0 => RSS, 1 => steer to iq */
uint32_t iq:10; /* ingress queue */ uint32_t iq:10; /* ingress queue */
uint32_t eport:2; /* egress port to switch packet out */
/* Filter rule value/mask pairs. */ /* Filter rule value/mask pairs. */
struct ch_filter_tuple val; struct ch_filter_tuple val;
struct ch_filter_tuple mask; struct ch_filter_tuple mask;
@ -105,7 +107,8 @@ struct ch_filter_specification {
enum { enum {
FILTER_PASS = 0, /* default */ FILTER_PASS = 0, /* default */
FILTER_DROP FILTER_DROP,
FILTER_SWITCH
}; };
enum filter_type { enum filter_type {

View File

@ -326,6 +326,28 @@ static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx)
return 0; return 0;
} }
/*
 * Parse a switch-type flow action into the filter specification.
 *
 * Only RTE_FLOW_ACTION_TYPE_PHY_PORT is understood here: its port
 * index is recorded as the egress port (fs->eport) that matched
 * packets will be looped out of. Any other action type is rejected
 * with -EINVAL via rte_flow_error_set().
 *
 * Returns 0 on success, negative errno on failure.
 */
static int
ch_rte_parse_atype_switch(const struct rte_flow_action *a,
			  struct ch_filter_specification *fs,
			  struct rte_flow_error *e)
{
	const struct rte_flow_action_phy_port *phy_port;

	/* Guard clause: we are only ever called for switch actions;
	 * anything else reaching this point is a caller bug.
	 */
	if (a->type != RTE_FLOW_ACTION_TYPE_PHY_PORT)
		return rte_flow_error_set(e, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, a,
					  "Action not supported");

	phy_port = (const struct rte_flow_action_phy_port *)a->conf;
	/* NOTE(review): range check against adapter->params.nports is
	 * done later in validate_filter(), not here.
	 */
	fs->eport = phy_port->index;
	return 0;
}
static int static int
cxgbe_rtef_parse_actions(struct rte_flow *flow, cxgbe_rtef_parse_actions(struct rte_flow *flow,
const struct rte_flow_action action[], const struct rte_flow_action action[],
@ -335,6 +357,7 @@ cxgbe_rtef_parse_actions(struct rte_flow *flow,
const struct rte_flow_action_queue *q; const struct rte_flow_action_queue *q;
const struct rte_flow_action *a; const struct rte_flow_action *a;
char abit = 0; char abit = 0;
int ret;
for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) { for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {
switch (a->type) { switch (a->type) {
@ -368,6 +391,19 @@ cxgbe_rtef_parse_actions(struct rte_flow *flow,
case RTE_FLOW_ACTION_TYPE_COUNT: case RTE_FLOW_ACTION_TYPE_COUNT:
fs->hitcnts = 1; fs->hitcnts = 1;
break; break;
case RTE_FLOW_ACTION_TYPE_PHY_PORT:
/* We allow multiple switch actions, but switch is
* not compatible with either queue or drop
*/
if (abit++ && fs->action != FILTER_SWITCH)
return rte_flow_error_set(e, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, a,
"overlapping action specified");
ret = ch_rte_parse_atype_switch(a, fs, e);
if (ret)
return ret;
fs->action = FILTER_SWITCH;
break;
default: default:
/* Not supported action : return error */ /* Not supported action : return error */
return rte_flow_error_set(e, ENOTSUP, return rte_flow_error_set(e, ENOTSUP,