ethdev: remove legacy mirroring API

The more fine-grained flow API action RTE_FLOW_ACTION_TYPE_SAMPLE
should be used instead.

Signed-off-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
Acked-by: Jerin Jacob <jerinj@marvell.com>
Acked-by: Haiyue Wang <haiyue.wang@intel.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
This commit is contained in:
Andrew Rybchenko 2021-09-29 11:39:50 +03:00 committed by Ferruh Yigit
parent 851f03e1ea
commit b225783dda
20 changed files with 7 additions and 1071 deletions

View File

@ -567,24 +567,6 @@ static void cmd_help_long_parsed(void *parsed_result,
"queue_mask (queue_mask_value)\n"
" Set rate limit for queues in VF of a port\n\n"
"set port (port_id) mirror-rule (rule_id)"
" (pool-mirror-up|pool-mirror-down|vlan-mirror)"
" (poolmask|vlanid[,vlanid]*) dst-pool (pool_id) (on|off)\n"
" Set pool or vlan type mirror rule on a port.\n"
" e.g., 'set port 0 mirror-rule 0 vlan-mirror 0,1"
" dst-pool 0 on' enable mirror traffic with vlan 0,1"
" to pool 0.\n\n"
"set port (port_id) mirror-rule (rule_id)"
" (uplink-mirror|downlink-mirror) dst-pool"
" (pool_id) (on|off)\n"
" Set uplink or downlink type mirror rule on a port.\n"
" e.g., 'set port 0 mirror-rule 0 uplink-mirror dst-pool"
" 0 on' enable mirror income traffic to pool 0.\n\n"
"reset port (port_id) mirror-rule (rule_id)\n"
" Reset a mirror rule.\n\n"
"set flush_rx (on|off)\n"
" Flush (default) or don't flush RX streams before"
" forwarding. Mainly used with PCAP drivers.\n\n"
@ -9494,268 +9476,6 @@ cmdline_parse_inst_t cmd_cfg_tunnel_udp_port = {
},
};
/* *** CONFIGURE VM MIRROR VLAN/POOL RULE *** */
/* Parsed result of:
 * "set port <port_id> mirror-rule <rule_id>
 *  (pool-mirror-up|pool-mirror-down|vlan-mirror)
 *  (poolmask|vlanid[,vlanid]*) dst-pool <pool_id> (on|off)"
 */
struct cmd_set_mirror_mask_result {
cmdline_fixed_string_t set; /* literal "set" */
cmdline_fixed_string_t port; /* literal "port" */
portid_t port_id; /* target ethdev port */
cmdline_fixed_string_t mirror; /* literal "mirror-rule" */
uint8_t rule_id; /* mirror rule slot to program */
cmdline_fixed_string_t what; /* mirror type keyword */
cmdline_fixed_string_t value; /* hex pool mask or vlan id list */
cmdline_fixed_string_t dstpool; /* literal "dst-pool" */
uint8_t dstpool_id; /* destination pool for mirrored traffic */
cmdline_fixed_string_t on; /* "on" or "off" */
};
/* Token definitions for the pool/vlan mirror rule command above. */
cmdline_parse_token_string_t cmd_mirror_mask_set =
TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_mask_result,
set, "set");
cmdline_parse_token_string_t cmd_mirror_mask_port =
TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_mask_result,
port, "port");
cmdline_parse_token_num_t cmd_mirror_mask_portid =
TOKEN_NUM_INITIALIZER(struct cmd_set_mirror_mask_result,
port_id, RTE_UINT16);
cmdline_parse_token_string_t cmd_mirror_mask_mirror =
TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_mask_result,
mirror, "mirror-rule");
cmdline_parse_token_num_t cmd_mirror_mask_ruleid =
TOKEN_NUM_INITIALIZER(struct cmd_set_mirror_mask_result,
rule_id, RTE_UINT8);
/* '#' separates the keyword alternatives accepted for this token. */
cmdline_parse_token_string_t cmd_mirror_mask_what =
TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_mask_result,
what, "pool-mirror-up#pool-mirror-down"
"#vlan-mirror");
/* NULL pattern: accept any string; parsed later by the handler. */
cmdline_parse_token_string_t cmd_mirror_mask_value =
TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_mask_result,
value, NULL);
cmdline_parse_token_string_t cmd_mirror_mask_dstpool =
TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_mask_result,
dstpool, "dst-pool");
cmdline_parse_token_num_t cmd_mirror_mask_poolid =
TOKEN_NUM_INITIALIZER(struct cmd_set_mirror_mask_result,
dstpool_id, RTE_UINT8);
cmdline_parse_token_string_t cmd_mirror_mask_on =
TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_mask_result,
on, "on#off");
/*
 * Handler for "set port ... mirror-rule ... (pool-mirror-up|
 * pool-mirror-down|vlan-mirror) ...": build an rte_eth_mirror_conf from
 * the parsed tokens and program (or clear) the rule on the port.
 */
static void
cmd_set_mirror_mask_parsed(void *parsed_result,
			__rte_unused struct cmdline *cl,
			__rte_unused void *data)
{
	struct cmd_set_mirror_mask_result *res = parsed_result;
	struct rte_eth_mirror_conf conf;
	unsigned int vlans[ETH_MIRROR_MAX_VLANS];
	int status;

	memset(&conf, 0, sizeof(conf));
	conf.dst_pool = res->dstpool_id;

	if (strcmp(res->what, "pool-mirror-up") == 0) {
		conf.rule_type = ETH_MIRROR_VIRTUAL_POOL_UP;
		/* value token is a hexadecimal pool bitmask */
		conf.pool_mask = strtoull(res->value, NULL, 16);
	} else if (strcmp(res->what, "pool-mirror-down") == 0) {
		conf.rule_type = ETH_MIRROR_VIRTUAL_POOL_DOWN;
		conf.pool_mask = strtoull(res->value, NULL, 16);
	} else if (strcmp(res->what, "vlan-mirror") == 0) {
		int count;
		int idx;

		conf.rule_type = ETH_MIRROR_VLAN;
		/* value token is a comma-separated vlan id list */
		count = parse_item_list(res->value, "vlan",
					ETH_MIRROR_MAX_VLANS, vlans, 1);
		if (count <= 0)
			return;
		for (idx = 0; idx < count; idx++) {
			if (vlans[idx] > RTE_ETHER_MAX_VLAN_ID) {
				fprintf(stderr,
					"Invalid vlan_id: must be < 4096\n");
				return;
			}
			conf.vlan.vlan_id[idx] = (uint16_t)vlans[idx];
			conf.vlan.vlan_mask |= 1ULL << idx;
		}
	}

	status = rte_eth_mirror_rule_set(res->port_id, &conf, res->rule_id,
					strcmp(res->on, "on") == 0 ? 1 : 0);
	if (status < 0)
		fprintf(stderr, "mirror rule add error: (%s)\n",
			strerror(-status));
}
/* testpmd command registration: pool/vlan type mirror rule setup. */
cmdline_parse_inst_t cmd_set_mirror_mask = {
.f = cmd_set_mirror_mask_parsed,
.data = NULL,
.help_str = "set port <port_id> mirror-rule <rule_id> "
"pool-mirror-up|pool-mirror-down|vlan-mirror "
"<pool_mask|vlan_id[,vlan_id]*> dst-pool <pool_id> on|off",
.tokens = {
(void *)&cmd_mirror_mask_set,
(void *)&cmd_mirror_mask_port,
(void *)&cmd_mirror_mask_portid,
(void *)&cmd_mirror_mask_mirror,
(void *)&cmd_mirror_mask_ruleid,
(void *)&cmd_mirror_mask_what,
(void *)&cmd_mirror_mask_value,
(void *)&cmd_mirror_mask_dstpool,
(void *)&cmd_mirror_mask_poolid,
(void *)&cmd_mirror_mask_on,
NULL,
},
};
/* *** CONFIGURE VM MIRROR UPLINK/DOWNLINK RULE *** */
/* Parsed result of:
 * "set port <port_id> mirror-rule <rule_id>
 *  (uplink-mirror|downlink-mirror) dst-pool <pool_id> (on|off)"
 */
struct cmd_set_mirror_link_result {
cmdline_fixed_string_t set; /* literal "set" */
cmdline_fixed_string_t port; /* literal "port" */
portid_t port_id; /* target ethdev port */
cmdline_fixed_string_t mirror; /* literal "mirror-rule" */
uint8_t rule_id; /* mirror rule slot to program */
cmdline_fixed_string_t what; /* "uplink-mirror" or "downlink-mirror" */
cmdline_fixed_string_t dstpool; /* literal "dst-pool" */
uint8_t dstpool_id; /* destination pool for mirrored traffic */
cmdline_fixed_string_t on; /* "on" or "off" */
};
/* Token definitions for the uplink/downlink mirror rule command above. */
cmdline_parse_token_string_t cmd_mirror_link_set =
TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_link_result,
set, "set");
cmdline_parse_token_string_t cmd_mirror_link_port =
TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_link_result,
port, "port");
cmdline_parse_token_num_t cmd_mirror_link_portid =
TOKEN_NUM_INITIALIZER(struct cmd_set_mirror_link_result,
port_id, RTE_UINT16);
cmdline_parse_token_string_t cmd_mirror_link_mirror =
TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_link_result,
mirror, "mirror-rule");
cmdline_parse_token_num_t cmd_mirror_link_ruleid =
TOKEN_NUM_INITIALIZER(struct cmd_set_mirror_link_result,
rule_id, RTE_UINT8);
/* '#' separates the keyword alternatives accepted for this token. */
cmdline_parse_token_string_t cmd_mirror_link_what =
TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_link_result,
what, "uplink-mirror#downlink-mirror");
cmdline_parse_token_string_t cmd_mirror_link_dstpool =
TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_link_result,
dstpool, "dst-pool");
cmdline_parse_token_num_t cmd_mirror_link_poolid =
TOKEN_NUM_INITIALIZER(struct cmd_set_mirror_link_result,
dstpool_id, RTE_UINT8);
cmdline_parse_token_string_t cmd_mirror_link_on =
TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_link_result,
on, "on#off");
/*
 * Handler for "set port ... mirror-rule ... (uplink-mirror|
 * downlink-mirror) dst-pool ... on|off": program (or clear) a link-type
 * mirror rule on the port.
 */
static void
cmd_set_mirror_link_parsed(void *parsed_result,
			__rte_unused struct cmdline *cl,
			__rte_unused void *data)
{
	struct cmd_set_mirror_link_result *res = parsed_result;
	struct rte_eth_mirror_conf conf;
	int enable;
	int status;

	memset(&conf, 0, sizeof(conf));
	conf.rule_type = strcmp(res->what, "uplink-mirror") == 0 ?
			ETH_MIRROR_UPLINK_PORT : ETH_MIRROR_DOWNLINK_PORT;
	conf.dst_pool = res->dstpool_id;

	enable = strcmp(res->on, "on") == 0;
	status = rte_eth_mirror_rule_set(res->port_id, &conf,
					res->rule_id, enable ? 1 : 0);
	/* check the return value and print it if is < 0 */
	if (status < 0)
		fprintf(stderr, "mirror rule add error: (%s)\n",
			strerror(-status));
}
/* testpmd command registration: uplink/downlink type mirror rule setup. */
cmdline_parse_inst_t cmd_set_mirror_link = {
.f = cmd_set_mirror_link_parsed,
.data = NULL,
.help_str = "set port <port_id> mirror-rule <rule_id> "
"uplink-mirror|downlink-mirror dst-pool <pool_id> on|off",
.tokens = {
(void *)&cmd_mirror_link_set,
(void *)&cmd_mirror_link_port,
(void *)&cmd_mirror_link_portid,
(void *)&cmd_mirror_link_mirror,
(void *)&cmd_mirror_link_ruleid,
(void *)&cmd_mirror_link_what,
(void *)&cmd_mirror_link_dstpool,
(void *)&cmd_mirror_link_poolid,
(void *)&cmd_mirror_link_on,
NULL,
},
};
/* *** RESET VM MIRROR RULE *** */
/* Parsed result of "reset port <port_id> mirror-rule <rule_id>". */
struct cmd_rm_mirror_rule_result {
cmdline_fixed_string_t reset; /* literal "reset" */
cmdline_fixed_string_t port; /* literal "port" */
portid_t port_id; /* target ethdev port */
cmdline_fixed_string_t mirror; /* literal "mirror-rule" */
uint8_t rule_id; /* mirror rule slot to clear */
};
/* Token definitions for the mirror rule reset command above. */
cmdline_parse_token_string_t cmd_rm_mirror_rule_reset =
TOKEN_STRING_INITIALIZER(struct cmd_rm_mirror_rule_result,
reset, "reset");
cmdline_parse_token_string_t cmd_rm_mirror_rule_port =
TOKEN_STRING_INITIALIZER(struct cmd_rm_mirror_rule_result,
port, "port");
cmdline_parse_token_num_t cmd_rm_mirror_rule_portid =
TOKEN_NUM_INITIALIZER(struct cmd_rm_mirror_rule_result,
port_id, RTE_UINT16);
cmdline_parse_token_string_t cmd_rm_mirror_rule_mirror =
TOKEN_STRING_INITIALIZER(struct cmd_rm_mirror_rule_result,
mirror, "mirror-rule");
cmdline_parse_token_num_t cmd_rm_mirror_rule_ruleid =
TOKEN_NUM_INITIALIZER(struct cmd_rm_mirror_rule_result,
rule_id, RTE_UINT8);
/*
 * Handler for "reset port <port_id> mirror-rule <rule_id>": remove the
 * given mirror rule from the port.
 */
static void
cmd_reset_mirror_rule_parsed(void *parsed_result,
			__rte_unused struct cmdline *cl,
			__rte_unused void *data)
{
	/*
	 * Fix: parsed_result was previously cast to
	 * struct cmd_set_mirror_link_result, which belongs to a different
	 * command. It only worked because both structs start with the same
	 * five fields; use the correct result type for this command.
	 */
	struct cmd_rm_mirror_rule_result *res = parsed_result;
	int ret;

	ret = rte_eth_mirror_rule_reset(res->port_id, res->rule_id);
	if (ret < 0)
		fprintf(stderr, "mirror rule remove error: (%s)\n",
			strerror(-ret));
}
/* testpmd command registration: mirror rule removal. */
cmdline_parse_inst_t cmd_reset_mirror_rule = {
.f = cmd_reset_mirror_rule_parsed,
.data = NULL,
.help_str = "reset port <port_id> mirror-rule <rule_id>",
.tokens = {
(void *)&cmd_rm_mirror_rule_reset,
(void *)&cmd_rm_mirror_rule_port,
(void *)&cmd_rm_mirror_rule_portid,
(void *)&cmd_rm_mirror_rule_mirror,
(void *)&cmd_rm_mirror_rule_ruleid,
NULL,
},
};
/* ******************************************************************************** */
struct cmd_dump_result {
@ -17677,9 +17397,6 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_vf_mac_addr_filter,
(cmdline_parse_inst_t *)&cmd_queue_rate_limit,
(cmdline_parse_inst_t *)&cmd_tunnel_udp_config,
(cmdline_parse_inst_t *)&cmd_set_mirror_mask,
(cmdline_parse_inst_t *)&cmd_set_mirror_link,
(cmdline_parse_inst_t *)&cmd_reset_mirror_rule,
(cmdline_parse_inst_t *)&cmd_showport_rss_hash,
(cmdline_parse_inst_t *)&cmd_showport_rss_hash_key,
(cmdline_parse_inst_t *)&cmd_config_rss_hash_key,

View File

@ -408,17 +408,6 @@ Supports Tx rate limitation for a queue.
* **[related] API**: ``rte_eth_set_queue_rate_limit()``.
.. _nic_features_traffic_mirroring:
Traffic mirroring
-----------------
Supports adding traffic mirroring rules.
* **[implements] eth_dev_ops**: ``mirror_rule_set``, ``mirror_rule_reset``.
* **[related] API**: ``rte_eth_mirror_rule_set()``, ``rte_eth_mirror_rule_reset()``.
.. _nic_features_inline_crypto_doc:
Inline crypto

View File

@ -40,7 +40,6 @@ DCB =
VLAN filter =
Flow control =
Rate limitation =
Traffic mirroring =
Inline crypto =
Inline protocol =
CRC offload =

View File

@ -28,7 +28,6 @@ SR-IOV = Y
DCB = Y
VLAN filter = Y
Flow control = Y
Traffic mirroring = Y
CRC offload = Y
VLAN offload = Y
QinQ offload = P

View File

@ -26,7 +26,6 @@ SR-IOV = Y
DCB = Y
VLAN filter = Y
Flow control = Y
Traffic mirroring = Y
CRC offload = Y
VLAN offload = Y
QinQ offload = Y

View File

@ -28,7 +28,6 @@ DCB = Y
VLAN filter = Y
Flow control = Y
Rate limitation = Y
Traffic mirroring = Y
Inline crypto = Y
CRC offload = P
VLAN offload = P

View File

@ -111,12 +111,6 @@ Deprecation Notices
Existing ``rte_eth_rx_descriptor_status`` and ``rte_eth_tx_descriptor_status``
APIs can be used as replacement.
* ethdev: The port mirroring API can be replaced with a more fine grain flow API.
The structs ``rte_eth_mirror_conf``, ``rte_eth_vlan_mirror`` and the functions
``rte_eth_mirror_rule_set``, ``rte_eth_mirror_rule_reset`` will be marked
as deprecated in DPDK 20.11, along with the associated macros ``ETH_MIRROR_*``.
This API will be fully removed in DPDK 21.11.
* ethdev: Announce moving from dedicated modify function for each field,
to using the general ``rte_flow_modify_field`` action.

View File

@ -161,6 +161,13 @@ Removed Items
blacklist/whitelist are removed. Users must use the new
block/allow list arguments.
* ethdev: Removed the port mirroring API. A more fine-grain flow API
action ``RTE_FLOW_ACTION_TYPE_SAMPLE`` should be used instead.
The structures ``rte_eth_mirror_conf`` and ``rte_eth_vlan_mirror`` and
the functions ``rte_eth_mirror_rule_set`` and
``rte_eth_mirror_rule_reset`` along with the associated macros
``ETH_MIRROR_*`` are removed.
* i40e: Removed i40evf driver.
iavf already became the default VF driver for i40e devices,
so there is no need to maintain i40evf.

View File

@ -1610,31 +1610,6 @@ Set TX rate limitation for queues in VF on a port::
testpmd> set port (port_id) vf (vf_id) rate (rate_value) queue_mask (queue_mask)
set port - mirror rule
~~~~~~~~~~~~~~~~~~~~~~
Set pool or vlan type mirror rule for a port::
testpmd> set port (port_id) mirror-rule (rule_id) \
(pool-mirror-up|pool-mirror-down|vlan-mirror) \
(poolmask|vlanid[,vlanid]*) dst-pool (pool_id) (on|off)
Set link mirror rule for a port::
testpmd> set port (port_id) mirror-rule (rule_id) \
(uplink-mirror|downlink-mirror) dst-pool (pool_id) (on|off)
For example, to enable mirror traffic with vlan 0,1 to pool 0::
set port 0 mirror-rule 0 vlan-mirror 0,1 dst-pool 0 on
reset port - mirror rule
~~~~~~~~~~~~~~~~~~~~~~~~
Reset a mirror rule for a port::
testpmd> reset port (port_id) mirror-rule (rule_id)
set flush_rx
~~~~~~~~~~~~

View File

@ -347,16 +347,6 @@ static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
static void i40e_configure_registers(struct i40e_hw *hw);
static void i40e_hw_init(struct rte_eth_dev *dev);
static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw,
uint16_t seid,
uint16_t rule_type,
uint16_t *entries,
uint16_t count,
uint16_t rule_id);
static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
struct rte_eth_mirror_conf *mirror_conf,
uint8_t sw_id, uint8_t on);
static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);
static int i40e_timesync_enable(struct rte_eth_dev *dev);
static int i40e_timesync_disable(struct rte_eth_dev *dev);
@ -507,8 +497,6 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
.txq_info_get = i40e_txq_info_get,
.rx_burst_mode_get = i40e_rx_burst_mode_get,
.tx_burst_mode_get = i40e_tx_burst_mode_get,
.mirror_rule_set = i40e_mirror_rule_set,
.mirror_rule_reset = i40e_mirror_rule_reset,
.timesync_enable = i40e_timesync_enable,
.timesync_disable = i40e_timesync_disable,
.timesync_read_rx_timestamp = i40e_timesync_read_rx_timestamp,
@ -1726,9 +1714,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
*/
i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, false, 0, NULL);
/* initialize mirror rule list */
TAILQ_INIT(&pf->mirror_list);
/* initialize RSS rule list */
TAILQ_INIT(&pf->rss_config_list);
@ -2600,7 +2585,6 @@ i40e_dev_close(struct rte_eth_dev *dev)
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_mirror_rule *p_mirror;
struct i40e_filter_control_settings settings;
struct rte_flow *p_flow;
uint32_t reg;
@ -2620,25 +2604,6 @@ i40e_dev_close(struct rte_eth_dev *dev)
ret = i40e_dev_stop(dev);
/* Remove all mirror rules */
while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
ret = i40e_aq_del_mirror_rule(hw,
pf->main_vsi->veb->seid,
p_mirror->rule_type,
p_mirror->entries,
p_mirror->num_entries,
p_mirror->id);
if (ret < 0)
PMD_DRV_LOG(ERR, "failed to remove mirror rule: "
"status = %d, aq_err = %d.", ret,
hw->aq.asq_last_status);
/* remove mirror software resource anyway */
TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
rte_free(p_mirror);
pf->nb_mirror_rule--;
}
i40e_dev_free_queues(dev);
/* Disable interrupt */
@ -10236,323 +10201,6 @@ i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
return 0;
}
/**
 * i40e_aq_add_mirror_rule
 * @hw: pointer to the hardware structure
 * @seid: VEB seid to add mirror rule to
 * @dst_id: destination vsi seid
 * @rule_type: rule type (I40E_AQC_MIRROR_RULE_TYPE_*), written into the
 *             descriptor shifted by I40E_AQC_MIRROR_RULE_TYPE_SHIFT
 * @entries: Buffer which contains the entities to be mirrored
 * @count: number of entities contained in the buffer
 * @rule_id: out parameter, the rule_id assigned by firmware
 *
 * Add a mirror rule for a given veb by sending an admin queue command.
 * Returns the admin queue status code.
 *
 **/
static enum i40e_status_code
i40e_aq_add_mirror_rule(struct i40e_hw *hw,
uint16_t seid, uint16_t dst_id,
uint16_t rule_type, uint16_t *entries,
uint16_t count, uint16_t *rule_id)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_delete_mirror_rule cmd;
/* completion data is returned in-place in the descriptor params */
struct i40e_aqc_add_delete_mirror_rule_completion *resp =
(struct i40e_aqc_add_delete_mirror_rule_completion *)
&desc.params.raw;
uint16_t buff_len;
enum i40e_status_code status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_add_mirror_rule);
memset(&cmd, 0, sizeof(cmd));
/* indirect buffer carries the uint16_t entry list */
buff_len = sizeof(uint16_t) * count;
desc.datalen = rte_cpu_to_le_16(buff_len);
if (buff_len > 0)
desc.flags |= rte_cpu_to_le_16(
(uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
cmd.rule_type = rte_cpu_to_le_16(rule_type <<
I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
cmd.num_entries = rte_cpu_to_le_16(count);
cmd.seid = rte_cpu_to_le_16(seid);
cmd.destination = rte_cpu_to_le_16(dst_id);
rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
/* NOTE(review): resp fields are read and *rule_id is written even when
 * the command failed; callers are expected to check the returned status
 * before trusting *rule_id.
 */
PMD_DRV_LOG(INFO,
"i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u mirror_rules_used = %u, mirror_rules_free = %u,",
hw->aq.asq_last_status, resp->rule_id,
resp->mirror_rules_used, resp->mirror_rules_free);
*rule_id = rte_le_to_cpu_16(resp->rule_id);
return status;
}
/**
 * i40e_aq_del_mirror_rule
 * @hw: pointer to the hardware structure
 * @seid: VEB seid to delete the mirror rule from
 * @rule_type: rule type (I40E_AQC_MIRROR_RULE_TYPE_*)
 * @entries: Buffer which contains the entities to be mirrored
 *           (only used for VLAN-type rules)
 * @count: number of entities contained in the buffer
 * @rule_id: the rule_id of the rule to be deleted
 *           (only used for non-VLAN rules)
 *
 * Delete a mirror rule for a given veb by sending an admin queue command.
 * Returns the admin queue status code.
 *
 **/
static enum i40e_status_code
i40e_aq_del_mirror_rule(struct i40e_hw *hw,
uint16_t seid, uint16_t rule_type, uint16_t *entries,
uint16_t count, uint16_t rule_id)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_delete_mirror_rule cmd;
uint16_t buff_len = 0;
enum i40e_status_code status;
void *buff = NULL;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_delete_mirror_rule);
memset(&cmd, 0, sizeof(cmd));
if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
/* VLAN rules are deleted by re-sending the vlan id list */
desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
I40E_AQ_FLAG_RD));
cmd.num_entries = count;
buff_len = sizeof(uint16_t) * count;
desc.datalen = rte_cpu_to_le_16(buff_len);
buff = (void *)entries;
} else
/* rule id is filled in destination field for deleting mirror rule */
cmd.destination = rte_cpu_to_le_16(rule_id);
cmd.rule_type = rte_cpu_to_le_16(rule_type <<
I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
cmd.seid = rte_cpu_to_le_16(seid);
rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
return status;
}
/**
 * i40e_mirror_rule_set
 * @dev: pointer to the ethdev structure
 * @mirror_conf: mirror rule info (type, pool mask / vlan list, dst pool)
 * @sw_id: software index of the mirror rule; the sorted rule list is
 *         keyed on this index
 * @on: 1 to add the rule, 0 to remove it
 *
 * Add (on != 0) or remove (on == 0) a mirror rule. Translates the generic
 * ethdev mirror configuration into an i40e admin queue rule and keeps a
 * software copy in pf->mirror_list (sorted by sw_id).
 *
 * Returns 0 on success or a negative errno-style code.
 *
 **/
static int
i40e_mirror_rule_set(struct rte_eth_dev *dev,
struct rte_eth_mirror_conf *mirror_conf,
uint8_t sw_id, uint8_t on)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_mirror_rule *it, *mirr_rule = NULL;
struct i40e_mirror_rule *parent = NULL;
uint16_t seid, dst_seid, rule_id;
uint16_t i, j = 0;
int ret;
PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
/* mirroring happens on the VEB between PF and VFs */
if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
PMD_DRV_LOG(ERR,
"mirror rule can not be configured without veb or vfs.");
return -ENOSYS;
}
/* NOTE(review): '>' allows nb_mirror_rule to reach
 * I40E_MAX_MIRROR_RULES + 1 before rejecting; '>=' looks intended —
 * confirm against the firmware rule table size.
 */
if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) {
PMD_DRV_LOG(ERR, "mirror table is full.");
return -ENOSPC;
}
if (mirror_conf->dst_pool > pf->vf_num) {
PMD_DRV_LOG(ERR, "invalid destination pool %u.",
mirror_conf->dst_pool);
return -EINVAL;
}
seid = pf->main_vsi->veb->seid;
/* find insertion point in the list sorted by sw index; 'parent' ends
 * up as the node to insert after
 */
TAILQ_FOREACH(it, &pf->mirror_list, rules) {
if (sw_id <= it->index) {
mirr_rule = it;
break;
}
parent = it;
}
if (mirr_rule && sw_id == mirr_rule->index) {
/* a rule with this sw_id already exists */
if (on) {
PMD_DRV_LOG(ERR, "mirror rule exists.");
return -EEXIST;
} else {
/* on == 0: delete the existing rule */
ret = i40e_aq_del_mirror_rule(hw, seid,
mirr_rule->rule_type,
mirr_rule->entries,
mirr_rule->num_entries, mirr_rule->id);
if (ret < 0) {
PMD_DRV_LOG(ERR,
"failed to remove mirror rule: ret = %d, aq_err = %d.",
ret, hw->aq.asq_last_status);
return -ENOSYS;
}
TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
rte_free(mirr_rule);
pf->nb_mirror_rule--;
return 0;
}
} else if (!on) {
PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
return -ENOENT;
}
mirr_rule = rte_zmalloc("i40e_mirror_rule",
sizeof(struct i40e_mirror_rule) , 0);
if (!mirr_rule) {
PMD_DRV_LOG(ERR, "failed to allocate memory");
return I40E_ERR_NO_MEMORY;
}
/* translate the generic rule type into AQ entries + rule type;
 * 'j' counts the entries filled in
 */
switch (mirror_conf->rule_type) {
case ETH_MIRROR_VLAN:
for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
mirr_rule->entries[j] =
mirror_conf->vlan.vlan_id[i];
j++;
}
}
if (j == 0) {
PMD_DRV_LOG(ERR, "vlan is not specified.");
rte_free(mirr_rule);
return -EINVAL;
}
mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
break;
case ETH_MIRROR_VIRTUAL_POOL_UP:
case ETH_MIRROR_VIRTUAL_POOL_DOWN:
/* check if the specified pool bit is out of range */
if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) {
PMD_DRV_LOG(ERR, "pool mask is out of range.");
rte_free(mirr_rule);
return -EINVAL;
}
/* pool bits 0..vf_num-1 map to VF VSIs */
for (i = 0, j = 0; i < pf->vf_num; i++) {
if (mirror_conf->pool_mask & (1ULL << i)) {
mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
j++;
}
}
if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
/* add pf vsi to entries */
mirr_rule->entries[j] = pf->main_vsi_seid;
j++;
}
if (j == 0) {
PMD_DRV_LOG(ERR, "pool is not specified.");
rte_free(mirr_rule);
return -EINVAL;
}
/* egress and ingress in aq commands means from switch but not port */
mirr_rule->rule_type =
(mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
break;
case ETH_MIRROR_UPLINK_PORT:
/* egress and ingress in aq commands means from switch but not port*/
mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
break;
case ETH_MIRROR_DOWNLINK_PORT:
mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
break;
default:
PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
mirror_conf->rule_type);
rte_free(mirr_rule);
return -EINVAL;
}
/* If the dst_pool is equal to vf_num, consider it as PF */
if (mirror_conf->dst_pool == pf->vf_num)
dst_seid = pf->main_vsi_seid;
else
dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
mirr_rule->rule_type, mirr_rule->entries,
j, &rule_id);
if (ret < 0) {
PMD_DRV_LOG(ERR,
"failed to add mirror rule: ret = %d, aq_err = %d.",
ret, hw->aq.asq_last_status);
rte_free(mirr_rule);
return -ENOSYS;
}
/* record the firmware-assigned id and insert into the sorted list */
mirr_rule->index = sw_id;
mirr_rule->num_entries = j;
mirr_rule->id = rule_id;
mirr_rule->dst_vsi_seid = dst_seid;
if (parent)
TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
else
TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
pf->nb_mirror_rule++;
return 0;
}
/**
* i40e_mirror_rule_reset
* @dev: pointer to the device
* @sw_id: mirror rule's sw_id
*
* reset a mirror rule.
*
**/
static int
i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_mirror_rule *it, *mirr_rule = NULL;
uint16_t seid;
int ret;
PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
seid = pf->main_vsi->veb->seid;
TAILQ_FOREACH(it, &pf->mirror_list, rules) {
if (sw_id == it->index) {
mirr_rule = it;
break;
}
}
if (mirr_rule) {
ret = i40e_aq_del_mirror_rule(hw, seid,
mirr_rule->rule_type,
mirr_rule->entries,
mirr_rule->num_entries, mirr_rule->id);
if (ret < 0) {
PMD_DRV_LOG(ERR,
"failed to remove mirror rule: status = %d, aq_err = %d.",
ret, hw->aq.asq_last_status);
return -ENOSYS;
}
TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
rte_free(mirr_rule);
pf->nb_mirror_rule--;
} else {
PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
return -ENOENT;
}
return 0;
}
static uint64_t
i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
{

View File

@ -964,27 +964,6 @@ struct i40e_tunnel_filter_conf {
uint16_t vf_id; /**< VF id, available when is_to_vf is 1. */
};
/* Maximum number of vlan ids / VSI seids carried by one mirror rule. */
#define I40E_MIRROR_MAX_ENTRIES_PER_RULE 64
/* Maximum number of mirror rules tracked per PF. */
#define I40E_MAX_MIRROR_RULES 64
/*
 * Mirror rule structure
 */
struct i40e_mirror_rule {
TAILQ_ENTRY(i40e_mirror_rule) rules; /* linkage in pf->mirror_list */
uint8_t rule_type; /* I40E_AQC_MIRROR_RULE_TYPE_* value */
uint16_t index; /* the sw index of mirror rule */
uint16_t id; /* the rule id assigned by firmware */
uint16_t dst_vsi_seid; /* destination vsi for this mirror rule. */
uint16_t num_entries; /* number of valid slots in entries[] */
/* the info stores depend on the rule type.
If type is I40E_MIRROR_TYPE_VLAN, vlan ids are stored here.
If type is I40E_MIRROR_TYPE_VPORT_*, vsi's seid are stored.
*/
uint16_t entries[I40E_MIRROR_MAX_ENTRIES_PER_RULE];
};
/* List of active mirror rules, sorted by sw index. */
TAILQ_HEAD(i40e_mirror_rule_list, i40e_mirror_rule);
TAILQ_HEAD(i40e_flow_list, rte_flow);
/* Struct to store Traffic Manager shaper profile. */
@ -1180,8 +1159,6 @@ struct i40e_pf {
struct i40e_rss_conf_list rss_config_list; /* RSS rule list */
struct i40e_queue_regions queue_region; /* queue region info */
struct i40e_fc_conf fc_conf; /* Flow control conf */
struct i40e_mirror_rule_list mirror_list;
uint16_t nb_mirror_rule; /* The number of mirror rules */
bool floating_veb; /* The flag to use the floating VEB */
/* The floating enable flag for the specific VF */
bool floating_veb_list[I40E_MAX_VF];

View File

@ -281,11 +281,6 @@ static int ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
rte_ether_addr * mac_addr, uint8_t on);
static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
struct rte_eth_mirror_conf *mirror_conf,
uint8_t rule_id, uint8_t on);
static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
uint8_t rule_id);
static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
uint16_t queue_id);
static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
@ -529,8 +524,6 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.mac_addr_set = ixgbe_set_default_mac_addr,
.uc_hash_table_set = ixgbe_uc_hash_table_set,
.uc_all_hash_table_set = ixgbe_uc_all_hash_table_set,
.mirror_rule_set = ixgbe_mirror_rule_set,
.mirror_rule_reset = ixgbe_mirror_rule_reset,
.set_queue_rate_limit = ixgbe_set_queue_rate_limit,
.reta_update = ixgbe_dev_rss_reta_update,
.reta_query = ixgbe_dev_rss_reta_query,
@ -5753,191 +5746,6 @@ ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
return new_val;
}
/* MRCTL register mirror-type bits. */
#define IXGBE_MRCTL_VPME 0x01 /* Virtual Pool Mirroring. */
#define IXGBE_MRCTL_UPME 0x02 /* Uplink Port Mirroring. */
#define IXGBE_MRCTL_DPME 0x04 /* Downlink Port Mirroring. */
#define IXGBE_MRCTL_VLME 0x08 /* VLAN Mirroring. */
/* True when rule_type has any bit outside the four supported types. */
#define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
/*
 * Program (on != 0) or clear (on == 0) mirror rule slot @rule_id from the
 * generic ethdev mirror configuration. Writes the MRCTL control register
 * plus the pool (VMRVM) and VLAN (VMRVLAN) mask register pairs, and keeps
 * a software copy in mr_info->mr_conf[]. Requires virtualization (VT) to
 * be enabled. Returns 0 on success or a negative errno-style code.
 */
static int
ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
struct rte_eth_mirror_conf *mirror_conf,
uint8_t rule_id, uint8_t on)
{
uint32_t mr_ctl, vlvf;
uint32_t mp_lsb = 0;
uint32_t mv_msb = 0;
uint32_t mv_lsb = 0;
uint32_t mp_msb = 0;
uint8_t i = 0;
int reg_index = 0;
uint64_t vlan_mask = 0;
/* 64-bit masks are split across two 32-bit registers */
const uint8_t pool_mask_offset = 32;
const uint8_t vlan_mask_offset = 32;
const uint8_t dst_pool_offset = 8;
/* MSB half of a mask pair lives at register index rule_id + 4 */
const uint8_t rule_mr_offset = 4;
const uint8_t mirror_rule_mask = 0x0F;
struct ixgbe_mirror_info *mr_info =
(IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint8_t mirror_type = 0;
if (ixgbe_vt_check(hw) < 0)
return -ENOTSUP;
if (rule_id >= IXGBE_MAX_MIRROR_RULES)
return -EINVAL;
if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
mirror_conf->rule_type);
return -EINVAL;
}
if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
mirror_type |= IXGBE_MRCTL_VLME;
/* Check if vlan id is valid and find corresponding VLAN ID
 * index in VLVF
 */
for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
/* search vlan id related pool vlan filter
 * index
 */
reg_index = ixgbe_find_vlvf_slot(
hw,
mirror_conf->vlan.vlan_id[i],
false);
if (reg_index < 0)
return -EINVAL;
vlvf = IXGBE_READ_REG(hw,
IXGBE_VLVF(reg_index));
/* the vlan must already be enabled in the
 * pool vlan filter for mirroring to work
 */
if ((vlvf & IXGBE_VLVF_VIEN) &&
((vlvf & IXGBE_VLVF_VLANID_MASK) ==
mirror_conf->vlan.vlan_id[i]))
vlan_mask |= (1ULL << reg_index);
else
return -EINVAL;
}
}
if (on) {
mv_lsb = vlan_mask & 0xFFFFFFFF;
mv_msb = vlan_mask >> vlan_mask_offset;
mr_info->mr_conf[rule_id].vlan.vlan_mask =
mirror_conf->vlan.vlan_mask;
for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
if (mirror_conf->vlan.vlan_mask & (1ULL << i))
mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
mirror_conf->vlan.vlan_id[i];
}
} else {
mv_lsb = 0;
mv_msb = 0;
mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
}
}
/**
 * if enable pool mirror, write related pool mask register, if disable
 * pool mirror, clear PFMRVM register
 */
if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
mirror_type |= IXGBE_MRCTL_VPME;
if (on) {
mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
mr_info->mr_conf[rule_id].pool_mask =
mirror_conf->pool_mask;
} else {
mp_lsb = 0;
mp_msb = 0;
mr_info->mr_conf[rule_id].pool_mask = 0;
}
}
if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
mirror_type |= IXGBE_MRCTL_UPME;
if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
mirror_type |= IXGBE_MRCTL_DPME;
/* read mirror control register and recalculate it */
mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));
if (on) {
mr_ctl |= mirror_type;
mr_ctl &= mirror_rule_mask;
mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
} else {
mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
}
mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
/* write mirror control register */
IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
/* write pool mirror control register */
if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
mp_msb);
}
/* write VLAN mirror control register */
if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
mv_msb);
}
return 0;
}
/*
 * Clear mirror rule slot @rule_id: wipe the cached software copy and
 * zero the MRCTL control register plus the pool (VMRVM) and VLAN
 * (VMRVLAN) mask register pairs. Requires virtualization (VT) to be
 * enabled. Returns 0 on success or a negative errno-style code.
 */
static int
ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
{
	/* MSB half of a mask pair lives at register index rule_id + 4 */
	const uint8_t rule_mr_offset = 4;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_mirror_info *mr_info =
		(IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));

	if (ixgbe_vt_check(hw) < 0)
		return -ENOTSUP;
	if (rule_id >= IXGBE_MAX_MIRROR_RULES)
		return -EINVAL;

	/* drop the cached software copy of the rule */
	memset(&mr_info->mr_conf[rule_id], 0,
		sizeof(struct rte_eth_mirror_conf));

	/* clear PFVMCTL register */
	IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), 0);
	/* clear pool mask register pair */
	IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), 0);
	/* clear vlan mask register pair */
	IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), 0);

	return 0;
}
static int
ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{

View File

@ -245,7 +245,6 @@ struct ixgbe_hwstrip {
* VF data which used by PF host only
*/
#define IXGBE_MAX_VF_MC_ENTRIES 30
#define IXGBE_MAX_MR_RULE_ENTRIES 4 /* number of mirroring rules supported */
#define IXGBE_MAX_UTA 128
struct ixgbe_uta_info {
@ -254,13 +253,6 @@ struct ixgbe_uta_info {
uint32_t uta_shadow[IXGBE_MAX_UTA];
};
#define IXGBE_MAX_MIRROR_RULES 4 /* Maximum nb. of mirror rules. */
/* PF-side software cache of the mirror rules programmed into hardware;
 * kept in sync by ixgbe_mirror_rule_set()/ixgbe_mirror_rule_reset(). */
struct ixgbe_mirror_info {
	struct rte_eth_mirror_conf mr_conf[IXGBE_MAX_MIRROR_RULES];
	/**< store PF mirror rules configuration*/
};
struct ixgbe_vf_info {
uint8_t vf_mac_addresses[RTE_ETHER_ADDR_LEN];
uint16_t vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
@ -489,7 +481,6 @@ struct ixgbe_adapter {
struct ixgbe_vfta shadow_vfta;
struct ixgbe_hwstrip hwstrip;
struct ixgbe_dcb_config dcb_config;
struct ixgbe_mirror_info mr_data;
struct ixgbe_vf_info *vfdata;
struct ixgbe_uta_info uta_info;
#ifdef RTE_LIBRTE_IXGBE_BYPASS

View File

@ -70,8 +70,6 @@ int ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
{
struct ixgbe_vf_info **vfinfo =
IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
struct ixgbe_mirror_info *mirror_info =
IXGBE_DEV_PRIVATE_TO_PFDATA(eth_dev->data->dev_private);
struct ixgbe_uta_info *uta_info =
IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private);
struct ixgbe_hw *hw =
@ -103,7 +101,6 @@ int ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
return ret;
}
memset(mirror_info, 0, sizeof(struct ixgbe_mirror_info));
memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
hw->mac.mc_filter_type = 0;

View File

@ -173,13 +173,6 @@ struct txgbe_uta_info {
uint32_t uta_shadow[TXGBE_MAX_UTA];
};
#define TXGBE_MAX_MIRROR_RULES 4 /* Maximum nb. of mirror rules. */
/* PF-side software cache of the mirror rules programmed into hardware. */
struct txgbe_mirror_info {
	struct rte_eth_mirror_conf mr_conf[TXGBE_MAX_MIRROR_RULES];
	/* store PF mirror rules configuration */
};
struct txgbe_vf_info {
uint8_t vf_mac_addresses[RTE_ETHER_ADDR_LEN];
uint16_t vf_mc_hashes[TXGBE_MAX_VF_MC_ENTRIES];
@ -356,7 +349,6 @@ struct txgbe_adapter {
struct txgbe_vfta shadow_vfta;
struct txgbe_hwstrip hwstrip;
struct txgbe_dcb_config dcb_config;
struct txgbe_mirror_info mr_data;
struct txgbe_vf_info *vfdata;
struct txgbe_uta_info uta_info;
struct txgbe_filter_info filter;

View File

@ -68,7 +68,6 @@ txgbe_mb_intr_setup(struct rte_eth_dev *dev)
int txgbe_pf_host_init(struct rte_eth_dev *eth_dev)
{
struct txgbe_vf_info **vfinfo = TXGBE_DEV_VFDATA(eth_dev);
struct txgbe_mirror_info *mirror_info = TXGBE_DEV_MR_INFO(eth_dev);
struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(eth_dev);
struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
uint16_t vf_num;
@ -99,7 +98,6 @@ int txgbe_pf_host_init(struct rte_eth_dev *eth_dev)
return ret;
}
memset(mirror_info, 0, sizeof(struct txgbe_mirror_info));
memset(uta_info, 0, sizeof(struct txgbe_uta_info));
hw->mac.mc_filter_type = 0;

View File

@ -420,16 +420,6 @@ typedef int (*eth_set_queue_rate_limit_t)(struct rte_eth_dev *dev,
uint16_t tx_rate);
/**< @internal Set queue TX rate */
/* Driver callback: program mirror rule @rule_id from @mirror_conf;
 * @on is 1 to enable the rule, 0 to disable it (see rte_eth_mirror_rule_set). */
typedef int (*eth_mirror_rule_set_t)(struct rte_eth_dev *dev,
				struct rte_eth_mirror_conf *mirror_conf,
				uint8_t rule_id,
				uint8_t on);
/**< @internal Add a traffic mirroring rule on an Ethernet device */
/* Driver callback: remove mirror rule @rule_id previously installed
 * via the mirror_rule_set callback. */
typedef int (*eth_mirror_rule_reset_t)(struct rte_eth_dev *dev,
				uint8_t rule_id);
/**< @internal Remove a traffic mirroring rule on an Ethernet device */
typedef int (*eth_udp_tunnel_port_add_t)(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *tunnel_udp);
/**< @internal Add tunneling UDP port */
@ -894,9 +884,6 @@ struct eth_dev_ops {
eth_uc_hash_table_set_t uc_hash_table_set; /**< Set Unicast Table Array. */
eth_uc_all_hash_table_set_t uc_all_hash_table_set; /**< Set Unicast hash bitmap. */
eth_mirror_rule_set_t mirror_rule_set; /**< Add a traffic mirror rule. */
eth_mirror_rule_reset_t mirror_rule_reset; /**< reset a traffic mirror rule. */
eth_udp_tunnel_port_add_t udp_tunnel_port_add; /** Add UDP tunnel port. */
eth_udp_tunnel_port_del_t udp_tunnel_port_del; /** Del UDP tunnel port. */

View File

@ -4505,67 +4505,6 @@ int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
queue_idx, tx_rate));
}
/*
 * Validate @mirror_conf and delegate installation (or removal, when
 * @on == 0) of mirror rule @rule_id to the driver's mirror_rule_set
 * callback. Returns 0 on success or a negative errno-style value.
 */
int
rte_eth_mirror_rule_set(uint16_t port_id,
			struct rte_eth_mirror_conf *mirror_conf,
			uint8_t rule_id, uint8_t on)
{
	struct rte_eth_dev *ethdev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ethdev = &rte_eth_devices[port_id];

	if (mirror_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u mirror rule from NULL config\n",
			port_id);
		return -EINVAL;
	}

	/* At least one ETH_MIRROR_* type bit must be set. */
	if (mirror_conf->rule_type == 0) {
		RTE_ETHDEV_LOG(ERR, "Mirror rule type can not be 0\n");
		return -EINVAL;
	}

	if (mirror_conf->dst_pool >= ETH_64_POOLS) {
		RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
			ETH_64_POOLS - 1);
		return -EINVAL;
	}

	/* Pool mirroring requires a non-empty pool mask. */
	if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
				       ETH_MIRROR_VIRTUAL_POOL_DOWN)) != 0 &&
	    mirror_conf->pool_mask == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid mirror pool, pool mask can not be 0\n");
		return -EINVAL;
	}

	/* VLAN mirroring requires a non-empty VLAN mask. */
	if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) != 0 &&
	    mirror_conf->vlan.vlan_mask == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid vlan mask, vlan mask can not be 0\n");
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*ethdev->dev_ops->mirror_rule_set, -ENOTSUP);
	return eth_err(port_id, (*ethdev->dev_ops->mirror_rule_set)(ethdev,
				mirror_conf, rule_id, on));
}
/*
 * Remove mirror rule @rule_id on the given port via the driver's
 * mirror_rule_reset callback. Returns 0 on success or a negative
 * errno-style value (-ENODEV, -ENOTSUP, or a driver error).
 */
int
rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
{
	struct rte_eth_dev *ethdev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ethdev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*ethdev->dev_ops->mirror_rule_reset, -ENOTSUP);
	return eth_err(port_id,
		       (*ethdev->dev_ops->mirror_rule_reset)(ethdev, rule_id));
}
RTE_INIT(eth_dev_init_cb_lists)
{
uint16_t i;

View File

@ -844,39 +844,6 @@ rte_eth_rss_hf_refine(uint64_t rss_hf)
#define ETH_VMDQ_ACCEPT_MULTICAST 0x0010 /**< multicast promiscuous. */
/**@}*/
/** Maximum nb. of vlan per mirror rule */
#define ETH_MIRROR_MAX_VLANS 64
/**@{@name Mirroring type
* @see rte_eth_mirror_conf.rule_type
*/
#define ETH_MIRROR_VIRTUAL_POOL_UP 0x01 /**< Virtual Pool uplink Mirroring. */
#define ETH_MIRROR_UPLINK_PORT 0x02 /**< Uplink Port Mirroring. */
#define ETH_MIRROR_DOWNLINK_PORT 0x04 /**< Downlink Port Mirroring. */
#define ETH_MIRROR_VLAN 0x08 /**< VLAN Mirroring. */
#define ETH_MIRROR_VIRTUAL_POOL_DOWN 0x10 /**< Virtual Pool downlink Mirroring. */
/**@}*/
/**
 * A structure used to configure VLAN traffic mirror of an Ethernet port.
 * Used as the @c vlan member of struct rte_eth_mirror_conf when the
 * rule type includes ETH_MIRROR_VLAN.
 */
struct rte_eth_vlan_mirror {
	uint64_t vlan_mask; /**< mask for valid VLAN ID. */
	/** VLAN ID list for vlan mirroring. */
	uint16_t vlan_id[ETH_MIRROR_MAX_VLANS];
};
/**
 * A structure used to configure traffic mirror of an Ethernet port.
 * Passed to rte_eth_mirror_rule_set(); validation there rejects a zero
 * rule_type, a dst_pool >= ETH_64_POOLS, and empty pool/VLAN masks for
 * the corresponding rule types.
 */
struct rte_eth_mirror_conf {
	uint8_t rule_type; /**< Mirroring rule type (OR of ETH_MIRROR_* flags) */
	uint8_t dst_pool; /**< Destination pool for this mirror rule. */
	uint64_t pool_mask; /**< Bitmap of pool for pool mirroring */
	/** VLAN ID setting for VLAN mirroring. */
	struct rte_eth_vlan_mirror vlan;
};
/**
* A structure used to configure 64 entries of Redirection Table of the
* Receive Side Scaling (RSS) feature of an Ethernet port. To configure
@ -3989,50 +3956,6 @@ int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
*/
int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on);
/**
* Set a traffic mirroring rule on an Ethernet device
*
* @param port_id
* The port identifier of the Ethernet device.
* @param mirror_conf
* The pointer to the traffic mirroring structure describing the mirroring rule.
* The *rte_eth_vm_mirror_conf* structure includes the type of mirroring rule,
* destination pool and the value of rule if enable vlan or pool mirroring.
*
* @param rule_id
* The index of traffic mirroring rule, we support four separated rules.
* @param on
* 1 - Enable a mirroring rule.
* 0 - Disable a mirroring rule.
* @return
* - (0) if successful.
* - (-ENOTSUP) if hardware doesn't support this feature.
* - (-ENODEV) if *port_id* invalid.
* - (-EIO) if device is removed.
* - (-EINVAL) if the mr_conf information is not correct.
*/
int rte_eth_mirror_rule_set(uint16_t port_id,
struct rte_eth_mirror_conf *mirror_conf,
uint8_t rule_id,
uint8_t on);
/**
* Reset a traffic mirroring rule on an Ethernet device.
*
* @param port_id
* The port identifier of the Ethernet device.
* @param rule_id
* The index of traffic mirroring rule, we support four separated rules.
* @return
* - (0) if successful.
* - (-ENOTSUP) if hardware doesn't support this feature.
* - (-ENODEV) if *port_id* invalid.
* - (-EIO) if device is removed.
* - (-EINVAL) if bad parameter.
*/
int rte_eth_mirror_rule_reset(uint16_t port_id,
uint8_t rule_id);
/**
* Set the rate limitation for a queue on an Ethernet device.
*

View File

@ -84,8 +84,6 @@ DPDK_22 {
rte_eth_link_get;
rte_eth_link_get_nowait;
rte_eth_macaddr_get;
rte_eth_mirror_rule_reset;
rte_eth_mirror_rule_set;
rte_eth_promiscuous_disable;
rte_eth_promiscuous_enable;
rte_eth_promiscuous_get;