net/hns3: support Rx interrupt
This patch adds support for receiving packets in interrupt mode to the
hns3 PF/VF driver. It implements the following ops functions defined in
struct eth_dev_ops:

    rx_queue_intr_enable
    rx_queue_intr_disable

Signed-off-by: Hao Chen <chenhao164@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
commit 02a7b55657 (parent 4f19f4140e)
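For context, a minimal sketch (not part of this patch) of how an application
drives these new callbacks through the generic ethdev API. It assumes the port
was configured with dev_conf.intr_conf.rxq = 1 and started; wait_for_rx() is a
hypothetical helper, and port/queue 0 plus the 10 ms timeout are illustrative:

    #include <rte_ethdev.h>
    #include <rte_interrupts.h>

    /* Block until Rx traffic arrives on (port_id, queue_id), then return
     * to polling mode. */
    static int
    wait_for_rx(uint16_t port_id, uint16_t queue_id)
    {
            struct rte_epoll_event event;
            int n;

            /* Register the queue's interrupt event fd with the per-thread
             * epoll instance. */
            if (rte_eth_dev_rx_intr_ctl_q(port_id, queue_id,
                                          RTE_EPOLL_PER_THREAD,
                                          RTE_INTR_EVENT_ADD, NULL) != 0)
                    return -1;

            /* Arm the interrupt; in this PMD that ends up in
             * hns3_dev_rx_queue_intr_enable(). */
            (void)rte_eth_dev_rx_intr_enable(port_id, queue_id);

            /* Sleep until packets arrive or 10 ms elapse. */
            n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, 10);

            /* Disarm before resuming the poll loop. */
            (void)rte_eth_dev_rx_intr_disable(port_id, queue_id);

            return n; /* >0: Rx event fired, 0: timeout, <0: error */
    }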
--- a/doc/guides/nics/features/hns3.ini
+++ b/doc/guides/nics/features/hns3.ini
@@ -5,6 +5,7 @@
 ;
 [Features]
 Link status = Y
+Rx interrupt = Y
 MTU update = Y
 Jumbo frame = Y
 Promiscuous mode = Y
--- a/doc/guides/nics/features/hns3_vf.ini
+++ b/doc/guides/nics/features/hns3_vf.ini
@@ -5,6 +5,7 @@
 ;
 [Features]
 Link status = Y
+Rx interrupt = Y
 MTU update = Y
 Jumbo frame = Y
 Unicast MAC filter = Y
--- a/doc/guides/nics/hns3.rst
+++ b/doc/guides/nics/hns3.rst
@@ -22,6 +22,7 @@ Features of the HNS3 PMD are:
 - Port hardware statistics
 - Jumbo frames
 - Link state information
+- Interrupt mode for RX
 - VLAN stripping
 - NUMA support
 
--- a/doc/guides/rel_notes/release_19_11.rst
+++ b/doc/guides/rel_notes/release_19_11.rst
@@ -61,6 +61,12 @@ New Features
   A new API has been added to wait for a memory location to be updated with a
   16-bit, 32-bit, 64-bit value.
 
+* **Updated Hisilicon hns3 driver.**
+
+  Updated Hisilicon hns3 driver with new features and improvements, including:
+
+  * Added support for Rx interrupt.
+
 * **Updated Mellanox mlx5 driver.**
 
   Updated Mellanox mlx5 driver with new features and improvements, including:
--- a/drivers/net/hns3/hns3_cmd.h
+++ b/drivers/net/hns3/hns3_cmd.h
@@ -209,6 +209,10 @@ enum hns3_opcode_type {
 	/* SFP command */
 	HNS3_OPC_SFP_GET_SPEED = 0x7104,
 
+	/* Interrupts commands */
+	HNS3_OPC_ADD_RING_TO_VECTOR = 0x1503,
+	HNS3_OPC_DEL_RING_TO_VECTOR = 0x1504,
+
 	/* Error INT commands */
 	HNS3_QUERY_MSIX_INT_STS_BD_NUM = 0x1513,
 	HNS3_QUERY_CLEAR_ALL_MPF_MSIX_INT = 0x1514,
@@ -673,6 +677,30 @@ struct hns3_tqp_map_cmd {
 	uint8_t rsv[18];
 };
 
+#define HNS3_RING_TYPE_B	0
+#define HNS3_RING_TYPE_TX	0
+#define HNS3_RING_TYPE_RX	1
+#define HNS3_RING_GL_IDX_S	0
+#define HNS3_RING_GL_IDX_M	GENMASK(1, 0)
+#define HNS3_RING_GL_RX		0
+#define HNS3_RING_GL_TX		1
+
+#define HNS3_VECTOR_ELEMENTS_PER_CMD	10
+
+#define HNS3_INT_TYPE_S		0
+#define HNS3_INT_TYPE_M		GENMASK(1, 0)
+#define HNS3_TQP_ID_S		2
+#define HNS3_TQP_ID_M		GENMASK(12, 2)
+#define HNS3_INT_GL_IDX_S	13
+#define HNS3_INT_GL_IDX_M	GENMASK(14, 13)
+struct hns3_ctrl_vector_chain_cmd {
+	uint8_t int_vector_id;
+	uint8_t int_cause_num;
+	uint16_t tqp_type_and_id[HNS3_VECTOR_ELEMENTS_PER_CMD];
+	uint8_t vfid;
+	uint8_t rsv;
+};
+
 struct hns3_config_max_frm_size_cmd {
 	uint16_t max_frm_size;
 	uint8_t min_frm_size;
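For reference, each element of tqp_type_and_id above packs three fields into
one 16-bit little-endian word: bits 1:0 carry the ring type, bits 12:2 the TQP
(queue) index, and bits 14:13 the GL (interrupt coalescing) register index. A
minimal stand-alone illustration of that layout, with the masks written out as
shifts (the driver itself uses hns3_set_field() with the GENMASK-based macros):

    #include <stdint.h>

    #define INT_TYPE_S   0   /* bits  1:0  - ring type (0 = Tx, 1 = Rx) */
    #define TQP_ID_S     2   /* bits 12:2  - TQP (queue) index          */
    #define INT_GL_IDX_S 13  /* bits 14:13 - GL register index          */

    static uint16_t
    pack_tqp_type_and_id(uint16_t ring_type, uint16_t tqp_id, uint16_t gl_idx)
    {
            return (uint16_t)((ring_type << INT_TYPE_S) |
                              (tqp_id << TQP_ID_S) |
                              (gl_idx << INT_GL_IDX_S));
    }

For example, pack_tqp_type_and_id(1, 5, 0) — Rx ring, queue 5, Rx GL register —
yields 0x15.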
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -2021,6 +2021,40 @@ hns3_check_dcb_cfg(struct rte_eth_dev *dev)
 	return hns3_check_mq_mode(dev);
 }
 
+static int
+hns3_bind_ring_with_vector(struct rte_eth_dev *dev, uint8_t vector_id,
+			   bool mmap, uint16_t queue_id)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct hns3_cmd_desc desc;
+	struct hns3_ctrl_vector_chain_cmd *req =
+		(struct hns3_ctrl_vector_chain_cmd *)desc.data;
+	enum hns3_cmd_status status;
+	enum hns3_opcode_type op;
+	uint16_t tqp_type_and_id = 0;
+
+	op = mmap ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR;
+	hns3_cmd_setup_basic_desc(&desc, op, false);
+	req->int_vector_id = vector_id;
+
+	hns3_set_field(tqp_type_and_id, HNS3_INT_TYPE_M, HNS3_INT_TYPE_S,
+		       HNS3_RING_TYPE_RX);
+	hns3_set_field(tqp_type_and_id, HNS3_TQP_ID_M, HNS3_TQP_ID_S, queue_id);
+	hns3_set_field(tqp_type_and_id, HNS3_INT_GL_IDX_M, HNS3_INT_GL_IDX_S,
+		       HNS3_RING_GL_RX);
+	req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id);
+
+	req->int_cause_num = 1;
+	status = hns3_cmd_send(hw, &desc, 1);
+	if (status) {
+		hns3_err(hw, "Map TQP %d fail, vector_id is %d, status is %d.",
+			 queue_id, vector_id, status);
+		return -EIO;
+	}
+
+	return 0;
+}
+
 static int
 hns3_dev_configure(struct rte_eth_dev *dev)
 {
@@ -4020,15 +4054,83 @@ err_config_mac_mode:
 }
 
 static int
-hns3_dev_start(struct rte_eth_dev *eth_dev)
+hns3_map_rx_interrupt(struct rte_eth_dev *dev)
 {
-	struct hns3_adapter *hns = eth_dev->data->dev_private;
-	struct hns3_hw *hw = &hns->hw;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t intr_vector;
+	uint8_t base = 0;
+	uint8_t vec = 0;
+	uint16_t q_id;
 	int ret;
+
+	if (dev->data->dev_conf.intr_conf.rxq == 0)
+		return 0;
+
+	/* disable uio/vfio intr/eventfd mapping */
+	rte_intr_disable(intr_handle);
+
+	/* check and configure queue intr-vector mapping */
+	if (rte_intr_cap_multiple(intr_handle) ||
+	    !RTE_ETH_DEV_SRIOV(dev).active) {
+		intr_vector = dev->data->nb_rx_queues;
+		/* creates event fd for each intr vector when MSIX is used */
+		if (rte_intr_efd_enable(intr_handle, intr_vector))
+			return -EINVAL;
+	}
+	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+		intr_handle->intr_vec =
+			rte_zmalloc("intr_vec",
+				    dev->data->nb_rx_queues * sizeof(int), 0);
+		if (intr_handle->intr_vec == NULL) {
+			hns3_err(hw, "Failed to allocate %d rx_queues"
+				     " intr_vec", dev->data->nb_rx_queues);
+			ret = -ENOMEM;
+			goto alloc_intr_vec_error;
+		}
+	}
+
+	if (rte_intr_allow_others(intr_handle)) {
+		vec = RTE_INTR_VEC_RXTX_OFFSET;
+		base = RTE_INTR_VEC_RXTX_OFFSET;
+	}
+	if (rte_intr_dp_is_en(intr_handle)) {
+		for (q_id = 0; q_id < dev->data->nb_rx_queues; q_id++) {
+			ret = hns3_bind_ring_with_vector(dev, vec, true, q_id);
+			if (ret)
+				goto bind_vector_error;
+			intr_handle->intr_vec[q_id] = vec;
+			if (vec < base + intr_handle->nb_efd - 1)
+				vec++;
+		}
+	}
+	rte_intr_enable(intr_handle);
+	return 0;
+
+bind_vector_error:
+	rte_intr_efd_disable(intr_handle);
+	if (intr_handle->intr_vec) {
+		free(intr_handle->intr_vec);
+		intr_handle->intr_vec = NULL;
+	}
+	return ret;
+alloc_intr_vec_error:
+	rte_intr_efd_disable(intr_handle);
+	return ret;
+}
+
+static int
+hns3_dev_start(struct rte_eth_dev *dev)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_hw *hw = &hns->hw;
+	int ret = 0;
+
 	PMD_INIT_FUNC_TRACE();
 	if (rte_atomic16_read(&hw->reset.resetting))
 		return -EBUSY;
 
 	rte_spinlock_lock(&hw->lock);
 	hw->adapter_state = HNS3_NIC_STARTING;
 
@@ -4041,8 +4143,12 @@ hns3_dev_start(struct rte_eth_dev *eth_dev)
 
 	hw->adapter_state = HNS3_NIC_STARTED;
 	rte_spinlock_unlock(&hw->lock);
-	hns3_set_rxtx_function(eth_dev);
-	hns3_mp_req_start_rxtx(eth_dev);
+
+	ret = hns3_map_rx_interrupt(dev);
+	if (ret)
+		return ret;
+	hns3_set_rxtx_function(dev);
+	hns3_mp_req_start_rxtx(dev);
 
 	hns3_info(hw, "hns3 dev start successful!");
 	return 0;
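Note on the queue-to-vector loop above: vector 0 stays reserved for other
(non-Rx) interrupts when rte_intr_allow_others() is true, so Rx queues start
at RTE_INTR_VEC_RXTX_OFFSET, and once the available event fds (nb_efd) are
exhausted the remaining queues share the last vector. A stand-alone sketch of
just that assignment policy (RTE_INTR_VEC_RXTX_OFFSET is 1 in DPDK; six queues
and four event fds are illustrative values):

    #include <stdint.h>
    #include <stdio.h>

    #define RTE_INTR_VEC_RXTX_OFFSET 1 /* as defined by rte_interrupts.h */

    int
    main(void)
    {
            uint16_t nb_rx_queues = 6, nb_efd = 4, q_id;
            uint8_t base = RTE_INTR_VEC_RXTX_OFFSET;
            uint8_t vec = RTE_INTR_VEC_RXTX_OFFSET;

            for (q_id = 0; q_id < nb_rx_queues; q_id++) {
                    printf("queue %u -> vector %u\n",
                           (unsigned)q_id, (unsigned)vec);
                    if (vec < base + nb_efd - 1) /* clamp at last vector */
                            vec++;
            }
            return 0;
    }

This prints vectors 1, 2, 3, 4, 4, 4: queues 3 through 5 share the last
event fd.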
@@ -4070,18 +4176,50 @@ hns3_do_stop(struct hns3_adapter *hns)
 }
 
 static void
-hns3_dev_stop(struct rte_eth_dev *eth_dev)
+hns3_unmap_rx_interrupt(struct rte_eth_dev *dev)
 {
-	struct hns3_adapter *hns = eth_dev->data->dev_private;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	uint8_t base = 0;
+	uint8_t vec = 0;
+	uint16_t q_id;
+
+	if (dev->data->dev_conf.intr_conf.rxq == 0)
+		return;
+
+	/* unmap the ring with vector */
+	if (rte_intr_allow_others(intr_handle)) {
+		vec = RTE_INTR_VEC_RXTX_OFFSET;
+		base = RTE_INTR_VEC_RXTX_OFFSET;
+	}
+	if (rte_intr_dp_is_en(intr_handle)) {
+		for (q_id = 0; q_id < dev->data->nb_rx_queues; q_id++) {
+			(void)hns3_bind_ring_with_vector(dev, vec, false, q_id);
+			if (vec < base + intr_handle->nb_efd - 1)
+				vec++;
+		}
+	}
+	/* Clean datapath event and queue/vec mapping */
+	rte_intr_efd_disable(intr_handle);
+	if (intr_handle->intr_vec) {
+		rte_free(intr_handle->intr_vec);
+		intr_handle->intr_vec = NULL;
+	}
+}
+
+static void
+hns3_dev_stop(struct rte_eth_dev *dev)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
 
 	PMD_INIT_FUNC_TRACE();
 
 	hw->adapter_state = HNS3_NIC_STOPPING;
-	hns3_set_rxtx_function(eth_dev);
+	hns3_set_rxtx_function(dev);
 	rte_wmb();
 	/* Disable datapath on secondary process. */
-	hns3_mp_req_stop_rxtx(eth_dev);
+	hns3_mp_req_stop_rxtx(dev);
 	/* Prevent crashes when queues are still in use. */
 	rte_delay_ms(hw->tqps_num);
 
@@ -4092,6 +4230,7 @@ hns3_dev_stop(struct rte_eth_dev *eth_dev)
 		hw->adapter_state = HNS3_NIC_CONFIGURED;
 	}
 	rte_spinlock_unlock(&hw->lock);
+	hns3_unmap_rx_interrupt(dev);
 }
 
 static void
@@ -4748,6 +4887,8 @@ static const struct eth_dev_ops hns3_eth_dev_ops = {
 	.tx_queue_setup = hns3_tx_queue_setup,
 	.rx_queue_release = hns3_dev_rx_queue_release,
 	.tx_queue_release = hns3_dev_tx_queue_release,
+	.rx_queue_intr_enable = hns3_dev_rx_queue_intr_enable,
+	.rx_queue_intr_disable = hns3_dev_rx_queue_intr_disable,
 	.dev_configure = hns3_dev_configure,
 	.flow_ctrl_get = hns3_flow_ctrl_get,
 	.flow_ctrl_set = hns3_flow_ctrl_set,
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -1208,6 +1208,36 @@ hns3vf_uninit_vf(struct rte_eth_dev *eth_dev)
 	hw->io_base = NULL;
 }
 
+static int
+hns3vf_bind_ring_with_vector(struct rte_eth_dev *dev, uint8_t vector_id,
+			     bool mmap, uint16_t queue_id)
+
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct hns3_vf_bind_vector_msg bind_msg;
+	uint16_t code;
+	int ret;
+
+	memset(&bind_msg, 0, sizeof(bind_msg));
+	code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR :
+		HNS3_MBX_UNMAP_RING_TO_VECTOR;
+	bind_msg.vector_id = vector_id;
+	bind_msg.ring_num = 1;
+	bind_msg.param[0].ring_type = HNS3_RING_TYPE_RX;
+	bind_msg.param[0].tqp_index = queue_id;
+	bind_msg.param[0].int_gl_index = HNS3_RING_GL_RX;
+
+	ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg,
+				sizeof(bind_msg), false, NULL, 0);
+	if (ret) {
+		hns3_err(hw, "Map TQP %d fail, vector_id is %d, ret is %d.",
+			 queue_id, vector_id, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
 static int
 hns3vf_do_stop(struct hns3_adapter *hns)
 {
@@ -1225,18 +1255,51 @@ hns3vf_do_stop(struct hns3_adapter *hns)
 }
 
 static void
-hns3vf_dev_stop(struct rte_eth_dev *eth_dev)
+hns3vf_unmap_rx_interrupt(struct rte_eth_dev *dev)
 {
-	struct hns3_adapter *hns = eth_dev->data->dev_private;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	uint8_t base = 0;
+	uint8_t vec = 0;
+	uint16_t q_id;
+
+	if (dev->data->dev_conf.intr_conf.rxq == 0)
+		return;
+
+	/* unmap the ring with vector */
+	if (rte_intr_allow_others(intr_handle)) {
+		vec = RTE_INTR_VEC_RXTX_OFFSET;
+		base = RTE_INTR_VEC_RXTX_OFFSET;
+	}
+	if (rte_intr_dp_is_en(intr_handle)) {
+		for (q_id = 0; q_id < dev->data->nb_rx_queues; q_id++) {
+			(void)hns3vf_bind_ring_with_vector(dev, vec, false,
+							   q_id);
+			if (vec < base + intr_handle->nb_efd - 1)
+				vec++;
+		}
+	}
+	/* Clean datapath event and queue/vec mapping */
+	rte_intr_efd_disable(intr_handle);
+	if (intr_handle->intr_vec) {
+		rte_free(intr_handle->intr_vec);
+		intr_handle->intr_vec = NULL;
+	}
+}
+
+static void
+hns3vf_dev_stop(struct rte_eth_dev *dev)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
 
 	PMD_INIT_FUNC_TRACE();
 
 	hw->adapter_state = HNS3_NIC_STOPPING;
-	hns3_set_rxtx_function(eth_dev);
+	hns3_set_rxtx_function(dev);
 	rte_wmb();
 	/* Disable datapath on secondary process. */
-	hns3_mp_req_stop_rxtx(eth_dev);
+	hns3_mp_req_stop_rxtx(dev);
 	/* Prevent crashes when queues are still in use. */
 	rte_delay_ms(hw->tqps_num);
 
@@ -1246,8 +1309,10 @@ hns3vf_dev_stop(struct rte_eth_dev *eth_dev)
 		hns3_dev_release_mbufs(hns);
 		hw->adapter_state = HNS3_NIC_CONFIGURED;
 	}
-	rte_eal_alarm_cancel(hns3vf_service_handler, eth_dev);
+	rte_eal_alarm_cancel(hns3vf_service_handler, dev);
 	rte_spinlock_unlock(&hw->lock);
+
+	hns3vf_unmap_rx_interrupt(dev);
 }
 
 static void
@@ -1329,15 +1394,84 @@ hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue)
 }
 
 static int
-hns3vf_dev_start(struct rte_eth_dev *eth_dev)
+hns3vf_map_rx_interrupt(struct rte_eth_dev *dev)
 {
-	struct hns3_adapter *hns = eth_dev->data->dev_private;
-	struct hns3_hw *hw = &hns->hw;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t intr_vector;
+	uint8_t base = 0;
+	uint8_t vec = 0;
+	uint16_t q_id;
 	int ret;
+
+	if (dev->data->dev_conf.intr_conf.rxq == 0)
+		return 0;
+
+	/* disable uio/vfio intr/eventfd mapping */
+	rte_intr_disable(intr_handle);
+
+	/* check and configure queue intr-vector mapping */
+	if (rte_intr_cap_multiple(intr_handle) ||
+	    !RTE_ETH_DEV_SRIOV(dev).active) {
+		intr_vector = dev->data->nb_rx_queues;
+		/* It creates event fd for each intr vector when MSIX is used */
+		if (rte_intr_efd_enable(intr_handle, intr_vector))
+			return -EINVAL;
+	}
+	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+		intr_handle->intr_vec =
+			rte_zmalloc("intr_vec",
+				    dev->data->nb_rx_queues * sizeof(int), 0);
+		if (intr_handle->intr_vec == NULL) {
+			hns3_err(hw, "Failed to allocate %d rx_queues"
+				     " intr_vec", dev->data->nb_rx_queues);
+			ret = -ENOMEM;
+			goto vf_alloc_intr_vec_error;
+		}
+	}
+
+	if (rte_intr_allow_others(intr_handle)) {
+		vec = RTE_INTR_VEC_RXTX_OFFSET;
+		base = RTE_INTR_VEC_RXTX_OFFSET;
+	}
+	if (rte_intr_dp_is_en(intr_handle)) {
+		for (q_id = 0; q_id < dev->data->nb_rx_queues; q_id++) {
+			ret = hns3vf_bind_ring_with_vector(dev, vec, true,
+							   q_id);
+			if (ret)
+				goto vf_bind_vector_error;
+			intr_handle->intr_vec[q_id] = vec;
+			if (vec < base + intr_handle->nb_efd - 1)
+				vec++;
+		}
+	}
+	rte_intr_enable(intr_handle);
+	return 0;
+
+vf_bind_vector_error:
+	rte_intr_efd_disable(intr_handle);
+	if (intr_handle->intr_vec) {
+		free(intr_handle->intr_vec);
+		intr_handle->intr_vec = NULL;
+	}
+	return ret;
+vf_alloc_intr_vec_error:
+	rte_intr_efd_disable(intr_handle);
+	return ret;
+}
+
+static int
+hns3vf_dev_start(struct rte_eth_dev *dev)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_hw *hw = &hns->hw;
+	int ret = 0;
+
 	PMD_INIT_FUNC_TRACE();
 	if (rte_atomic16_read(&hw->reset.resetting))
 		return -EBUSY;
 
 	rte_spinlock_lock(&hw->lock);
 	hw->adapter_state = HNS3_NIC_STARTING;
 	ret = hns3vf_do_start(hns, true);
@@ -1348,11 +1482,14 @@ hns3vf_dev_start(struct rte_eth_dev *eth_dev)
 	}
 	hw->adapter_state = HNS3_NIC_STARTED;
 	rte_spinlock_unlock(&hw->lock);
-	hns3_set_rxtx_function(eth_dev);
-	hns3_mp_req_start_rxtx(eth_dev);
-	rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler,
-			  eth_dev);
-	return 0;
+
+	ret = hns3vf_map_rx_interrupt(dev);
+	if (ret)
+		return ret;
+	hns3_set_rxtx_function(dev);
+	hns3_mp_req_start_rxtx(dev);
+	rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler, dev);
+	return ret;
 }
 
 static bool
@@ -1685,6 +1822,8 @@ static const struct eth_dev_ops hns3vf_eth_dev_ops = {
 	.tx_queue_setup = hns3_tx_queue_setup,
 	.rx_queue_release = hns3_dev_rx_queue_release,
 	.tx_queue_release = hns3_dev_tx_queue_release,
+	.rx_queue_intr_enable = hns3_dev_rx_queue_intr_enable,
+	.rx_queue_intr_disable = hns3_dev_rx_queue_intr_disable,
 	.dev_configure = hns3vf_dev_configure,
 	.mac_addr_add = hns3vf_add_mac_addr,
 	.mac_addr_remove = hns3vf_remove_mac_addr,
--- a/drivers/net/hns3/hns3_mbx.h
+++ b/drivers/net/hns3/hns3_mbx.h
@@ -104,6 +104,19 @@ struct hns3_mbx_pf_to_vf_cmd {
 	uint16_t msg[8];
 };
 
+struct hns3_ring_chain_param {
+	uint8_t ring_type;
+	uint8_t tqp_index;
+	uint8_t int_gl_index;
+};
+
+#define HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM	4
+struct hns3_vf_bind_vector_msg {
+	uint8_t vector_id;
+	uint8_t ring_num;
+	struct hns3_ring_chain_param param[HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM];
+};
+
 struct hns3_vf_rst_cmd {
 	uint8_t dest_vfid;
 	uint8_t vf_rst;
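The mailbox message is copied to the wire byte-for-byte, so its size is fixed
by the layout above: every member is a uint8_t, leaving no padding. A
hypothetical stand-alone sanity check (the two structs are duplicated so the
snippet compiles on its own under C11):

    #include <assert.h>
    #include <stdint.h>

    struct hns3_ring_chain_param {
            uint8_t ring_type;
            uint8_t tqp_index;
            uint8_t int_gl_index;
    };

    #define HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM 4
    struct hns3_vf_bind_vector_msg {
            uint8_t vector_id;
            uint8_t ring_num;
            struct hns3_ring_chain_param
                    param[HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM];
    };

    /* 2 header bytes + 4 chain entries * 3 bytes = 14 bytes, no padding. */
    static_assert(sizeof(struct hns3_vf_bind_vector_msg) == 14,
                  "mailbox bind message layout changed");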
--- a/drivers/net/hns3/hns3_regs.h
+++ b/drivers/net/hns3/hns3_regs.h
@@ -83,6 +83,9 @@
 
 #define HNS3_RING_EN_B 0
 
+#define HNS3_VECTOR_REG_OFFSET 0x4
+#define HNS3_VECTOR_VF_OFFSET 0x100000
+
 #define HNS3_TQP_REG_OFFSET 0x80000
 #define HNS3_TQP_REG_SIZE 0x200
 
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -395,6 +395,47 @@ hns3_reset_all_queues(struct hns3_adapter *hns)
 	return 0;
 }
 
+void
+hns3_tqp_intr_enable(struct hns3_hw *hw, uint16_t tpq_int_num, bool en)
+{
+	uint32_t addr, value;
+
+	addr = HNS3_TQP_INTR_CTRL_REG + tpq_int_num * HNS3_VECTOR_REG_OFFSET;
+	value = en ? 1 : 0;
+
+	hns3_write_dev(hw, addr, value);
+}
+
+int
+hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (dev->data->dev_conf.intr_conf.rxq == 0)
+		return -ENOTSUP;
+
+	/* enable the vectors */
+	hns3_tqp_intr_enable(hw, queue_id, true);
+
+	return rte_intr_ack(intr_handle);
+}
+
+int
+hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (dev->data->dev_conf.intr_conf.rxq == 0)
+		return -ENOTSUP;
+
+	/* disable the vectors */
+	hns3_tqp_intr_enable(hw, queue_id, false);
+
+	return 0;
+}
+
 static int
 hns3_dev_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
 {
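hns3_tqp_intr_enable() above derives a per-queue register address: each TQP
interrupt owns its own control register, spaced HNS3_VECTOR_REG_OFFSET (4)
bytes apart from a common base. A minimal sketch of that address arithmetic;
the 0x20000 base for HNS3_TQP_INTR_CTRL_REG is an assumption here, check
hns3_regs.h for the authoritative value:

    #include <stdint.h>

    #define HNS3_TQP_INTR_CTRL_REG 0x20000 /* assumed base, see hns3_regs.h */
    #define HNS3_VECTOR_REG_OFFSET 0x4     /* added by this patch           */

    /* Control register for TQP interrupt N: base + N * 4. */
    static uint32_t
    tqp_intr_ctrl_addr(uint16_t tpq_int_num)
    {
            return HNS3_TQP_INTR_CTRL_REG +
                   (uint32_t)tpq_int_num * HNS3_VECTOR_REG_OFFSET;
    }

Writing 1 to that address unmasks the vector for the queue and writing 0 masks
it, which is exactly what the Rx queue intr enable/disable callbacks do.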
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -295,6 +295,8 @@ void hns3_dev_rx_queue_release(void *queue);
 void hns3_dev_tx_queue_release(void *queue);
 void hns3_free_all_queues(struct rte_eth_dev *dev);
 int hns3_reset_all_queues(struct hns3_adapter *hns);
+int hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
+int hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
 int hns3_start_queues(struct hns3_adapter *hns, bool reset_queue);
 int hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue);
 void hns3_dev_release_mbufs(struct hns3_adapter *hns);
@@ -311,4 +313,5 @@ uint16_t hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			uint16_t nb_pkts);
 const uint32_t *hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev);
+void hns3_tqp_intr_enable(struct hns3_hw *hw, uint16_t tpq_int_num, bool en);
 #endif /* _HNS3_RXTX_H_ */