Report EQE data upon CQ completion in mlx5core.

Report EQE data upon CQ completion to let upper layers use this data.

Linux commit: 4e0e2ea1886afe8c001971ff767f6670312a9b04
MFC after: 1 week
Sponsored by: Mellanox Technologies // NVIDIA Networking
parent ffdb195f31
commit f34f0a65b2
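The interface change is that the CQ completion callback now receives the event queue entry (EQE) that triggered it, instead of being a bare notification. As a minimal sketch of what a consumer-side handler could look like (the handler name and the use made of the EQE are hypothetical illustrations, not part of this commit; note that the polled call sites below pass a NULL eqe, so a real handler must tolerate that):

static void
my_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
{
	/* eqe is NULL when the CQ is polled synchronously */
	if (eqe != NULL) {
		/* hypothetical: inspect event data, e.g. the CQN */
		u32 cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
		(void)cqn;
	}
	/* ... process completions on mcq as before ... */
}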
@@ -32,7 +32,9 @@
 #include <dev/mlx5/driver.h>
 #include <dev/mlx5/mlx5_ifc.h>
 
+struct mlx5_eqe;
+
 struct mlx5_core_cq {
 	u32			cqn;
 	int			cqe_sz;
@@ -40,7 +40,7 @@ struct mlx5_core_cq {
 	__be32		       *arm_db;
 	unsigned		vector;
 	int			irqn;
-	void (*comp)		(struct mlx5_core_cq *);
+	void (*comp)		(struct mlx5_core_cq *, struct mlx5_eqe *);
 	void (*event)		(struct mlx5_core_cq *, int);
 	struct mlx5_uar	       *uar;
 	u32			cons_index;
@@ -1021,7 +1021,7 @@ void mlx5_unregister_debugfs(void);
 int mlx5_eq_init(struct mlx5_core_dev *dev);
 void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
 void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
-void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
+void mlx5_cq_completion(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
 void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
 void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
 struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
@@ -55,13 +55,16 @@ mlx5_cq_table_write_unlock(struct mlx5_cq_table *table)
 	NET_EPOCH_WAIT();
 }
 
-void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn)
+void mlx5_cq_completion(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
 {
 	struct mlx5_cq_table *table = &dev->priv.cq_table;
 	struct mlx5_core_cq *cq;
 	struct epoch_tracker et;
+	u32 cqn;
 	bool do_lock;
 
+	cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
+
 	NET_EPOCH_ENTER(et);
 
 	do_lock = atomic_read(&table->writercount) != 0;
@@ -78,7 +81,7 @@ void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn)
 
 	if (likely(cq != NULL)) {
 		++cq->arm_sn;
-		cq->comp(cq);
+		cq->comp(cq, eqe);
 	} else {
 		mlx5_core_warn(dev,
 		    "Completion event for bogus CQ 0x%x\n", cqn);
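The CQN lookup now happens inside mlx5_cq_completion() itself: the completion EQE carries the CQ number in a big-endian 32-bit field whose low 24 bits are the CQN, hence the be32_to_cpu()/0xffffff decode above. As a standalone illustration (the value is made up for the example):

/* hypothetical raw on-wire field holding CQN 0x123456 (big endian) */
__be32 raw = cpu_to_be32(0x00123456);
u32 cqn = be32_to_cpu(raw) & 0xffffff;	/* cqn == 0x123456 */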
@@ -246,8 +246,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
 			eq->eqn, eqe_type_str(eqe->type));
 		switch (eqe->type) {
 		case MLX5_EVENT_TYPE_COMP:
-			cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
-			mlx5_cq_completion(dev, cqn);
+			mlx5_cq_completion(dev, eqe);
 			break;
 
 		case MLX5_EVENT_TYPE_PATH_MIG:
@@ -149,7 +149,7 @@ MALLOC_DECLARE(M_MLX5EN);
 struct mlx5_core_dev;
 struct mlx5e_cq;
 
-typedef void (mlx5e_cq_comp_t)(struct mlx5_core_cq *);
+typedef void (mlx5e_cq_comp_t)(struct mlx5_core_cq *, struct mlx5_eqe *);
 
 #define	mlx5_en_err(_dev, format, ...) \
 	if_printf(_dev, "ERR: ""%s:%d:(pid %d): " format, \
@@ -1107,8 +1107,8 @@ int mlx5e_open_locked(struct ifnet *);
 int	mlx5e_close_locked(struct ifnet *);
 
 void	mlx5e_cq_error_event(struct mlx5_core_cq *mcq, int event);
-void	mlx5e_rx_cq_comp(struct mlx5_core_cq *);
-void	mlx5e_tx_cq_comp(struct mlx5_core_cq *);
+mlx5e_cq_comp_t mlx5e_rx_cq_comp;
+mlx5e_cq_comp_t mlx5e_tx_cq_comp;
 struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
 
 void	mlx5e_dim_work(struct work_struct *);
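Declaring the handlers through the mlx5e_cq_comp_t typedef, rather than repeating the full prototype twice, lets the compiler enforce the new two-argument signature from a single definition. The idiom in isolation (my_comp_t and my_rx_comp are illustrative names, not identifiers from this commit):

typedef void (my_comp_t)(struct mlx5_core_cq *, struct mlx5_eqe *);
my_comp_t my_rx_comp;	/* same as: void my_rx_comp(struct mlx5_core_cq *, struct mlx5_eqe *); */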
@@ -1898,7 +1898,7 @@ mlx5e_drain_sq(struct mlx5e_sq *sq)
 	    mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
 		mtx_unlock(&sq->lock);
 		msleep(1);
-		sq->cq.mcq.comp(&sq->cq.mcq);
+		sq->cq.mcq.comp(&sq->cq.mcq, NULL);
 		mtx_lock(&sq->lock);
 	}
 	mtx_unlock(&sq->lock);
@@ -1916,7 +1916,7 @@ mlx5e_drain_sq(struct mlx5e_sq *sq)
 	    mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
 		mtx_unlock(&sq->lock);
 		msleep(1);
-		sq->cq.mcq.comp(&sq->cq.mcq);
+		sq->cq.mcq.comp(&sq->cq.mcq, NULL);
 		mtx_lock(&sq->lock);
 	}
 	mtx_unlock(&sq->lock);
@@ -2229,7 +2229,7 @@ mlx5e_open_channel(struct mlx5e_priv *priv,
 
 	/* poll receive queue initially */
 	NET_EPOCH_ENTER(et);
-	c->rq.cq.mcq.comp(&c->rq.cq.mcq);
+	c->rq.cq.mcq.comp(&c->rq.cq.mcq, NULL);
 	NET_EPOCH_EXIT(et);
 
 	return (0);
@@ -3805,7 +3805,7 @@ mlx5e_disable_rx_dma(struct mlx5e_channel *ch)
 	while (!mlx5_wq_ll_is_empty(&rq->wq)) {
 		msleep(1);
 		NET_EPOCH_ENTER(et);
-		rq->cq.mcq.comp(&rq->cq.mcq);
+		rq->cq.mcq.comp(&rq->cq.mcq, NULL);
 		NET_EPOCH_EXIT(et);
 	}
 
@@ -3838,7 +3838,7 @@ mlx5e_enable_rx_dma(struct mlx5e_channel *ch)
 	rq->enabled = 1;
 
 	NET_EPOCH_ENTER(et);
-	rq->cq.mcq.comp(&rq->cq.mcq);
+	rq->cq.mcq.comp(&rq->cq.mcq, NULL);
 	NET_EPOCH_EXIT(et);
 }
 
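Note that every synchronous poll site in this commit (mlx5e_drain_sq(), mlx5e_open_channel(), mlx5e_disable_rx_dma(), mlx5e_enable_rx_dma() above, and mlx5e_rl_open_channel() and the mlx5_ib internal-error path below) invokes the callback with a NULL eqe, which is why the rx/tx handlers mark the parameter __unused.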
@@ -232,7 +232,7 @@ mlx5e_rl_open_channel(struct mlx5e_rl_worker *rlw, int eq_ix,
 	*ppsq = sq;
 
 	/* poll TX queue initially */
-	sq->cq.mcq.comp(&sq->cq.mcq);
+	sq->cq.mcq.comp(&sq->cq.mcq, NULL);
 
 	return (0);
 }
@@ -537,7 +537,7 @@ wq_ll_pop:
 }
 
 void
-mlx5e_rx_cq_comp(struct mlx5_core_cq *mcq)
+mlx5e_rx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe __unused)
 {
 	struct mlx5e_rq *rq = container_of(mcq, struct mlx5e_rq, cq.mcq);
 	int i = 0;
@@ -871,7 +871,7 @@ select_queue:
 }
 
 void
-mlx5e_tx_cq_comp(struct mlx5_core_cq *mcq)
+mlx5e_tx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe __unused)
 {
 	struct mlx5e_sq *sq = container_of(mcq, struct mlx5e_sq, cq.mcq);
 
@@ -31,7 +31,7 @@
 #include <rdma/ib_cache.h>
 #include "mlx5_ib.h"
 
-static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq)
+static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe __unused)
 {
 	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
 
@@ -2459,7 +2459,7 @@ static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
 	 * lock/unlock above locks Now need to arm all involved CQs.
 	 */
 	list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
-		mcq->comp(mcq);
+		mcq->comp(mcq, NULL);
 	}
 	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
 }