From 401032c67d5b3bfd8b710cd0909190399604ce20 Mon Sep 17 00:00:00 2001
From: Navdeep Parhar
Date: Fri, 3 Mar 2017 03:07:54 +0000
Subject: [PATCH] cxgbe/iw_cxgbe: Implement sq/rq drain operation.

ULPs can set a qp's state to ERROR and then post a work request on the
sq and/or rq.  When the reply for that work request comes back, it is
guaranteed that all previous work requests posted on that queue have
been drained.

Obtained from:	Chelsio Communications
MFC after:	3 days
Sponsored by:	Chelsio Communications
---
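
Not part of the change: the sketch below illustrates the drain sequence
this patch enables, from a kernel ULP's point of view.  DRAIN_WR_ID,
drain_done, and ulp_drain_sq are hypothetical names, and error handling
is minimal.

    #include <linux/completion.h>
    #include <rdma/ib_verbs.h>

    /* Hypothetical wr_id the ULP reserves to mark its drain wr. */
    #define DRAIN_WR_ID	0xdadadadaULL

    static DECLARE_COMPLETION(drain_done);	/* signaled by the CQ handler */

    static int ulp_drain_sq(struct ib_qp *qp)
    {
    	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
    	struct ib_send_wr drain_wr = {
    		.wr_id = DRAIN_WR_ID,
    		.opcode = IB_WR_SEND,
    	};
    	struct ib_send_wr *bad_wr;
    	int ret;

    	/*
    	 * Move the qp to ERROR.  With this patch, kernel users may
    	 * make this transition even while the qp is CLOSING.
    	 */
    	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
    	if (ret)
    		return ret;

    	/*
    	 * Post the drain wr.  c4iw_post_send() sees the wq in error
    	 * and completes it via a software cqe instead of returning
    	 * -EINVAL.
    	 */
    	ret = ib_post_send(qp, &drain_wr, &bad_wr);
    	if (ret)
    		return ret;

    	/*
    	 * When the drain wr's completion arrives, all work requests
    	 * posted on the sq before it are guaranteed to have drained.
    	 */
    	wait_for_completion(&drain_done);
    	return (0);
    }
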
 sys/dev/cxgbe/iw_cxgbe/cq.c       | 12 +++++
 sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h |  2 +
 sys/dev/cxgbe/iw_cxgbe/qp.c       | 73 +++++++++++++++++++++++++++++--
 sys/dev/cxgbe/iw_cxgbe/t4.h       |  2 +
 4 files changed, 86 insertions(+), 3 deletions(-)

diff --git a/sys/dev/cxgbe/iw_cxgbe/cq.c b/sys/dev/cxgbe/iw_cxgbe/cq.c
index 5c040e97377d..54eb026a19d5 100644
--- a/sys/dev/cxgbe/iw_cxgbe/cq.c
+++ b/sys/dev/cxgbe/iw_cxgbe/cq.c
@@ -449,6 +449,15 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
 		goto skip_cqe;
 	}
 
+	/*
+	 * Special cqe for drain WR completions...
+	 */
+	if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
+		*cookie = CQE_DRAIN_COOKIE(hw_cqe);
+		*cqe = *hw_cqe;
+		goto skip_cqe;
+	}
+
 	/*
 	 * Gotta tweak READ completions:
 	 *	1) the cqe doesn't contain the sq_wptr from the wr.
@@ -665,6 +674,9 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
 		case FW_RI_FAST_REGISTER:
 			wc->opcode = IB_WC_FAST_REG_MR;
 			break;
+		case C4IW_DRAIN_OPCODE:
+			wc->opcode = IB_WC_SEND;
+			break;
 		default:
 			printf("Unexpected opcode %d "
 			    "in the CQE received for QPID = 0x%0x\n",
diff --git a/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h b/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h
index 207135800e91..de27d5d171cf 100644
--- a/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h
+++ b/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h
@@ -559,6 +559,8 @@ static inline int to_ib_qp_state(int c4iw_qp_state)
 	return IB_QPS_ERR;
 }
 
+#define C4IW_DRAIN_OPCODE	FW_RI_SGE_EC_CR_RETURN
+
 static inline u32 c4iw_ib_to_tpt_access(int a)
 {
 	return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
diff --git a/sys/dev/cxgbe/iw_cxgbe/qp.c b/sys/dev/cxgbe/iw_cxgbe/qp.c
index 4dc2fba70d8f..58a89c5af665 100644
--- a/sys/dev/cxgbe/iw_cxgbe/qp.c
+++ b/sys/dev/cxgbe/iw_cxgbe/qp.c
@@ -577,6 +577,66 @@ void c4iw_qp_rem_ref(struct ib_qp *qp)
 	wake_up(&(to_c4iw_qp(qp)->wait));
 }
 
+static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
+{
+	struct t4_cqe cqe = {};
+	struct c4iw_cq *schp;
+	unsigned long flag;
+	struct t4_cq *cq;
+
+	schp = to_c4iw_cq(qhp->ibqp.send_cq);
+	cq = &schp->cq;
+
+	PDBG("%s drain sq id %u\n", __func__, qhp->wq.sq.qid);
+	cqe.u.drain_cookie = wr->wr_id;
+	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
+				 V_CQE_OPCODE(C4IW_DRAIN_OPCODE) |
+				 V_CQE_TYPE(1) |
+				 V_CQE_SWCQE(1) |
+				 V_CQE_QPID(qhp->wq.sq.qid));
+
+	spin_lock_irqsave(&schp->lock, flag);
+	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
+	cq->sw_queue[cq->sw_pidx] = cqe;
+	t4_swcq_produce(cq);
+	spin_unlock_irqrestore(&schp->lock, flag);
+
+	spin_lock_irqsave(&schp->comp_handler_lock, flag);
+	(*schp->ibcq.comp_handler)(&schp->ibcq,
+				   schp->ibcq.cq_context);
+	spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+}
+
+static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
+{
+	struct t4_cqe cqe = {};
+	struct c4iw_cq *rchp;
+	unsigned long flag;
+	struct t4_cq *cq;
+
+	rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
+	cq = &rchp->cq;
+
+	PDBG("%s drain rq id %u\n", __func__, qhp->wq.sq.qid);
+	cqe.u.drain_cookie = wr->wr_id;
+	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
+				 V_CQE_OPCODE(C4IW_DRAIN_OPCODE) |
+				 V_CQE_TYPE(0) |
+				 V_CQE_SWCQE(1) |
+				 V_CQE_QPID(qhp->wq.sq.qid));
+
+	spin_lock_irqsave(&rchp->lock, flag);
+	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
+	cq->sw_queue[cq->sw_pidx] = cqe;
+	t4_swcq_produce(cq);
+	spin_unlock_irqrestore(&rchp->lock, flag);
+
+	spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+	(*rchp->ibcq.comp_handler)(&rchp->ibcq,
+				   rchp->ibcq.cq_context);
+	spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+}
+
 int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		   struct ib_send_wr **bad_wr)
 {
@@ -595,7 +655,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	spin_lock_irqsave(&qhp->lock, flag);
 	if (t4_wq_in_error(&qhp->wq)) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
-		return -EINVAL;
+		complete_sq_drain_wr(qhp, wr);
+		return err;
 	}
 	num_wrs = t4_sq_avail(&qhp->wq);
 	if (num_wrs == 0) {
@@ -708,7 +769,8 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 	spin_lock_irqsave(&qhp->lock, flag);
 	if (t4_wq_in_error(&qhp->wq)) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
-		return -EINVAL;
+		complete_rq_drain_wr(qhp, wr);
+		return err;
 	}
 	num_wrs = t4_rq_avail(&qhp->wq);
 	if (num_wrs == 0) {
@@ -1303,7 +1365,12 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 			}
 			break;
 		case C4IW_QP_STATE_CLOSING:
-			if (!internal) {
+
+			/*
+			 * Allow kernel users to move to ERROR for qp draining.
+			 */
+			if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
+			    C4IW_QP_STATE_ERROR)) {
 				ret = -EINVAL;
 				goto out;
 			}
diff --git a/sys/dev/cxgbe/iw_cxgbe/t4.h b/sys/dev/cxgbe/iw_cxgbe/t4.h
index 4219fd058375..baab4d044bd4 100644
--- a/sys/dev/cxgbe/iw_cxgbe/t4.h
+++ b/sys/dev/cxgbe/iw_cxgbe/t4.h
@@ -203,6 +203,7 @@ struct t4_cqe {
 			__be32 wrid_hi;
 			__be32 wrid_low;
 		} gen;
+		u64 drain_cookie;
 	} u;
 	__be64 reserved;
 	__be64 bits_type_ts;
@@ -261,6 +262,7 @@ struct t4_cqe {
 /* generic accessor macros */
 #define CQE_WRID_HI(x)		((x)->u.gen.wrid_hi)
 #define CQE_WRID_LOW(x)		((x)->u.gen.wrid_low)
+#define CQE_DRAIN_COOKIE(x)	((x)->u.drain_cookie)
 
 /* macros for flit 3 of the cqe */
 #define S_CQE_GENBIT	63
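
For completeness, the consumer side of the same hypothetical ULP.  The
drain completion is polled like any other cqe: c4iw_poll_cq_one() maps
C4IW_DRAIN_OPCODE to IB_WC_SEND, the T4_ERR_SWFLUSH status surfaces as
a flush error, and the wr_id is recovered from cqe.u.drain_cookie.
DRAIN_WR_ID and drain_done are the hypothetical names from the sketch
in the notes above; this is illustrative only, not part of the change.

    /* Registered as the cq's comp_handler at ib_create_cq() time. */
    static void ulp_cq_handler(struct ib_cq *cq, void *ctx)
    {
    	struct ib_wc wc;

    	while (ib_poll_cq(cq, 1, &wc) > 0) {
    		if (wc.wr_id == DRAIN_WR_ID) {
    			/*
    			 * Drain completion: IB_WC_SEND opcode with a
    			 * flushed status.  Every work request posted
    			 * on this queue before the drain wr has now
    			 * completed.
    			 */
    			complete(&drain_done);
    			continue;
    		}
    		/* ... normal completion processing ... */
    	}
    	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
    }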