cxgbe/iw_cxgbe: Implement sq/rq drain operation.

ULPs can set a qp's state to ERROR and then post a work request on the
sq and/or rq.  When the completion for that work request comes back, it
is guaranteed that all previous work requests posted on that queue have
been drained.

Obtained from:	Chelsio Communications
MFC after:	3 days
Sponsored by:	Chelsio Communications
commit 401032c67d (parent baf4abfc39)
Author:	Navdeep Parhar
Date:	2017-03-03 03:07:54 +00:00

4 changed files with 86 additions and 3 deletions
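
[Editorial note] For context, the intended caller-side sequence looks like
the following. This is a minimal kernel-ULP sketch, assuming only the
standard kverbs entry points (ib_modify_qp, ib_post_send, ib_poll_cq);
DRAIN_WRID, the helper name, and the busy-wait loop are illustrative, and
error handling is omitted:

	/* Hypothetical example, not part of this commit. */
	static void drain_sq_example(struct ib_qp *qp, struct ib_cq *scq)
	{
		struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
		struct ib_send_wr drain_wr = { .wr_id = DRAIN_WRID }, *bad_wr;
		struct ib_wc wc;

		ib_modify_qp(qp, &attr, IB_QP_STATE);	/* move qp to ERROR */
		ib_post_send(qp, &drain_wr, &bad_wr);	/* post drain marker */
		do {
			/* spin until the marker's flushed completion shows */
		} while (ib_poll_cq(scq, 1, &wc) < 1 ||
		    wc.wr_id != DRAIN_WRID);
		/* all sq work posted before drain_wr has now drained */
	}

A real consumer would typically block on the CQ's completion handler
instead of polling; the point is only the order of operations.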


@@ -449,6 +449,15 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
 		goto skip_cqe;
 	}
 
+	/*
+	 * Special cqe for drain WR completions...
+	 */
+	if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
+		*cookie = CQE_DRAIN_COOKIE(hw_cqe);
+		*cqe = *hw_cqe;
+		goto skip_cqe;
+	}
+
 	/*
 	 * Gotta tweak READ completions:
 	 *	1) the cqe doesn't contain the sq_wptr from the wr.
@@ -665,6 +674,9 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
 		case FW_RI_FAST_REGISTER:
 			wc->opcode = IB_WC_FAST_REG_MR;
 			break;
+		case C4IW_DRAIN_OPCODE:
+			wc->opcode = IB_WC_SEND;
+			break;
 		default:
 			printf("Unexpected opcode %d "
 			    "in the CQE received for QPID = 0x%0x\n",


@@ -559,6 +559,8 @@ static inline int to_ib_qp_state(int c4iw_qp_state)
 	return IB_QPS_ERR;
 }
 
+#define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN
+
 static inline u32 c4iw_ib_to_tpt_access(int a)
 {
 	return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |


@@ -577,6 +577,66 @@ void c4iw_qp_rem_ref(struct ib_qp *qp)
 	wake_up(&(to_c4iw_qp(qp)->wait));
 }
 
+static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
+{
+	struct t4_cqe cqe = {};
+	struct c4iw_cq *schp;
+	unsigned long flag;
+	struct t4_cq *cq;
+
+	schp = to_c4iw_cq(qhp->ibqp.send_cq);
+	cq = &schp->cq;
+
+	PDBG("%s drain sq id %u\n", __func__, qhp->wq.sq.qid);
+	cqe.u.drain_cookie = wr->wr_id;
+	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
+				 V_CQE_OPCODE(C4IW_DRAIN_OPCODE) |
+				 V_CQE_TYPE(1) |
+				 V_CQE_SWCQE(1) |
+				 V_CQE_QPID(qhp->wq.sq.qid));
+
+	spin_lock_irqsave(&schp->lock, flag);
+	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
+	cq->sw_queue[cq->sw_pidx] = cqe;
+	t4_swcq_produce(cq);
+	spin_unlock_irqrestore(&schp->lock, flag);
+
+	spin_lock_irqsave(&schp->comp_handler_lock, flag);
+	(*schp->ibcq.comp_handler)(&schp->ibcq,
+				   schp->ibcq.cq_context);
+	spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+}
+
+static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
+{
+	struct t4_cqe cqe = {};
+	struct c4iw_cq *rchp;
+	unsigned long flag;
+	struct t4_cq *cq;
+
+	rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
+	cq = &rchp->cq;
+
+	PDBG("%s drain rq id %u\n", __func__, qhp->wq.sq.qid);
+	cqe.u.drain_cookie = wr->wr_id;
+	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
+				 V_CQE_OPCODE(C4IW_DRAIN_OPCODE) |
+				 V_CQE_TYPE(0) |
+				 V_CQE_SWCQE(1) |
+				 V_CQE_QPID(qhp->wq.sq.qid));
+
+	spin_lock_irqsave(&rchp->lock, flag);
+	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
+	cq->sw_queue[cq->sw_pidx] = cqe;
+	t4_swcq_produce(cq);
+	spin_unlock_irqrestore(&rchp->lock, flag);
+
+	spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+	(*rchp->ibcq.comp_handler)(&rchp->ibcq,
+				   rchp->ibcq.cq_context);
+	spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+}
+
 int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		   struct ib_send_wr **bad_wr)
 {
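
[Editorial note] complete_sq_drain_wr() stages the software CQE and
advances the producer index while holding the CQ lock. For reference, a
sketch of what t4_swcq_produce() is assumed to look like, modeled on the
Linux iw_cxgb4 counterpart rather than taken from this tree:

	static inline void t4_swcq_produce(struct t4_cq *cq)
	{
		cq->sw_in_use++;		/* one more sw cqe pending */
		if (++cq->sw_pidx == cq->size)	/* wrap producer index */
			cq->sw_pidx = 0;
	}

Holding schp->lock (or rchp->lock) across both the store to sw_queue[] and
the index bump keeps the software CQ consistent with concurrent pollers.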
@@ -595,7 +655,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	spin_lock_irqsave(&qhp->lock, flag);
 	if (t4_wq_in_error(&qhp->wq)) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
-		return -EINVAL;
+		complete_sq_drain_wr(qhp, wr);
+		return err;
 	}
 	num_wrs = t4_sq_avail(&qhp->wq);
 	if (num_wrs == 0) {
@@ -708,7 +769,8 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 	spin_lock_irqsave(&qhp->lock, flag);
 	if (t4_wq_in_error(&qhp->wq)) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
-		return -EINVAL;
+		complete_rq_drain_wr(qhp, wr);
+		return err;
 	}
 	num_wrs = t4_rq_avail(&qhp->wq);
 	if (num_wrs == 0) {
@@ -1303,7 +1365,12 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 		}
 		break;
 	case C4IW_QP_STATE_CLOSING:
-		if (!internal) {
+
+		/*
+		 * Allow kernel users to move to ERROR for qp draining.
+		 */
+		if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
+		    C4IW_QP_STATE_ERROR)) {
 			ret = -EINVAL;
 			goto out;
 		}
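
[Editorial note] The rewritten guard relaxes exactly one case. Spelled out,
derived directly from the condition above:

	/*
	 * internal transition                  -> allowed, as before
	 * kernel qp (!uobject), next == ERROR  -> now allowed (drain)
	 * user qp (uobject), any next state    -> -EINVAL, as before
	 * kernel qp, next != ERROR             -> -EINVAL, as before
	 */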


@@ -203,6 +203,7 @@ struct t4_cqe {
 			__be32	wrid_hi;
 			__be32	wrid_low;
 		} gen;
+		u64 drain_cookie;
 	} u;
 	__be64 reserved;
 	__be64 bits_type_ts;
@@ -261,6 +262,7 @@ struct t4_cqe {
 /* generic accessor macros */
 #define CQE_WRID_HI(x)		((x)->u.gen.wrid_hi)
 #define CQE_WRID_LOW(x)		((x)->u.gen.wrid_low)
+#define CQE_DRAIN_COOKIE(x)	((x)->u.drain_cookie)
 
 /* macros for flit 3 of the cqe */
 #define S_CQE_GENBIT	63
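
[Editorial note] The software-built header in complete_*_drain_wr() parses
back correctly because each V_ constructor has a matching G_ extractor
built from the same shift and mask. A minimal sketch of the pattern; the
field position and width here are assumed for illustration only, and the
real definitions live elsewhere in this header:

	#define S_CQE_OPCODE	24
	#define M_CQE_OPCODE	0xf
	#define V_CQE_OPCODE(x)	((x) << S_CQE_OPCODE)
	#define G_CQE_OPCODE(x)	(((x) >> S_CQE_OPCODE) & M_CQE_OPCODE)

	/* Round trip: G_CQE_OPCODE(V_CQE_OPCODE(op)) == op for any opcode
	 * that fits in M_CQE_OPCODE, which is how poll_cq() can recognize
	 * C4IW_DRAIN_OPCODE in a cqe that the hardware never produced. */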