mana: Add RX fencing

RX fencing allows the driver to know that any prior change to the RQs has
finished; it is required, e.g., when the RQs are disabled/enabled or when
the hash key or indirection table is changed.

Remove the previous 'sleep' workaround and add real support for
RX fencing, as the PF driver now supports the MANA_FENCE_RQ request (any
old PF driver not supporting the request won't be used in production).

MFC after:	2 weeks
Sponsored by:	Microsoft
This commit is contained in:
Wei Hu 2022-01-13 01:47:44 -08:00
parent 4873653519
commit aa108bc7c5
2 changed files with 68 additions and 5 deletions

View File

@ -414,6 +414,8 @@ struct mana_rxq {
mana_handle_t rxobj;
struct completion fence_event;
struct mana_cq rx_cq;
struct ifnet *ndev;

View File

@ -1221,6 +1221,63 @@ mana_create_eq(struct mana_context *ac)
return err;
}
/*
 * Issue a MANA_FENCE_RQ request for a single RX queue and block until
 * the hardware acknowledges it via a CQE_RX_OBJECT_FENCE completion.
 *
 * Returns 0 on success, or an errno value if the request could not be
 * sent, the response was malformed/rejected, or the fence timed out.
 */
static int
mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
{
	struct mana_fence_rq_req req = {};
	struct mana_fence_rq_resp resp = {};
	int rc;

	/* Arm the completion before the request goes out on the wire. */
	init_completion(&rxq->fence_event);

	mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
	    sizeof(req), sizeof(resp));
	req.wq_obj_handle = rxq->rxobj;

	rc = mana_send_request(apc->ac, &req, sizeof(req), &resp,
	    sizeof(resp));
	if (rc != 0) {
		if_printf(apc->ndev, "Failed to fence RQ %u: %d\n",
		    rxq->rxq_idx, rc);
		return rc;
	}

	rc = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
	if (rc != 0 || resp.hdr.status != 0) {
		if_printf(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
		    rxq->rxq_idx, rc, resp.hdr.status);
		/* A bad status with a clean verify maps to EPROTO. */
		return (rc != 0) ? rc : EPROTO;
	}

	/*
	 * NOTE(review): this assumes the driver-local
	 * wait_for_completion_timeout() returns non-zero on timeout
	 * (the opposite of the Linux kernel API) — confirm against the
	 * gdma completion helpers.
	 */
	if (wait_for_completion_timeout(&rxq->fence_event, 10 * hz) != 0) {
		if_printf(apc->ndev, "Failed to fence RQ %u: timed out\n",
		    rxq->rxq_idx);
		return ETIMEDOUT;
	}

	return 0;
}
/*
 * Fence every RX queue on the port. If fencing an individual queue
 * fails, fall back to a short sleep for that queue — the pre-fencing
 * workaround — rather than aborting the whole pass.
 */
static void
mana_fence_rqs(struct mana_port_context *apc)
{
	unsigned int i;

	for (i = 0; i < apc->num_queues; i++) {
		struct mana_rxq *rxq = apc->rxqs[i];

		if (mana_fence_rq(apc, rxq) != 0) {
			/* In case of any error, use sleep instead. */
			gdma_msleep(100);
		}
	}
}
static int
mana_move_wq_tail(struct gdma_queue *wq, uint32_t num_units)
{
@ -1564,7 +1621,7 @@ mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
return;
case CQE_RX_OBJECT_FENCE:
if_printf(ndev, "RX Fencing is unsupported\n");
complete(&rxq->fence_event);
return;
default:
@ -2368,6 +2425,7 @@ int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
bool update_hash, bool update_tab)
{
uint32_t queue_idx;
int err;
int i;
if (update_tab) {
@ -2377,7 +2435,13 @@ int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
}
}
return mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
if (err)
return err;
mana_fence_rqs(apc);
return 0;
}
static int
@ -2532,9 +2596,6 @@ mana_dealloc_queues(struct ifnet *ndev)
return err;
}
/* TODO: Implement RX fencing */
gdma_msleep(1000);
mana_destroy_vport(apc);
return 0;