mlx5en: Statically allocate and free the channel structure(s).

By allocating the worst-case sized channel structure array
at attach time, we can eliminate various NULL checks in the
fast path and also reduce the chance of use-after-free
issues in the transmit fast path.

This change is also a requirement for implementing
backpressure support.

Submitted by:   hselasky@
Approved by:    hselasky (mentor)
MFC after:      1 week
Sponsored by:   Mellanox Technologies
Commit 3230c29d72 (parent b3cf149325)
Slava Shwartsman, 2018-12-05 14:23:31 +00:00
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=341583
5 changed files with 58 additions and 109 deletions
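
The core of the change is a C99 flexible array member: the channel array becomes the trailing member of struct mlx5e_priv and is carved out of the same allocation as the priv itself, sized for the worst case up front. The following is a minimal, self-contained sketch of the idiom in plain C; the struct and function names are illustrative, not the driver's:

#include <stdio.h>
#include <stdlib.h>

struct channel {
    int ix;      /* channel index */
    int running; /* set if the channel's send queue is running */
};

struct priv {
    int num_channels; /* number of channels currently in use */
    /*
     * C99 flexible array member: storage for the worst-case
     * number of channels lives inside the priv allocation, so
     * &priv->channel[i] is always a valid pointer and the fast
     * path needs neither a NULL check nor its own free().
     */
    struct channel channel[];
};

static struct priv *
priv_alloc(int max_channels)
{
    /* one zeroed allocation covers the priv plus all channel slots */
    return (calloc(1, sizeof(struct priv) +
        sizeof(struct channel) * max_channels));
}

int
main(void)
{
    struct priv *priv = priv_alloc(8);

    if (priv == NULL)
        return (1);
    priv->num_channels = 4;
    priv->channel[3].running = 1; /* no NULL check required */
    printf("channel 3 running: %d\n", priv->channel[3].running);
    free(priv); /* single free at detach time */
    return (0);
}

Because the array is sized for the upper bound on the channel count (in the driver, the number of completion vectors), changing the number of active channels at runtime reuses the same storage, and teardown is a single free().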

sys/dev/mlx5/mlx5_en/en.h

@@ -596,7 +596,7 @@ struct mlx5e_sq {
 #define MLX5E_CEV_STATE_INITIAL 0 /* timer not started */
 #define MLX5E_CEV_STATE_SEND_NOPS 1 /* send NOPs */
 #define MLX5E_CEV_STATE_HOLD_NOPS 2 /* don't send NOPs yet */
-    u16 stopped; /* set if SQ is stopped */
+    u16 running; /* set if SQ is running */
     struct callout cev_callout;
     union {
         u32 d32[2];
@@ -769,7 +769,6 @@ struct mlx5e_priv {
     u32 tdn;
     struct mlx5_core_mr mr;
-    struct mlx5e_channel *volatile *channel;
     u32 tisn[MLX5E_MAX_TX_NUM_TC];
     u32 rqtn;
     u32 tirn[MLX5E_NUM_TT];
@@ -814,6 +813,8 @@ struct mlx5e_priv {
     int clbr_curr;
     struct mlx5e_clbr_point clbr_points[2];
     u_int clbr_gen;
+    struct mlx5e_channel channel[];
 };
 #define MLX5E_NET_IP_ALIGN 2

sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c

@@ -1035,7 +1035,7 @@ mlx5e_ethtool_debug_channel_info(SYSCTL_HANDLER_ARGS)
     if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
         goto out;
     for (i = 0; i < priv->params.num_channels; i++) {
-        c = priv->channel[i];
+        c = &priv->channel[i];
         rq = &c->rq;
         sbuf_printf(&sb, "channel %d rq %d cq %d\n",
             c->ix, rq->rqn, rq->cq.mcq.cqn);

sys/dev/mlx5/mlx5_en/mlx5_en_main.c

@@ -473,7 +473,6 @@ mlx5e_update_stats_work(struct work_struct *work)
         update_stats_work);
     struct mlx5_core_dev *mdev = priv->mdev;
     struct mlx5e_vport_stats *s = &priv->stats.vport;
-    struct mlx5e_rq_stats *rq_stats;
     struct mlx5e_sq_stats *sq_stats;
     struct buf_ring *sq_br;
 #if (__FreeBSD_version < 1100000)
@@ -507,9 +506,9 @@ mlx5e_update_stats_work(struct work_struct *work)
     /* Collect firts the SW counters and then HW for consistency */
     for (i = 0; i < priv->params.num_channels; i++) {
-        struct mlx5e_rq *rq = &priv->channel[i]->rq;
-        rq_stats = &priv->channel[i]->rq.stats;
+        struct mlx5e_channel *pch = priv->channel + i;
+        struct mlx5e_rq *rq = &pch->rq;
+        struct mlx5e_rq_stats *rq_stats = &pch->rq.stats;
         /* collect stats from LRO */
         rq_stats->sw_lro_queued = rq->lro.lro_queued;
@@ -522,8 +521,8 @@ mlx5e_update_stats_work(struct work_struct *work)
         rx_wqe_err += rq_stats->wqe_err;
         for (j = 0; j < priv->num_tc; j++) {
-            sq_stats = &priv->channel[i]->sq[j].stats;
-            sq_br = priv->channel[i]->sq[j].br;
+            sq_stats = &pch->sq[j].stats;
+            sq_br = pch->sq[j].br;
             tso_packets += sq_stats->tso_packets;
             tso_bytes += sq_stats->tso_bytes;
@@ -1202,7 +1201,7 @@ mlx5e_refresh_sq_inline(struct mlx5e_priv *priv)
         return;
     for (i = 0; i < priv->params.num_channels; i++)
-        mlx5e_refresh_sq_inline_sub(priv, priv->channel[i]);
+        mlx5e_refresh_sq_inline_sub(priv, &priv->channel[i]);
 }

 static int
@@ -1388,6 +1387,8 @@ mlx5e_open_sq(struct mlx5e_channel *c,
     if (err)
         goto err_disable_sq;
+    WRITE_ONCE(sq->running, 1);
     return (0);

 err_disable_sq:
@@ -1462,19 +1463,20 @@ mlx5e_drain_sq(struct mlx5e_sq *sq)
     /*
      * Check if already stopped.
      *
-     * NOTE: The "stopped" variable is only written when both the
-     * priv's configuration lock and the SQ's lock is locked. It
-     * can therefore safely be read when only one of the two locks
-     * is locked. This function is always called when the priv's
-     * configuration lock is locked.
+     * NOTE: Serialization of this function is managed by the
+     * caller ensuring the priv's state lock is locked or in case
+     * of rate limit support, a single thread manages drain and
+     * resume of SQs. The "running" variable can therefore safely
+     * be read without any locks.
      */
-    if (sq->stopped != 0)
+    if (READ_ONCE(sq->running) == 0)
         return;
-    mtx_lock(&sq->lock);
     /* don't put more packets into the SQ */
-    sq->stopped = 1;
+    WRITE_ONCE(sq->running, 0);
+    /* serialize access to DMA rings */
+    mtx_lock(&sq->lock);
     /* teardown event factor timer, if any */
     sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
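
The rewritten NOTE above captures the new serialization model: "running" is only written from control paths that are already serialized against each other (the priv's state lock, or the single rate-limit worker thread), so the transmit path may read it without taking any lock; READ_ONCE()/WRITE_ONCE() merely keep the compiler from tearing, caching, or reordering the access. Here is a userland sketch of the resulting drain/transmit protocol, using pthreads in place of mtx(9) and local volatile-based macros in the spirit of the kernel's READ_ONCE/WRITE_ONCE; all names are illustrative:

#include <errno.h>
#include <pthread.h>

/* volatile accesses, in the spirit of the kernel's READ_ONCE/WRITE_ONCE */
#define READ_ONCE(x)     (*(volatile __typeof(x) *)&(x))
#define WRITE_ONCE(x, v) do { *(volatile __typeof(x) *)&(x) = (v); } while (0)

struct sq {
    unsigned short running; /* set if the SQ accepts new packets */
    pthread_mutex_t lock;   /* serializes access to the DMA rings */
};

/* control path: the caller guarantees only one drain/resume at a time */
static void
drain_sq(struct sq *sq)
{
    if (READ_ONCE(sq->running) == 0)
        return; /* already stopped */

    /* don't put more packets into the SQ */
    WRITE_ONCE(sq->running, 0);

    /* serialize against transmitters already inside the SQ lock */
    pthread_mutex_lock(&sq->lock);
    /* ... ring teardown and flushing would happen here ... */
    pthread_mutex_unlock(&sq->lock);
}

/* fast path: called with sq->lock held; no channel NULL check needed */
static int
xmit_locked(struct sq *sq)
{
    if (READ_ONCE(sq->running) == 0)
        return (ENETDOWN); /* drained or not yet opened */
    /* ... post the packet to the hardware ring ... */
    return (0);
}

Note the ordering in drain_sq(): clearing "running" first and only then taking the SQ lock guarantees that, once the lock is acquired, any transmitter that saw running != 0 has already left the critical section, so no new work can slip in behind the flush.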
@@ -1768,15 +1770,14 @@ mlx5e_chan_mtx_destroy(struct mlx5e_channel *c)
 static int
 mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
     struct mlx5e_channel_param *cparam,
-    struct mlx5e_channel *volatile *cp)
+    struct mlx5e_channel *c)
 {
-    struct mlx5e_channel *c;
     int err;
-    c = malloc(sizeof(*c), M_MLX5EN, M_WAITOK | M_ZERO);
+    memset(c, 0, sizeof(*c));
     c->priv = priv;
     c->ix = ix;
     c->cpu = 0;
     c->ifp = priv->ifp;
     c->mkey_be = cpu_to_be32(priv->mr.key);
     c->num_tc = priv->num_tc;
@@ -1803,9 +1804,6 @@ mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
     if (err)
         goto err_close_sqs;
-    /* store channel pointer */
-    *cp = c;
     /* poll receive queue initially */
     c->rq.cq.mcq.comp(&c->rq.cq.mcq);
@@ -1823,39 +1821,24 @@ mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 err_free:
     /* destroy mutexes */
     mlx5e_chan_mtx_destroy(c);
-    free(c, M_MLX5EN);
     return (err);
 }

 static void
-mlx5e_close_channel(struct mlx5e_channel *volatile *pp)
+mlx5e_close_channel(struct mlx5e_channel *c)
 {
-    struct mlx5e_channel *c = *pp;
-    /* check if channel is already closed */
-    if (c == NULL)
-        return;
     mlx5e_close_rq(&c->rq);
 }

 static void
-mlx5e_close_channel_wait(struct mlx5e_channel *volatile *pp)
+mlx5e_close_channel_wait(struct mlx5e_channel *c)
 {
-    struct mlx5e_channel *c = *pp;
-    /* check if channel is already closed */
-    if (c == NULL)
-        return;
-    /* ensure channel pointer is no longer used */
-    *pp = NULL;
     mlx5e_close_rq_wait(&c->rq);
     mlx5e_close_sqs_wait(c);
     mlx5e_close_cq(&c->rq.cq);
     mlx5e_close_tx_cqs(c);
     /* destroy mutexes */
     mlx5e_chan_mtx_destroy(c);
-    free(c, M_MLX5EN);
 }

 static int
@@ -2012,14 +1995,10 @@ static int
 mlx5e_open_channels(struct mlx5e_priv *priv)
 {
     struct mlx5e_channel_param cparam;
-    void *ptr;
     int err;
     int i;
     int j;
-    priv->channel = malloc(priv->params.num_channels *
-        sizeof(struct mlx5e_channel *), M_MLX5EN, M_WAITOK | M_ZERO);
     mlx5e_build_channel_param(priv, &cparam);
     for (i = 0; i < priv->params.num_channels; i++) {
         err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
@@ -2028,7 +2007,7 @@ mlx5e_open_channels(struct mlx5e_priv *priv)
     }
     for (j = 0; j < priv->params.num_channels; j++) {
-        err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
+        err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j].rq);
         if (err)
             goto err_close_channels;
     }
@@ -2036,39 +2015,22 @@ mlx5e_open_channels(struct mlx5e_priv *priv)
     return (0);

 err_close_channels:
-    for (i--; i >= 0; i--) {
+    while (i--) {
         mlx5e_close_channel(&priv->channel[i]);
         mlx5e_close_channel_wait(&priv->channel[i]);
     }
-    /* remove "volatile" attribute from "channel" pointer */
-    ptr = __DECONST(void *, priv->channel);
-    priv->channel = NULL;
-    free(ptr, M_MLX5EN);
     return (err);
 }

 static void
 mlx5e_close_channels(struct mlx5e_priv *priv)
 {
-    void *ptr;
     int i;
-    if (priv->channel == NULL)
-        return;
     for (i = 0; i < priv->params.num_channels; i++)
         mlx5e_close_channel(&priv->channel[i]);
     for (i = 0; i < priv->params.num_channels; i++)
         mlx5e_close_channel_wait(&priv->channel[i]);
-    /* remove "volatile" attribute from "channel" pointer */
-    ptr = __DECONST(void *, priv->channel);
-    priv->channel = NULL;
-    free(ptr, M_MLX5EN);
 }

 static int
@@ -2134,9 +2096,6 @@ mlx5e_refresh_channel_params_sub(struct mlx5e_priv *priv, struct mlx5e_channel *
     int err;
     int i;
-    if (c == NULL)
-        return (EINVAL);
     err = mlx5e_refresh_rq_params(priv, &c->rq);
     if (err)
         goto done;
@@ -2155,13 +2114,14 @@ mlx5e_refresh_channel_params(struct mlx5e_priv *priv)
 {
     int i;
-    if (priv->channel == NULL)
+    /* check if channels are closed */
+    if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
         return (EINVAL);
     for (i = 0; i < priv->params.num_channels; i++) {
         int err;
-        err = mlx5e_refresh_channel_params_sub(priv, priv->channel[i]);
+        err = mlx5e_refresh_channel_params_sub(priv, &priv->channel[i]);
         if (err)
             return (err);
     }
@@ -2255,7 +2215,7 @@ mlx5e_open_rqt(struct mlx5e_priv *priv)
         /* apply receive side scaling stride, if any */
         ix -= ix % (int)priv->params.channels_rsss;
-        MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn);
+        MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix].rq.rqn);
     }

     MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
@@ -2322,7 +2282,7 @@ mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 * tirc, int tt)
         MLX5_SET(tirc, tirc, disp_type,
             MLX5_TIRC_DISP_TYPE_DIRECT);
         MLX5_SET(tirc, tirc, inline_rqn,
-            priv->channel[0]->rq.rqn);
+            priv->channel[0].rq.rqn);
         break;
     default:
         MLX5_SET(tirc, tirc, disp_type,
@@ -3253,7 +3213,7 @@ mlx5e_resume_sq(struct mlx5e_sq *sq)
     int err;
     /* check if already enabled */
-    if (sq->stopped == 0)
+    if (READ_ONCE(sq->running) != 0)
         return;

     err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_ERR,
@@ -3276,11 +3236,8 @@ mlx5e_resume_sq(struct mlx5e_sq *sq)
             "mlx5e_modify_sq() from RST to RDY failed: %d\n", err);
     }
-    mtx_lock(&sq->lock);
     sq->cev_next_state = MLX5E_CEV_STATE_INITIAL;
-    sq->stopped = 0;
-    mtx_unlock(&sq->lock);
+    WRITE_ONCE(sq->running, 1);
 }

 static void
@@ -3351,18 +3308,14 @@ mlx5e_modify_tx_dma(struct mlx5e_priv *priv, uint8_t value)
 {
     int i;
-    if (priv->channel == NULL)
+    if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
         return;
     for (i = 0; i < priv->params.num_channels; i++) {
-        if (!priv->channel[i])
-            continue;
         if (value)
-            mlx5e_disable_tx_dma(priv->channel[i]);
+            mlx5e_disable_tx_dma(&priv->channel[i]);
         else
-            mlx5e_enable_tx_dma(priv->channel[i]);
+            mlx5e_enable_tx_dma(&priv->channel[i]);
     }
 }
@@ -3371,18 +3324,14 @@ mlx5e_modify_rx_dma(struct mlx5e_priv *priv, uint8_t value)
 {
     int i;
-    if (priv->channel == NULL)
+    if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
         return;
     for (i = 0; i < priv->params.num_channels; i++) {
-        if (!priv->channel[i])
-            continue;
         if (value)
-            mlx5e_disable_rx_dma(priv->channel[i]);
+            mlx5e_disable_rx_dma(&priv->channel[i]);
         else
-            mlx5e_enable_rx_dma(priv->channel[i]);
+            mlx5e_enable_rx_dma(&priv->channel[i]);
     }
 }
@@ -3587,7 +3536,13 @@ mlx5e_create_ifp(struct mlx5_core_dev *mdev)
         mlx5_core_dbg(mdev, "mlx5e_check_required_hca_cap() failed\n");
         return (NULL);
     }
-    priv = malloc(sizeof(*priv), M_MLX5EN, M_WAITOK | M_ZERO);
+    /*
+     * Try to allocate the priv and make room for worst-case
+     * number of channel structures:
+     */
+    priv = malloc(sizeof(*priv) +
+        (sizeof(priv->channel[0]) * mdev->priv.eq_table.num_comp_vectors),
+        M_MLX5EN, M_WAITOK | M_ZERO);
     mlx5e_priv_mtx_init(priv);

     ifp = priv->ifp = if_alloc(IFT_ETHER);

sys/dev/mlx5/mlx5_en/mlx5_en_rl.c

@@ -458,9 +458,9 @@ mlx5e_rlw_channel_set_rate_locked(struct mlx5e_rl_worker *rlw,
             howmany(rate, 1000), burst);
     }
-    /* set new rate, if SQ is not stopped */
+    /* set new rate, if SQ is running */
     sq = channel->sq;
-    if (sq != NULL && sq->stopped == 0) {
+    if (sq != NULL && READ_ONCE(sq->running) != 0) {
         error = mlx5e_rl_modify_sq(sq, index);
         if (error != 0)
             atomic_add_64(&rlw->priv->rl.stats.tx_modify_rate_failure, 1ULL);

sys/dev/mlx5/mlx5_en/mlx5_en_tx.c

@@ -81,17 +81,10 @@ static struct mlx5e_sq *
 mlx5e_select_queue(struct ifnet *ifp, struct mbuf *mb)
 {
     struct mlx5e_priv *priv = ifp->if_softc;
-    struct mlx5e_channel * volatile *ppch;
-    struct mlx5e_channel *pch;
     struct mlx5e_sq *sq;
     u32 ch;
     u32 tc;
-    ppch = priv->channel;
-    /* check if channels are successfully opened */
-    if (unlikely(ppch == NULL))
-        return (NULL);
     /* obtain VLAN information if present */
     if (mb->m_flags & M_VLANTAG) {
         tc = (mb->m_pkthdr.ether_vtag >> 13);
@@ -116,7 +109,7 @@ mlx5e_select_queue(struct ifnet *ifp, struct mbuf *mb)
                 struct mlx5e_rl_channel, m_snd_tag)->sq;
             /* check if valid */
-            if (sq != NULL && sq->stopped == 0)
+            if (sq != NULL && sq->running != 0)
                 return (sq);
             /* FALLTHROUGH */
@@ -146,10 +139,10 @@ mlx5e_select_queue(struct ifnet *ifp, struct mbuf *mb)
 #endif
     }
-    /* check if channel is allocated and not stopped */
-    pch = ppch[ch];
-    if (likely(pch != NULL && pch->sq[tc].stopped == 0))
-        return (&pch->sq[tc]);
+    /* check if send queue is running */
+    sq = &priv->channel[ch].sq[tc];
+    if (likely(READ_ONCE(sq->running) != 0))
+        return (sq);
     return (NULL);
 }
@@ -552,7 +545,7 @@ mlx5e_xmit_locked(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)
     int err = 0;

     if (unlikely((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
-        sq->stopped != 0)) {
+        READ_ONCE(sq->running) == 0)) {
         m_freem(mb);
         return (ENETDOWN);
     }