Factor out TCP rateset destruction code.

Ensure that epoch_call() is not invoked more than once before its
callback has executed, by always checking the RS_FUNERAL_SCHD flag
before calling epoch_call(), as sketched below.

The "rs_number_dead" is balanced again after r353353.

Discussed with:	rrs@
Sponsored by:	Mellanox Technologies

commit eabddb25a3
parent 38c0ca1481
Author: Hans Petter Selasky
Date:   2019-10-09 17:08:40 +00:00

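For orientation before the diff: every teardown path now funnels into a single helper, rs_defer_destroy(), and the RS_FUNERAL_SCHD flag is what makes repeated calls harmless. Below is an annotated restatement of that helper rather than a verbatim excerpt from the patch; the comments are editorial.

	static void
	rs_defer_destroy(struct tcp_rate_set *rs)
	{

		/* All callers hold rs_mtx, so the flag test and set cannot race. */
		mtx_assert(&rs_mtx, MA_OWNED);

		/* A destruction callback is already pending; nothing to do. */
		if (rs->rs_flags & RS_FUNERAL_SCHD)
			return;

		/* Account for the dying rate set exactly once. */
		rs_number_dead++;

		/* Mark as scheduled and defer rs_destroy() to after the epoch. */
		rs->rs_flags |= RS_FUNERAL_SCHD;
		epoch_call(net_epoch, &rs->rs_epoch_ctx, rs_destroy);
	}

Because the flag is tested and set while rs_mtx is held, tcp_rl_ifnet_departure(), tcp_rl_shutdown() and tcp_rel_pacing_rate() can all call rs_defer_destroy() for the same rate set without risking a second epoch_call() before rs_destroy() has run.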

@@ -270,6 +270,23 @@ rs_destroy(epoch_context_t ctx)
 	}
 }
 
+static void
+rs_defer_destroy(struct tcp_rate_set *rs)
+{
+
+	mtx_assert(&rs_mtx, MA_OWNED);
+
+	/* Check if already pending. */
+	if (rs->rs_flags & RS_FUNERAL_SCHD)
+		return;
+
+	rs_number_dead++;
+
+	/* Set flag to only defer once. */
+	rs->rs_flags |= RS_FUNERAL_SCHD;
+	epoch_call(net_epoch, &rs->rs_epoch_ctx, rs_destroy);
+}
+
 #ifdef INET
 extern counter_u64_t rate_limit_set_ok;
 extern counter_u64_t rate_limit_active;
@@ -989,7 +1006,6 @@ tcp_rl_ifnet_departure(void *arg __unused, struct ifnet *ifp)
 		    (rs->rs_if_dunit == ifp->if_dunit)) {
 			CK_LIST_REMOVE(rs, next);
 			rs_number_alive--;
-			rs_number_dead++;
 			rs->rs_flags |= RS_IS_DEAD;
 			for (i = 0; i < rs->rs_rate_cnt; i++) {
 				if (rs->rs_rlt[i].flags & HDWRPACE_TAGPRESENT) {
@@ -999,14 +1015,8 @@ tcp_rl_ifnet_departure(void *arg __unused, struct ifnet *ifp)
 				}
 				rs->rs_rlt[i].flags = HDWRPACE_IFPDEPARTED;
 			}
-			if (rs->rs_flows_using == 0) {
-				/*
-				 * No references left, so we can schedule the
-				 * destruction after the epoch (with a caveat).
-				 */
-				rs->rs_flags |= RS_FUNERAL_SCHD;
-				epoch_call(net_epoch, &rs->rs_epoch_ctx, rs_destroy);
-			}
+			if (rs->rs_flows_using == 0)
+				rs_defer_destroy(rs);
 			break;
 		}
 	}
@@ -1024,7 +1034,6 @@ tcp_rl_shutdown(void *arg __unused, int howto __unused)
 	CK_LIST_FOREACH_SAFE(rs, &int_rs, next, nrs) {
 		CK_LIST_REMOVE(rs, next);
 		rs_number_alive--;
-		rs_number_dead++;
 		rs->rs_flags |= RS_IS_DEAD;
 		for (i = 0; i < rs->rs_rate_cnt; i++) {
 			if (rs->rs_rlt[i].flags & HDWRPACE_TAGPRESENT) {
@@ -1034,20 +1043,8 @@ tcp_rl_shutdown(void *arg __unused, int howto __unused)
 			}
 			rs->rs_rlt[i].flags = HDWRPACE_IFPDEPARTED;
 		}
-		if (rs->rs_flows_using != 0) {
-			/*
-			 * We dont hold a reference
-			 * so we have nothing left to
-			 * do.
-			 */
-		} else {
-			/*
-			 * No references left, so we can destroy it
-			 * after the epoch.
-			 */
-			rs->rs_flags |= RS_FUNERAL_SCHD;
-			epoch_call(net_epoch, &rs->rs_epoch_ctx, rs_destroy);
-		}
+		if (rs->rs_flows_using == 0)
+			rs_defer_destroy(rs);
 	}
 	mtx_unlock(&rs_mtx);
 }
@@ -1190,16 +1187,8 @@ tcp_rel_pacing_rate(const struct tcp_hwrate_limit_table *crte, struct tcpcb *tp)
 		/*
 		 * Is it dead?
 		 */
-		if ((rs->rs_flags & RS_IS_DEAD) &&
-		    ((rs->rs_flags & RS_FUNERAL_SCHD) == 0)){
-			/*
-			 * We were the last,
-			 * and a funeral is not pending, so
-			 * we must schedule it.
-			 */
-			rs->rs_flags |= RS_FUNERAL_SCHD;
-			epoch_call(net_epoch, &rs->rs_epoch_ctx, rs_destroy);
-		}
+		if (rs->rs_flags & RS_IS_DEAD)
+			rs_defer_destroy(rs);
 		mtx_unlock(&rs_mtx);
 	}
 	in_pcbdetach_txrtlmt(tp->t_inpcb);