cxgbe(4): Keep track of the clusters that have to be freed by the
custom free routine (rxb_free) in the driver.  Fail MOD_UNLOAD with
EBUSY if any such cluster has been handed up to the kernel but hasn't
been freed yet.  This prevents a panic later when the cluster finally
needs to be freed but rxb_free is gone from the kernel.

MFC after:	1 week
np 2014-07-23 22:29:22 +00:00
parent 86c15d6913
commit 153afd26ad
3 changed files with 95 additions and 30 deletions
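In essence, the change pairs two per-CPU counters from counter(9): one is bumped each time an rx cluster is handed up to the stack with the driver's custom free routine attached, and the other is bumped when that routine eventually runs.  A non-zero difference means some clusters are still out in the kernel, so MOD_UNLOAD is refused with EBUSY.  Below is a minimal sketch of that accounting scheme; the names (ext_refs, ext_rels, outstanding_clusters, and friends) are illustrative only and are not the driver's own.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/malloc.h>

static counter_u64_t ext_refs;	/* clusters handed up with a custom free routine */
static counter_u64_t ext_rels;	/* clusters released back by that routine */

static void
accounting_init(void)			/* e.g. from the MOD_LOAD path */
{
	ext_refs = counter_u64_alloc(M_WAITOK);
	ext_rels = counter_u64_alloc(M_WAITOK);
	counter_u64_zero(ext_refs);
	counter_u64_zero(ext_rels);
}

static void
cluster_handed_up(void)			/* when rx attaches the free routine */
{
	counter_u64_add(ext_refs, 1);
}

static void
cluster_released(void)			/* from the custom free routine */
{
	counter_u64_add(ext_rels, 1);
}

static uint64_t
outstanding_clusters(void)		/* checked before allowing MOD_UNLOAD */
{
	return (counter_u64_fetch(ext_refs) - counter_u64_fetch(ext_rels));
}

static void
accounting_fini(void)			/* only once unload is allowed */
{
	counter_u64_free(ext_refs);
	counter_u64_free(ext_rels);
}

As the t4_main.c hunk below shows, the unload path retries the check a few times with pause(9) before giving up, since clusters handed up earlier may still be returned by the rest of the kernel while unload waits.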


@@ -985,6 +985,8 @@ void t4_nm_intr(void *);
/* t4_sge.c */
void t4_sge_modload(void);
void t4_sge_modunload(void);
uint64_t t4_sge_extfree_refs(void);
void t4_init_sge_cpl_handlers(struct adapter *);
void t4_tweak_chip_settings(struct adapter *);
int t4_read_chip_settings(struct adapter *);


@@ -8259,6 +8259,9 @@ tweak_tunables(void)
t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
}
static struct sx mlu; /* mod load unload */
SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload");
static int
mod_event(module_t mod, int cmd, void *arg)
{
@@ -8267,41 +8270,67 @@ mod_event(module_t mod, int cmd, void *arg)
switch (cmd) {
case MOD_LOAD:
if (atomic_fetchadd_int(&loaded, 1))
break;
t4_sge_modload();
sx_init(&t4_list_lock, "T4/T5 adapters");
SLIST_INIT(&t4_list);
sx_xlock(&mlu);
if (loaded++ == 0) {
t4_sge_modload();
sx_init(&t4_list_lock, "T4/T5 adapters");
SLIST_INIT(&t4_list);
#ifdef TCP_OFFLOAD
sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
SLIST_INIT(&t4_uld_list);
sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
SLIST_INIT(&t4_uld_list);
#endif
t4_tracer_modload();
tweak_tunables();
t4_tracer_modload();
tweak_tunables();
}
sx_xunlock(&mlu);
break;
case MOD_UNLOAD:
if (atomic_fetchadd_int(&loaded, -1) > 1)
break;
t4_tracer_modunload();
sx_xlock(&mlu);
if (--loaded == 0) {
int tries;
sx_slock(&t4_list_lock);
if (!SLIST_EMPTY(&t4_list)) {
rc = EBUSY;
sx_sunlock(&t4_list_lock);
goto done_unload;
}
#ifdef TCP_OFFLOAD
sx_slock(&t4_uld_list_lock);
if (!SLIST_EMPTY(&t4_uld_list)) {
rc = EBUSY;
sx_sunlock(&t4_uld_list_lock);
break;
}
sx_sunlock(&t4_uld_list_lock);
sx_destroy(&t4_uld_list_lock);
sx_slock(&t4_uld_list_lock);
if (!SLIST_EMPTY(&t4_uld_list)) {
rc = EBUSY;
sx_sunlock(&t4_uld_list_lock);
sx_sunlock(&t4_list_lock);
goto done_unload;
}
#endif
tries = 0;
while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
uprintf("%ju clusters with custom free routine "
"still is use.\n", t4_sge_extfree_refs());
pause("t4unload", 2 * hz);
}
#ifdef TCP_OFFLOAD
sx_sunlock(&t4_uld_list_lock);
#endif
sx_slock(&t4_list_lock);
if (!SLIST_EMPTY(&t4_list)) {
rc = EBUSY;
sx_sunlock(&t4_list_lock);
break;
if (t4_sge_extfree_refs() == 0) {
t4_tracer_modunload();
#ifdef TCP_OFFLOAD
sx_destroy(&t4_uld_list_lock);
#endif
sx_destroy(&t4_list_lock);
t4_sge_modunload();
loaded = 0;
} else {
rc = EBUSY;
loaded++; /* undo earlier decrement */
}
}
sx_sunlock(&t4_list_lock);
sx_destroy(&t4_list_lock);
done_unload:
sx_xunlock(&mlu);
break;
}


@@ -44,6 +44,7 @@ __FBSDID("$FreeBSD$");
#include <sys/time.h>
#include <sys/sysctl.h>
#include <sys/smp.h>
#include <sys/counter.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
@@ -258,6 +259,9 @@ static int handle_fw_msg(struct sge_iq *, const struct rss_header *,
static int sysctl_uint16(SYSCTL_HANDLER_ARGS);
static int sysctl_bufsizes(SYSCTL_HANDLER_ARGS);
static counter_u64_t extfree_refs;
static counter_u64_t extfree_rels;
/*
* Called on MOD_LOAD. Validates and calculates the SGE tunables.
*/
@@ -329,6 +333,30 @@ t4_sge_modload(void)
" using 0 instead.\n", cong_drop);
cong_drop = 0;
}
extfree_refs = counter_u64_alloc(M_WAITOK);
extfree_rels = counter_u64_alloc(M_WAITOK);
counter_u64_zero(extfree_refs);
counter_u64_zero(extfree_rels);
}
void
t4_sge_modunload(void)
{
counter_u64_free(extfree_refs);
counter_u64_free(extfree_rels);
}
uint64_t
t4_sge_extfree_refs(void)
{
uint64_t refs, rels;
rels = counter_u64_fetch(extfree_rels);
refs = counter_u64_fetch(extfree_refs);
return (refs - rels);
}
void
@@ -1513,6 +1541,7 @@ rxb_free(struct mbuf *m, void *arg1, void *arg2)
caddr_t cl = arg2;
uma_zfree(zone, cl);
counter_u64_add(extfree_rels, 1);
}
/*
@@ -1574,7 +1603,8 @@ get_scatter_segment(struct adapter *sc, struct sge_fl *fl, int total, int flags)
fl->mbuf_inlined++;
m_extaddref(m, payload, padded_len, &clm->refcount, rxb_free,
swz->zone, sd->cl);
sd->nmbuf++;
if (sd->nmbuf++ == 0)
counter_u64_add(extfree_refs, 1);
} else {
@@ -1591,7 +1621,8 @@ get_scatter_segment(struct adapter *sc, struct sge_fl *fl, int total, int flags)
if (clm != NULL) {
m_extaddref(m, payload, padded_len, &clm->refcount,
rxb_free, swz->zone, sd->cl);
sd->nmbuf++;
if (sd->nmbuf++ == 0)
counter_u64_add(extfree_refs, 1);
} else {
m_cljset(m, sd->cl, swz->type);
sd->cl = NULL; /* consumed, not a recycle candidate */
@@ -3280,6 +3311,7 @@ refill_fl(struct adapter *sc, struct sge_fl *fl, int nbufs)
if (atomic_fetchadd_int(&clm->refcount, -1) == 1) {
fl->cl_recycled++;
counter_u64_add(extfree_rels, 1);
goto recycled;
}
sd->cl = NULL; /* gave up my reference */
@@ -3381,9 +3413,11 @@ free_fl_sdesc(struct adapter *sc, struct sge_fl *fl)
cll = &sd->cll;
clm = cl_metadata(sc, fl, cll, sd->cl);
if (sd->nmbuf == 0 ||
(clm && atomic_fetchadd_int(&clm->refcount, -1) == 1)) {
if (sd->nmbuf == 0)
uma_zfree(sc->sge.sw_zone_info[cll->zidx].zone, sd->cl);
else if (clm && atomic_fetchadd_int(&clm->refcount, -1) == 1) {
uma_zfree(sc->sge.sw_zone_info[cll->zidx].zone, sd->cl);
counter_u64_add(extfree_rels, 1);
}
sd->cl = NULL;
}