Fix a race between doselwakeup() and selfdfree().  If the doselwakeup()
loop finds the selfd entry and clears its sf_si pointer while the entry
is being handled by selfdfree() in parallel, the NULL sf_si makes
selfdfree() free the memory.  The result is the race and accesses to the
freed memory.

Refcount the selfd ownership.  One reference is for the sf_link
linkage, which is unconditionally dereferenced by selfdfree().
Another reference is for sf_threads, both selfdfree() and
doselwakeup() race to deref it, the winner unlinks and then frees the
selfd entry.

Reported by:	Larry Rosenman <ler@lerctr.org>
Tested by:	Larry Rosenman <ler@lerctr.org>, pho
Sponsored by:	The FreeBSD Foundation
MFC after:	2 weeks
This commit is contained in:
kib 2015-07-09 09:22:21 +00:00
parent 6dc0345fb0
commit 58e696649e

View File

@@ -153,6 +153,7 @@ struct selfd {
struct mtx *sf_mtx; /* Pointer to selinfo mtx. */
struct seltd *sf_td; /* (k) owning seltd. */
void *sf_cookie; /* (k) fd or pollfd. */
u_int sf_refs;
};
static uma_zone_t selfd_zone;
@@ -1685,10 +1686,13 @@ selfdfree(struct seltd *stp, struct selfd *sfp)
STAILQ_REMOVE(&stp->st_selq, sfp, selfd, sf_link);
if (sfp->sf_si != NULL) {
mtx_lock(sfp->sf_mtx);
if (sfp->sf_si != NULL)
if (sfp->sf_si != NULL) {
TAILQ_REMOVE(&sfp->sf_si->si_tdlist, sfp, sf_threads);
refcount_release(&sfp->sf_refs);
}
mtx_unlock(sfp->sf_mtx);
}
if (refcount_release(&sfp->sf_refs))
uma_zfree(selfd_zone, sfp);
}
@@ -1745,6 +1749,7 @@ selrecord(selector, sip)
*/
sfp->sf_si = sip;
sfp->sf_mtx = mtxp;
refcount_init(&sfp->sf_refs, 2);
STAILQ_INSERT_TAIL(&stp->st_selq, sfp, sf_link);
/*
* Now that we've locked the sip, check for initialization.
@@ -1809,6 +1814,8 @@ doselwakeup(sip, pri)
stp->st_flags |= SELTD_PENDING;
cv_broadcastpri(&stp->st_wait, pri);
mtx_unlock(&stp->st_mtx);
if (refcount_release(&sfp->sf_refs))
uma_zfree(selfd_zone, sfp);
}
mtx_unlock(sip->si_mtx);
}