pf: Initialize pf_kpool mutexes earlier

There are some error paths in ioctl handlers that will call
pf_krule_free() before the rule's rpool.mtx field is initialized,
causing a panic with INVARIANTS enabled.

Fix the problem by introducing pf_krule_alloc() and initializing the
mutex there.  This does mean that the rule->krule and pool->kpool
conversion functions need to stop zeroing the input structure, but I
don't see a nicer way to handle this except perhaps by guarding the
mtx_destroy() with a mtx_initialized() check.

Constify some related functions while here and add a regression test
based on a syzkaller reproducer.

Reported by:	syzbot+77cd12872691d219c158@syzkaller.appspotmail.com
Reviewed by:	kp
MFC after:	1 week
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D34115
Commit:	773e3a71b2 (parent: b4cc5d63b6)
Author:	Mark Johnston
Date:	2022-01-31 16:14:00 -05:00
Stats:	4 changed files, 47 insertions(+), 21 deletions(-)

View File

@ -169,7 +169,7 @@ pf_counter_u64_periodic(struct pf_counter_u64 *pfcu64)
} }
static inline u_int64_t static inline u_int64_t
pf_counter_u64_fetch(struct pf_counter_u64 *pfcu64) pf_counter_u64_fetch(const struct pf_counter_u64 *pfcu64)
{ {
struct pf_counter_u64_pcpu *pcpu; struct pf_counter_u64_pcpu *pcpu;
u_int64_t sum; u_int64_t sum;
@ -263,7 +263,7 @@ pf_counter_u64_add(struct pf_counter_u64 *pfcu64, uint32_t n)
} }
static inline u_int64_t static inline u_int64_t
pf_counter_u64_fetch(struct pf_counter_u64 *pfcu64) pf_counter_u64_fetch(const struct pf_counter_u64 *pfcu64)
{ {
return (counter_u64_fetch(pfcu64->counter)); return (counter_u64_fetch(pfcu64->counter));
@ -2155,6 +2155,7 @@ struct pf_kruleset *pf_find_kruleset(const char *);
struct pf_kruleset *pf_find_or_create_kruleset(const char *); struct pf_kruleset *pf_find_or_create_kruleset(const char *);
void pf_rs_initialize(void); void pf_rs_initialize(void);
struct pf_krule *pf_krule_alloc(void);
void pf_krule_free(struct pf_krule *); void pf_krule_free(struct pf_krule *);
#endif #endif

View File

@ -1512,6 +1512,16 @@ pf_altq_get_nth_active(u_int32_t n)
} }
#endif /* ALTQ */ #endif /* ALTQ */
/*
 * Allocate a zeroed pf_krule and initialize its rpool mutex up front.
 * Per the commit message above, some ioctl error paths call
 * pf_krule_free() before the rule is fully constructed; initializing
 * rpool.mtx here (rather than in pf_ioctl_addrule()) keeps the later
 * mtx_destroy() from operating on an uninitialized mutex, which panics
 * with INVARIANTS enabled.  M_WAITOK: may sleep, never returns NULL.
 */
struct pf_krule *
pf_krule_alloc(void)
{
struct pf_krule *rule;
rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO);
mtx_init(&rule->rpool.mtx, "pf_krule_pool", NULL, MTX_DEF);
return (rule);
}
void void
pf_krule_free(struct pf_krule *rule) pf_krule_free(struct pf_krule *rule)
{ {
@ -1584,14 +1594,12 @@ pf_kpool_to_pool(const struct pf_kpool *kpool, struct pf_pool *pool)
pool->opts = kpool->opts; pool->opts = kpool->opts;
} }
static int static void
pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool) pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
{ {
_Static_assert(sizeof(pool->key) == sizeof(kpool->key), ""); _Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
_Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), ""); _Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");
bzero(kpool, sizeof(*kpool));
bcopy(&pool->key, &kpool->key, sizeof(kpool->key)); bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter)); bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));
@ -1599,12 +1607,10 @@ pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
kpool->proxy_port[0] = pool->proxy_port[0]; kpool->proxy_port[0] = pool->proxy_port[0];
kpool->proxy_port[1] = pool->proxy_port[1]; kpool->proxy_port[1] = pool->proxy_port[1];
kpool->opts = pool->opts; kpool->opts = pool->opts;
return (0);
} }
static void static void
pf_krule_to_rule(struct pf_krule *krule, struct pf_rule *rule) pf_krule_to_rule(const struct pf_krule *krule, struct pf_rule *rule)
{ {
bzero(rule, sizeof(*rule)); bzero(rule, sizeof(*rule));
@ -1727,8 +1733,6 @@ pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
if (ret != 0) if (ret != 0)
return (ret); return (ret);
bzero(krule, sizeof(*krule));
bcopy(&rule->src, &krule->src, sizeof(rule->src)); bcopy(&rule->src, &krule->src, sizeof(rule->src));
bcopy(&rule->dst, &krule->dst, sizeof(rule->dst)); bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));
@ -1757,9 +1761,7 @@ pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
if (ret != 0) if (ret != 0)
return (ret); return (ret);
ret = pf_pool_to_kpool(&rule->rpool, &krule->rpool); pf_pool_to_kpool(&rule->rpool, &krule->rpool);
if (ret != 0)
return (ret);
/* Don't allow userspace to set evaulations, packets or bytes. */ /* Don't allow userspace to set evaulations, packets or bytes. */
/* kif, anchor, overload_tbl are not copied over. */ /* kif, anchor, overload_tbl are not copied over. */
@ -1862,8 +1864,6 @@ pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
int rs_num; int rs_num;
int error = 0; int error = 0;
mtx_init(&rule->rpool.mtx, "pf_krule_pool", NULL, MTX_DEF);
if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) { if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) {
error = EINVAL; error = EINVAL;
goto errout_unlocked; goto errout_unlocked;
@ -2357,7 +2357,7 @@ pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td
if (! nvlist_exists_nvlist(nvl, "rule")) if (! nvlist_exists_nvlist(nvl, "rule"))
ERROUT(EINVAL); ERROUT(EINVAL);
rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK | M_ZERO); rule = pf_krule_alloc();
error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"), error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"),
rule); rule);
if (error) if (error)
@ -2390,10 +2390,10 @@ pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td
struct pfioc_rule *pr = (struct pfioc_rule *)addr; struct pfioc_rule *pr = (struct pfioc_rule *)addr;
struct pf_krule *rule; struct pf_krule *rule;
rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK | M_ZERO); rule = pf_krule_alloc();
error = pf_rule_to_krule(&pr->rule, rule); error = pf_rule_to_krule(&pr->rule, rule);
if (error != 0) { if (error != 0) {
free(rule, M_PFRULE); pf_krule_free(rule);
break; break;
} }
@ -2647,7 +2647,7 @@ pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td
} }
if (pcr->action != PF_CHANGE_REMOVE) { if (pcr->action != PF_CHANGE_REMOVE) {
newrule = malloc(sizeof(*newrule), M_PFRULE, M_WAITOK | M_ZERO); newrule = pf_krule_alloc();
error = pf_rule_to_krule(&pcr->rule, newrule); error = pf_rule_to_krule(&pcr->rule, newrule);
if (error != 0) { if (error != 0) {
free(newrule, M_PFRULE); free(newrule, M_PFRULE);

View File

@ -223,8 +223,6 @@ pf_nvpool_to_pool(const nvlist_t *nvl, struct pf_kpool *kpool)
{ {
int error = 0; int error = 0;
bzero(kpool, sizeof(*kpool));
PFNV_CHK(pf_nvbinary(nvl, "key", &kpool->key, sizeof(kpool->key))); PFNV_CHK(pf_nvbinary(nvl, "key", &kpool->key, sizeof(kpool->key)));
if (nvlist_exists_nvlist(nvl, "counter")) { if (nvlist_exists_nvlist(nvl, "counter")) {

View File

@ -869,6 +869,32 @@ ATF_TC_CLEANUP(rpool_mtx, tc)
COMMON_CLEANUP(); COMMON_CLEANUP();
} }
/*
 * Regression test derived from a syzkaller reproducer (see the commit
 * message): drive DIOCCHANGERULE down an early-error path so that
 * pf_krule_free() runs on a rule whose rpool.mtx was, before this fix,
 * never initialized -- panicking under INVARIANTS.  The ioctl itself is
 * expected to fail; the test only verifies the kernel survives.
 */
ATF_TC_WITH_CLEANUP(rpool_mtx2);
ATF_TC_HEAD(rpool_mtx2, tc)
{
/* pf ioctls require root. */
atf_tc_set_md_var(tc, "require.user", "root");
}
ATF_TC_BODY(rpool_mtx2, tc)
{
struct pfioc_rule rule;
COMMON_HEAD();
memset(&rule, 0, sizeof(rule));
/* Presumably an invalid pool ticket forcing the error path -- TODO confirm. */
rule.pool_ticket = 1000000;
rule.action = PF_CHANGE_ADD_HEAD;
rule.rule.af = AF_INET;
/* Return value deliberately ignored; the call is expected to fail. */
ioctl(dev, DIOCCHANGERULE, &rule);
}
ATF_TC_CLEANUP(rpool_mtx2, tc)
{
COMMON_CLEANUP();
}
ATF_TP_ADD_TCS(tp) ATF_TP_ADD_TCS(tp)
{ {
@ -893,6 +919,7 @@ ATF_TP_ADD_TCS(tp)
ATF_TP_ADD_TC(tp, getsrcnodes); ATF_TP_ADD_TC(tp, getsrcnodes);
ATF_TP_ADD_TC(tp, tag); ATF_TP_ADD_TC(tp, tag);
ATF_TP_ADD_TC(tp, rpool_mtx); ATF_TP_ADD_TC(tp, rpool_mtx);
ATF_TP_ADD_TC(tp, rpool_mtx2);
return (atf_no_error()); return (atf_no_error());
} }