pf: Don't allocate per-table entry counters unless required.

pf by default does not do per-address accounting for tables unless the
"counters" keyword is specified in the corresponding pf.conf table
definition.  Yet, we always allocate 12 per-CPU counters for each table
entry.  For large tables this carries a lot of memory overhead, so only
allocate counters when they will actually be used.
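
For illustration, with this change only the second of these two table
definitions allocates counters (an illustrative pf.conf snippet, not
from this commit):

	table <spamd> persist { 192.0.2.0/24 }		# no counters allocated
	table <spamd-c> counters persist { 192.0.2.0/24 }	# 12 counters per entry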

A further enhancement might be to use a dedicated UMA zone to allocate
counter arrays for table entries, since close to half of the structure
size comes from counter pointers.  A related issue is the cost of
zeroing counters, since counter_u64_zero() calls smp_rendezvous() on
some architectures.
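
As a rough illustration of the zone idea, a per-CPU UMA zone could back
each entry's counter array with a single allocation.  The zone name and
the PFR_NUM_COUNTERS constant below are hypothetical, not part of this
commit:

	/*
	 * Hypothetical sketch: one per-CPU allocation per entry instead
	 * of twelve separate counter_u64_alloc() calls.
	 */
	static uma_zone_t pfr_kentry_counter_z;

	pfr_kentry_counter_z = uma_zcreate("pf table entry counters",
	    PFR_NUM_COUNTERS * sizeof(uint64_t), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_PCPU);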

Reported by:	loos, Jim Pingle <jimp@netgate.com>
Reviewed by:	kp
MFC after:	2 weeks
Sponsored by:	Rubicon Communications, LLC (Netgate)
Differential Revision:	https://reviews.freebsd.org/D24803
commit 21121f9bbe (parent cf1509179c)
Author: Mark Johnston
Date:   2020-05-11 18:47:38 +00:00

2 changed files with 60 additions and 47 deletions

diff --git a/share/man/man5/pf.conf.5 b/share/man/man5/pf.conf.5

@@ -189,6 +189,7 @@ The
 .Ar counters
 flag enables per-address packet and byte counters which can be displayed with
 .Xr pfctl 8 .
+Note that this feature carries significant memory overhead for large tables.
 .El
 .Pp
 For example,
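
For reference, the per-address counters this keyword enables can be
listed with pfctl's verbose table output (table name illustrative):

	# pfctl -t spamd-c -T show -v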

diff --git a/sys/netpfil/pf/pf_table.c b/sys/netpfil/pf/pf_table.c

@@ -144,9 +144,9 @@ static void		 pfr_mark_addrs(struct pfr_ktable *);
 static struct pfr_kentry
 		*pfr_lookup_addr(struct pfr_ktable *,
 		    struct pfr_addr *, int);
-static bool	 pfr_create_kentry_counter(struct pfr_kcounters *,
-		    int, int);
-static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *);
+static bool	 pfr_create_kentry_counter(struct pfr_kentry *, int,
+		    int);
+static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *, bool);
 static void	 pfr_destroy_kentries(struct pfr_kentryworkq *);
 static void	 pfr_destroy_kentry_counter(struct pfr_kcounters *,
 		    int, int);
@@ -155,8 +155,8 @@ static void		 pfr_insert_kentries(struct pfr_ktable *,
 		    struct pfr_kentryworkq *, long);
 static void	 pfr_remove_kentries(struct pfr_ktable *,
 		    struct pfr_kentryworkq *);
-static void	 pfr_clstats_kentries(struct pfr_kentryworkq *, long,
-		    int);
+static void	 pfr_clstats_kentries(struct pfr_ktable *,
+		    struct pfr_kentryworkq *, long, int);
 static void	 pfr_reset_feedback(struct pfr_addr *, int);
 static void	 pfr_prepare_network(union sockaddr_union *, int, int);
 static int	 pfr_route_kentry(struct pfr_ktable *,
@@ -285,7 +285,8 @@ pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
 			ad->pfra_fback = PFR_FB_NONE;
 		}
 		if (p == NULL && q == NULL) {
-			p = pfr_create_kentry(ad);
+			p = pfr_create_kentry(ad,
+			    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
 			if (p == NULL)
 				senderr(ENOMEM);
 			if (pfr_route_kentry(tmpkt, p)) {
@@ -451,7 +452,8 @@ pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
 				ad.pfra_fback = PFR_FB_DUPLICATE;
 				goto _skip;
 			}
-			p = pfr_create_kentry(&ad);
+			p = pfr_create_kentry(&ad,
+			    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
 			if (p == NULL)
 				senderr(ENOMEM);
 			if (pfr_route_kentry(tmpkt, p)) {
@@ -485,7 +487,7 @@ pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
 	if (!(flags & PFR_FLAG_DUMMY)) {
 		pfr_insert_kentries(kt, &addq, tzero);
 		pfr_remove_kentries(kt, &delq);
-		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
+		pfr_clstats_kentries(kt, &changeq, tzero, INVERT_NEG_FLAG);
 	} else
 		pfr_destroy_kentries(&addq);
 	if (nadd != NULL)
@@ -623,7 +625,7 @@ pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
 	    pfr_walktree, &w);
 	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
 		pfr_enqueue_addrs(kt, &workq, NULL, 0);
-		pfr_clstats_kentries(&workq, tzero, 0);
+		pfr_clstats_kentries(kt, &workq, tzero, 0);
 	}
 	if (rv)
 		return (rv);
@@ -671,7 +673,7 @@ pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
 	}
 	if (!(flags & PFR_FLAG_DUMMY))
-		pfr_clstats_kentries(&workq, 0, 0);
+		pfr_clstats_kentries(kt, &workq, 0, 0);
 	if (nzero != NULL)
 		*nzero = xzero;
 	return (0);
@@ -784,31 +786,28 @@ pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
 }
 
 static bool
-pfr_create_kentry_counter(struct pfr_kcounters *kc, int pfr_dir, int pfr_op)
+pfr_create_kentry_counter(struct pfr_kentry *ke, int pfr_dir, int pfr_op)
 {
-	kc->pfrkc_packets[pfr_dir][pfr_op] = counter_u64_alloc(M_NOWAIT);
-	if (! kc->pfrkc_packets[pfr_dir][pfr_op])
+	counter_u64_t c;
+
+	c = counter_u64_alloc(M_NOWAIT);
+	if (c == NULL)
 		return (false);
+	ke->pfrke_counters.pfrkc_packets[pfr_dir][pfr_op] = c;
 
-	kc->pfrkc_bytes[pfr_dir][pfr_op] = counter_u64_alloc(M_NOWAIT);
-	if (! kc->pfrkc_bytes[pfr_dir][pfr_op]) {
-		/* Previous allocation will be freed through
-		 * pfr_destroy_kentry() */
+	c = counter_u64_alloc(M_NOWAIT);
+	if (c == NULL)
 		return (false);
-	}
+	ke->pfrke_counters.pfrkc_bytes[pfr_dir][pfr_op] = c;
 
-	kc->pfrkc_tzero = 0;
 	return (true);
 }
 
 static struct pfr_kentry *
-pfr_create_kentry(struct pfr_addr *ad)
+pfr_create_kentry(struct pfr_addr *ad, bool counters)
 {
 	struct pfr_kentry	*ke;
 	int pfr_dir, pfr_op;
 
 	ke = uma_zalloc(V_pfr_kentry_z, M_NOWAIT | M_ZERO);
 	if (ke == NULL)
 		return (NULL);
@@ -819,14 +818,16 @@ pfr_create_kentry(struct pfr_addr *ad)
 	ke->pfrke_af = ad->pfra_af;
 	ke->pfrke_net = ad->pfra_net;
 	ke->pfrke_not = ad->pfra_not;
-	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++)
-		for (pfr_op = 0; pfr_op < PFR_OP_ADDR_MAX; pfr_op ++) {
-			if (! pfr_create_kentry_counter(&ke->pfrke_counters,
-			    pfr_dir, pfr_op)) {
-				pfr_destroy_kentry(ke);
-				return (NULL);
+	ke->pfrke_counters.pfrkc_tzero = 0;
+	if (counters)
+		for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir++)
+			for (pfr_op = 0; pfr_op < PFR_OP_ADDR_MAX; pfr_op++) {
+				if (!pfr_create_kentry_counter(ke, pfr_dir,
+				    pfr_op)) {
+					pfr_destroy_kentry(ke);
+					return (NULL);
+				}
 			}
-		}
 
 	return (ke);
 }
@@ -844,8 +845,12 @@ pfr_destroy_kentries(struct pfr_kentryworkq *workq)
 static void
 pfr_destroy_kentry_counter(struct pfr_kcounters *kc, int pfr_dir, int pfr_op)
 {
-	counter_u64_free(kc->pfrkc_packets[pfr_dir][pfr_op]);
-	counter_u64_free(kc->pfrkc_bytes[pfr_dir][pfr_op]);
+	counter_u64_t c;
+
+	if ((c = kc->pfrkc_packets[pfr_dir][pfr_op]) != NULL)
+		counter_u64_free(c);
+	if ((c = kc->pfrkc_bytes[pfr_dir][pfr_op]) != NULL)
+		counter_u64_free(c);
 }
 
 static void
@@ -890,7 +895,7 @@ pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, long tzero)
 	p = pfr_lookup_addr(kt, ad, 1);
 	if (p != NULL)
 		return (0);
-	p = pfr_create_kentry(ad);
+	p = pfr_create_kentry(ad, (kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
 	if (p == NULL)
 		return (ENOMEM);
@@ -930,22 +935,28 @@ pfr_clean_node_mask(struct pfr_ktable *kt,
 }
 
 static void
-pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange)
+pfr_clear_kentry_counters(struct pfr_kentry *p, int pfr_dir, int pfr_op)
+{
+	counter_u64_zero(p->pfrke_counters.pfrkc_packets[pfr_dir][pfr_op]);
+	counter_u64_zero(p->pfrke_counters.pfrkc_bytes[pfr_dir][pfr_op]);
+}
+
+static void
+pfr_clstats_kentries(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
+    long tzero, int negchange)
 {
 	struct pfr_kentry	*p;
 	int pfr_dir, pfr_op;
 
 	SLIST_FOREACH(p, workq, pfrke_workq) {
 		if (negchange)
 			p->pfrke_not = !p->pfrke_not;
-		for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
-			for (pfr_op = 0; pfr_op < PFR_OP_ADDR_MAX; pfr_op ++) {
-				counter_u64_zero(p->pfrke_counters.
-				    pfrkc_packets[pfr_dir][pfr_op]);
-				counter_u64_zero(p->pfrke_counters.
-				    pfrkc_bytes[pfr_dir][pfr_op]);
-			}
-		}
+		if ((kt->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0)
+			for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir++)
+				for (pfr_op = 0; pfr_op < PFR_OP_ADDR_MAX;
+				    pfr_op++)
+					pfr_clear_kentry_counters(p, pfr_dir,
+					    pfr_op);
 		p->pfrke_counters.pfrkc_tzero = tzero;
 	}
 }
@@ -1551,7 +1562,8 @@ pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
 			senderr(EINVAL);
 		if (pfr_lookup_addr(shadow, ad, 1) != NULL)
 			continue;
-		p = pfr_create_kentry(ad);
+		p = pfr_create_kentry(ad,
+		    (shadow->pfrkt_flags & PFR_TFLAG_COUNTERS) != 0);
 		if (p == NULL)
 			senderr(ENOMEM);
 		if (pfr_route_kentry(shadow, p)) {
@@ -1707,7 +1719,7 @@ pfr_commit_ktable(struct pfr_ktable *kt, long tzero)
 		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
 		pfr_insert_kentries(kt, &addq, tzero);
 		pfr_remove_kentries(kt, &delq);
-		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
+		pfr_clstats_kentries(kt, &changeq, tzero, INVERT_NEG_FLAG);
 		pfr_destroy_kentries(&garbageq);
 	} else {
 		/* kt cannot contain addresses */
@@ -1888,7 +1900,7 @@ pfr_clstats_ktable(struct pfr_ktable *kt, long tzero, int recurse)
 	if (recurse) {
 		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
-		pfr_clstats_kentries(&addrq, tzero, 0);
+		pfr_clstats_kentries(kt, &addrq, tzero, 0);
 	}
 	for (pfr_dir = 0; pfr_dir < PFR_DIR_MAX; pfr_dir ++) {
 		for (pfr_op = 0; pfr_op < PFR_OP_TABLE_MAX; pfr_op ++) {
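
As a reference for the counter(9) KPI used throughout this change, a
minimal lifecycle sketch follows (names are illustrative and error
handling is trimmed; this is not part of the commit):

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/errno.h>
	#include <sys/malloc.h>
	#include <sys/counter.h>

	static counter_u64_t pkts;

	static int
	example_init(void)
	{
		/* Per-CPU counter; M_NOWAIT may fail, as in pfr_create_kentry_counter(). */
		pkts = counter_u64_alloc(M_NOWAIT);
		if (pkts == NULL)
			return (ENOMEM);
		counter_u64_add(pkts, 1);	/* lockless per-CPU increment */
		printf("pkts: %ju\n", (uintmax_t)counter_u64_fetch(pkts));
		counter_u64_zero(pkts);		/* may smp_rendezvous() to clear all CPUs */
		counter_u64_free(pkts);
		return (0);
	}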