pf: Change pf_krule counters to use counter_u64

This improves the cache behaviour of pf and results in improved
throughput.

MFC after:	2 weeks
Sponsored by:	Orange Business Services
Differential Revision:	https://reviews.freebsd.org/D27760
This commit is contained in:
Kristof Provost 2020-12-05 21:41:42 +01:00
parent c7bdafe2f1
commit c3adacdad4
5 changed files with 86 additions and 41 deletions

View File

@@ -315,9 +315,9 @@ struct pf_krule {
TAILQ_ENTRY(pf_krule) entries;
struct pf_pool rpool;
u_int64_t evaluations;
u_int64_t packets[2];
u_int64_t bytes[2];
counter_u64_t evaluations;
counter_u64_t packets[2];
counter_u64_t bytes[2];
struct pfi_kif *kif;
struct pf_kanchor *anchor;

View File

@@ -3537,7 +3537,7 @@ pf_test_rule(struct pf_krule **rm, struct pf_state **sm, int direction,
}
while (r != NULL) {
r->evaluations++;
counter_u64_add(r->evaluations, 1);
if (pfi_kif_match(r->kif, kif) == r->ifnot)
r = r->skip[PF_SKIP_IFP].ptr;
else if (r->direction && r->direction != direction)
@@ -3977,7 +3977,7 @@ pf_test_fragment(struct pf_krule **rm, int direction, struct pfi_kif *kif,
r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
while (r != NULL) {
r->evaluations++;
counter_u64_add(r->evaluations, 1);
if (pfi_kif_match(r->kif, kif) == r->ifnot)
r = r->skip[PF_SKIP_IFP].ptr;
else if (r->direction && r->direction != direction)
@@ -6201,16 +6201,18 @@ pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *
if (action == PF_PASS || r->action == PF_DROP) {
dirndx = (dir == PF_OUT);
r->packets[dirndx]++;
r->bytes[dirndx] += pd.tot_len;
counter_u64_add(r->packets[dirndx], 1);
counter_u64_add(r->bytes[dirndx], pd.tot_len);
if (a != NULL) {
a->packets[dirndx]++;
a->bytes[dirndx] += pd.tot_len;
counter_u64_add(a->packets[dirndx], 1);
counter_u64_add(a->bytes[dirndx], pd.tot_len);
}
if (s != NULL) {
if (s->nat_rule.ptr != NULL) {
s->nat_rule.ptr->packets[dirndx]++;
s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
counter_u64_add(s->nat_rule.ptr->packets[dirndx],
1);
counter_u64_add(s->nat_rule.ptr->bytes[dirndx],
pd.tot_len);
}
if (s->src_node != NULL) {
counter_u64_add(s->src_node->packets[dirndx],
@@ -6601,16 +6603,18 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
if (action == PF_PASS || r->action == PF_DROP) {
dirndx = (dir == PF_OUT);
r->packets[dirndx]++;
r->bytes[dirndx] += pd.tot_len;
counter_u64_add(r->packets[dirndx], 1);
counter_u64_add(r->bytes[dirndx], pd.tot_len);
if (a != NULL) {
a->packets[dirndx]++;
a->bytes[dirndx] += pd.tot_len;
counter_u64_add(a->packets[dirndx], 1);
counter_u64_add(a->bytes[dirndx], pd.tot_len);
}
if (s != NULL) {
if (s->nat_rule.ptr != NULL) {
s->nat_rule.ptr->packets[dirndx]++;
s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
counter_u64_add(s->nat_rule.ptr->packets[dirndx],
1);
counter_u64_add(s->nat_rule.ptr->bytes[dirndx],
pd.tot_len);
}
if (s->src_node != NULL) {
counter_u64_add(s->src_node->packets[dirndx],

View File

@@ -283,6 +283,11 @@ pfattach_vnet(void)
V_pf_default_rule.nr = -1;
V_pf_default_rule.rtableid = -1;
V_pf_default_rule.evaluations = counter_u64_alloc(M_WAITOK);
for (int i = 0; i < 2; i++) {
V_pf_default_rule.packets[i] = counter_u64_alloc(M_WAITOK);
V_pf_default_rule.bytes[i] = counter_u64_alloc(M_WAITOK);
}
V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);
@@ -461,6 +466,11 @@ pf_free_rule(struct pf_krule *rule)
pfi_kif_unref(rule->kif);
pf_kanchor_remove(rule);
pf_empty_pool(&rule->rpool.list);
counter_u64_free(rule->evaluations);
for (int i = 0; i < 2; i++) {
counter_u64_free(rule->packets[i]);
counter_u64_free(rule->bytes[i]);
}
counter_u64_free(rule->states_cur);
counter_u64_free(rule->states_tot);
counter_u64_free(rule->src_nodes);
@@ -1453,10 +1463,10 @@ pf_krule_to_rule(const struct pf_krule *krule, struct pf_rule *rule)
bcopy(&krule->rpool, &rule->rpool, sizeof(krule->rpool));
rule->evaluations = krule->evaluations;
rule->evaluations = counter_u64_fetch(krule->evaluations);
for (int i = 0; i < 2; i++) {
rule->packets[i] = krule->packets[i];
rule->bytes[i] = krule->bytes[i];
rule->packets[i] = counter_u64_fetch(krule->packets[i]);
rule->bytes[i] = counter_u64_fetch(krule->bytes[i]);
}
/* kif, anchor, overload_tbl are not copied over. */
@@ -1808,6 +1818,11 @@ pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td
if (rule->ifname[0])
kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);
rule->evaluations = counter_u64_alloc(M_WAITOK);
for (int i = 0; i < 2; i++) {
rule->packets[i] = counter_u64_alloc(M_WAITOK);
rule->bytes[i] = counter_u64_alloc(M_WAITOK);
}
rule->states_cur = counter_u64_alloc(M_WAITOK);
rule->states_tot = counter_u64_alloc(M_WAITOK);
rule->src_nodes = counter_u64_alloc(M_WAITOK);
@@ -1920,8 +1935,11 @@ pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td
}
rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
rule->evaluations = rule->packets[0] = rule->packets[1] =
rule->bytes[0] = rule->bytes[1] = 0;
counter_u64_zero(rule->evaluations);
for (int i = 0; i < 2; i++) {
counter_u64_zero(rule->packets[i]);
counter_u64_zero(rule->bytes[i]);
}
TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
rule, entries);
ruleset->rules[rs_num].inactive.rcount++;
@@ -1931,6 +1949,11 @@ pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td
#undef ERROUT
DIOCADDRULE_error:
PF_RULES_WUNLOCK();
counter_u64_free(rule->evaluations);
for (int i = 0; i < 2; i++) {
counter_u64_free(rule->packets[i]);
counter_u64_free(rule->bytes[i]);
}
counter_u64_free(rule->states_cur);
counter_u64_free(rule->states_tot);
counter_u64_free(rule->src_nodes);
@@ -2016,9 +2039,11 @@ pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td
pf_addr_copyout(&pr->rule.dst.addr);
if (pr->action == PF_GET_CLR_CNTR) {
rule->evaluations = 0;
rule->packets[0] = rule->packets[1] = 0;
rule->bytes[0] = rule->bytes[1] = 0;
counter_u64_zero(rule->evaluations);
for (int i = 0; i < 2; i++) {
counter_u64_zero(rule->packets[i]);
counter_u64_zero(rule->bytes[i]);
}
counter_u64_zero(rule->states_tot);
}
PF_RULES_WUNLOCK();
@@ -2062,6 +2087,13 @@ pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td
if (newrule->ifname[0])
kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);
newrule->evaluations = counter_u64_alloc(M_WAITOK);
for (int i = 0; i < 2; i++) {
newrule->packets[i] =
counter_u64_alloc(M_WAITOK);
newrule->bytes[i] =
counter_u64_alloc(M_WAITOK);
}
newrule->states_cur = counter_u64_alloc(M_WAITOK);
newrule->states_tot = counter_u64_alloc(M_WAITOK);
newrule->src_nodes = counter_u64_alloc(M_WAITOK);
@@ -2174,9 +2206,6 @@ pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td
}
newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
newrule->evaluations = 0;
newrule->packets[0] = newrule->packets[1] = 0;
newrule->bytes[0] = newrule->bytes[1] = 0;
}
pf_empty_pool(&V_pf_pabuf);
@@ -2236,6 +2265,11 @@ pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td
DIOCCHANGERULE_error:
PF_RULES_WUNLOCK();
if (newrule != NULL) {
counter_u64_free(newrule->evaluations);
for (int i = 0; i < 2; i++) {
counter_u64_free(newrule->packets[i]);
counter_u64_free(newrule->bytes[i]);
}
counter_u64_free(newrule->states_cur);
counter_u64_free(newrule->states_tot);
counter_u64_free(newrule->src_nodes);
@@ -2621,9 +2655,11 @@ pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td
PF_RULES_WLOCK();
TAILQ_FOREACH(rule,
ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
rule->evaluations = 0;
rule->packets[0] = rule->packets[1] = 0;
rule->bytes[0] = rule->bytes[1] = 0;
counter_u64_zero(rule->evaluations);
for (int i = 0; i < 2; i++) {
counter_u64_zero(rule->packets[i]);
counter_u64_zero(rule->bytes[i]);
}
}
PF_RULES_WUNLOCK();
break;
@@ -4650,6 +4686,11 @@ pf_unload_vnet(void)
uma_zdestroy(V_pf_tag_z);
/* Free counters last as we updated them during shutdown. */
counter_u64_free(V_pf_default_rule.evaluations);
for (int i = 0; i < 2; i++) {
counter_u64_free(V_pf_default_rule.packets[i]);
counter_u64_free(V_pf_default_rule.bytes[i]);
}
counter_u64_free(V_pf_default_rule.states_cur);
counter_u64_free(V_pf_default_rule.states_tot);
counter_u64_free(V_pf_default_rule.src_nodes);

View File

@@ -149,7 +149,7 @@ pf_match_translation(struct pf_pdesc *pd, struct mbuf *m, int off,
dst = &r->dst;
}
r->evaluations++;
counter_u64_add(r->evaluations, 1);
if (pfi_kif_match(r->kif, kif) == r->ifnot)
r = r->skip[PF_SKIP_IFP].ptr;
else if (r->direction && r->direction != direction)

View File

@@ -1012,7 +1012,7 @@ pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason,
r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
while (r != NULL) {
r->evaluations++;
counter_u64_add(r->evaluations, 1);
if (pfi_kif_match(r->kif, kif) == r->ifnot)
r = r->skip[PF_SKIP_IFP].ptr;
else if (r->direction && r->direction != dir)
@@ -1039,8 +1039,8 @@ pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason,
if (r == NULL || r->action == PF_NOSCRUB)
return (PF_PASS);
else {
r->packets[dir == PF_OUT]++;
r->bytes[dir == PF_OUT] += pd->tot_len;
counter_u64_add(r->packets[dir == PF_OUT], 1);
counter_u64_add(r->bytes[dir == PF_OUT], pd->tot_len);
}
/* Check for illegal packets */
@@ -1155,7 +1155,7 @@ pf_normalize_ip6(struct mbuf **m0, int dir, struct pfi_kif *kif,
r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
while (r != NULL) {
r->evaluations++;
counter_u64_add(r->evaluations, 1);
if (pfi_kif_match(r->kif, kif) == r->ifnot)
r = r->skip[PF_SKIP_IFP].ptr;
else if (r->direction && r->direction != dir)
@@ -1181,8 +1181,8 @@ pf_normalize_ip6(struct mbuf **m0, int dir, struct pfi_kif *kif,
if (r == NULL || r->action == PF_NOSCRUB)
return (PF_PASS);
else {
r->packets[dir == PF_OUT]++;
r->bytes[dir == PF_OUT] += pd->tot_len;
counter_u64_add(r->packets[dir == PF_OUT], 1);
counter_u64_add(r->bytes[dir == PF_OUT], pd->tot_len);
}
/* Check for illegal packets */
@@ -1309,7 +1309,7 @@ pf_normalize_tcp(int dir, struct pfi_kif *kif, struct mbuf *m, int ipoff,
r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
while (r != NULL) {
r->evaluations++;
counter_u64_add(r->evaluations, 1);
if (pfi_kif_match(r->kif, kif) == r->ifnot)
r = r->skip[PF_SKIP_IFP].ptr;
else if (r->direction && r->direction != dir)
@@ -1343,8 +1343,8 @@ pf_normalize_tcp(int dir, struct pfi_kif *kif, struct mbuf *m, int ipoff,
if (rm == NULL || rm->action == PF_NOSCRUB)
return (PF_PASS);
else {
r->packets[dir == PF_OUT]++;
r->bytes[dir == PF_OUT] += pd->tot_len;
counter_u64_add(r->packets[dir == PF_OUT], 1);
counter_u64_add(r->bytes[dir == PF_OUT], pd->tot_len);
}
if (rm->rule_flag & PFRULE_REASSEMBLE_TCP)