diff --git a/sys/netpfil/ipfw/ip_fw2.c b/sys/netpfil/ipfw/ip_fw2.c
index 5415da82ce37..ab3233e0b8cc 100644
--- a/sys/netpfil/ipfw/ip_fw2.c
+++ b/sys/netpfil/ipfw/ip_fw2.c
@@ -816,7 +816,10 @@ jump_fast(struct ip_fw_chain *chain, struct ip_fw *f, int num,
 	/* make sure we do not jump backward */
 	if (jump_backwards == 0 && i <= f->rulenum)
 		i = f->rulenum + 1;
-	f_pos = ipfw_find_rule(chain, i, 0);
+	if (chain->idxmap != NULL)
+		f_pos = chain->idxmap[i];
+	else
+		f_pos = ipfw_find_rule(chain, i, 0);
 	/* update the cache */
 	if (num != IP_FW_TABLEARG) {
 		f->next_rule = (void *)(uintptr_t)f_pos;
@@ -2688,6 +2691,7 @@ vnet_ipfw_init(const void *unused)
 	rule->cmd[0].opcode = default_to_accept ? O_ACCEPT : O_DENY;
 	chain->default_rule = chain->map[0] = rule;
 	chain->id = rule->id = 1;
+	ipfw_init_skipto_cache(chain);
 
 	/* Pre-calculate rules length for legacy dump format */
 	chain->static_len = sizeof(struct ip_fw_rule0);
@@ -2750,6 +2754,7 @@ vnet_ipfw_uninit(const void *unused)
 	}
 	if (chain->map)
 		free(chain->map, M_IPFW);
+	ipfw_destroy_skipto_cache(chain);
 	IPFW_WUNLOCK(chain);
 	IPFW_UH_WUNLOCK(chain);
 	ipfw_destroy_tables(chain);
diff --git a/sys/netpfil/ipfw/ip_fw_private.h b/sys/netpfil/ipfw/ip_fw_private.h
index 641b2041f0de..70558e2ea909 100644
--- a/sys/netpfil/ipfw/ip_fw_private.h
+++ b/sys/netpfil/ipfw/ip_fw_private.h
@@ -264,6 +264,7 @@ struct ip_fw_chain {
 	int		n_rules;	/* number of static rules */
 	LIST_HEAD(nat_list, cfg_nat) nat;	/* list of nat entries */
 	void		*tablestate;	/* runtime table info */
+	int		*idxmap;	/* skipto cache of rule indexes */
 #if defined( __linux__ ) || defined( _WIN32 )
 	spinlock_t rwmtx;
 #else
@@ -275,6 +276,7 @@ struct ip_fw_chain {
 	struct ip_fw	*default_rule;
 	struct tables_config *tblcfg;	/* tables module data */
 	void		*ifcfg;		/* interface module data */
+	int		*idxmap_back;	/* standby skipto cache */
 #if defined( __linux__ ) || defined( _WIN32 )
 	spinlock_t uh_lock;
 #else
@@ -495,6 +497,8 @@ void ipfw_iface_del_notify(struct ip_fw_chain *ch, struct ipfw_ifc *ic);
 int ipfw_list_ifaces(struct ip_fw_chain *ch, struct sockopt_data *sd);
 
 /* In ip_fw_sockopt.c */
+void ipfw_init_skipto_cache(struct ip_fw_chain *chain);
+void ipfw_destroy_skipto_cache(struct ip_fw_chain *chain);
 int ipfw_find_rule(struct ip_fw_chain *chain, uint32_t key, uint32_t id);
 int ipfw_ctl(struct sockopt *sopt);
 int ipfw_ctl3(struct sockopt *sopt);
diff --git a/sys/netpfil/ipfw/ip_fw_sockopt.c b/sys/netpfil/ipfw/ip_fw_sockopt.c
index bbab77d70c92..3302a83f46f2 100644
--- a/sys/netpfil/ipfw/ip_fw_sockopt.c
+++ b/sys/netpfil/ipfw/ip_fw_sockopt.c
@@ -197,6 +197,104 @@ ipfw_find_rule(struct ip_fw_chain *chain, uint32_t key, uint32_t id)
 	return hi;
 }
 
+/*
+ * Builds the skipto cache for rule set @map.
+ */
+static void
+update_skipto_cache(struct ip_fw_chain *chain, struct ip_fw **map)
+{
+	int *smap, rulenum;
+	int i, mi;
+
+	IPFW_UH_WLOCK_ASSERT(chain);
+
+	mi = 0;
+	rulenum = map[mi]->rulenum;
+	smap = chain->idxmap_back;
+
+	if (smap == NULL)
+		return;
+
+	for (i = 0; i < 65536; i++) {
+		smap[i] = mi;
+		/* Use the same rule index while i < rulenum */
+		if (i != rulenum || i == 65535)
+			continue;
+		/* Find next rule with num > i */
+		rulenum = map[++mi]->rulenum;
+		while (rulenum == i)
+			rulenum = map[++mi]->rulenum;
+	}
+}
+
+/*
+ * Swaps the prepared (backup) index with the current one.
+ */
+static void
+swap_skipto_cache(struct ip_fw_chain *chain)
+{
+	int *map;
+
+	IPFW_UH_WLOCK_ASSERT(chain);
+	IPFW_WLOCK_ASSERT(chain);
+
+	map = chain->idxmap;
+	chain->idxmap = chain->idxmap_back;
+	chain->idxmap_back = map;
+}
+
+/*
+ * Allocates and initializes the skipto cache.
+ */
+void
+ipfw_init_skipto_cache(struct ip_fw_chain *chain)
+{
+	int *idxmap, *idxmap_back;
+
+	idxmap = malloc(65536 * sizeof(*idxmap), M_IPFW,
+	    M_WAITOK | M_ZERO);
+	idxmap_back = malloc(65536 * sizeof(*idxmap_back), M_IPFW,
+	    M_WAITOK | M_ZERO);
+
+	/*
+	 * Note we may be called at any time after initialization,
+	 * for example, on the first skipto rule, so we need to
+	 * provide a valid chain->idxmap on return.
+	 */
+
+	IPFW_UH_WLOCK(chain);
+	if (chain->idxmap != NULL) {
+		IPFW_UH_WUNLOCK(chain);
+		free(idxmap, M_IPFW);
+		free(idxmap_back, M_IPFW);
+		return;
+	}
+
+	/* Set backup pointer first to permit building cache */
+	chain->idxmap_back = idxmap_back;
+	update_skipto_cache(chain, chain->map);
+	IPFW_WLOCK(chain);
+	/* It is now safe to set chain->idxmap ptr */
+	chain->idxmap = idxmap;
+	swap_skipto_cache(chain);
+	IPFW_WUNLOCK(chain);
+	IPFW_UH_WUNLOCK(chain);
+}
+
+/*
+ * Destroys the skipto cache.
+ */
+void
+ipfw_destroy_skipto_cache(struct ip_fw_chain *chain)
+{
+
+	if (chain->idxmap != NULL)
+		free(chain->idxmap, M_IPFW);
+	if (chain->idxmap_back != NULL)
+		free(chain->idxmap_back, M_IPFW);
+}
+
+
 /*
  * allocate a new map, returns the chain locked. extra is the number
  * of entries to add or delete.
@@ -240,6 +338,7 @@ swap_map(struct ip_fw_chain *chain, struct ip_fw **new_map, int new_len)
 	chain->n_rules = new_len;
 	old_map = chain->map;
 	chain->map = new_map;
+	swap_skipto_cache(chain);
 	IPFW_WUNLOCK(chain);
 	return old_map;
 }
@@ -498,6 +597,7 @@ commit_rules(struct ip_fw_chain *chain, struct rule_check_info *rci, int count)
 	}
 
 	krule->id = chain->id + 1;
+	update_skipto_cache(chain, map);
 	map = swap_map(chain, map, chain->n_rules + 1);
 	chain->static_len += RULEUSIZE0(krule);
 	IPFW_UH_WUNLOCK(chain);
@@ -668,7 +768,9 @@ del_entry(struct ip_fw_chain *chain, uint32_t arg)
 	/* 3. copy the final part of the map */
 	bcopy(chain->map + end, map + ofs,
 	    (chain->n_rules - end) * sizeof(struct ip_fw *));
-	/* 4. swap the maps (under BH_LOCK) */
+	/* 3.5. recalculate skipto cache */
+	update_skipto_cache(chain, map);
+	/* 4. swap the maps (under UH_WLOCK + WLOCK) */
 	map = swap_map(chain, map, chain->n_rules - n);
 	/* 5. now remove the rules deleted from the old map */
 	if (cmd == 1)
@@ -726,7 +828,6 @@ del_entry(struct ip_fw_chain *chain, uint32_t arg)
 	free(map, M_IPFW);
 	return error;
 }
-
 /*
  * Clear counters for a specific rule.
  * Normally run under IPFW_UH_RLOCK, but these are idempotent ops
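
Note: the userland sketch below illustrates the double-buffered index map this
patch adds; the names (struct demo_chain, build_idxmap(), swap_idxmap()) are
hypothetical and the kernel locking (IPFW_WLOCK/IPFW_UH_WLOCK) is omitted.
update_skipto_cache() fills a standby array mapping every possible rule number
(0..65535) to the position of the first rule with an equal or greater number,
and swap_skipto_cache() publishes it with a pointer swap, so jump_fast() can
replace the O(log n) binary search of ipfw_find_rule() with one O(1) lookup.

#include <stdio.h>
#include <stdlib.h>

#define NUMSLOTS	65536	/* rule numbers are 16 bit */

struct demo_chain {
	int	*rulenums;	/* sorted rule numbers; last is 65535 */
	int	*idxmap;	/* active map: rule number -> rule index */
	int	*idxmap_back;	/* standby map, rebuilt off-line */
};

/* Counterpart of update_skipto_cache(): fill the standby map. */
static void
build_idxmap(struct demo_chain *ch)
{
	int i, mi, rulenum;

	mi = 0;
	rulenum = ch->rulenums[mi];
	for (i = 0; i < NUMSLOTS; i++) {
		ch->idxmap_back[i] = mi;
		/* Use the same rule index while i < rulenum */
		if (i != rulenum || i == NUMSLOTS - 1)
			continue;
		/* Find next rule with num > i */
		rulenum = ch->rulenums[++mi];
		while (rulenum == i)
			rulenum = ch->rulenums[++mi];
	}
}

/* Counterpart of swap_skipto_cache(): publish the standby map. */
static void
swap_idxmap(struct demo_chain *ch)
{
	int *map;

	map = ch->idxmap;
	ch->idxmap = ch->idxmap_back;
	ch->idxmap_back = map;
}

int
main(void)
{
	/* Rule numbers 100, 200, 200, 300 plus the default rule. */
	int rulenums[] = { 100, 200, 200, 300, 65535 };
	struct demo_chain ch;

	ch.rulenums = rulenums;
	ch.idxmap = calloc(NUMSLOTS, sizeof(*ch.idxmap));
	ch.idxmap_back = calloc(NUMSLOTS, sizeof(*ch.idxmap_back));

	build_idxmap(&ch);
	swap_idxmap(&ch);

	/* "skipto 150" resolves to rule 200 (index 1) in O(1). */
	printf("skipto 150 -> index %d\n", ch.idxmap[150]);
	/* "skipto 300" resolves to the first rule numbered 300 (index 3). */
	printf("skipto 300 -> index %d\n", ch.idxmap[300]);

	free(ch.idxmap);
	free(ch.idxmap_back);
	return (0);
}

Keeping a standby copy means the full 65536-entry rebuild runs outside the
per-packet path; in the kernel code only the pointer swap itself is done
under IPFW_WLOCK, which is why swap_map() and commit_rules() can update the
cache cheaply on every rule set change.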