Merge: r258322 from projects/pf branch

Split functions that initialize various pf parts into their vimage
parts and global parts.
Since global parts appeared to be only mutex initializations, just
abandon them and use MTX_SYSINIT() instead.
Kill my incorrect VNET_FOREACH() iterator and instead use the correct
approach with VNET_SYSINIT().

PR:			194515
Differential Revision:	D1309
Submitted by: 		glebius, Nikos Vassiliadis <nvass@gmx.com>
Reviewed by: 		trociny, zec, gnn
Craig Rodrigues	2015-01-06 08:39:06 +00:00
commit c75820c756, parent bc6ee3cc93
Notes (svn2git, 2020-12-20 02:59:44 +00:00): svn path=/head/; revision=276746
6 changed files with 53 additions and 48 deletions
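
The log message leans on two stock kernel facilities. MTX_SYSINIT() arranges for a mutex to be initialized automatically during boot, removing the need for a hand-written "global init" function whose only job was mtx_init(). VNET_SYSINIT() registers a constructor that the VIMAGE framework runs for the default vnet at boot and again for every vnet created later, which is the correct replacement for iterating over vnets by hand with VNET_FOREACH(). Below is a minimal illustrative sketch of both; it is not part of this commit and the example_* names are hypothetical.

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <net/vnet.h>

/* Global part: one mutex for the whole system, initialized at boot. */
static struct mtx example_mtx;
MTX_SYSINIT(example_mtx, &example_mtx, "example global mutex", MTX_DEF);

/* Per-vnet part: every vnet gets its own copy of the counter. */
static VNET_DEFINE(int, example_counter);
#define	V_example_counter	VNET(example_counter)

static void
vnet_example_init(void *unused __unused)
{

	/* Runs in the context of each vnet as it is created. */
	V_example_counter = 0;
}
VNET_SYSINIT(vnet_example_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY,
    vnet_example_init, NULL);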

View File

@@ -829,7 +829,6 @@ typedef int pflog_packet_t(struct pfi_kif *, struct mbuf *, sa_family_t,
struct pf_ruleset *, struct pf_pdesc *, int);
extern pflog_packet_t *pflog_packet_ptr;
#define V_pf_end_threads VNET(pf_end_threads)
#endif /* _KERNEL */
#define PFSYNC_FLAG_SRCNODE 0x04

View File

@@ -290,8 +290,6 @@ static void pf_route6(struct mbuf **, struct pf_rule *, int,
int in4_cksum(struct mbuf *m, u_int8_t nxt, int off, int len);
VNET_DECLARE(int, pf_end_threads);
VNET_DEFINE(struct pf_limit, pf_limits[PF_LIMIT_MAX]);
#define PACKET_LOOPED(pd) ((pd)->pf_mtag && \

View File

@@ -115,7 +115,8 @@ pfi_initialize(void)
V_pfi_buffer = malloc(V_pfi_buffer_max * sizeof(*V_pfi_buffer),
PFI_MTYPE, M_WAITOK);
mtx_init(&pfi_unlnkdkifs_mtx, "pf unlinked interfaces", NULL, MTX_DEF);
if (IS_DEFAULT_VNET(curvnet))
mtx_init(&pfi_unlnkdkifs_mtx, "pf unlinked interfaces", NULL, MTX_DEF);
kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK);
PF_RULES_WLOCK();
@@ -129,18 +130,20 @@ pfi_initialize(void)
pfi_attach_ifnet(ifp);
IFNET_RUNLOCK();
pfi_attach_cookie = EVENTHANDLER_REGISTER(ifnet_arrival_event,
pfi_attach_ifnet_event, NULL, EVENTHANDLER_PRI_ANY);
pfi_detach_cookie = EVENTHANDLER_REGISTER(ifnet_departure_event,
pfi_detach_ifnet_event, NULL, EVENTHANDLER_PRI_ANY);
pfi_attach_group_cookie = EVENTHANDLER_REGISTER(group_attach_event,
pfi_attach_group_event, curvnet, EVENTHANDLER_PRI_ANY);
pfi_change_group_cookie = EVENTHANDLER_REGISTER(group_change_event,
pfi_change_group_event, curvnet, EVENTHANDLER_PRI_ANY);
pfi_detach_group_cookie = EVENTHANDLER_REGISTER(group_detach_event,
pfi_detach_group_event, curvnet, EVENTHANDLER_PRI_ANY);
pfi_ifaddr_event_cookie = EVENTHANDLER_REGISTER(ifaddr_event,
pfi_ifaddr_event, NULL, EVENTHANDLER_PRI_ANY);
if (IS_DEFAULT_VNET(curvnet)) {
pfi_attach_cookie = EVENTHANDLER_REGISTER(ifnet_arrival_event,
pfi_attach_ifnet_event, NULL, EVENTHANDLER_PRI_ANY);
pfi_detach_cookie = EVENTHANDLER_REGISTER(ifnet_departure_event,
pfi_detach_ifnet_event, NULL, EVENTHANDLER_PRI_ANY);
pfi_attach_group_cookie = EVENTHANDLER_REGISTER(group_attach_event,
pfi_attach_group_event, curvnet, EVENTHANDLER_PRI_ANY);
pfi_change_group_cookie = EVENTHANDLER_REGISTER(group_change_event,
pfi_change_group_event, curvnet, EVENTHANDLER_PRI_ANY);
pfi_detach_group_cookie = EVENTHANDLER_REGISTER(group_detach_event,
pfi_detach_group_event, curvnet, EVENTHANDLER_PRI_ANY);
pfi_ifaddr_event_cookie = EVENTHANDLER_REGISTER(ifaddr_event,
pfi_ifaddr_event, NULL, EVENTHANDLER_PRI_ANY);
}
}
void

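The pfi_initialize() hunks above show the split in practice: the per-vnet allocations and the interface scan still run for every vnet, while the unlinked-kifs mutex and the event-handler registrations are now done only when the default vnet is being initialized, i.e. exactly once at boot. A hedged sketch of that shape, with hypothetical example_* names:

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <net/vnet.h>

static struct mtx example_mtx;			/* shared across all vnets */
static VNET_DEFINE(void *, example_buffer);	/* one instance per vnet */
#define	V_example_buffer	VNET(example_buffer)

void
example_initialize(void)
{

	/* Per-vnet part: runs every time a vnet is set up. */
	V_example_buffer = malloc(PAGE_SIZE, M_TEMP, M_WAITOK | M_ZERO);

	/* Global part: boot-time (default vnet) initialization only. */
	if (IS_DEFAULT_VNET(curvnet))
		mtx_init(&example_mtx, "example mutex", NULL, MTX_DEF);
}

The same IS_DEFAULT_VNET(curvnet) guard reappears below around kproc_create() in pfattach() and around mtx_init() in pf_normalize_init().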
View File

@@ -189,7 +189,6 @@ static struct cdevsw pf_cdevsw = {
static volatile VNET_DEFINE(int, pf_pfil_hooked);
#define V_pf_pfil_hooked VNET(pf_pfil_hooked)
VNET_DEFINE(int, pf_end_threads);
struct rwlock pf_rules_lock;
struct sx pf_ioctl_lock;
@@ -276,10 +275,13 @@ pfattach(void)
for (int i = 0; i < SCNT_MAX; i++)
V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);
if ((error = kproc_create(pf_purge_thread, curvnet, NULL, 0, 0,
"pf purge")) != 0)
/* XXXGL: leaked all above. */
return (error);
if (IS_DEFAULT_VNET(curvnet)) {
if ((error = kproc_create(pf_purge_thread, curvnet, NULL, 0, 0,
"pf purge")) != 0) {
/* XXXGL: leaked all above. */
return (error);
}
}
if ((error = swi_add(NULL, "pf send", pf_intr, curvnet, SWI_NET,
INTR_MPSAFE, &V_pf_swi_cookie)) != 0)
/* XXXGL: leaked all above. */
@@ -3759,11 +3761,6 @@ pf_unload(void)
}
PF_RULES_WLOCK();
shutdown_pf();
V_pf_end_threads = 1;
while (V_pf_end_threads < 2) {
wakeup_one(pf_purge_thread);
rw_sleep(pf_purge_thread, &pf_rules_lock, 0, "pftmo", 0);
}
pf_normalize_cleanup();
pfi_cleanup();
pfr_cleanup();
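
The lines removed from pf_unload() above are one half of a common kthread shutdown handshake: the unload path raises the flag to 1, wakes the purge thread, and sleeps until the thread bumps the flag past 1 to acknowledge that it has stopped. For context, the worker side of such a handshake conventionally looks like the hypothetical sketch below; this is not pf's actual purge-thread code.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

static struct rwlock example_lock;
RW_SYSINIT(example_lock, &example_lock, "example lock");

/* 0: running, 1: shutdown requested, 2: worker has acknowledged. */
static int example_end;

static void
example_thread(void *arg __unused)
{

	rw_wlock(&example_lock);
	while (example_end == 0) {
		/* ... periodic work, then sleep for a while ... */
		rw_sleep(example_thread, &example_lock, 0, "exwork", hz);
	}
	example_end = 2;	/* tell the unloading side we are done */
	wakeup(example_thread);
	rw_wunlock(&example_lock);
	kproc_exit(0);
}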
@@ -3813,3 +3810,6 @@ static moduledata_t pf_mod = {
DECLARE_MODULE(pf, pf_mod, SI_SUB_PSEUDO, SI_ORDER_FIRST);
MODULE_VERSION(pf, PF_MODVER);
VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY - 255,
vnet_pf_init, NULL);
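
The VNET_SYSINIT() registration added here is what replaces the VNET_FOREACH() loop mentioned in the log message: the framework invokes vnet_pf_init() for the default vnet at boot and for every vnet created afterwards, at the SI_SUB_PROTO_IFATTACHDOMAIN stage. The body of vnet_pf_init() is not visible in this hunk. For symmetry, per-vnet teardown can be registered the same way with VNET_SYSUNINIT(); a hypothetical sketch, not part of this commit:

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <net/vnet.h>

static VNET_DEFINE(void *, example_buffer);
#define	V_example_buffer	VNET(example_buffer)

static void
vnet_example_uninit(void *unused __unused)
{

	/* Per-vnet cleanup: release what the matching constructor set up. */
	free(V_example_buffer, M_TEMP);
	V_example_buffer = NULL;
}
VNET_SYSUNINIT(vnet_example_uninit, SI_SUB_PROTO_IFATTACHDOMAIN,
    SI_ORDER_ANY - 255, vnet_example_uninit, NULL);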

View File

@@ -162,7 +162,8 @@ pf_normalize_init(void)
uma_zone_set_max(V_pf_frent_z, PFFRAG_FRENT_HIWAT);
uma_zone_set_warning(V_pf_frent_z, "PF frag entries limit reached");
mtx_init(&pf_frag_mtx, "pf fragments", NULL, MTX_DEF);
if (IS_DEFAULT_VNET(curvnet))
mtx_init(&pf_frag_mtx, "pf fragments", NULL, MTX_DEF);
TAILQ_INIT(&V_pf_fragqueue);
TAILQ_INIT(&V_pf_cachequeue);

View File

@@ -184,9 +184,13 @@ static struct pfr_kentry
static RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
static RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
struct pfr_ktablehead pfr_ktables;
VNET_DEFINE(struct pfr_ktablehead, pfr_ktables);
#define V_pfr_ktables VNET(pfr_ktables)
struct pfr_table pfr_nulltable;
int pfr_ktable_cnt;
VNET_DEFINE(int, pfr_ktable_cnt);
#define V_pfr_ktable_cnt VNET(pfr_ktable_cnt)
void
pfr_initialize(void)
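
With pfr_ktables and pfr_ktable_cnt converted to VNET_DEFINE() above, the rest of the file changes mechanically: every reference goes through the V_ accessor so that each vnet operates on its own table tree and counter, as the hunks below show. The conversion pattern for such a global, with hypothetical names, is roughly:

#include <sys/param.h>
#include <net/vnet.h>

/* Before: a single instance shared by the whole kernel. */
/* int example_cnt; */

/* After: one instance per vnet, always accessed through the V_ macro. */
static VNET_DEFINE(int, example_cnt);
#define	V_example_cnt	VNET(example_cnt)

static void
example_increment(void)
{

	/* Call sites change from example_cnt++ to V_example_cnt++. */
	V_example_cnt++;
}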
@@ -1083,7 +1087,7 @@ pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
return (ENOENT);
SLIST_INIT(&workq);
RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
if (pfr_skip_table(filter, p, flags))
continue;
if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
@@ -1118,7 +1122,7 @@ pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
flags & PFR_FLAG_USERIOCTL))
senderr(EINVAL);
key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
if (p == NULL) {
p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
if (p == NULL)
@@ -1134,7 +1138,7 @@ pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
/* find or create root table */
bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor));
r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
r = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
if (r != NULL) {
p->pfrkt_root = r;
goto _skip;
@@ -1190,7 +1194,7 @@ pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
if (pfr_validate_table(&key.pfrkt_t, 0,
flags & PFR_FLAG_USERIOCTL))
return (EINVAL);
p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
SLIST_FOREACH(q, &workq, pfrkt_workq)
if (!pfr_ktable_compare(p, q))
@@ -1229,7 +1233,7 @@ pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
*size = n;
return (0);
}
RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
if (pfr_skip_table(filter, p, flags))
continue;
if (n-- <= 0)
@@ -1264,7 +1268,7 @@ pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
return (0);
}
SLIST_INIT(&workq);
RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
if (pfr_skip_table(filter, p, flags))
continue;
if (n-- <= 0)
@@ -1296,7 +1300,7 @@ pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t));
if (pfr_validate_table(&key.pfrkt_t, 0, 0))
return (EINVAL);
p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
if (p != NULL) {
SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
xzero++;
@@ -1328,7 +1332,7 @@ pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
if (pfr_validate_table(&key.pfrkt_t, 0,
flags & PFR_FLAG_USERIOCTL))
return (EINVAL);
p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
~clrflag;
@@ -1370,7 +1374,7 @@ pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
if (rs == NULL)
return (ENOMEM);
SLIST_INIT(&workq);
RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
pfr_skip_table(trs, p, 0))
continue;
@@ -1415,7 +1419,7 @@ pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
return (EBUSY);
tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
SLIST_INIT(&tableq);
kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
kt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, (struct pfr_ktable *)tbl);
if (kt == NULL) {
kt = pfr_create_ktable(tbl, 0, 1);
if (kt == NULL)
@@ -1428,7 +1432,7 @@ pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
/* find or create root table */
bzero(&key, sizeof(key));
strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
rt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key);
if (rt != NULL) {
kt->pfrkt_root = rt;
goto _skip;
@@ -1505,7 +1509,7 @@ pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
if (rs == NULL || !rs->topen || ticket != rs->tticket)
return (0);
SLIST_INIT(&workq);
RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
pfr_skip_table(trs, p, 0))
continue;
@@ -1541,7 +1545,7 @@ pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
return (EBUSY);
SLIST_INIT(&workq);
RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) {
if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
pfr_skip_table(trs, p, 0))
continue;
@@ -1687,7 +1691,7 @@ pfr_table_count(struct pfr_table *filter, int flags)
PF_RULES_ASSERT();
if (flags & PFR_FLAG_ALLRSETS)
return (pfr_ktable_cnt);
return (V_pfr_ktable_cnt);
if (filter->pfrt_anchor[0]) {
rs = pf_find_ruleset(filter->pfrt_anchor);
return ((rs != NULL) ? rs->tables : -1);
@@ -1720,8 +1724,8 @@ pfr_insert_ktable(struct pfr_ktable *kt)
PF_RULES_WASSERT();
RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
pfr_ktable_cnt++;
RB_INSERT(pfr_ktablehead, &V_pfr_ktables, kt);
V_pfr_ktable_cnt++;
if (kt->pfrkt_root != NULL)
if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
pfr_setflags_ktable(kt->pfrkt_root,
@@ -1752,14 +1756,14 @@ pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
if (!(newf & PFR_TFLAG_ACTIVE))
newf &= ~PFR_TFLAG_USRMASK;
if (!(newf & PFR_TFLAG_SETMASK)) {
RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
RB_REMOVE(pfr_ktablehead, &V_pfr_ktables, kt);
if (kt->pfrkt_root != NULL)
if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
pfr_setflags_ktable(kt->pfrkt_root,
kt->pfrkt_root->pfrkt_flags &
~PFR_TFLAG_REFDANCHOR);
pfr_destroy_ktable(kt, 1);
pfr_ktable_cnt--;
V_pfr_ktable_cnt--;
return;
}
if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
@@ -1880,7 +1884,7 @@ static struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
/* struct pfr_ktable start like a struct pfr_table */
return (RB_FIND(pfr_ktablehead, &pfr_ktables,
return (RB_FIND(pfr_ktablehead, &V_pfr_ktables,
(struct pfr_ktable *)tbl));
}