CK: update consumers to use CK macros across the board

r334189 changed the fields to have names distinct from those in queue.h
in order to expose any remaining oversights as compile-time errors.
mmacy 2018-05-24 23:21:23 +00:00
parent 6d3bf5b542
commit c937b516d8
9 changed files with 44 additions and 43 deletions
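For readers unfamiliar with the CK_ variants: they come from Concurrency Kit's ck_queue.h (wrapped by sys/ck.h in the kernel) and keep the same head/entry/iterate shape as queue.h, but perform the loads and stores on the linkage pointers atomically, so lockless (e.g. epoch-protected) readers can walk a list while a single writer modifies it. The following userspace sketch is an illustration only, not code from this commit; it assumes Concurrency Kit is installed and uses a made-up struct item:

#include <ck_queue.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
	int			value;
	CK_LIST_ENTRY(item)	link;		/* was: LIST_ENTRY(item) link; */
};

static CK_LIST_HEAD(itemhead, item) head;	/* was: LIST_HEAD(itemhead, item) */

int
main(void)
{
	struct item *it;
	int i;

	CK_LIST_INIT(&head);			/* was: LIST_INIT(&head) */
	for (i = 0; i < 3; i++) {
		it = malloc(sizeof(*it));
		it->value = i;
		/* Single writer; lockless readers may traverse concurrently. */
		CK_LIST_INSERT_HEAD(&head, it, link);
	}
	/* Read-side traversal uses atomic loads of the link pointers. */
	CK_LIST_FOREACH(it, &head, link)
		printf("%d\n", it->value);
	return (0);
}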

View File

@@ -175,7 +175,7 @@ static LIST_HEAD(pmc_ownerhash, pmc_owner) *pmc_ownerhash;
* List of PMC owners with system-wide sampling PMCs.
*/
-static LIST_HEAD(, pmc_owner) pmc_ss_owners;
+static CK_LIST_HEAD(, pmc_owner) pmc_ss_owners;
/*
* List of free thread entries. This is protected by the spin
@@ -5435,7 +5435,7 @@ pmc_initialize(void)
mtx_init(&pmc_processhash_mtx, "pmc-process-hash", "pmc-leaf",
MTX_SPIN);
-LIST_INIT(&pmc_ss_owners);
+CK_LIST_INIT(&pmc_ss_owners);
pmc_ss_count = 0;
/* allocate a pool of spin mutexes */
@@ -5583,7 +5583,7 @@ pmc_cleanup(void)
pmc_ownerhash = NULL;
}
-KASSERT(LIST_EMPTY(&pmc_ss_owners),
+KASSERT(CK_LIST_EMPTY(&pmc_ss_owners),
("[pmc,%d] Global SS owner list not empty", __LINE__));
KASSERT(pmc_ss_count == 0,
("[pmc,%d] Global SS count not empty", __LINE__));

View File

@@ -496,7 +496,7 @@ lagg_clone_create(struct if_clone *ifc, int unit, caddr_t params)
lagg_proto_attach(sc, LAGG_PROTO_DEFAULT);
-SLIST_INIT(&sc->sc_ports);
+CK_SLIST_INIT(&sc->sc_ports);
/* Initialise pseudo media types */
ifmedia_init(&sc->sc_media, 0, lagg_media_change,
@@ -554,7 +554,7 @@ lagg_clone_destroy(struct ifnet *ifp)
EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
/* Shutdown and remove lagg ports */
-while ((lp = SLIST_FIRST(&sc->sc_ports)) != NULL)
+while ((lp = CK_SLIST_FIRST(&sc->sc_ports)) != NULL)
lagg_port_destroy(lp, 1);
/* Unhook the aggregation protocol */
@@ -656,7 +656,7 @@ lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
return (EPROTONOSUPPORT);
/* Allow the first Ethernet member to define the MTU */
-if (SLIST_EMPTY(&sc->sc_ports))
+if (CK_SLIST_EMPTY(&sc->sc_ports))
sc->sc_ifp->if_mtu = ifp->if_mtu;
else if (sc->sc_ifp->if_mtu != ifp->if_mtu) {
if_printf(sc->sc_ifp, "invalid MTU for %s\n",
@@ -693,7 +693,7 @@ lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
bcopy(IF_LLADDR(ifp), lp->lp_lladdr, ETHER_ADDR_LEN);
lp->lp_ifcapenable = ifp->if_capenable;
-if (SLIST_EMPTY(&sc->sc_ports)) {
+if (CK_SLIST_EMPTY(&sc->sc_ports)) {
bcopy(IF_LLADDR(ifp), IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
lagg_proto_lladdr(sc);
EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
@@ -702,7 +702,7 @@ lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
}
lagg_setflags(lp, 1);
-if (SLIST_EMPTY(&sc->sc_ports))
+if (CK_SLIST_EMPTY(&sc->sc_ports))
sc->sc_primary = lp;
/* Change the interface type */
@@ -728,16 +728,16 @@ lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
LAGG_RLOCK();
CK_SLIST_FOREACH(tlp, &sc->sc_ports, lp_entries) {
if (tlp->lp_ifp->if_index < ifp->if_index && (
-SLIST_NEXT(tlp, lp_entries) == NULL ||
-SLIST_NEXT(tlp, lp_entries)->lp_ifp->if_index >
+CK_SLIST_NEXT(tlp, lp_entries) == NULL ||
+((struct lagg_port*)CK_SLIST_NEXT(tlp, lp_entries))->lp_ifp->if_index >
ifp->if_index))
break;
}
LAGG_RUNLOCK();
if (tlp != NULL)
-SLIST_INSERT_AFTER(tlp, lp, lp_entries);
+CK_SLIST_INSERT_AFTER(tlp, lp, lp_entries);
else
-SLIST_INSERT_HEAD(&sc->sc_ports, lp, lp_entries);
+CK_SLIST_INSERT_HEAD(&sc->sc_ports, lp, lp_entries);
sc->sc_count++;
lagg_setmulti(lp);
@@ -827,7 +827,7 @@ lagg_port_destroy(struct lagg_port *lp, int rundelport)
if (lp == sc->sc_primary) {
uint8_t lladdr[ETHER_ADDR_LEN];
-if ((lp0 = SLIST_FIRST(&sc->sc_ports)) == NULL)
+if ((lp0 = CK_SLIST_FIRST(&sc->sc_ports)) == NULL)
bzero(&lladdr, ETHER_ADDR_LEN);
else
bcopy(lp0->lp_lladdr, lladdr, ETHER_ADDR_LEN);
@@ -1802,7 +1802,7 @@ lagg_link_active(struct lagg_softc *sc, struct lagg_port *lp)
rval = lp;
goto found;
}
-if ((lp_next = SLIST_NEXT(lp, lp_entries)) != NULL &&
+if ((lp_next = CK_SLIST_NEXT(lp, lp_entries)) != NULL &&
LAGG_PORTACTIVE(lp_next)) {
rval = lp_next;
goto found;
@@ -1858,10 +1858,10 @@ lagg_rr_start(struct lagg_softc *sc, struct mbuf *m)
p = atomic_fetchadd_32(&sc->sc_seq, 1);
p %= sc->sc_count;
-lp = SLIST_FIRST(&sc->sc_ports);
+lp = CK_SLIST_FIRST(&sc->sc_ports);
while (p--)
-lp = SLIST_NEXT(lp, lp_entries);
+lp = CK_SLIST_NEXT(lp, lp_entries);
/*
* Check the port's link state. This will return the next active

View File

@@ -217,7 +217,7 @@ struct lagg_softc {
uint32_t sc_flags;
int sc_destroying; /* destroying lagg */
-SLIST_HEAD(__tplhd, lagg_port) sc_ports; /* list of interfaces */
+CK_SLIST_HEAD(__tplhd, lagg_port) sc_ports; /* list of interfaces */
SLIST_ENTRY(lagg_softc) sc_entries;
eventhandler_tag vlan_attach;
@@ -251,7 +251,7 @@ struct lagg_port {
const struct sockaddr *, struct route *);
struct lagg_counters port_counters; /* ifp counters copy */
-SLIST_ENTRY(lagg_port) lp_entries;
+CK_SLIST_ENTRY(lagg_port) lp_entries;
struct epoch_context lp_epoch_ctx;
};

View File

@@ -207,7 +207,7 @@ htable_prefix_free_cb(struct lltable *llt, struct llentry *lle, void *farg)
if (llt->llt_match_prefix(pmd->addr, pmd->mask, pmd->flags, lle)) {
LLE_WLOCK(lle);
-LIST_INSERT_HEAD(&pmd->dchain, lle, lle_chain);
+CK_LIST_INSERT_HEAD(&pmd->dchain, lle, lle_chain);
}
return (0);
@@ -233,7 +233,7 @@ htable_prefix_free(struct lltable *llt, const struct sockaddr *addr,
llentries_unlink(llt, &pmd.dchain);
IF_AFDATA_WUNLOCK(llt->llt_ifp);
-LIST_FOREACH_SAFE(lle, &pmd.dchain, lle_chain, next)
+CK_LIST_FOREACH_SAFE(lle, &pmd.dchain, lle_chain, next)
lltable_free_entry(llt, lle);
}
@@ -250,7 +250,7 @@ llentries_unlink(struct lltable *llt, struct llentries *head)
{
struct llentry *lle, *next;
-LIST_FOREACH_SAFE(lle, head, lle_chain, next)
+CK_LIST_FOREACH_SAFE(lle, head, lle_chain, next)
llt->llt_unlink_entry(lle);
}
@@ -496,7 +496,7 @@ lltable_free_cb(struct lltable *llt, struct llentry *lle, void *farg)
dchain = (struct llentries *)farg;
LLE_WLOCK(lle);
-LIST_INSERT_HEAD(dchain, lle, lle_chain);
+CK_LIST_INSERT_HEAD(dchain, lle, lle_chain);
return (0);
}
@@ -521,7 +521,7 @@ lltable_free(struct lltable *llt)
llentries_unlink(llt, &dchain);
IF_AFDATA_WUNLOCK(llt->llt_ifp);
-LIST_FOREACH_SAFE(lle, &dchain, lle_chain, next) {
+CK_LIST_FOREACH_SAFE(lle, &dchain, lle_chain, next) {
if (callout_stop(&lle->lle_timer) > 0)
LLE_REMREF(lle);
llentry_free(lle);
@@ -846,7 +846,7 @@ llatbl_lle_show(struct llentry_sa *la)
lle = &la->base;
db_printf("lle=%p\n", lle);
-db_printf(" lle_next=%p\n", lle->lle_next.le_next);
+db_printf(" lle_next=%p\n", lle->lle_next.cle_next);
db_printf(" lle_lock=%p\n", &lle->lle_lock);
db_printf(" lle_tbl=%p\n", lle->lle_tbl);
db_printf(" lle_head=%p\n", lle->lle_head);

View File

@@ -35,13 +35,14 @@ __FBSDID("$FreeBSD$");
#include <sys/_rwlock.h>
#include <netinet/in.h>
#include <sys/epoch.h>
+#include <sys/ck.h>
struct ifnet;
struct sysctl_req;
struct rt_msghdr;
struct rt_addrinfo;
struct llentry;
-LIST_HEAD(llentries, llentry);
+CK_LIST_HEAD(llentries, llentry);
#define LLE_MAX_LINKHDR 24 /* Full IB header */
/*
@@ -49,7 +50,7 @@ LIST_HEAD(llentries, llentry);
* a shared lock
*/
struct llentry {
-LIST_ENTRY(llentry) lle_next;
+CK_LIST_ENTRY(llentry) lle_next;
union {
struct in_addr addr4;
struct in6_addr addr6;
@@ -77,7 +78,7 @@ struct llentry {
int lle_refcnt;
char *ll_addr; /* link-layer address */
-LIST_ENTRY(llentry) lle_chain; /* chain of deleted items */
+CK_LIST_ENTRY(llentry) lle_chain; /* chain of deleted items */
struct callout lle_timer;
struct rwlock lle_lock;
struct mtx req_mtx;

View File

@@ -238,9 +238,9 @@ typedef void (if_snd_tag_free_t)(struct m_snd_tag *);
*/
struct ifnet {
/* General book keeping of interface lists. */
-STAILQ_ENTRY(ifnet) if_link; /* all struct ifnets are chained (CK_) */
+CK_STAILQ_ENTRY(ifnet) if_link; /* all struct ifnets are chained (CK_) */
LIST_ENTRY(ifnet) if_clones; /* interfaces of a cloner */
-STAILQ_HEAD(, ifg_list) if_groups; /* linked list of groups per if (CK_) */
+CK_STAILQ_HEAD(, ifg_list) if_groups; /* linked list of groups per if (CK_) */
/* protected by if_addr_lock */
u_char if_alloctype; /* if_type at time of allocation */
@@ -452,18 +452,18 @@ struct ifg_group {
char ifg_group[IFNAMSIZ];
u_int ifg_refcnt;
void *ifg_pf_kif;
-STAILQ_HEAD(, ifg_member) ifg_members; /* (CK_) */
-STAILQ_ENTRY(ifg_group) ifg_next; /* (CK_) */
+CK_STAILQ_HEAD(, ifg_member) ifg_members; /* (CK_) */
+CK_STAILQ_ENTRY(ifg_group) ifg_next; /* (CK_) */
};
struct ifg_member {
-STAILQ_ENTRY(ifg_member) ifgm_next; /* (CK_) */
+CK_STAILQ_ENTRY(ifg_member) ifgm_next; /* (CK_) */
struct ifnet *ifgm_ifp;
};
struct ifg_list {
struct ifg_group *ifgl_group;
-STAILQ_ENTRY(ifg_list) ifgl_next; /* (CK_) */
+CK_STAILQ_ENTRY(ifg_list) ifgl_next; /* (CK_) */
};
#ifdef _SYS_EVENTHANDLER_H_

View File

@@ -93,7 +93,7 @@ in_show_in_ifaddr(struct in_ifaddr *ia)
IA_DB_RPINTF("%p", ia_hash.le_next);
IA_DB_RPINTF("%p", ia_hash.le_prev);
IA_DB_RPINTF_DPTR("%p", ia_hash.le_prev);
-IA_DB_RPINTF("%p", ia_link.stqe_next);
+IA_DB_RPINTF("%p", ia_link.cstqe_next);
IA_DB_RPINTF_PTR("%p", ia_addr);
IA_DB_RPINTF_PTR("%p", ia_dstaddr);
IA_DB_RPINTF_PTR("%p", ia_sockmask);

View File

@@ -1125,7 +1125,7 @@ in6_alloc_ifa(struct ifnet *ifp, struct in6_aliasreq *ifra, int flags)
ifa_ref(&ia->ia_ifa); /* in6_ifaddrhead */
IN6_IFADDR_WLOCK();
CK_STAILQ_INSERT_TAIL(&V_in6_ifaddrhead, ia, ia_link);
-LIST_INSERT_HEAD(IN6ADDR_HASH(&ia->ia_addr.sin6_addr), ia, ia6_hash);
+CK_LIST_INSERT_HEAD(IN6ADDR_HASH(&ia->ia_addr.sin6_addr), ia, ia6_hash);
IN6_IFADDR_WUNLOCK();
return (ia);
@@ -1334,7 +1334,7 @@ in6_unlink_ifa(struct in6_ifaddr *ia, struct ifnet *ifp)
*/
IN6_IFADDR_WLOCK();
CK_STAILQ_REMOVE(&V_in6_ifaddrhead, ia, in6_ifaddr, ia_link);
-LIST_REMOVE(ia, ia6_hash);
+CK_LIST_REMOVE(ia, ia6_hash);
IN6_IFADDR_WUNLOCK();
/*
@@ -1499,7 +1499,7 @@ in6ifa_ifwithaddr(const struct in6_addr *addr, uint32_t zoneid)
struct in6_ifaddr *ia;
IN6_IFADDR_RLOCK(&in6_ifa_tracker);
-LIST_FOREACH(ia, IN6ADDR_HASH(addr), ia6_hash) {
+CK_LIST_FOREACH(ia, IN6ADDR_HASH(addr), ia6_hash) {
if (IN6_ARE_ADDR_EQUAL(IA6_IN6(ia), addr)) {
if (zoneid != 0 &&
zoneid != ia->ia_addr.sin6_scope_id)
@@ -1676,7 +1676,7 @@ in6_localip(struct in6_addr *in6)
struct in6_ifaddr *ia;
IN6_IFADDR_RLOCK(&in6_ifa_tracker);
-LIST_FOREACH(ia, IN6ADDR_HASH(in6), ia6_hash) {
+CK_LIST_FOREACH(ia, IN6ADDR_HASH(in6), ia6_hash) {
if (IN6_ARE_ADDR_EQUAL(in6, &ia->ia_addr.sin6_addr)) {
IN6_IFADDR_RUNLOCK(&in6_ifa_tracker);
return (1);
@@ -1723,7 +1723,7 @@ in6_is_addr_deprecated(struct sockaddr_in6 *sa6)
struct in6_ifaddr *ia;
IN6_IFADDR_RLOCK(&in6_ifa_tracker);
-LIST_FOREACH(ia, IN6ADDR_HASH(&sa6->sin6_addr), ia6_hash) {
+CK_LIST_FOREACH(ia, IN6ADDR_HASH(&sa6->sin6_addr), ia6_hash) {
if (IN6_ARE_ADDR_EQUAL(IA6_IN6(ia), &sa6->sin6_addr)) {
if (ia->ia6_flags & IN6_IFF_DEPRECATED) {
IN6_IFADDR_RUNLOCK(&in6_ifa_tracker);

View File

@@ -127,7 +127,7 @@ struct in6_ifaddr {
struct sockaddr_in6 ia_dstaddr; /* space for destination addr */
struct sockaddr_in6 ia_prefixmask; /* prefix mask */
u_int32_t ia_plen; /* prefix length */
-STAILQ_ENTRY(in6_ifaddr) ia_link; /* list of IPv6 addresses */
+CK_STAILQ_ENTRY(in6_ifaddr) ia_link; /* list of IPv6 addresses */
int ia6_flags;
struct in6_addrlifetime ia6_lifetime;
@@ -142,12 +142,12 @@ struct in6_ifaddr {
/* multicast addresses joined from the kernel */
LIST_HEAD(, in6_multi_mship) ia6_memberships;
/* entry in bucket of inet6 addresses */
-LIST_ENTRY(in6_ifaddr) ia6_hash;
+CK_LIST_ENTRY(in6_ifaddr) ia6_hash;
};
/* List of in6_ifaddr's. */
-STAILQ_HEAD(in6_ifaddrhead, in6_ifaddr);
-LIST_HEAD(in6_ifaddrlisthead, in6_ifaddr);
+CK_STAILQ_HEAD(in6_ifaddrhead, in6_ifaddr);
+CK_LIST_HEAD(in6_ifaddrlisthead, in6_ifaddr);
#endif /* _KERNEL */
/* control structure to manage address selection policy */