Use new struct mbufq instead of struct ifqueue to manage packet queues in
IPv4 multicast code.

Sponsored by:	Netflix
Sponsored by:	Nginx, Inc.
commit 058e08bea9
parent c578b6aca0
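For readers unfamiliar with the mbufq API this commit adopts, the sketch below shows how the mbufq calls that appear throughout the diff line up with the legacy ifqueue macros they replace. It is an illustrative sketch only, not part of the commit: the helper function name, the queue variable "q", and the limit of 8 packets are hypothetical, and a standard FreeBSD kernel environment is assumed.

/*
 * Hypothetical usage sketch (not part of this commit): the mbufq calls
 * used in the diff, with the ifqueue macros they replace in comments.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>

static void
mbufq_usage_sketch(struct mbuf *m)
{
	struct mbufq q;

	mbufq_init(&q, 8);		/* was: memset() + IFQ_SET_MAXLEN() */

	if (!mbufq_full(&q))		/* was: _IF_QFULL() */
		(void)mbufq_enqueue(&q, m);	/* was: _IF_ENQUEUE() */

	if (mbufq_first(&q) != NULL)	/* was: inspecting ifq_head */
		printf("tail is %p\n", mbufq_last(&q));	/* was: ifq_tail */

	/* was: for (;;) { _IF_DEQUEUE(&q, m); if (m == NULL) break; ... } */
	while ((m = mbufq_dequeue(&q)) != NULL)
		m_freem(m);

	mbufq_drain(&q);		/* was: _IF_DRAIN() */
}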
@@ -87,7 +87,7 @@ __FBSDID("$FreeBSD$");
 static struct igmp_ifinfo *
                 igi_alloc_locked(struct ifnet *);
 static void     igi_delete_locked(const struct ifnet *);
-static void     igmp_dispatch_queue(struct ifqueue *, int, const int);
+static void     igmp_dispatch_queue(struct mbufq *, int, const int);
 static void     igmp_fasttimo_vnet(void);
 static void     igmp_final_leave(struct in_multi *, struct igmp_ifinfo *);
 static int      igmp_handle_state_change(struct in_multi *,
@@ -122,15 +122,15 @@ static void igmp_v3_cancel_link_timers(struct igmp_ifinfo *);
 static void     igmp_v3_dispatch_general_query(struct igmp_ifinfo *);
 static struct mbuf *
                 igmp_v3_encap_report(struct ifnet *, struct mbuf *);
-static int      igmp_v3_enqueue_group_record(struct ifqueue *,
+static int      igmp_v3_enqueue_group_record(struct mbufq *,
                     struct in_multi *, const int, const int, const int);
-static int      igmp_v3_enqueue_filter_change(struct ifqueue *,
+static int      igmp_v3_enqueue_filter_change(struct mbufq *,
                     struct in_multi *);
 static void     igmp_v3_process_group_timers(struct igmp_ifinfo *,
-                    struct ifqueue *, struct ifqueue *, struct in_multi *,
+                    struct mbufq *, struct mbufq *, struct in_multi *,
                     const int);
 static int      igmp_v3_merge_state_changes(struct in_multi *,
-                    struct ifqueue *);
+                    struct mbufq *);
 static void     igmp_v3_suppress_group_record(struct in_multi *);
 static int      sysctl_igmp_default_version(SYSCTL_HANDLER_ARGS);
 static int      sysctl_igmp_gsr(SYSCTL_HANDLER_ARGS);
@@ -475,15 +475,12 @@ sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS)
  * VIMAGE: Assumes the vnet pointer has been set.
  */
 static void
-igmp_dispatch_queue(struct ifqueue *ifq, int limit, const int loop)
+igmp_dispatch_queue(struct mbufq *mq, int limit, const int loop)
 {
         struct mbuf *m;
 
-        for (;;) {
-                _IF_DEQUEUE(ifq, m);
-                if (m == NULL)
-                        break;
-                CTR3(KTR_IGMPV3, "%s: dispatch %p from %p", __func__, ifq, m);
+        while ((m = mbufq_dequeue(mq)) != NULL) {
+                CTR3(KTR_IGMPV3, "%s: dispatch %p from %p", __func__, mq, m);
                 if (loop)
                         m->m_flags |= M_IGMP_LOOP;
                 netisr_dispatch(NETISR_IGMP, m);
@@ -579,13 +576,8 @@ igi_alloc_locked(/*const*/ struct ifnet *ifp)
         igi->igi_qi = IGMP_QI_INIT;
         igi->igi_qri = IGMP_QRI_INIT;
         igi->igi_uri = IGMP_URI_INIT;
-
         SLIST_INIT(&igi->igi_relinmhead);
-
-        /*
-         * Responses to general queries are subject to bounds.
-         */
-        IFQ_SET_MAXLEN(&igi->igi_gq, IGMP_MAX_RESPONSE_PACKETS);
+        mbufq_init(&igi->igi_gq, IGMP_MAX_RESPONSE_PACKETS);
 
         LIST_INSERT_HEAD(&V_igi_head, igi, igi_link);
 
@@ -683,7 +675,7 @@ igi_delete_locked(const struct ifnet *ifp)
                         /*
                          * Free deferred General Query responses.
                          */
-                        _IF_DRAIN(&igi->igi_gq);
+                        mbufq_drain(&igi->igi_gq);
 
                         LIST_REMOVE(igi, igi_link);
 
@@ -1643,8 +1635,8 @@ igmp_fasttimo(void)
 static void
 igmp_fasttimo_vnet(void)
 {
-        struct ifqueue           scq;   /* State-change packets */
-        struct ifqueue           qrq;   /* Query response packets */
+        struct mbufq             scq;   /* State-change packets */
+        struct mbufq             qrq;   /* Query response packets */
         struct ifnet            *ifp;
         struct igmp_ifinfo      *igi;
         struct ifmultiaddr      *ifma;
@@ -1705,12 +1697,8 @@ igmp_fasttimo_vnet(void)
                         loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0;
                         uri_fasthz = IGMP_RANDOM_DELAY(igi->igi_uri *
                             PR_FASTHZ);
-
-                        memset(&qrq, 0, sizeof(struct ifqueue));
-                        IFQ_SET_MAXLEN(&qrq, IGMP_MAX_G_GS_PACKETS);
-
-                        memset(&scq, 0, sizeof(struct ifqueue));
-                        IFQ_SET_MAXLEN(&scq, IGMP_MAX_STATE_CHANGE_PACKETS);
+                        mbufq_init(&qrq, IGMP_MAX_G_GS_PACKETS);
+                        mbufq_init(&scq, IGMP_MAX_STATE_CHANGE_PACKETS);
                 }
 
                 IF_ADDR_RLOCK(ifp);
@@ -1809,7 +1797,7 @@ igmp_v1v2_process_group_timer(struct in_multi *inm, const int version)
  */
 static void
 igmp_v3_process_group_timers(struct igmp_ifinfo *igi,
-    struct ifqueue *qrq, struct ifqueue *scq,
+    struct mbufq *qrq, struct mbufq *scq,
     struct in_multi *inm, const int uri_fasthz)
 {
         int query_response_timer_expired;
@@ -2071,7 +2059,7 @@ igmp_v3_cancel_link_timers(struct igmp_ifinfo *igi)
                  */
                 inm->inm_sctimer = 0;
                 inm->inm_timer = 0;
-                _IF_DRAIN(&inm->inm_scq);
+                mbufq_drain(&inm->inm_scq);
         }
         IF_ADDR_RUNLOCK(ifp);
         SLIST_FOREACH_SAFE(inm, &igi->igi_relinmhead, inm_nrele, tinm) {
@@ -2344,7 +2332,7 @@ static int
 igmp_initial_join(struct in_multi *inm, struct igmp_ifinfo *igi)
 {
         struct ifnet            *ifp;
-        struct ifqueue          *ifq;
+        struct mbufq            *mq;
         int                      error, retval, syncstates;
 
         CTR4(KTR_IGMPV3, "%s: initial join %s on ifp %p(%s)",
@@ -2418,9 +2406,9 @@ igmp_initial_join(struct in_multi *inm, struct igmp_ifinfo *igi)
                          * Don't kick the timers if there is nothing to do,
                          * or if an error occurred.
                          */
-                        ifq = &inm->inm_scq;
-                        _IF_DRAIN(ifq);
-                        retval = igmp_v3_enqueue_group_record(ifq, inm, 1,
+                        mq = &inm->inm_scq;
+                        mbufq_drain(mq);
+                        retval = igmp_v3_enqueue_group_record(mq, inm, 1,
                             0, 0);
                         CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
                             __func__, retval);
@@ -2500,7 +2488,7 @@ igmp_handle_state_change(struct in_multi *inm, struct igmp_ifinfo *igi)
                 return (0);
         }
 
-        _IF_DRAIN(&inm->inm_scq);
+        mbufq_drain(&inm->inm_scq);
 
         retval = igmp_v3_enqueue_group_record(&inm->inm_scq, inm, 1, 0, 0);
         CTR2(KTR_IGMPV3, "%s: enqueue record = %d", __func__, retval);
@@ -2569,7 +2557,7 @@ igmp_final_leave(struct in_multi *inm, struct igmp_ifinfo *igi)
                          * TO_IN {} to be sent on the next fast timeout,
                          * giving us an opportunity to merge reports.
                          */
-                        _IF_DRAIN(&inm->inm_scq);
+                        mbufq_drain(&inm->inm_scq);
                         inm->inm_timer = 0;
                         if (igi->igi_flags & IGIF_LOOPBACK) {
                                 inm->inm_scrv = 1;
@@ -2647,7 +2635,7 @@ igmp_final_leave(struct in_multi *inm, struct igmp_ifinfo *igi)
  * no record(s) were appended.
  */
 static int
-igmp_v3_enqueue_group_record(struct ifqueue *ifq, struct in_multi *inm,
+igmp_v3_enqueue_group_record(struct mbufq *mq, struct in_multi *inm,
     const int is_state_change, const int is_group_query,
     const int is_source_query)
 {
@@ -2737,7 +2725,7 @@ igmp_v3_enqueue_group_record(struct ifqueue *ifq, struct in_multi *inm,
          * Generate the filter list changes using a separate function.
          */
         if (is_filter_list_change)
-                return (igmp_v3_enqueue_filter_change(ifq, inm));
+                return (igmp_v3_enqueue_filter_change(mq, inm));
 
         if (type == IGMP_DO_NOTHING) {
                 CTR3(KTR_IGMPV3, "%s: nothing to do for %s/%s",
@@ -2767,7 +2755,7 @@ igmp_v3_enqueue_group_record(struct ifqueue *ifq, struct in_multi *inm,
          * Note: Group records for G/GSR query responses MUST be sent
          * in their own packet.
          */
-        m0 = ifq->ifq_tail;
+        m0 = mbufq_last(mq);
         if (!is_group_query &&
             m0 != NULL &&
             (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <= IGMP_V3_REPORT_MAXRECS) &&
@@ -2778,7 +2766,7 @@ igmp_v3_enqueue_group_record(struct ifqueue *ifq, struct in_multi *inm,
                 m = m0;
                 CTR1(KTR_IGMPV3, "%s: use existing packet", __func__);
         } else {
-                if (_IF_QFULL(ifq)) {
+                if (mbufq_full(mq)) {
                         CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__);
                         return (-ENOMEM);
                 }
@@ -2891,7 +2879,7 @@ igmp_v3_enqueue_group_record(struct ifqueue *ifq, struct in_multi *inm,
         if (m != m0) {
                 CTR1(KTR_IGMPV3, "%s: enqueueing first packet", __func__);
                 m->m_pkthdr.PH_vt.vt_nrecs = 1;
-                _IF_ENQUEUE(ifq, m);
+                mbufq_enqueue(mq, m);
         } else
                 m->m_pkthdr.PH_vt.vt_nrecs++;
 
@@ -2907,7 +2895,7 @@ igmp_v3_enqueue_group_record(struct ifqueue *ifq, struct in_multi *inm,
          * Always try for a cluster first.
          */
         while (nims != NULL) {
-                if (_IF_QFULL(ifq)) {
+                if (mbufq_full(mq)) {
                         CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__);
                         return (-ENOMEM);
                 }
@@ -2970,7 +2958,7 @@ igmp_v3_enqueue_group_record(struct ifqueue *ifq, struct in_multi *inm,
                 nbytes += (msrcs * sizeof(in_addr_t));
 
                 CTR1(KTR_IGMPV3, "%s: enqueueing next packet", __func__);
-                _IF_ENQUEUE(ifq, m);
+                mbufq_enqueue(mq, m);
         }
 
         return (nbytes);
@@ -3010,7 +2998,7 @@ typedef enum {
  * no record(s) were appended.
  */
 static int
-igmp_v3_enqueue_filter_change(struct ifqueue *ifq, struct in_multi *inm)
+igmp_v3_enqueue_filter_change(struct mbufq *mq, struct in_multi *inm)
 {
         static const int MINRECLEN =
             sizeof(struct igmp_grouprec) + sizeof(in_addr_t);
@@ -3054,7 +3042,7 @@ igmp_v3_enqueue_filter_change(struct ifqueue *ifq, struct in_multi *inm)
          */
         while (drt != REC_FULL) {
                 do {
-                        m0 = ifq->ifq_tail;
+                        m0 = mbufq_last(mq);
                         if (m0 != NULL &&
                             (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <=
                              IGMP_V3_REPORT_MAXRECS) &&
@@ -3201,7 +3189,7 @@ igmp_v3_enqueue_filter_change(struct ifqueue *ifq, struct in_multi *inm)
                          */
                         m->m_pkthdr.PH_vt.vt_nrecs++;
                         if (m != m0)
-                                _IF_ENQUEUE(ifq, m);
+                                mbufq_enqueue(mq, m);
                         nbytes += npbytes;
                 } while (nims != NULL);
                 drt |= crt;
@@ -3215,9 +3203,9 @@ igmp_v3_enqueue_filter_change(struct ifqueue *ifq, struct in_multi *inm)
 }
 
 static int
-igmp_v3_merge_state_changes(struct in_multi *inm, struct ifqueue *ifscq)
+igmp_v3_merge_state_changes(struct in_multi *inm, struct mbufq *scq)
 {
-        struct ifqueue  *gq;
+        struct mbufq    *gq;
         struct mbuf     *m;             /* pending state-change */
         struct mbuf     *m0;            /* copy of pending state-change */
         struct mbuf     *mt;            /* last state-change in packet */
@@ -3240,13 +3228,13 @@ igmp_v3_merge_state_changes(struct in_multi *inm, struct ifqueue *ifscq)
 
         gq = &inm->inm_scq;
 #ifdef KTR
-        if (gq->ifq_head == NULL) {
+        if (mbufq_first(gq) == NULL) {
                 CTR2(KTR_IGMPV3, "%s: WARNING: queue for inm %p is empty",
                     __func__, inm);
         }
 #endif
 
-        m = gq->ifq_head;
+        m = mbufq_first(gq);
         while (m != NULL) {
                 /*
                  * Only merge the report into the current packet if
@@ -3257,7 +3245,7 @@ igmp_v3_merge_state_changes(struct in_multi *inm, struct ifqueue *ifscq)
                  * allocated clusters.
                  */
                 domerge = 0;
-                mt = ifscq->ifq_tail;
+                mt = mbufq_last(scq);
                 if (mt != NULL) {
                         recslen = m_length(m, NULL);
 
@@ -3269,7 +3257,7 @@ igmp_v3_merge_state_changes(struct in_multi *inm, struct ifqueue *ifscq)
                                 domerge = 1;
                 }
 
-                if (!domerge && _IF_QFULL(gq)) {
+                if (!domerge && mbufq_full(gq)) {
                         CTR2(KTR_IGMPV3,
                             "%s: outbound queue full, skipping whole packet %p",
                             __func__, m);
@@ -3282,7 +3270,7 @@ igmp_v3_merge_state_changes(struct in_multi *inm, struct ifqueue *ifscq)
 
                 if (!docopy) {
                         CTR2(KTR_IGMPV3, "%s: dequeueing %p", __func__, m);
-                        _IF_DEQUEUE(gq, m0);
+                        m0 = mbufq_dequeue(gq);
                         m = m0->m_nextpkt;
                 } else {
                         CTR2(KTR_IGMPV3, "%s: copying %p", __func__, m);
@@ -3294,13 +3282,13 @@ igmp_v3_merge_state_changes(struct in_multi *inm, struct ifqueue *ifscq)
                 }
 
                 if (!domerge) {
-                        CTR3(KTR_IGMPV3, "%s: queueing %p to ifscq %p)",
-                            __func__, m0, ifscq);
-                        _IF_ENQUEUE(ifscq, m0);
+                        CTR3(KTR_IGMPV3, "%s: queueing %p to scq %p)",
+                            __func__, m0, scq);
+                        mbufq_enqueue(scq, m0);
                 } else {
                         struct mbuf *mtl;       /* last mbuf of packet mt */
 
-                        CTR3(KTR_IGMPV3, "%s: merging %p with ifscq tail %p)",
+                        CTR3(KTR_IGMPV3, "%s: merging %p with scq tail %p)",
                             __func__, m0, mt);
 
                         mtl = m_last(mt);
@@ -3374,7 +3362,7 @@ igmp_v3_dispatch_general_query(struct igmp_ifinfo *igi)
         /*
          * Slew transmission of bursts over 500ms intervals.
          */
-        if (igi->igi_gq.ifq_head != NULL) {
+        if (mbufq_first(&igi->igi_gq) != NULL) {
                 igi->igi_v3_timer = 1 + IGMP_RANDOM_DELAY(
                     IGMP_RESPONSE_BURST_INTERVAL);
                 V_interface_timers_running = 1;
@@ -523,12 +523,7 @@ in_getmulti(struct ifnet *ifp, const struct in_addr *group,
         inm->inm_ifma = ifma;
         inm->inm_refcount = 1;
         inm->inm_state = IGMP_NOT_MEMBER;
-
-        /*
-         * Pending state-changes per group are subject to a bounds check.
-         */
-        IFQ_SET_MAXLEN(&inm->inm_scq, IGMP_MAX_STATE_CHANGES);
-
+        mbufq_init(&inm->inm_scq, IGMP_MAX_STATE_CHANGES);
         inm->inm_st[0].iss_fmode = MCAST_UNDEFINED;
         inm->inm_st[1].iss_fmode = MCAST_UNDEFINED;
         RB_INIT(&inm->inm_srcs);
@@ -210,7 +210,7 @@ struct igmp_ifinfo {
         uint32_t igi_qri;       /* IGMPv3 Query Response Interval (s) */
         uint32_t igi_uri;       /* IGMPv3 Unsolicited Report Interval (s) */
         SLIST_HEAD(,in_multi)   igi_relinmhead; /* released groups */
-        struct ifqueue   igi_gq;        /* queue of general query responses */
+        struct mbufq     igi_gq;        /* queue of general query responses */
 };
 
 #define IGIF_SILENT     0x00000001      /* Do not use IGMP on this ifp */
@@ -299,7 +299,7 @@ struct in_multi {
         struct ip_msource_tree   inm_srcs;      /* tree of sources */
         u_long                   inm_nsrc;      /* # of tree entries */
 
-        struct ifqueue           inm_scq;       /* queue of pending
+        struct mbufq             inm_scq;       /* queue of pending
                                                  * state-change packets */
         struct timeval           inm_lastgsrtv; /* Time of last G-S-R query */
         uint16_t                 inm_sctimer;   /* state-change timer */