cc4d3c30ea
and tested over the past two months in the ipfw3-head branch. This also happens to be the same code available in the Linux and Windows ports of ipfw and dummynet.

The major enhancement is a completely restructured version of dummynet, with support for different packet scheduling algorithms (loadable at runtime), faster queue/pipe lookup, and a much cleaner internal architecture and kernel/userland ABI which simplifies future extensions.

In addition to the existing schedulers (FIFO and WF2Q+), we include a Deficit Round Robin (DRR or RR for brevity) scheduler, and a new, very fast version of WF2Q+ called QFQ. Some test code is also present (in sys/netinet/ipfw/test) that lets you build and test schedulers in userland.

Also, we have added a compatibility layer that understands requests from the RELENG_7 and RELENG_8 versions of the /sbin/ipfw binaries, and replies correctly (at least, it does its best; sometimes you just cannot tell who sent the request and how to answer). The compatibility layer should make it possible to MFC this code in a relatively short time.

Some minor glitches (e.g. handling of ipfw set enable/disable, and a workaround for a bug in RELENG_7's /sbin/ipfw) will be fixed with separate commits.

CREDITS: This work has been partly supported by the ONELAB2 project, and mostly developed by Riccardo Panicucci and myself. The code for the qfq scheduler is mostly from Fabio Checconi, and Marta Carbone and Francesco Magno have helped with testing, debugging and some bug fixes.
/*-
 * Copyright (c) 2010 Luigi Rizzo, Riccardo Panicucci, Universita` di Pisa
 * All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * internal dummynet APIs.
 *
 * $FreeBSD$
 */

#ifndef _IP_DN_PRIVATE_H
#define _IP_DN_PRIVATE_H

/* debugging support
 * use ND() to remove debugging, D() to print a line,
 * DX(level, ...) to print above a certain level
 * If you redefine D() you are expected to redefine all.
 */
#ifndef D
#define ND(fmt, ...) do {} while (0)
#define D1(fmt, ...) do {} while (0)
#define D(fmt, ...) printf("%-10s " fmt "\n",	\
	__FUNCTION__, ## __VA_ARGS__)
#define DX(lev, fmt, ...) do {		\
	if (dn_cfg.debug > lev) D(fmt, ## __VA_ARGS__); } while (0)
#endif
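
/*
 * Illustrative use of the macros above (not part of the original header):
 * ND() compiles to nothing, D() always prints with the function name as a
 * prefix, and DX() prints only when dn_cfg.debug is above 'lev'. The
 * function name is made up.
 */
#if 0	/* example only, never compiled */
static void
example_debug_usage(int len)
{
	ND("this call disappears at compile time");
	D("got a packet of %d bytes", len);
	DX(3, "printed only if dn_cfg.debug > 3");
}
#endif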

MALLOC_DECLARE(M_DUMMYNET);

#ifndef FREE_PKT
#define FREE_PKT(m)	m_freem(m)
#endif

#ifndef __linux__
#define div64(a, b)	((int64_t)(a) / (int64_t)(b))
#endif

#define DN_LOCK_INIT() do {				\
	mtx_init(&dn_cfg.uh_mtx, "dn_uh", NULL, MTX_DEF);	\
	mtx_init(&dn_cfg.bh_mtx, "dn_bh", NULL, MTX_DEF);	\
	} while (0)
#define DN_LOCK_DESTROY() do {				\
	mtx_destroy(&dn_cfg.uh_mtx);			\
	mtx_destroy(&dn_cfg.bh_mtx);			\
	} while (0)
#if 0 /* not used yet */
#define DN_UH_RLOCK()		mtx_lock(&dn_cfg.uh_mtx)
#define DN_UH_RUNLOCK()		mtx_unlock(&dn_cfg.uh_mtx)
#define DN_UH_WLOCK()		mtx_lock(&dn_cfg.uh_mtx)
#define DN_UH_WUNLOCK()		mtx_unlock(&dn_cfg.uh_mtx)
#define DN_UH_LOCK_ASSERT()	mtx_assert(&dn_cfg.uh_mtx, MA_OWNED)
#endif

#define DN_BH_RLOCK()		mtx_lock(&dn_cfg.uh_mtx)
#define DN_BH_RUNLOCK()		mtx_unlock(&dn_cfg.uh_mtx)
#define DN_BH_WLOCK()		mtx_lock(&dn_cfg.uh_mtx)
#define DN_BH_WUNLOCK()		mtx_unlock(&dn_cfg.uh_mtx)
#define DN_BH_LOCK_ASSERT() mtx_assert(&dn_cfg.uh_mtx, MA_OWNED)
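
/*
 * A minimal sketch (not part of the original header) of how the BH lock
 * macros above are meant to wrap code that touches dummynet state; the
 * function name is made up.
 */
#if 0	/* example only, never compiled */
static void
example_locked_section(void)
{
	DN_BH_WLOCK();
	/* ... enqueue a packet or update scheduler state ... */
	DN_BH_LOCK_ASSERT();	/* sanity check while holding the lock */
	DN_BH_WUNLOCK();
}
#endif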

SLIST_HEAD(dn_schk_head, dn_schk);
SLIST_HEAD(dn_sch_inst_head, dn_sch_inst);
SLIST_HEAD(dn_fsk_head, dn_fsk);
SLIST_HEAD(dn_queue_head, dn_queue);
SLIST_HEAD(dn_alg_head, dn_alg);

struct mq {	/* a basic queue of packets */
	struct mbuf *head, *tail;
};

static inline void
set_oid(struct dn_id *o, int type, int len)
{
	o->type = type;
	o->len = len;
	o->subtype = 0;
};
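
/*
 * Illustrative only (not part of the original header): set_oid() fills the
 * common dn_id header carried by every object exchanged with userland.
 * DN_LINK and struct dn_link are assumed to be declared in the public
 * ip_dummynet.h header; the function name is made up.
 */
#if 0	/* example only, never compiled */
static void
example_init_link_header(struct dn_link *l)
{
	set_oid(&l->oid, DN_LINK, sizeof(*l));	/* type and length of the object */
}
#endif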

/*
 * configuration and global data for a dummynet instance
 *
 * When a configuration is modified from userland, 'id' is incremented
 * so we can use the value to check for stale pointers.
 */
struct dn_parms {
	uint32_t	id;		/* configuration version */

	/* defaults (sysctl-accessible) */
	int	red_lookup_depth;
	int	red_avg_pkt_size;
	int	red_max_pkt_size;
	int	hash_size;
	int	max_hash_size;
	long	byte_limit;		/* max queue sizes */
	long	slot_limit;

	int	io_fast;
	int	debug;

	/* timekeeping */
	struct timeval	prev_t;		/* last time dummynet_tick ran */
	struct dn_heap	evheap;		/* scheduled events */

	/* counters of objects -- used for reporting space */
	int	schk_count;
	int	si_count;
	int	fsk_count;
	int	queue_count;

	/* ticks and other stuff */
	uint64_t	curr_time;

	/* Flowsets and schedulers are in hash tables with 'hash_size'
	 * buckets. fshash is looked up at every packet arrival,
	 * so better be generous if we expect many entries.
	 */
	struct dn_ht	*fshash;
	struct dn_ht	*schedhash;
	/* list of flowsets without a scheduler -- use sch_chain */
	struct dn_fsk_head	fsu;	/* list of unlinked flowsets */
	struct dn_alg_head	schedlist;	/* list of algorithms */

	/* The fs/sch bucket to scan when draining; the value is the
	 * bucket number in the corresponding hash table.
	 */
	int	drain_fs;
	int	drain_sch;

	/* If the upper half is busy with some long operation it can set
	 * the 'busy' flag; packets arriving in the meantime are enqueued
	 * on 'pending' for later processing.
	 */
	int	busy;
	struct mq	pending;

#ifdef _KERNEL
	/*
	 * This file is normally used in the kernel, unless we do
	 * some userland tests, in which case we do not need a mtx.
	 * uh_mtx arbitrates between system calls and also
	 * protects fshash, schedhash and fsu (the unlinked flowsets).
	 * These structures are read-only for the lower half.
	 * bh_mtx protects all other structures which may be
	 * modified upon packet arrivals.
	 */
#if defined( __linux__ ) || defined( _WIN32 )
	spinlock_t	uh_mtx;
	spinlock_t	bh_mtx;
#else
	struct mtx	uh_mtx;
	struct mtx	bh_mtx;
#endif

#endif /* _KERNEL */
};
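
/*
 * Sketch (not part of the original header) of how 'id' can be used to
 * validate a cached pointer: remember dn_cfg.id at lookup time and redo
 * the lookup if the configuration has changed since. The struct and the
 * lookup helper are hypothetical.
 */
#if 0	/* example only, never compiled */
struct example_cache {
	struct dn_schk	*schk;		/* cached scheduler */
	uint32_t	cfg_id;		/* dn_cfg.id when 'schk' was looked up */
};

static struct dn_schk *
example_cached_lookup(struct example_cache *c, int sched_nr)
{
	if (c->schk == NULL || c->cfg_id != dn_cfg.id) {
		c->schk = example_schk_find(sched_nr);	/* hypothetical lookup */
		c->cfg_id = dn_cfg.id;
	}
	return (c->schk);
}
#endif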

/*
 * Delay line, contains all packets on output from a link.
 * Every scheduler instance has one.
 */
struct delay_line {
	struct dn_id	oid;
	struct dn_sch_inst	*si;
	struct mq	mq;
};

/*
 * The kernel side of a flowset. It is linked in a hash table
 * of flowsets, and in a list of children of their parent scheduler.
 * qht is either the single queue or (if HAVE_MASK) a hash table of queues.
 * Note that the mask to use is (flow_mask|sched_mask), which
 * changes as we attach/detach schedulers. So we store it here.
 *
 * XXX If we want to add scheduler-specific parameters, we need to
 * put them in external storage because the scheduler may not be
 * available when the fsk is created.
 */
struct dn_fsk { /* kernel side of a flowset */
	struct dn_fs	fs;
	SLIST_ENTRY(dn_fsk)	fsk_next;	/* hash chain for fshash */

	struct ipfw_flow_id	fsk_mask;

	/* qht is a hash table of queues, or just a single queue;
	 * a bit in fs.flags tells us which one
	 */
	struct dn_ht	*qht;
	struct dn_schk	*sched;		/* scheduler we are linked to */
	SLIST_ENTRY(dn_fsk)	sch_chain;	/* list of fsk attached to sched */

	/* bucket index used by the drain routine to drain queues for this
	 * flowset
	 */
	int	drain_bucket;
	/* Parameters related to RED / GRED */
	/* original values are in dn_fs */
	int	w_q;		/* queue weight (scaled) */
	int	max_th;		/* maximum threshold for queue (scaled) */
	int	min_th;		/* minimum threshold for queue (scaled) */
	int	max_p;		/* maximum value for p_b (scaled) */

	u_int	c_1;		/* max_p/(max_th - min_th) (scaled) */
	u_int	c_2;		/* max_p*min_th/(max_th - min_th) (scaled) */
	u_int	c_3;		/* for GRED, (1 - max_p)/max_th (scaled) */
	u_int	c_4;		/* for GRED, 1 - 2*max_p (scaled) */
	u_int	*w_q_lookup;	/* lookup table for computing (1 - w_q)^t */
	u_int	lookup_depth;	/* depth of lookup table */
	int	lookup_step;	/* granularity inside the lookup table */
	int	lookup_weight;	/* equal to (1 - w_q)^t / (1 - w_q)^(t+1) */
	int	avg_pkt_size;	/* average packet size */
	int	max_pkt_size;	/* max packet size */
};
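
/*
 * Reference for the scaled RED/GRED constants above (standard RED math,
 * restated here only as a reminder):
 *   for min_th < avg < max_th the drop probability grows linearly,
 *	p_b = c_1 * avg - c_2
 *   i.e. 0 at avg == min_th and max_p at avg == max_th;
 *   with gentle RED, for max_th < avg < 2*max_th,
 *	p_b = c_3 * avg - c_4
 *   which continues that line from max_p at avg == max_th up to 1 at
 *   avg == 2*max_th.
 */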

/*
 * A queue is created as a child of a flowset unless it belongs to
 * a !MULTIQUEUE scheduler. It is normally in a hash table in the
 * flowset. fs always points to the parent flowset.
 * si normally points to the sch_inst, unless the flowset has been
 * detached from the scheduler -- in this case si == NULL and we
 * should not enqueue.
 */
struct dn_queue {
	struct dn_flow	ni;	/* oid, flow_id, stats */
	struct mq	mq;	/* packet queue */
	struct dn_sch_inst	*_si;	/* owner scheduler instance */
	SLIST_ENTRY(dn_queue)	q_next;	/* hash chain list for qht */
	struct dn_fsk	*fs;	/* parent flowset. */

	/* RED parameters */
	int	avg;		/* average queue length est. (scaled) */
	int	count;		/* arrivals since last RED drop */
	int	random;		/* random value (scaled) */
	uint64_t	q_time;	/* start of queue idle time */
};

/*
 * The kernel side of a scheduler. Contains the userland config,
 * a link, a pointer to extra config arguments from the command line,
 * kernel flags, and a pointer to the scheduler methods.
 * It is stored in a hash table, and holds a list of all
 * flowsets and scheduler instances.
 * XXX sch must be at the beginning, see schk_hash().
 */
struct dn_schk {
	struct dn_sch	sch;
	struct dn_alg	*fp;	/* pointer to scheduler functions */
	struct dn_link	link;	/* the link, embedded */
	struct dn_profile	*profile;	/* delay profile, if any */
	struct dn_id	*cfg;	/* extra config arguments */

	SLIST_ENTRY(dn_schk)	schk_next;	/* hash chain for schedhash */

	struct dn_fsk_head	fsk_list;	/* all fsk linked to me */
	struct dn_fsk	*fs;	/* flowset for !MULTIQUEUE */

	/* bucket index used by the drain routine to drain the scheduler
	 * instances for this scheduler.
	 */
	int	drain_bucket;

	/* Hash table of all instances (through sch.sched_mask)
	 * or single instance if no mask. Always valid.
	 */
	struct dn_ht	*siht;
};

/*
 * Scheduler instance.
 * Contains variables and all queues relative to this instance.
 * This struct is created at runtime.
 */
struct dn_sch_inst {
	struct dn_flow	ni;	/* oid, flowid and stats */
	SLIST_ENTRY(dn_sch_inst)	si_next;	/* hash chain for siht */
	struct delay_line	dline;
	struct dn_schk	*sched;	/* the template */
	int	kflags;		/* DN_ACTIVE */

	int64_t	credit;		/* bits I can transmit (more or less). */
	uint64_t	sched_time;	/* time link was scheduled in ready_heap */
	uint64_t	idle_time;	/* start of scheduler instance idle time */

	/* q_count is the number of queues that this instance is using.
	 * The counter is incremented or decremented when
	 * a reference from the queue is created or deleted.
	 * It is used to make sure that a scheduler instance can be safely
	 * deleted by the drain routine. See notes below.
	 */
	int	q_count;
};

/*
 * NOTE about object drain.
 * The system will automatically (XXX check when) drain queues and
 * scheduler instances when they are idle.
 * A queue is idle when it has no packets; an instance is idle when
 * it is not in the evheap, and the corresponding delay line is empty.
 * A queue can be safely deleted when it is idle because the scheduler
 * function xxx_free_queue() will remove any references to it.
 * An instance can only be deleted when no queues reference it. To be sure
 * of that, a counter (q_count) stores the number of queues that are pointing
 * to the instance.
 *
 * XXX
 * Order of scan:
 * - take all flowsets in a bucket of the flowset hash table
 * - take all queues in a bucket of each such flowset
 * - increment the queue bucket
 * - scan the next flowset bucket
 * Nothing is done if a bucket contains no entries.
 *
 * The same scheme is used for scheduler instances.
*/
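
/*
 * Rough sketch of one drain pass as described above (illustrative, not part
 * of the original header; the iterator and helper below are made up).
 */
#if 0	/* example only, never compiled */
static void
example_drain_pass(void)
{
	struct dn_fsk *fs;

	/* visit every flowset in bucket 'drain_fs' of the flowset hash */
	EXAMPLE_FOREACH_IN_BUCKET(fs, dn_cfg.fshash, dn_cfg.drain_fs) {
		/* free idle queues in bucket 'drain_bucket' of fs->qht */
		example_drain_queues_in_bucket(fs, fs->drain_bucket);
		fs->drain_bucket++;	/* next queue bucket next time */
	}
	dn_cfg.drain_fs++;	/* move to the next flowset bucket */
}
#endif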

/* kernel-side flags. Linux has DN_DELETE in fcntl.h
 */
enum {
	/* 1 and 2 are reserved for the SCAN flags */
	DN_DESTROY	= 0x0004, /* destroy */
	DN_DELETE_FS	= 0x0008, /* destroy flowset */
	DN_DETACH	= 0x0010,
	DN_ACTIVE	= 0x0020, /* object is in evheap */
	DN_F_DLINE	= 0x0040, /* object is a delay line */
	DN_F_SCHI	= 0x00C0, /* object is a sched. instance */
	DN_QHT_IS_Q	= 0x0100, /* in flowset, qht is a single queue */
};

extern struct dn_parms dn_cfg;

int dummynet_io(struct mbuf **, int, struct ip_fw_args *);
void dummynet_task(void *context, int pending);
void dn_reschedule(void);

struct dn_queue *ipdn_q_find(struct dn_fsk *, struct dn_sch_inst *,
	struct ipfw_flow_id *);
struct dn_sch_inst *ipdn_si_find(struct dn_schk *, struct ipfw_flow_id *);
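
/*
 * How the two lookups above combine on the packet path (a sketch, not part
 * of the original header): the flowset gives the scheduler template,
 * ipdn_si_find() returns (or creates) the scheduler instance matching the
 * flow id, and ipdn_q_find() returns (or creates) the per-flow queue in
 * that instance. The wrapper function name is made up.
 */
#if 0	/* example only, never compiled */
static struct dn_queue *
example_classify(struct dn_fsk *fs, struct ipfw_flow_id *id)
{
	struct dn_sch_inst *si;

	si = ipdn_si_find(fs->sched, id);
	if (si == NULL)
		return (NULL);
	return (ipdn_q_find(fs, si, id));
}
#endif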

/* helper structure to copy objects returned to userland */
struct copy_args {
	char **start;
	char *end;
	int flags;
	int type;
	int extra;	/* extra filtering */
};
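
/*
 * Illustrative use of copy_args (not part of the original header): 'start'
 * walks through the userland reply buffer and 'end' bounds it, so a copy
 * helper appends an object only if it still fits. The helper below is a
 * simplified, made-up variant of the real copy routines.
 */
#if 0	/* example only, never compiled */
static int
example_copy_obj(struct copy_args *a, const void *src, int len)
{
	if (*a->start + len > a->end)
		return (ENOMEM);	/* no room left in the reply buffer */
	bcopy(src, *a->start, len);
	*a->start += len;
	return (0);
}
#endif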

struct sockopt;
int ip_dummynet_compat(struct sockopt *sopt);
int dummynet_get(struct sockopt *sopt, void **compat);
int dn_c_copy_q(void *_ni, void *arg);
int dn_c_copy_pipe(struct dn_schk *s, struct copy_args *a, int nq);
int dn_c_copy_fs(struct dn_fsk *f, struct copy_args *a, int nq);
int dn_compat_copy_queue(struct copy_args *a, void *_o);
int dn_compat_copy_pipe(struct copy_args *a, void *_o);
int copy_data_helper_compat(void *_o, void *_arg);
int dn_compat_calc_size(struct dn_parms dn_cfg);
int do_config(void *p, int l);

/* functions to drain idle objects */
void dn_drain_scheduler(void);
void dn_drain_queue(void);

#endif /* _IP_DN_PRIVATE_H */