whitespace fixes (trailing whitespace, bad indentation after a merge, etc.)
Luigi Rizzo 2010-04-19 16:17:30 +00:00
parent 3579cf4c4f
commit 6ba1ccc0f2
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=206845
8 changed files with 61 additions and 59 deletions

View File

@ -87,14 +87,14 @@ enum {
DN_SYSCTL_SET,
DN_LAST,
} ;
};
enum { /* subtype for schedulers, flowset and the like */
DN_SCHED_UNKNOWN = 0,
DN_SCHED_FIFO = 1,
DN_SCHED_WF2QP = 2,
/* others are in individual modules */
} ;
};
enum { /* user flags */
DN_HAVE_MASK = 0x0001, /* fs or sched has a mask */
@ -113,16 +113,16 @@ enum { /* user flags */
struct dn_link {
struct dn_id oid;
/*
/*
* Userland sets bw and delay in bits/s and milliseconds.
* The kernel converts this back and forth to bits/tick and ticks.
* XXX what about burst ?
*/
*/
int32_t link_nr;
int bandwidth; /* bit/s or bits/tick. */
int delay; /* ms and ticks */
uint64_t burst; /* scaled. bits*Hz XXX */
} ;
};
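Note: the comment above says userland fills in bandwidth in bits/s and delay in milliseconds, and the kernel converts back and forth to bits/tick and ticks. A minimal sketch of that conversion, assuming a clock of hz ticks per second (the helper names are illustrative, not the kernel's own):

/* Illustrative only: per-tick units for the dn_link fields above,
 * assuming `hz` ticks per second.  Rounding is ignored for brevity.
 */
static int
bw_to_bits_per_tick(int bw_bps, int hz)
{
        return bw_bps / hz;             /* bits/s -> bits/tick */
}

static int
delay_ms_to_ticks(int delay_ms, int hz)
{
        return delay_ms * hz / 1000;    /* ms -> ticks */
}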
/*
* A flowset, which is a template for flows. Contains parameters
@ -132,13 +132,13 @@ struct dn_link {
*/
struct dn_fs {
struct dn_id oid;
uint32_t fs_nr; /* the flowset number */
uint32_t flags; /* userland flags */
int qsize ; /* queue size in slots or bytes */
int32_t plr; /* PLR, pkt loss rate (2^31-1 means 100%) */
uint32_t fs_nr; /* the flowset number */
uint32_t flags; /* userland flags */
int qsize; /* queue size in slots or bytes */
int32_t plr; /* PLR, pkt loss rate (2^31-1 means 100%) */
uint32_t buckets; /* buckets used for the queue hash table */
struct ipfw_flow_id flow_mask ;
struct ipfw_flow_id flow_mask;
uint32_t sched_nr; /* the scheduler we attach to */
/* generic scheduler parameters. Leave them at -1 if unset.
* Now we use 0: weight, 1: lmax, 2: priority
@ -149,14 +149,14 @@ struct dn_fs {
* weight and probabilities are in the range 0..1 represented
* in fixed point arithmetic with SCALE_RED decimal bits.
*/
#define SCALE_RED 16
#define SCALE(x) ( (x) << SCALE_RED )
#define SCALE_VAL(x) ( (x) >> SCALE_RED )
#define SCALE_MUL(x,y) ( ( (x) * (y) ) >> SCALE_RED )
int w_q ; /* queue weight (scaled) */
int max_th ; /* maximum threshold for queue (scaled) */
int min_th ; /* minimum threshold for queue (scaled) */
int max_p ; /* maximum value for p_b (scaled) */
#define SCALE_RED 16
#define SCALE(x) ( (x) << SCALE_RED )
#define SCALE_VAL(x) ( (x) >> SCALE_RED )
#define SCALE_MUL(x,y) ( ( (x) * (y) ) >> SCALE_RED )
int w_q ; /* queue weight (scaled) */
int max_th ; /* maximum threshold for queue (scaled) */
int min_th ; /* minimum threshold for queue (scaled) */
int max_p ; /* maximum value for p_b (scaled) */
};
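The RED fields just above (w_q, min_th, max_th, max_p) are kept in fixed point with SCALE_RED fractional bits and manipulated through the SCALE* macros. As a hedged illustration (not code from this commit), a RED-style averaging step using those macros would look like:

/* Illustrative only: EWMA of the queue length in SCALE_RED fixed point.
 * `avg` and `w_q` are scaled values (e.g. w_q = SCALE(1) / 512), while
 * `q_len` is a plain slot count.
 */
static int
red_avg_update(int avg, int w_q, int q_len)
{
        /* avg += w_q * (q_len - avg), all in scaled arithmetic */
        return avg + SCALE_MUL(w_q, SCALE(q_len) - avg);
}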
@ -177,10 +177,10 @@ struct dn_flow {
};
/*
/*
* Scheduler template, mostly indicating the name, number,
* sched_mask and buckets.
*/
*/
struct dn_sch {
struct dn_id oid;
uint32_t sched_nr; /* N, scheduler number */
@ -199,14 +199,14 @@ struct dn_sch {
#define ED_MAX_SAMPLES_NO 1024
struct dn_profile {
struct dn_id oid;
/* fields to simulate a delay profile */
/* fields to simulate a delay profile */
#define ED_MAX_NAME_LEN 32
char name[ED_MAX_NAME_LEN];
int link_nr;
int loss_level;
int bandwidth; // XXX use link bandwidth?
int samples_no; /* actual length of samples[] */
int samples[ED_MAX_SAMPLES_NO]; /* may be shorter */
char name[ED_MAX_NAME_LEN];
int link_nr;
int loss_level;
int bandwidth; // XXX use link bandwidth?
int samples_no; /* actual len of samples[] */
int samples[ED_MAX_SAMPLES_NO]; /* may be shorter */
};

View File

@ -140,9 +140,9 @@ struct dn_alg {
/* MSVC does not support initializers so we need this ugly macro */
#ifdef _WIN32
#define _SI(fld)
#define _SI(fld)
#else
#define _SI(fld) fld
#define _SI(fld) fld
#endif
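The _SI() wrapper above lets a single initializer list build both with and without C99 designated initializers: on _WIN32 the designator is discarded and the entries become positional, elsewhere it is kept. A hedged sketch of the expansion, using a made-up struct (the real user is struct dn_alg, as in the wf2qp_desc initializer further down):

/* Illustrative only: the same initializer compiles as positional on
 * _WIN32 (where _SI() drops its argument) and as designated elsewhere.
 */
struct demo_alg {
        const char *name;
        int (*enqueue)(void);
};

static int demo_enqueue(void) { return 0; }

static struct demo_alg demo_desc = {
        _SI( .name = )    "demo",
        _SI( .enqueue = ) demo_enqueue,
};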
/*

View File

@ -94,7 +94,7 @@ rr_remove_head(struct rr_si *si)
if (si->head == NULL)
return; /* empty queue */
si->head->status = 0;
if (si->head == si->tail) {
si->head = si->tail = NULL;
return;
@ -141,7 +141,7 @@ next_pointer(struct rr_si *si)
si->tail = si->tail->qnext;
}
static int
static int
rr_enqueue(struct dn_sch_inst *_si, struct dn_queue *q, struct mbuf *m)
{
struct rr_si *si;
@ -154,7 +154,7 @@ rr_enqueue(struct dn_sch_inst *_si, struct dn_queue *q, struct mbuf *m)
return 0;
}
/* If reach this point, queue q was idle */
/* If reach this point, queue q was idle */
si = (struct rr_si *)(_si + 1);
rrq = (struct rr_queue *)q;

View File

@ -125,7 +125,7 @@ idle_check(struct wf2qp_si *si, int n, int force)
}
}
static int
static int
wf2qp_enqueue(struct dn_sch_inst *_si, struct dn_queue *q, struct mbuf *m)
{
struct dn_fsk *fs = q->fs;
@ -140,7 +140,7 @@ wf2qp_enqueue(struct dn_sch_inst *_si, struct dn_queue *q, struct mbuf *m)
return 0;
}
/* If reach this point, queue q was idle */
/* If reach this point, queue q was idle */
alg_fq = (struct wf2qp_queue *)q;
if (DN_KEY_LT(alg_fq->F, alg_fq->S)) {
@ -318,7 +318,7 @@ wf2qp_free_queue(struct dn_queue *q)
{
struct wf2qp_queue *alg_fq = (struct wf2qp_queue *)q;
struct wf2qp_si *si = (struct wf2qp_si *)(q->_si + 1);
if (alg_fq->S >= alg_fq->F + 1)
return 0; /* nothing to do, not in any heap */
si->wsum -= q->fs->fs.par[0];
@ -361,7 +361,7 @@ static struct dn_alg wf2qp_desc = {
_SI( .destroy = ) NULL,
_SI( .new_sched = ) wf2qp_new_sched,
_SI( .free_sched = ) wf2qp_free_sched,
_SI( .new_fsk = ) wf2qp_new_fsk,
_SI( .free_fsk = ) NULL,

View File

@ -149,7 +149,7 @@ struct dn_parms {
int drain_sch;
uint32_t expire;
uint32_t expire_cycle; /* tick count */
int init_done;
/* if the upper half is busy doing something long,

View File

@ -1547,28 +1547,28 @@ config_profile(struct dn_profile *pf, struct dn_id *arg)
/* XXX other sanity checks */
DN_BH_WLOCK();
for (; i < 2*DN_MAX_ID; i += DN_MAX_ID) {
s = locate_scheduler(i);
s = locate_scheduler(i);
if (s == NULL) {
if (s == NULL) {
err = EINVAL;
break;
}
dn_cfg.id++;
/*
* If we had a profile and the new one does not fit,
* or it is deleted, then we need to free memory.
*/
if (s->profile && (pf->samples_no == 0 ||
s->profile->oid.len < pf->oid.len)) {
free(s->profile, M_DUMMYNET);
s->profile = NULL;
}
}
dn_cfg.id++;
/*
* If we had a profile and the new one does not fit,
* or it is deleted, then we need to free memory.
*/
if (s->profile && (pf->samples_no == 0 ||
s->profile->oid.len < pf->oid.len)) {
free(s->profile, M_DUMMYNET);
s->profile = NULL;
}
if (pf->samples_no == 0)
continue;
/*
/*
* new profile, possibly allocate memory
* and copy data.
*/
* and copy data.
*/
if (s->profile == NULL)
s->profile = malloc(pf->oid.len,
M_DUMMYNET, M_NOWAIT | M_ZERO);
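The hunk above re-indents a common allocation pattern: an existing per-scheduler profile is freed when the update deletes it (samples_no == 0) or when the new profile would not fit in the old buffer, and a fresh buffer is allocated only if none is left before the data is copied in. A simplified userland sketch of the same idea, with hypothetical names and malloc/free in place of the kernel allocator:

/* Illustrative only: free if deleted or too small, then (re)allocate
 * and copy -- mirroring the config_profile() logic shown above.
 */
#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct blob { size_t len; /* length of the whole object, payload follows */ };

static int
replace_blob(struct blob **slot, const struct blob *nb, int deleted)
{
        if (*slot != NULL && (deleted || (*slot)->len < nb->len)) {
                free(*slot);
                *slot = NULL;
        }
        if (deleted)
                return 0;               /* nothing to install */
        if (*slot == NULL) {
                *slot = malloc(nb->len);
                if (*slot == NULL)
                        return ENOMEM;
        }
        memcpy(*slot, nb, nb->len);
        return 0;
}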
@ -1642,7 +1642,8 @@ do_config(void *p, int l)
default:
D("cmd %d not implemented", o->type);
break;
#ifdef EMULATE_SYSCTL
#ifdef EMULATE_SYSCTL
/* sysctl emulation.
* if we recognize the command, jump to the correct
* handler and return
@ -1651,6 +1652,7 @@ do_config(void *p, int l)
err = kesysctl_emu_set(p, l);
return err;
#endif
case DN_CMD_CONFIG: /* simply a header */
break;

View File

@ -147,8 +147,8 @@ ipfw_check_hook(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir,
switch (ipfw) {
case IP_FW_PASS:
/* next_hop may be set by ipfw_chk */
if (args.next_hop == NULL)
break; /* pass */
if (args.next_hop == NULL)
break; /* pass */
#ifndef IPFIREWALL_FORWARD
ret = EACCES;
#else
@ -347,14 +347,14 @@ ipfw_attach_hooks(int arg)
if (arg == 0) /* detach */
ipfw_hook(0, AF_INET);
else if (V_fw_enable && ipfw_hook(1, AF_INET) != 0) {
else if (V_fw_enable && ipfw_hook(1, AF_INET) != 0) {
error = ENOENT; /* see ip_fw_pfil.c::ipfw_hook() */
printf("ipfw_hook() error\n");
}
#ifdef INET6
if (arg == 0) /* detach */
ipfw_hook(0, AF_INET6);
else if (V_fw6_enable && ipfw_hook(1, AF_INET6) != 0) {
else if (V_fw6_enable && ipfw_hook(1, AF_INET6) != 0) {
error = ENOENT;
printf("ipfw6_hook() error\n");
}

View File

@ -214,7 +214,7 @@ struct ip_fw_chain {
struct ip_fw *default_rule;
int n_rules; /* number of static rules */
int static_len; /* total len of static rules */
struct ip_fw **map; /* array of rule ptrs to ease lookup */
struct ip_fw **map; /* array of rule ptrs to ease lookup */
LIST_HEAD(nat_list, cfg_nat) nat; /* list of nat entries */
struct radix_node_head *tables[IPFW_TABLES_MAX];
#if defined( __linux__ ) || defined( _WIN32 )