altq: remove ALTQ3_COMPAT code

This code has apparently never compiled on FreeBSD since its
introduction in 2004 (r130365).  It has certainly not compiled
since 2006, when r164033 added #elsif [sic] preprocessor directives.
The code was left in the tree to reduce the diff from upstream (KAME).
Since that upstream is no longer relevant, remove the long-dead code.

This commit is the direct result of:

    unifdef -m -UALTQ3_COMPAT sys/net/altq/*

A later commit will do some manual cleanup.

I do not plan to MFC this.  If that would help you, go for it.
This commit is contained in:
Eric van Gyzen 2018-12-04 23:46:43 +00:00
parent ab8998c257
commit 325fab802e
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=341507
14 changed files with 0 additions and 4686 deletions

View File

@ -38,16 +38,6 @@
#define ALTQ3_CLFIER_COMPAT /* for compatibility with altq-3 classifier */
#endif
#ifdef ALTQ3_COMPAT
#include <sys/param.h>
#include <sys/ioccom.h>
#include <sys/queue.h>
#include <netinet/in.h>
#ifndef IFNAMSIZ
#define IFNAMSIZ 16
#endif
#endif /* ALTQ3_COMPAT */
/* altq discipline type */
#define ALTQT_NONE 0 /* reserved */
@ -67,12 +57,6 @@
#define ALTQT_CODEL 14 /* CoDel */
#define ALTQT_MAX 15 /* should be max discipline type + 1 */
#ifdef ALTQ3_COMPAT
struct altqreq {
char ifname[IFNAMSIZ]; /* if name, e.g. "en0" */
u_long arg; /* request-specific argument */
};
#endif
/* simple token backet meter profile */
struct tb_profile {
@ -80,85 +64,6 @@ struct tb_profile {
u_int32_t depth; /* depth in bytes */
};
#ifdef ALTQ3_COMPAT
struct tbrreq {
char ifname[IFNAMSIZ]; /* if name, e.g. "en0" */
struct tb_profile tb_prof; /* token bucket profile */
};
#ifdef ALTQ3_CLFIER_COMPAT
/*
* common network flow info structure
*/
struct flowinfo {
u_char fi_len; /* total length */
u_char fi_family; /* address family */
u_int8_t fi_data[46]; /* actually longer; address family
specific flow info. */
};
/*
* flow info structure for internet protocol family.
* (currently this is the only protocol family supported)
*/
struct flowinfo_in {
u_char fi_len; /* sizeof(struct flowinfo_in) */
u_char fi_family; /* AF_INET */
u_int8_t fi_proto; /* IPPROTO_XXX */
u_int8_t fi_tos; /* type-of-service */
struct in_addr fi_dst; /* dest address */
struct in_addr fi_src; /* src address */
u_int16_t fi_dport; /* dest port */
u_int16_t fi_sport; /* src port */
u_int32_t fi_gpi; /* generalized port id for ipsec */
u_int8_t _pad[28]; /* make the size equal to
flowinfo_in6 */
};
#ifdef SIN6_LEN
struct flowinfo_in6 {
u_char fi6_len; /* sizeof(struct flowinfo_in6) */
u_char fi6_family; /* AF_INET6 */
u_int8_t fi6_proto; /* IPPROTO_XXX */
u_int8_t fi6_tclass; /* traffic class */
u_int32_t fi6_flowlabel; /* ipv6 flowlabel */
u_int16_t fi6_dport; /* dest port */
u_int16_t fi6_sport; /* src port */
u_int32_t fi6_gpi; /* generalized port id */
struct in6_addr fi6_dst; /* dest address */
struct in6_addr fi6_src; /* src address */
};
#endif /* INET6 */
/*
* flow filters for AF_INET and AF_INET6
*/
struct flow_filter {
int ff_ruleno;
struct flowinfo_in ff_flow;
struct {
struct in_addr mask_dst;
struct in_addr mask_src;
u_int8_t mask_tos;
u_int8_t _pad[3];
} ff_mask;
u_int8_t _pad2[24]; /* make the size equal to flow_filter6 */
};
#ifdef SIN6_LEN
struct flow_filter6 {
int ff_ruleno;
struct flowinfo_in6 ff_flow6;
struct {
struct in6_addr mask6_dst;
struct in6_addr mask6_src;
u_int8_t mask6_tclass;
u_int8_t _pad[3];
} ff_mask6;
};
#endif /* INET6 */
#endif /* ALTQ3_CLFIER_COMPAT */
#endif /* ALTQ3_COMPAT */
/*
* generic packet counter
@ -171,33 +76,6 @@ struct pktcntr {
#define PKTCNTR_ADD(cntr, len) \
do { (cntr)->packets++; (cntr)->bytes += len; } while (/*CONSTCOND*/ 0)
#ifdef ALTQ3_COMPAT
/*
* altq related ioctls
*/
#define ALTQGTYPE _IOWR('q', 0, struct altqreq) /* get queue type */
#if 0
/*
* these ioctls are currently discipline-specific but could be shared
* in the future.
*/
#define ALTQATTACH _IOW('q', 1, struct altqreq) /* attach discipline */
#define ALTQDETACH _IOW('q', 2, struct altqreq) /* detach discipline */
#define ALTQENABLE _IOW('q', 3, struct altqreq) /* enable discipline */
#define ALTQDISABLE _IOW('q', 4, struct altqreq) /* disable discipline*/
#define ALTQCLEAR _IOW('q', 5, struct altqreq) /* (re)initialize */
#define ALTQCONFIG _IOWR('q', 6, struct altqreq) /* set config params */
#define ALTQADDCLASS _IOWR('q', 7, struct altqreq) /* add a class */
#define ALTQMODCLASS _IOWR('q', 8, struct altqreq) /* modify a class */
#define ALTQDELCLASS _IOWR('q', 9, struct altqreq) /* delete a class */
#define ALTQADDFILTER _IOWR('q', 10, struct altqreq) /* add a filter */
#define ALTQDELFILTER _IOWR('q', 11, struct altqreq) /* delete a filter */
#define ALTQGETSTATS _IOWR('q', 12, struct altqreq) /* get statistics */
#define ALTQGETCNTR _IOWR('q', 13, struct altqreq) /* get a pkt counter */
#endif /* 0 */
#define ALTQTBRSET _IOW('q', 14, struct tbrreq) /* set tb regulator */
#define ALTQTBRGET _IOWR('q', 15, struct tbrreq) /* get tb regulator */
#endif /* ALTQ3_COMPAT */
#ifdef _KERNEL
#include <net/altq/altq_var.h>

View File

@ -44,10 +44,6 @@
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/time.h>
#ifdef ALTQ3_COMPAT
#include <sys/uio.h>
#include <sys/kernel.h>
#endif
#include <net/if.h>
#include <net/if_var.h>
@ -58,16 +54,7 @@
#include <netpfil/pf/pf_mtag.h>
#include <net/altq/altq.h>
#include <net/altq/altq_cbq.h>
#ifdef ALTQ3_COMPAT
#include <net/altq/altq_conf.h>
#endif
#ifdef ALTQ3_COMPAT
/*
* Local Data structures.
*/
static cbq_state_t *cbq_list = NULL;
#endif
/*
* Forward Declarations.
@ -82,21 +69,6 @@ static struct mbuf *cbq_dequeue(struct ifaltq *, int);
static void cbqrestart(struct ifaltq *);
static void get_class_stats(class_stats_t *, struct rm_class *);
static void cbq_purge(cbq_state_t *);
#ifdef ALTQ3_COMPAT
static int cbq_add_class(struct cbq_add_class *);
static int cbq_delete_class(struct cbq_delete_class *);
static int cbq_modify_class(struct cbq_modify_class *);
static int cbq_class_create(cbq_state_t *, struct cbq_add_class *,
struct rm_class *, struct rm_class *);
static int cbq_clear_hierarchy(struct cbq_interface *);
static int cbq_set_enable(struct cbq_interface *, int);
static int cbq_ifattach(struct cbq_interface *);
static int cbq_ifdetach(struct cbq_interface *);
static int cbq_getstats(struct cbq_getstats *);
static int cbq_add_filter(struct cbq_add_filter *);
static int cbq_delete_filter(struct cbq_delete_filter *);
#endif /* ALTQ3_COMPAT */
/*
* int
@ -123,10 +95,6 @@ cbq_class_destroy(cbq_state_t *cbqp, struct rm_class *cl)
cbqp->ifnp.root_ = NULL;
if (cl == cbqp->ifnp.default_)
cbqp->ifnp.default_ = NULL;
#ifdef ALTQ3_COMPAT
if (cl == cbqp->ifnp.ctl_)
cbqp->ifnp.ctl_ = NULL;
#endif
return (0);
}
@ -179,10 +147,6 @@ cbq_clear_interface(cbq_state_t *cbqp)
cbqp->ifnp.root_ = NULL;
if (cl == cbqp->ifnp.default_)
cbqp->ifnp.default_ = NULL;
#ifdef ALTQ3_COMPAT
if (cl == cbqp->ifnp.ctl_)
cbqp->ifnp.ctl_ = NULL;
#endif
}
}
}
@ -512,10 +476,6 @@ cbq_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
cl = NULL;
if ((t = pf_find_mtag(m)) != NULL)
cl = clh_to_clp(cbqp, t->qid);
#ifdef ALTQ3_COMPAT
else if ((ifq->altq_flags & ALTQF_CLASSIFY) && pktattr != NULL)
cl = pktattr->pattr_class;
#endif
if (cl == NULL) {
cl = cbqp->ifnp.default_;
if (cl == NULL) {
@ -523,11 +483,6 @@ cbq_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
return (ENOBUFS);
}
}
#ifdef ALTQ3_COMPAT
if (pktattr != NULL)
cl->pktattr_ = pktattr; /* save proto hdr used by ECN */
else
#endif
cl->pktattr_ = NULL;
len = m_pktlen(m);
if (rmc_queue_packet(cl, m) != 0) {
@ -606,564 +561,5 @@ static void cbq_purge(cbq_state_t *cbqp)
if (ALTQ_IS_ENABLED(cbqp->ifnp.ifq_))
cbqp->ifnp.ifq_->ifq_len = 0;
}
#ifdef ALTQ3_COMPAT
static int
cbq_add_class(acp)
struct cbq_add_class *acp;
{
char *ifacename;
struct rm_class *borrow, *parent;
cbq_state_t *cbqp;
ifacename = acp->cbq_iface.cbq_ifacename;
if ((cbqp = altq_lookup(ifacename, ALTQT_CBQ)) == NULL)
return (EBADF);
/* check parameters */
if (acp->cbq_class.priority >= CBQ_MAXPRI ||
acp->cbq_class.maxq > CBQ_MAXQSIZE)
return (EINVAL);
/* Get pointers to parent and borrow classes. */
parent = clh_to_clp(cbqp, acp->cbq_class.parent_class_handle);
borrow = clh_to_clp(cbqp, acp->cbq_class.borrow_class_handle);
/*
* A class must borrow from it's parent or it can not
* borrow at all. Hence, borrow can be null.
*/
if (parent == NULL && (acp->cbq_class.flags & CBQCLF_ROOTCLASS) == 0) {
printf("cbq_add_class: no parent class!\n");
return (EINVAL);
}
if ((borrow != parent) && (borrow != NULL)) {
printf("cbq_add_class: borrow class != parent\n");
return (EINVAL);
}
return cbq_class_create(cbqp, acp, parent, borrow);
}
static int
cbq_delete_class(dcp)
struct cbq_delete_class *dcp;
{
char *ifacename;
struct rm_class *cl;
cbq_state_t *cbqp;
ifacename = dcp->cbq_iface.cbq_ifacename;
if ((cbqp = altq_lookup(ifacename, ALTQT_CBQ)) == NULL)
return (EBADF);
if ((cl = clh_to_clp(cbqp, dcp->cbq_class_handle)) == NULL)
return (EINVAL);
/* if we are a parent class, then return an error. */
if (is_a_parent_class(cl))
return (EINVAL);
/* if a filter has a reference to this class delete the filter */
acc_discard_filters(&cbqp->cbq_classifier, cl, 0);
return cbq_class_destroy(cbqp, cl);
}
static int
cbq_modify_class(acp)
struct cbq_modify_class *acp;
{
char *ifacename;
struct rm_class *cl;
cbq_state_t *cbqp;
ifacename = acp->cbq_iface.cbq_ifacename;
if ((cbqp = altq_lookup(ifacename, ALTQT_CBQ)) == NULL)
return (EBADF);
/* Get pointer to this class */
if ((cl = clh_to_clp(cbqp, acp->cbq_class_handle)) == NULL)
return (EINVAL);
if (rmc_modclass(cl, acp->cbq_class.nano_sec_per_byte,
acp->cbq_class.maxq, acp->cbq_class.maxidle,
acp->cbq_class.minidle, acp->cbq_class.offtime,
acp->cbq_class.pktsize) < 0)
return (EINVAL);
return (0);
}
/*
* struct rm_class *
* cbq_class_create(cbq_mod_state_t *cbqp, struct cbq_add_class *acp,
* struct rm_class *parent, struct rm_class *borrow)
*
* This function create a new traffic class in the CBQ class hierarchy of
* given parameters. The class that created is either the root, default,
* or a new dynamic class. If CBQ is not initilaized, the root class
* will be created.
*/
static int
cbq_class_create(cbqp, acp, parent, borrow)
cbq_state_t *cbqp;
struct cbq_add_class *acp;
struct rm_class *parent, *borrow;
{
struct rm_class *cl;
cbq_class_spec_t *spec = &acp->cbq_class;
u_int32_t chandle;
int i;
/*
* allocate class handle
*/
for (i = 1; i < CBQ_MAX_CLASSES; i++)
if (cbqp->cbq_class_tbl[i] == NULL)
break;
if (i == CBQ_MAX_CLASSES)
return (EINVAL);
chandle = i; /* use the slot number as class handle */
/*
* create a class. if this is a root class, initialize the
* interface.
*/
if ((spec->flags & CBQCLF_CLASSMASK) == CBQCLF_ROOTCLASS) {
rmc_init(cbqp->ifnp.ifq_, &cbqp->ifnp, spec->nano_sec_per_byte,
cbqrestart, spec->maxq, RM_MAXQUEUED,
spec->maxidle, spec->minidle, spec->offtime,
spec->flags);
cl = cbqp->ifnp.root_;
} else {
cl = rmc_newclass(spec->priority,
&cbqp->ifnp, spec->nano_sec_per_byte,
rmc_delay_action, spec->maxq, parent, borrow,
spec->maxidle, spec->minidle, spec->offtime,
spec->pktsize, spec->flags);
}
if (cl == NULL)
return (ENOMEM);
/* return handle to user space. */
acp->cbq_class_handle = chandle;
cl->stats_.handle = chandle;
cl->stats_.depth = cl->depth_;
/* save the allocated class */
cbqp->cbq_class_tbl[i] = cl;
if ((spec->flags & CBQCLF_CLASSMASK) == CBQCLF_DEFCLASS)
cbqp->ifnp.default_ = cl;
if ((spec->flags & CBQCLF_CLASSMASK) == CBQCLF_CTLCLASS)
cbqp->ifnp.ctl_ = cl;
return (0);
}
static int
cbq_add_filter(afp)
struct cbq_add_filter *afp;
{
char *ifacename;
cbq_state_t *cbqp;
struct rm_class *cl;
ifacename = afp->cbq_iface.cbq_ifacename;
if ((cbqp = altq_lookup(ifacename, ALTQT_CBQ)) == NULL)
return (EBADF);
/* Get the pointer to class. */
if ((cl = clh_to_clp(cbqp, afp->cbq_class_handle)) == NULL)
return (EINVAL);
return acc_add_filter(&cbqp->cbq_classifier, &afp->cbq_filter,
cl, &afp->cbq_filter_handle);
}
static int
cbq_delete_filter(dfp)
struct cbq_delete_filter *dfp;
{
char *ifacename;
cbq_state_t *cbqp;
ifacename = dfp->cbq_iface.cbq_ifacename;
if ((cbqp = altq_lookup(ifacename, ALTQT_CBQ)) == NULL)
return (EBADF);
return acc_delete_filter(&cbqp->cbq_classifier,
dfp->cbq_filter_handle);
}
/*
* cbq_clear_hierarchy deletes all classes and their filters on the
* given interface.
*/
static int
cbq_clear_hierarchy(ifacep)
struct cbq_interface *ifacep;
{
char *ifacename;
cbq_state_t *cbqp;
ifacename = ifacep->cbq_ifacename;
if ((cbqp = altq_lookup(ifacename, ALTQT_CBQ)) == NULL)
return (EBADF);
return cbq_clear_interface(cbqp);
}
/*
* static int
* cbq_set_enable(struct cbq_enable *ep) - this function processed the
* ioctl request to enable class based queueing. It searches the list
* of interfaces for the specified interface and then enables CBQ on
* that interface.
*
* Returns: 0, for no error.
* EBADF, for specified inteface not found.
*/
static int
cbq_set_enable(ep, enable)
struct cbq_interface *ep;
int enable;
{
int error = 0;
cbq_state_t *cbqp;
char *ifacename;
ifacename = ep->cbq_ifacename;
if ((cbqp = altq_lookup(ifacename, ALTQT_CBQ)) == NULL)
return (EBADF);
switch (enable) {
case ENABLE:
if (cbqp->ifnp.root_ == NULL || cbqp->ifnp.default_ == NULL ||
cbqp->ifnp.ctl_ == NULL) {
if (cbqp->ifnp.root_ == NULL)
printf("No Root Class for %s\n", ifacename);
if (cbqp->ifnp.default_ == NULL)
printf("No Default Class for %s\n", ifacename);
if (cbqp->ifnp.ctl_ == NULL)
printf("No Control Class for %s\n", ifacename);
error = EINVAL;
} else if ((error = altq_enable(cbqp->ifnp.ifq_)) == 0) {
cbqp->cbq_qlen = 0;
}
break;
case DISABLE:
error = altq_disable(cbqp->ifnp.ifq_);
break;
}
return (error);
}
static int
cbq_getstats(gsp)
struct cbq_getstats *gsp;
{
char *ifacename;
int i, n, nclasses;
cbq_state_t *cbqp;
struct rm_class *cl;
class_stats_t stats, *usp;
int error = 0;
ifacename = gsp->iface.cbq_ifacename;
nclasses = gsp->nclasses;
usp = gsp->stats;
if ((cbqp = altq_lookup(ifacename, ALTQT_CBQ)) == NULL)
return (EBADF);
if (nclasses <= 0)
return (EINVAL);
for (n = 0, i = 0; n < nclasses && i < CBQ_MAX_CLASSES; n++, i++) {
while ((cl = cbqp->cbq_class_tbl[i]) == NULL)
if (++i >= CBQ_MAX_CLASSES)
goto out;
get_class_stats(&stats, cl);
stats.handle = cl->stats_.handle;
if ((error = copyout((caddr_t)&stats, (caddr_t)usp++,
sizeof(stats))) != 0)
return (error);
}
out:
gsp->nclasses = n;
return (error);
}
static int
cbq_ifattach(ifacep)
struct cbq_interface *ifacep;
{
int error = 0;
char *ifacename;
cbq_state_t *new_cbqp;
struct ifnet *ifp;
ifacename = ifacep->cbq_ifacename;
if ((ifp = ifunit(ifacename)) == NULL)
return (ENXIO);
if (!ALTQ_IS_READY(&ifp->if_snd))
return (ENXIO);
/* allocate and initialize cbq_state_t */
new_cbqp = malloc(sizeof(cbq_state_t), M_DEVBUF, M_WAITOK);
if (new_cbqp == NULL)
return (ENOMEM);
bzero(new_cbqp, sizeof(cbq_state_t));
CALLOUT_INIT(&new_cbqp->cbq_callout);
new_cbqp->cbq_qlen = 0;
new_cbqp->ifnp.ifq_ = &ifp->if_snd; /* keep the ifq */
/*
* set CBQ to this ifnet structure.
*/
error = altq_attach(&ifp->if_snd, ALTQT_CBQ, new_cbqp,
cbq_enqueue, cbq_dequeue, cbq_request,
&new_cbqp->cbq_classifier, acc_classify);
if (error) {
free(new_cbqp, M_DEVBUF);
return (error);
}
/* prepend to the list of cbq_state_t's. */
new_cbqp->cbq_next = cbq_list;
cbq_list = new_cbqp;
return (0);
}
static int
cbq_ifdetach(ifacep)
struct cbq_interface *ifacep;
{
char *ifacename;
cbq_state_t *cbqp;
ifacename = ifacep->cbq_ifacename;
if ((cbqp = altq_lookup(ifacename, ALTQT_CBQ)) == NULL)
return (EBADF);
(void)cbq_set_enable(ifacep, DISABLE);
cbq_clear_interface(cbqp);
/* remove CBQ from the ifnet structure. */
(void)altq_detach(cbqp->ifnp.ifq_);
/* remove from the list of cbq_state_t's. */
if (cbq_list == cbqp)
cbq_list = cbqp->cbq_next;
else {
cbq_state_t *cp;
for (cp = cbq_list; cp != NULL; cp = cp->cbq_next)
if (cp->cbq_next == cbqp) {
cp->cbq_next = cbqp->cbq_next;
break;
}
ASSERT(cp != NULL);
}
/* deallocate cbq_state_t */
free(cbqp, M_DEVBUF);
return (0);
}
/*
* cbq device interface
*/
altqdev_decl(cbq);
int
cbqopen(dev, flag, fmt, p)
dev_t dev;
int flag, fmt;
#if (__FreeBSD_version > 500000)
struct thread *p;
#else
struct proc *p;
#endif
{
return (0);
}
int
cbqclose(dev, flag, fmt, p)
dev_t dev;
int flag, fmt;
#if (__FreeBSD_version > 500000)
struct thread *p;
#else
struct proc *p;
#endif
{
struct ifnet *ifp;
struct cbq_interface iface;
int err, error = 0;
while (cbq_list) {
ifp = cbq_list->ifnp.ifq_->altq_ifp;
sprintf(iface.cbq_ifacename, "%s", ifp->if_xname);
err = cbq_ifdetach(&iface);
if (err != 0 && error == 0)
error = err;
}
return (error);
}
int
cbqioctl(dev, cmd, addr, flag, p)
dev_t dev;
ioctlcmd_t cmd;
caddr_t addr;
int flag;
#if (__FreeBSD_version > 500000)
struct thread *p;
#else
struct proc *p;
#endif
{
int error = 0;
/* check cmd for superuser only */
switch (cmd) {
case CBQ_GETSTATS:
/* currently only command that an ordinary user can call */
break;
default:
#if (__FreeBSD_version > 700000)
error = priv_check(p, PRIV_ALTQ_MANAGE);
#elsif (__FreeBSD_version > 400000)
error = suser(p);
#else
error = suser(p->p_ucred, &p->p_acflag);
#endif
if (error)
return (error);
break;
}
switch (cmd) {
case CBQ_ENABLE:
error = cbq_set_enable((struct cbq_interface *)addr, ENABLE);
break;
case CBQ_DISABLE:
error = cbq_set_enable((struct cbq_interface *)addr, DISABLE);
break;
case CBQ_ADD_FILTER:
error = cbq_add_filter((struct cbq_add_filter *)addr);
break;
case CBQ_DEL_FILTER:
error = cbq_delete_filter((struct cbq_delete_filter *)addr);
break;
case CBQ_ADD_CLASS:
error = cbq_add_class((struct cbq_add_class *)addr);
break;
case CBQ_DEL_CLASS:
error = cbq_delete_class((struct cbq_delete_class *)addr);
break;
case CBQ_MODIFY_CLASS:
error = cbq_modify_class((struct cbq_modify_class *)addr);
break;
case CBQ_CLEAR_HIERARCHY:
error = cbq_clear_hierarchy((struct cbq_interface *)addr);
break;
case CBQ_IF_ATTACH:
error = cbq_ifattach((struct cbq_interface *)addr);
break;
case CBQ_IF_DETACH:
error = cbq_ifdetach((struct cbq_interface *)addr);
break;
case CBQ_GETSTATS:
error = cbq_getstats((struct cbq_getstats *)addr);
break;
default:
error = EINVAL;
break;
}
return error;
}
#if 0
/* for debug */
static void cbq_class_dump(int);
static void cbq_class_dump(i)
int i;
{
struct rm_class *cl;
rm_class_stats_t *s;
struct _class_queue_ *q;
if (cbq_list == NULL) {
printf("cbq_class_dump: no cbq_state found\n");
return;
}
cl = cbq_list->cbq_class_tbl[i];
printf("class %d cl=%p\n", i, cl);
if (cl != NULL) {
s = &cl->stats_;
q = cl->q_;
printf("pri=%d, depth=%d, maxrate=%d, allotment=%d\n",
cl->pri_, cl->depth_, cl->maxrate_, cl->allotment_);
printf("w_allotment=%d, bytes_alloc=%d, avgidle=%d, maxidle=%d\n",
cl->w_allotment_, cl->bytes_alloc_, cl->avgidle_,
cl->maxidle_);
printf("minidle=%d, offtime=%d, sleeping=%d, leaf=%d\n",
cl->minidle_, cl->offtime_, cl->sleeping_, cl->leaf_);
printf("handle=%d, depth=%d, packets=%d, bytes=%d\n",
s->handle, s->depth,
(int)s->xmit_cnt.packets, (int)s->xmit_cnt.bytes);
printf("over=%d\n, borrows=%d, drops=%d, overactions=%d, delays=%d\n",
s->over, s->borrows, (int)s->drop_cnt.packets,
s->overactions, s->delays);
printf("tail=%p, head=%p, qlen=%d, qlim=%d, qthresh=%d,qtype=%d\n",
q->tail_, q->head_, q->qlen_, q->qlim_,
q->qthresh_, q->qtype_);
}
}
#endif /* 0 */
#ifdef KLD_MODULE
static struct altqsw cbq_sw =
{"cbq", cbqopen, cbqclose, cbqioctl};
ALTQ_MODULE(altq_cbq, ALTQT_CBQ, &cbq_sw);
MODULE_DEPEND(altq_cbq, altq_red, 1, 1, 1);
MODULE_DEPEND(altq_cbq, altq_rio, 1, 1, 1);
#endif /* KLD_MODULE */
#endif /* ALTQ3_COMPAT */
#endif /* ALTQ_CBQ */

View File

@ -71,9 +71,6 @@ CTASSERT(CBQCLF_CODEL == RMCF_CODEL);
/* class flags for special classes */
#define CBQCLF_ROOTCLASS 0x1000 /* root class */
#define CBQCLF_DEFCLASS 0x2000 /* default class */
#ifdef ALTQ3_COMPAT
#define CBQCLF_CTLCLASS 0x4000 /* control class */
#endif
#define CBQCLF_CLASSMASK 0xf000 /* class mask */
#define CBQ_MAXQSIZE 200
@ -114,88 +111,6 @@ typedef struct _cbq_class_stats_ {
* header.
*/
#ifdef ALTQ3_COMPAT
/*
* Define structures associated with IOCTLS for cbq.
*/
/*
* Define the CBQ interface structure. This must be included in all
* IOCTL's such that the CBQ driver may find the appropriate CBQ module
* associated with the network interface to be affected.
*/
struct cbq_interface {
char cbq_ifacename[IFNAMSIZ];
};
typedef struct cbq_class_spec {
u_int priority;
u_int nano_sec_per_byte;
u_int maxq;
u_int maxidle;
int minidle;
u_int offtime;
u_int32_t parent_class_handle;
u_int32_t borrow_class_handle;
u_int pktsize;
int flags;
} cbq_class_spec_t;
struct cbq_add_class {
struct cbq_interface cbq_iface;
cbq_class_spec_t cbq_class;
u_int32_t cbq_class_handle;
};
struct cbq_delete_class {
struct cbq_interface cbq_iface;
u_int32_t cbq_class_handle;
};
struct cbq_modify_class {
struct cbq_interface cbq_iface;
cbq_class_spec_t cbq_class;
u_int32_t cbq_class_handle;
};
struct cbq_add_filter {
struct cbq_interface cbq_iface;
u_int32_t cbq_class_handle;
struct flow_filter cbq_filter;
u_long cbq_filter_handle;
};
struct cbq_delete_filter {
struct cbq_interface cbq_iface;
u_long cbq_filter_handle;
};
/* number of classes are returned in nclasses field */
struct cbq_getstats {
struct cbq_interface iface;
int nclasses;
class_stats_t *stats;
};
/*
* Define IOCTLs for CBQ.
*/
#define CBQ_IF_ATTACH _IOW('Q', 1, struct cbq_interface)
#define CBQ_IF_DETACH _IOW('Q', 2, struct cbq_interface)
#define CBQ_ENABLE _IOW('Q', 3, struct cbq_interface)
#define CBQ_DISABLE _IOW('Q', 4, struct cbq_interface)
#define CBQ_CLEAR_HIERARCHY _IOW('Q', 5, struct cbq_interface)
#define CBQ_ADD_CLASS _IOWR('Q', 7, struct cbq_add_class)
#define CBQ_DEL_CLASS _IOW('Q', 8, struct cbq_delete_class)
#define CBQ_MODIFY_CLASS _IOWR('Q', 9, struct cbq_modify_class)
#define CBQ_ADD_FILTER _IOWR('Q', 10, struct cbq_add_filter)
#define CBQ_DEL_FILTER _IOW('Q', 11, struct cbq_delete_filter)
#define CBQ_GETSTATS _IOWR('Q', 12, struct cbq_getstats)
#endif /* ALTQ3_COMPAT */
#ifdef _KERNEL
/*
@ -207,20 +122,11 @@ struct cbq_getstats {
#define CBQ_MAX_CLASSES 256
#ifdef ALTQ3_COMPAT
#define CBQ_MAX_FILTERS 256
#define DISABLE 0x00
#define ENABLE 0x01
#endif /* ALTQ3_COMPAT */
/*
* Define State structures.
*/
typedef struct cbqstate {
#ifdef ALTQ3_COMPAT
struct cbqstate *cbq_next;
#endif
int cbq_qlen; /* # of packets in cbq */
struct rm_class *cbq_class_tbl[CBQ_MAX_CLASSES];

File diff suppressed because it is too large Load Diff

View File

@ -70,9 +70,6 @@
#include <netpfil/pf/pf_mtag.h>
#include <net/altq/altq.h>
#include <net/altq/altq_hfsc.h>
#ifdef ALTQ3_COMPAT
#include <net/altq/altq_conf.h>
#endif
/*
* function prototypes
@ -137,23 +134,6 @@ static void get_class_stats_v1(struct hfsc_classstats_v1 *,
static struct hfsc_class *clh_to_clp(struct hfsc_if *, u_int32_t);
#ifdef ALTQ3_COMPAT
static struct hfsc_if *hfsc_attach(struct ifaltq *, u_int);
static int hfsc_detach(struct hfsc_if *);
static int hfsc_class_modify(struct hfsc_class *, struct service_curve *,
struct service_curve *, struct service_curve *);
static int hfsccmd_if_attach(struct hfsc_attach *);
static int hfsccmd_if_detach(struct hfsc_interface *);
static int hfsccmd_add_class(struct hfsc_add_class *);
static int hfsccmd_delete_class(struct hfsc_delete_class *);
static int hfsccmd_modify_class(struct hfsc_modify_class *);
static int hfsccmd_add_filter(struct hfsc_add_filter *);
static int hfsccmd_delete_filter(struct hfsc_delete_filter *);
static int hfsccmd_class_stats(struct hfsc_class_stats *);
altqdev_decl(hfsc);
#endif /* ALTQ3_COMPAT */
/*
* macros
@ -162,10 +142,6 @@ altqdev_decl(hfsc);
#define HT_INFINITY 0xffffffffffffffffULL /* infinite time value */
#ifdef ALTQ3_COMPAT
/* hif_list keeps all hfsc_if's allocated. */
static struct hfsc_if *hif_list = NULL;
#endif /* ALTQ3_COMPAT */
int
hfsc_pfattach(struct pf_altq *a)
@ -332,10 +308,6 @@ hfsc_clear_interface(struct hfsc_if *hif)
{
struct hfsc_class *cl;
#ifdef ALTQ3_COMPAT
/* free the filters for this interface */
acc_discard_filters(&hif->hif_classifier, NULL, 1);
#endif
/* clear out the classes */
while (hif->hif_rootclass != NULL &&
@ -597,10 +569,6 @@ hfsc_class_destroy(struct hfsc_class *cl)
s = splnet();
IFQ_LOCK(cl->cl_hif->hif_ifq);
#ifdef ALTQ3_COMPAT
/* delete filters referencing to this class */
acc_discard_filters(&cl->cl_hif->hif_classifier, cl, 0);
#endif /* ALTQ3_COMPAT */
if (!qempty(cl->cl_q))
hfsc_purgeq(cl);
@ -714,10 +682,6 @@ hfsc_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
cl = NULL;
if ((t = pf_find_mtag(m)) != NULL)
cl = clh_to_clp(hif, t->qid);
#ifdef ALTQ3_COMPAT
else if ((ifq->altq_flags & ALTQF_CLASSIFY) && pktattr != NULL)
cl = pktattr->pattr_class;
#endif
if (cl == NULL || is_a_parent_class(cl)) {
cl = hif->hif_defaultclass;
if (cl == NULL) {
@ -725,11 +689,6 @@ hfsc_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
return (ENOBUFS);
}
}
#ifdef ALTQ3_COMPAT
if (pktattr != NULL)
cl->cl_pktattr = pktattr; /* save proto hdr used by ECN */
else
#endif
cl->cl_pktattr = NULL;
len = m_pktlen(m);
if (hfsc_addq(cl, m) != 0) {
@ -1788,542 +1747,5 @@ clh_to_clp(struct hfsc_if *hif, u_int32_t chandle)
return (NULL);
}
#ifdef ALTQ3_COMPAT
static struct hfsc_if *
hfsc_attach(ifq, bandwidth)
struct ifaltq *ifq;
u_int bandwidth;
{
struct hfsc_if *hif;
hif = malloc(sizeof(struct hfsc_if), M_DEVBUF, M_WAITOK);
if (hif == NULL)
return (NULL);
bzero(hif, sizeof(struct hfsc_if));
hif->hif_eligible = ellist_alloc();
if (hif->hif_eligible == NULL) {
free(hif, M_DEVBUF);
return NULL;
}
hif->hif_ifq = ifq;
/* add this state to the hfsc list */
hif->hif_next = hif_list;
hif_list = hif;
return (hif);
}
static int
hfsc_detach(hif)
struct hfsc_if *hif;
{
(void)hfsc_clear_interface(hif);
(void)hfsc_class_destroy(hif->hif_rootclass);
/* remove this interface from the hif list */
if (hif_list == hif)
hif_list = hif->hif_next;
else {
struct hfsc_if *h;
for (h = hif_list; h != NULL; h = h->hif_next)
if (h->hif_next == hif) {
h->hif_next = hif->hif_next;
break;
}
ASSERT(h != NULL);
}
ellist_destroy(hif->hif_eligible);
free(hif, M_DEVBUF);
return (0);
}
static int
hfsc_class_modify(cl, rsc, fsc, usc)
struct hfsc_class *cl;
struct service_curve *rsc, *fsc, *usc;
{
struct internal_sc *rsc_tmp, *fsc_tmp, *usc_tmp;
u_int64_t cur_time;
int s;
rsc_tmp = fsc_tmp = usc_tmp = NULL;
if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0) &&
cl->cl_rsc == NULL) {
rsc_tmp = malloc(sizeof(struct internal_sc),
M_DEVBUF, M_WAITOK);
if (rsc_tmp == NULL)
return (ENOMEM);
}
if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0) &&
cl->cl_fsc == NULL) {
fsc_tmp = malloc(sizeof(struct internal_sc),
M_DEVBUF, M_WAITOK);
if (fsc_tmp == NULL) {
free(rsc_tmp);
return (ENOMEM);
}
}
if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0) &&
cl->cl_usc == NULL) {
usc_tmp = malloc(sizeof(struct internal_sc),
M_DEVBUF, M_WAITOK);
if (usc_tmp == NULL) {
free(rsc_tmp);
free(fsc_tmp);
return (ENOMEM);
}
}
cur_time = read_machclk();
s = splnet();
IFQ_LOCK(cl->cl_hif->hif_ifq);
if (rsc != NULL) {
if (rsc->m1 == 0 && rsc->m2 == 0) {
if (cl->cl_rsc != NULL) {
if (!qempty(cl->cl_q))
hfsc_purgeq(cl);
free(cl->cl_rsc, M_DEVBUF);
cl->cl_rsc = NULL;
}
} else {
if (cl->cl_rsc == NULL)
cl->cl_rsc = rsc_tmp;
sc2isc(rsc, cl->cl_rsc);
rtsc_init(&cl->cl_deadline, cl->cl_rsc, cur_time,
cl->cl_cumul);
cl->cl_eligible = cl->cl_deadline;
if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
cl->cl_eligible.dx = 0;
cl->cl_eligible.dy = 0;
}
}
}
if (fsc != NULL) {
if (fsc->m1 == 0 && fsc->m2 == 0) {
if (cl->cl_fsc != NULL) {
if (!qempty(cl->cl_q))
hfsc_purgeq(cl);
free(cl->cl_fsc, M_DEVBUF);
cl->cl_fsc = NULL;
}
} else {
if (cl->cl_fsc == NULL)
cl->cl_fsc = fsc_tmp;
sc2isc(fsc, cl->cl_fsc);
rtsc_init(&cl->cl_virtual, cl->cl_fsc, cl->cl_vt,
cl->cl_total);
}
}
if (usc != NULL) {
if (usc->m1 == 0 && usc->m2 == 0) {
if (cl->cl_usc != NULL) {
free(cl->cl_usc, M_DEVBUF);
cl->cl_usc = NULL;
cl->cl_myf = 0;
}
} else {
if (cl->cl_usc == NULL)
cl->cl_usc = usc_tmp;
sc2isc(usc, cl->cl_usc);
rtsc_init(&cl->cl_ulimit, cl->cl_usc, cur_time,
cl->cl_total);
}
}
if (!qempty(cl->cl_q)) {
if (cl->cl_rsc != NULL)
update_ed(cl, m_pktlen(qhead(cl->cl_q)));
if (cl->cl_fsc != NULL)
update_vf(cl, 0, cur_time);
/* is this enough? */
}
IFQ_UNLOCK(cl->cl_hif->hif_ifq);
splx(s);
return (0);
}
/*
* hfsc device interface
*/
int
hfscopen(dev, flag, fmt, p)
dev_t dev;
int flag, fmt;
#if (__FreeBSD_version > 500000)
struct thread *p;
#else
struct proc *p;
#endif
{
if (machclk_freq == 0)
init_machclk();
if (machclk_freq == 0) {
printf("hfsc: no cpu clock available!\n");
return (ENXIO);
}
/* everything will be done when the queueing scheme is attached. */
return 0;
}
int
hfscclose(dev, flag, fmt, p)
dev_t dev;
int flag, fmt;
#if (__FreeBSD_version > 500000)
struct thread *p;
#else
struct proc *p;
#endif
{
struct hfsc_if *hif;
int err, error = 0;
while ((hif = hif_list) != NULL) {
/* destroy all */
if (ALTQ_IS_ENABLED(hif->hif_ifq))
altq_disable(hif->hif_ifq);
err = altq_detach(hif->hif_ifq);
if (err == 0)
err = hfsc_detach(hif);
if (err != 0 && error == 0)
error = err;
}
return error;
}
int
hfscioctl(dev, cmd, addr, flag, p)
dev_t dev;
ioctlcmd_t cmd;
caddr_t addr;
int flag;
#if (__FreeBSD_version > 500000)
struct thread *p;
#else
struct proc *p;
#endif
{
struct hfsc_if *hif;
struct hfsc_interface *ifacep;
int error = 0;
/* check super-user privilege */
switch (cmd) {
case HFSC_GETSTATS:
break;
default:
#if (__FreeBSD_version > 700000)
if ((error = priv_check(p, PRIV_ALTQ_MANAGE)) != 0)
return (error);
#elsif (__FreeBSD_version > 400000)
if ((error = suser(p)) != 0)
return (error);
#else
if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
return (error);
#endif
break;
}
switch (cmd) {
case HFSC_IF_ATTACH:
error = hfsccmd_if_attach((struct hfsc_attach *)addr);
break;
case HFSC_IF_DETACH:
error = hfsccmd_if_detach((struct hfsc_interface *)addr);
break;
case HFSC_ENABLE:
case HFSC_DISABLE:
case HFSC_CLEAR_HIERARCHY:
ifacep = (struct hfsc_interface *)addr;
if ((hif = altq_lookup(ifacep->hfsc_ifname,
ALTQT_HFSC)) == NULL) {
error = EBADF;
break;
}
switch (cmd) {
case HFSC_ENABLE:
if (hif->hif_defaultclass == NULL) {
#ifdef ALTQ_DEBUG
printf("hfsc: no default class\n");
#endif
error = EINVAL;
break;
}
error = altq_enable(hif->hif_ifq);
break;
case HFSC_DISABLE:
error = altq_disable(hif->hif_ifq);
break;
case HFSC_CLEAR_HIERARCHY:
hfsc_clear_interface(hif);
break;
}
break;
case HFSC_ADD_CLASS:
error = hfsccmd_add_class((struct hfsc_add_class *)addr);
break;
case HFSC_DEL_CLASS:
error = hfsccmd_delete_class((struct hfsc_delete_class *)addr);
break;
case HFSC_MOD_CLASS:
error = hfsccmd_modify_class((struct hfsc_modify_class *)addr);
break;
case HFSC_ADD_FILTER:
error = hfsccmd_add_filter((struct hfsc_add_filter *)addr);
break;
case HFSC_DEL_FILTER:
error = hfsccmd_delete_filter((struct hfsc_delete_filter *)addr);
break;
case HFSC_GETSTATS:
error = hfsccmd_class_stats((struct hfsc_class_stats *)addr);
break;
default:
error = EINVAL;
break;
}
return error;
}
static int
hfsccmd_if_attach(ap)
struct hfsc_attach *ap;
{
struct hfsc_if *hif;
struct ifnet *ifp;
int error;
if ((ifp = ifunit(ap->iface.hfsc_ifname)) == NULL)
return (ENXIO);
if ((hif = hfsc_attach(&ifp->if_snd, ap->bandwidth)) == NULL)
return (ENOMEM);
/*
* set HFSC to this ifnet structure.
*/
if ((error = altq_attach(&ifp->if_snd, ALTQT_HFSC, hif,
hfsc_enqueue, hfsc_dequeue, hfsc_request,
&hif->hif_classifier, acc_classify)) != 0)
(void)hfsc_detach(hif);
return (error);
}
static int
hfsccmd_if_detach(ap)
struct hfsc_interface *ap;
{
struct hfsc_if *hif;
int error;
if ((hif = altq_lookup(ap->hfsc_ifname, ALTQT_HFSC)) == NULL)
return (EBADF);
if (ALTQ_IS_ENABLED(hif->hif_ifq))
altq_disable(hif->hif_ifq);
if ((error = altq_detach(hif->hif_ifq)))
return (error);
return hfsc_detach(hif);
}
/*
 * ALTQ3 ioctl backend: create a new HFSC class under the given parent.
 * The new class handle (a free slot index in hif_class_tbl) is written
 * back into ap->class_handle.
 */
static int
hfsccmd_add_class(ap)
    struct hfsc_add_class *ap;
{
    struct hfsc_if *hif;
    struct hfsc_class *cl, *parent;
    int i;

    if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
        return (EBADF);

    /* a null parent handle is only accepted for the root class */
    if (ap->parent_handle == HFSC_NULLCLASS_HANDLE &&
        hif->hif_rootclass == NULL)
        parent = NULL;
    else if ((parent = clh_to_clp(hif, ap->parent_handle)) == NULL)
        return (EINVAL);

    /* assign a class handle (use a free slot number for now) */
    for (i = 1; i < HFSC_MAX_CLASSES; i++)
        if (hif->hif_class_tbl[i] == NULL)
            break;
    if (i == HFSC_MAX_CLASSES)
        return (EBUSY);     /* class table is full */

    if ((cl = hfsc_class_create(hif, &ap->service_curve, NULL, NULL,
        parent, ap->qlimit, ap->flags, i)) == NULL)
        return (ENOMEM);

    /* return a class handle to the user */
    ap->class_handle = i;

    return (0);
}
/*
 * ALTQ3 ioctl backend: destroy the HFSC class named by ap->class_handle.
 * Returns EBADF (no such interface) or EINVAL (no such class).
 */
static int
hfsccmd_delete_class(ap)
    struct hfsc_delete_class *ap;
{
    struct hfsc_if *hif;
    struct hfsc_class *cl;

    if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
        return (EBADF);

    if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
        return (EINVAL);

    return hfsc_class_destroy(cl);
}
/*
 * ALTQ3 ioctl backend: replace one or more service curves of a class.
 * ap->sctype selects which of the realtime / link-sharing / upper-limit
 * curves receive ap->service_curve; unselected curves are left NULL
 * (unchanged by hfsc_class_modify).
 */
static int
hfsccmd_modify_class(ap)
    struct hfsc_modify_class *ap;
{
    struct hfsc_if *hif;
    struct hfsc_class *cl;
    struct service_curve *rsc = NULL;
    struct service_curve *fsc = NULL;
    struct service_curve *usc = NULL;

    if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
        return (EBADF);

    if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
        return (EINVAL);

    if (ap->sctype & HFSC_REALTIMESC)
        rsc = &ap->service_curve;
    if (ap->sctype & HFSC_LINKSHARINGSC)
        fsc = &ap->service_curve;
    if (ap->sctype & HFSC_UPPERLIMITSC)
        usc = &ap->service_curve;

    return hfsc_class_modify(cl, rsc, fsc, usc);
}
/*
 * ALTQ3 ioctl backend: attach a classifier filter to a leaf class.
 * Filters may only target leaf classes; the assigned filter handle is
 * written back through ap->filter_handle.
 */
static int
hfsccmd_add_filter(ap)
    struct hfsc_add_filter *ap;
{
    struct hfsc_if *hif;
    struct hfsc_class *cl;

    if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
        return (EBADF);

    if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
        return (EINVAL);

    if (is_a_parent_class(cl)) {
#ifdef ALTQ_DEBUG
        printf("hfsccmd_add_filter: not a leaf class!\n");
#endif
        return (EINVAL);
    }

    return acc_add_filter(&hif->hif_classifier, &ap->filter,
        cl, &ap->filter_handle);
}
/*
 * ALTQ3 ioctl backend: remove a classifier filter by its handle.
 */
static int
hfsccmd_delete_filter(ap)
    struct hfsc_delete_filter *ap;
{
    struct hfsc_if *hif;

    if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
        return (EBADF);

    return acc_delete_filter(&hif->hif_classifier,
        ap->filter_handle);
}
/*
 * ALTQ3 ioctl backend: copy per-class statistics out to userland.
 * Walks the class tree with hfsc_nextclass(), skipping the first
 * ap->nskip classes and then copying up to ap->nclasses entries into
 * the user-supplied stats array.  The count actually written is
 * returned in ap->nclasses.
 */
static int
hfsccmd_class_stats(ap)
    struct hfsc_class_stats *ap;
{
    struct hfsc_if *hif;
    struct hfsc_class *cl;
    struct hfsc_classstats stats, *usp;
    int n, nclasses, error;

    if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
        return (EBADF);

    ap->cur_time = read_machclk();
    ap->machclk_freq = machclk_freq;
    ap->hif_classes = hif->hif_classes;
    ap->hif_packets = hif->hif_packets;

    /* skip the first N classes in the tree */
    nclasses = ap->nskip;
    for (cl = hif->hif_rootclass, n = 0; cl != NULL && n < nclasses;
         cl = hfsc_nextclass(cl), n++)
        ;
    if (n != nclasses)
        return (EINVAL);    /* tree has fewer classes than nskip */

    /* then, read the next N classes in the tree */
    nclasses = ap->nclasses;
    usp = ap->stats;
    for (n = 0; cl != NULL && n < nclasses; cl = hfsc_nextclass(cl), n++) {
        get_class_stats(&stats, cl);

        if ((error = copyout((caddr_t)&stats, (caddr_t)usp++,
            sizeof(stats))) != 0)
            return (error);
    }

    /* report how many entries were actually filled in */
    ap->nclasses = n;

    return (0);
}
#ifdef KLD_MODULE
static struct altqsw hfsc_sw =
{"hfsc", hfscopen, hfscclose, hfscioctl};
ALTQ_MODULE(altq_hfsc, ALTQT_HFSC, &hfsc_sw);
MODULE_DEPEND(altq_hfsc, altq_red, 1, 1, 1);
MODULE_DEPEND(altq_hfsc, altq_rio, 1, 1, 1);
#endif /* KLD_MODULE */
#endif /* ALTQ3_COMPAT */
#endif /* ALTQ_HFSC */

View File

@ -168,74 +168,6 @@ struct hfsc_classstats_v1 {
* header.
*/
#ifdef ALTQ3_COMPAT
/*
 * ALTQ3-compatibility userland API for HFSC: argument structures and
 * ioctl command numbers used by the /dev queueing-discipline device.
 */

/* interface selector shared by all HFSC ioctls */
struct hfsc_interface {
    char	hfsc_ifname[IFNAMSIZ];	    /* interface name (e.g., fxp0) */
};

struct hfsc_attach {
    struct	hfsc_interface	iface;
    u_int	bandwidth;		/* link bandwidth in bits/sec */
};

struct hfsc_add_class {
    struct	hfsc_interface	iface;
    u_int32_t	parent_handle;
    struct	service_curve	service_curve;
    int	qlimit;
    int	flags;

    u_int32_t	class_handle;		/* return value */
};

struct hfsc_delete_class {
    struct	hfsc_interface	iface;
    u_int32_t	class_handle;
};

struct hfsc_modify_class {
    struct	hfsc_interface	iface;
    u_int32_t	class_handle;
    struct	service_curve	service_curve;
    int	sctype;			/* which curve(s): HFSC_*SC bits */
};

struct hfsc_add_filter {
    struct	hfsc_interface	iface;
    u_int32_t	class_handle;
    struct	flow_filter	filter;

    u_long	filter_handle;		/* return value */
};

struct hfsc_delete_filter {
    struct	hfsc_interface	iface;
    u_long	filter_handle;
};

struct hfsc_class_stats {
    struct	hfsc_interface	iface;
    int	nskip;			/* skip # of classes */
    int	nclasses;		/* # of class stats (WR) */
    u_int64_t	cur_time;	/* current time */
    u_int32_t	machclk_freq;	/* machine clock frequency */
    u_int	hif_classes;	/* # of classes in the tree */
    u_int	hif_packets;	/* # of packets in the tree */
    struct hfsc_classstats	*stats;	/* pointer to stats array */
};

/* ioctl command numbers (group 'Q'); note 6 is unused */
#define	HFSC_IF_ATTACH		_IOW('Q', 1, struct hfsc_attach)
#define	HFSC_IF_DETACH		_IOW('Q', 2, struct hfsc_interface)
#define	HFSC_ENABLE		_IOW('Q', 3, struct hfsc_interface)
#define	HFSC_DISABLE		_IOW('Q', 4, struct hfsc_interface)
#define	HFSC_CLEAR_HIERARCHY	_IOW('Q', 5, struct hfsc_interface)
#define	HFSC_ADD_CLASS		_IOWR('Q', 7, struct hfsc_add_class)
#define	HFSC_DEL_CLASS		_IOW('Q', 8, struct hfsc_delete_class)
#define	HFSC_MOD_CLASS		_IOW('Q', 9, struct hfsc_modify_class)
#define	HFSC_ADD_FILTER		_IOWR('Q', 10, struct hfsc_add_filter)
#define	HFSC_DEL_FILTER		_IOW('Q', 11, struct hfsc_delete_filter)
#define	HFSC_GETSTATS		_IOWR('Q', 12, struct hfsc_class_stats)
#endif /* ALTQ3_COMPAT */
#ifdef _KERNEL
/*

View File

@ -55,18 +55,11 @@
#include <netpfil/pf/pf_altq.h>
#include <netpfil/pf/pf_mtag.h>
#include <net/altq/altq.h>
#ifdef ALTQ3_COMPAT
#include <net/altq/altq_conf.h>
#endif
#include <net/altq/altq_priq.h>
/*
* function prototypes
*/
#ifdef ALTQ3_COMPAT
static struct priq_if *priq_attach(struct ifaltq *, u_int);
static int priq_detach(struct priq_if *);
#endif
static int priq_clear_interface(struct priq_if *);
static int priq_request(struct ifaltq *, int, void *);
static void priq_purge(struct priq_if *);
@ -81,26 +74,10 @@ static struct mbuf *priq_getq(struct priq_class *);
static struct mbuf *priq_pollq(struct priq_class *);
static void priq_purgeq(struct priq_class *);
#ifdef ALTQ3_COMPAT
static int priqcmd_if_attach(struct priq_interface *);
static int priqcmd_if_detach(struct priq_interface *);
static int priqcmd_add_class(struct priq_add_class *);
static int priqcmd_delete_class(struct priq_delete_class *);
static int priqcmd_modify_class(struct priq_modify_class *);
static int priqcmd_add_filter(struct priq_add_filter *);
static int priqcmd_delete_filter(struct priq_delete_filter *);
static int priqcmd_class_stats(struct priq_class_stats *);
#endif /* ALTQ3_COMPAT */
static void get_class_stats(struct priq_classstats *, struct priq_class *);
static struct priq_class *clh_to_clp(struct priq_if *, u_int32_t);
#ifdef ALTQ3_COMPAT
altqdev_decl(priq);
/* pif_list keeps all priq_if's allocated. */
static struct priq_if *pif_list = NULL;
#endif /* ALTQ3_COMPAT */
int
priq_pfattach(struct pf_altq *a)
@ -489,10 +466,6 @@ priq_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
cl = NULL;
if ((t = pf_find_mtag(m)) != NULL)
cl = clh_to_clp(pif, t->qid);
#ifdef ALTQ3_COMPAT
else if ((ifq->altq_flags & ALTQF_CLASSIFY) && pktattr != NULL)
cl = pktattr->pattr_class;
#endif
if (cl == NULL) {
cl = pif->pif_default;
if (cl == NULL) {
@ -500,11 +473,6 @@ priq_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
return (ENOBUFS);
}
}
#ifdef ALTQ3_COMPAT
if (pktattr != NULL)
cl->cl_pktattr = pktattr; /* save proto hdr used by ECN */
else
#endif
cl->cl_pktattr = NULL;
len = m_pktlen(m);
if (priq_addq(cl, m) != 0) {
@ -674,397 +642,4 @@ clh_to_clp(struct priq_if *pif, u_int32_t chandle)
}
#ifdef ALTQ3_COMPAT
/*
 * Allocate and initialize PRIQ state for the given ALTQ queue and link
 * bandwidth (bits/sec), and link it onto the global pif_list.
 * Returns NULL on allocation failure.
 */
static struct priq_if *
priq_attach(ifq, bandwidth)
    struct ifaltq *ifq;
    u_int bandwidth;
{
    struct priq_if *pif;

    pif = malloc(sizeof(struct priq_if),
           M_DEVBUF, M_WAITOK);
    if (pif == NULL)
        return (NULL);
    bzero(pif, sizeof(struct priq_if));
    pif->pif_bandwidth = bandwidth;
    pif->pif_maxpri = -1;	/* no classes configured yet */
    pif->pif_ifq = ifq;

    /* add this state to the priq list */
    pif->pif_next = pif_list;
    pif_list = pif;

    return (pif);
}
/*
 * Tear down PRIQ state: destroy all classes, unlink the entry from the
 * global pif_list, and free it.  Always returns 0.
 */
static int
priq_detach(pif)
    struct priq_if *pif;
{
    (void)priq_clear_interface(pif);

    /* remove this interface from the pif list */
    if (pif_list == pif)
        pif_list = pif->pif_next;
    else {
        struct priq_if *p;

        /* unlink from the middle of the singly-linked list */
        for (p = pif_list; p != NULL; p = p->pif_next)
            if (p->pif_next == pif) {
                p->pif_next = pif->pif_next;
                break;
            }
        ASSERT(p != NULL);
    }

    free(pif, M_DEVBUF);
    return (0);
}
/*
* priq device interface
*/
/*
 * priq device open: a no-op; all real setup happens when the queueing
 * scheme is attached via ioctl.
 */
int
priqopen(dev, flag, fmt, p)
    dev_t dev;
    int flag, fmt;
#if (__FreeBSD_version > 500000)
    struct thread *p;
#else
    struct proc *p;
#endif
{
    /* everything will be done when the queueing scheme is attached. */
    return 0;
}
/*
 * priq device close: destroy every attached PRIQ instance.
 * Remembers the first error encountered but keeps going so that all
 * instances are torn down.
 */
int
priqclose(dev, flag, fmt, p)
    dev_t dev;
    int flag, fmt;
#if (__FreeBSD_version > 500000)
    struct thread *p;
#else
    struct proc *p;
#endif
{
    struct priq_if *pif;
    int err, error = 0;

    while ((pif = pif_list) != NULL) {
        /* destroy all */
        if (ALTQ_IS_ENABLED(pif->pif_ifq))
            altq_disable(pif->pif_ifq);
        err = altq_detach(pif->pif_ifq);
        if (err == 0)
            err = priq_detach(pif);
        if (err != 0 && error == 0)
            error = err;	/* keep the first failure */
    }

    return error;
}
/*
 * priq device ioctl handler (ALTQ3 compatibility interface).
 * Dispatches the PRIQ_* commands: attach/detach, enable/disable/clear,
 * class and filter management, and statistics retrieval.
 * All commands except PRIQ_GETSTATS require management privilege.
 *
 * Fix: the privilege-check conditional used "#elsif", which is not a
 * C preprocessor directive ("#elif" is) and made this file fail to
 * compile whenever ALTQ3_COMPAT was defined.
 */
int
priqioctl(dev, cmd, addr, flag, p)
    dev_t dev;
    ioctlcmd_t cmd;
    caddr_t addr;
    int flag;
#if (__FreeBSD_version > 500000)
    struct thread *p;
#else
    struct proc *p;
#endif
{
    struct priq_if *pif;
    struct priq_interface *ifacep;
    int error = 0;

    /* check super-user privilege */
    switch (cmd) {
    case PRIQ_GETSTATS:
        break;	/* reading stats needs no privilege */
    default:
#if (__FreeBSD_version > 700000)
        if ((error = priv_check(p, PRIV_ALTQ_MANAGE)) != 0)
            return (error);
#elif (__FreeBSD_version > 400000)
        if ((error = suser(p)) != 0)
            return (error);
#else
        if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
            return (error);
#endif
        break;
    }

    switch (cmd) {
    case PRIQ_IF_ATTACH:
        error = priqcmd_if_attach((struct priq_interface *)addr);
        break;

    case PRIQ_IF_DETACH:
        error = priqcmd_if_detach((struct priq_interface *)addr);
        break;

    case PRIQ_ENABLE:
    case PRIQ_DISABLE:
    case PRIQ_CLEAR:
        /* these three share the interface-lookup preamble */
        ifacep = (struct priq_interface *)addr;
        if ((pif = altq_lookup(ifacep->ifname,
                       ALTQT_PRIQ)) == NULL) {
            error = EBADF;
            break;
        }

        switch (cmd) {
        case PRIQ_ENABLE:
            if (pif->pif_default == NULL) {
#ifdef ALTQ_DEBUG
                printf("priq: no default class\n");
#endif
                error = EINVAL;
                break;
            }
            error = altq_enable(pif->pif_ifq);
            break;

        case PRIQ_DISABLE:
            error = altq_disable(pif->pif_ifq);
            break;

        case PRIQ_CLEAR:
            priq_clear_interface(pif);
            break;
        }
        break;

    case PRIQ_ADD_CLASS:
        error = priqcmd_add_class((struct priq_add_class *)addr);
        break;

    case PRIQ_DEL_CLASS:
        error = priqcmd_delete_class((struct priq_delete_class *)addr);
        break;

    case PRIQ_MOD_CLASS:
        error = priqcmd_modify_class((struct priq_modify_class *)addr);
        break;

    case PRIQ_ADD_FILTER:
        error = priqcmd_add_filter((struct priq_add_filter *)addr);
        break;

    case PRIQ_DEL_FILTER:
        error = priqcmd_delete_filter((struct priq_delete_filter *)addr);
        break;

    case PRIQ_GETSTATS:
        error = priqcmd_class_stats((struct priq_class_stats *)addr);
        break;

    default:
        error = EINVAL;
        break;
    }
    return error;
}
/*
 * ALTQ3 ioctl backend: attach PRIQ to the named interface.
 * Returns 0, ENXIO (no such interface), ENOMEM, or altq_attach()'s error.
 */
static int
priqcmd_if_attach(ap)
    struct priq_interface *ap;
{
    struct priq_if *pif;
    struct ifnet *ifp;
    int error;

    if ((ifp = ifunit(ap->ifname)) == NULL)
        return (ENXIO);

    if ((pif = priq_attach(&ifp->if_snd, ap->arg)) == NULL)
        return (ENOMEM);

    /*
     * set PRIQ to this ifnet structure.
     */
    if ((error = altq_attach(&ifp->if_snd, ALTQT_PRIQ, pif,
        priq_enqueue, priq_dequeue, priq_request,
        &pif->pif_classifier, acc_classify)) != 0)
        (void)priq_detach(pif);		/* undo the attach on failure */

    return (error);
}
/*
 * ALTQ3 ioctl backend: detach PRIQ from the named interface.
 * Disables the queue first when enabled, then detaches and frees state.
 */
static int
priqcmd_if_detach(ap)
    struct priq_interface *ap;
{
    struct priq_if *pif;
    int error;

    if ((pif = altq_lookup(ap->ifname, ALTQT_PRIQ)) == NULL)
        return (EBADF);

    if (ALTQ_IS_ENABLED(pif->pif_ifq))
        altq_disable(pif->pif_ifq);

    if ((error = altq_detach(pif->pif_ifq)))
        return (error);

    return priq_detach(pif);
}
/*
 * ALTQ3 ioctl backend: create a PRIQ class at priority ap->pri.
 * Exactly one class may exist per priority; the class handle (pri + 1)
 * is written back through ap->class_handle.
 */
static int
priqcmd_add_class(ap)
    struct priq_add_class *ap;
{
    struct priq_if *pif;
    struct priq_class *cl;
    int qid;

    if ((pif = altq_lookup(ap->iface.ifname, ALTQT_PRIQ)) == NULL)
        return (EBADF);

    if (ap->pri < 0 || ap->pri >= PRIQ_MAXPRI)
        return (EINVAL);
    if (pif->pif_classes[ap->pri] != NULL)
        return (EBUSY);		/* priority slot already taken */

    qid = ap->pri + 1;
    if ((cl = priq_class_create(pif, ap->pri,
        ap->qlimit, ap->flags, qid)) == NULL)
        return (ENOMEM);

    /* return a class handle to the user */
    ap->class_handle = cl->cl_handle;

    return (0);
}
/*
 * ALTQ3 ioctl backend: destroy the PRIQ class named by ap->class_handle.
 */
static int
priqcmd_delete_class(ap)
    struct priq_delete_class *ap;
{
    struct priq_if *pif;
    struct priq_class *cl;

    if ((pif = altq_lookup(ap->iface.ifname, ALTQT_PRIQ)) == NULL)
        return (EBADF);

    if ((cl = clh_to_clp(pif, ap->class_handle)) == NULL)
        return (EINVAL);

    return priq_class_destroy(cl);
}
/*
 * ALTQ3 ioctl backend: modify a PRIQ class, possibly moving it to a new
 * priority.  The class is re-created via priq_class_create() with the
 * same handle to apply the new qlimit/flags.
 */
static int
priqcmd_modify_class(ap)
    struct priq_modify_class *ap;
{
    struct priq_if *pif;
    struct priq_class *cl;

    if ((pif = altq_lookup(ap->iface.ifname, ALTQT_PRIQ)) == NULL)
        return (EBADF);

    if (ap->pri < 0 || ap->pri >= PRIQ_MAXPRI)
        return (EINVAL);

    if ((cl = clh_to_clp(pif, ap->class_handle)) == NULL)
        return (EINVAL);

    /*
     * if priority is changed, move the class to the new priority
     */
    if (pif->pif_classes[ap->pri] != cl) {
        if (pif->pif_classes[ap->pri] != NULL)
            return (EEXIST);	/* target priority occupied */
        pif->pif_classes[cl->cl_pri] = NULL;
        pif->pif_classes[ap->pri] = cl;
        cl->cl_pri = ap->pri;
    }

    /* call priq_class_create to change class parameters */
    if ((cl = priq_class_create(pif, ap->pri,
        ap->qlimit, ap->flags, ap->class_handle)) == NULL)
        return (ENOMEM);

    return 0;
}
/*
 * ALTQ3 ioctl backend: attach a classifier filter to a PRIQ class.
 * The assigned filter handle is written back through ap->filter_handle.
 */
static int
priqcmd_add_filter(ap)
    struct priq_add_filter *ap;
{
    struct priq_if *pif;
    struct priq_class *cl;

    if ((pif = altq_lookup(ap->iface.ifname, ALTQT_PRIQ)) == NULL)
        return (EBADF);

    if ((cl = clh_to_clp(pif, ap->class_handle)) == NULL)
        return (EINVAL);

    return acc_add_filter(&pif->pif_classifier, &ap->filter,
        cl, &ap->filter_handle);
}
/*
 * ALTQ3 ioctl backend: remove a classifier filter by its handle.
 */
static int
priqcmd_delete_filter(ap)
    struct priq_delete_filter *ap;
{
    struct priq_if *pif;

    if ((pif = altq_lookup(ap->iface.ifname, ALTQT_PRIQ)) == NULL)
        return (EBADF);

    return acc_delete_filter(&pif->pif_classifier,
        ap->filter_handle);
}
/*
 * ALTQ3 ioctl backend: copy per-priority class statistics to userland.
 * Copies one priq_classstats entry per priority 0..pif_maxpri into the
 * user-supplied array; empty slots are zero-filled.
 */
static int
priqcmd_class_stats(ap)
    struct priq_class_stats *ap;
{
    struct priq_if *pif;
    struct priq_class *cl;
    struct priq_classstats stats, *usp;
    int pri, error;

    if ((pif = altq_lookup(ap->iface.ifname, ALTQT_PRIQ)) == NULL)
        return (EBADF);

    ap->maxpri = pif->pif_maxpri;

    /* then, read the next N classes in the tree */
    usp = ap->stats;
    for (pri = 0; pri <= pif->pif_maxpri; pri++) {
        cl = pif->pif_classes[pri];
        if (cl != NULL)
            get_class_stats(&stats, cl);
        else
            bzero(&stats, sizeof(stats));	/* no class at this pri */
        if ((error = copyout((caddr_t)&stats, (caddr_t)usp++,
                     sizeof(stats))) != 0)
            return (error);
    }
    return (0);
}
#ifdef KLD_MODULE
static struct altqsw priq_sw =
{"priq", priqopen, priqclose, priqioctl};
ALTQ_MODULE(altq_priq, ALTQT_PRIQ, &priq_sw);
MODULE_DEPEND(altq_priq, altq_red, 1, 1, 1);
MODULE_DEPEND(altq_priq, altq_rio, 1, 1, 1);
#endif /* KLD_MODULE */
#endif /* ALTQ3_COMPAT */
#endif /* ALTQ_PRIQ */

View File

@ -42,21 +42,6 @@ extern "C" {
#define PRIQ_MAXPRI 16 /* upper limit of the number of priorities */
#ifdef ALTQ3_COMPAT
/*
 * ALTQ3-compatibility userland API for PRIQ (part 1): interface
 * selector and class-creation argument structure.
 */
struct	priq_interface {
    char	ifname[IFNAMSIZ];	/* interface name (e.g., fxp0) */
    u_long	arg;			/* request-specific argument */
};

struct	priq_add_class {
    struct priq_interface	iface;
    int	pri;	/* priority (0 is the lowest) */
    int	qlimit;	/* queue size limit */
    int	flags;	/* misc flags (see below) */

    u_int32_t	class_handle;	/* return value */
};
#endif /* ALTQ3_COMPAT */
/* priq class flags */
#define PRCF_RED 0x0001 /* use RED */
@ -69,33 +54,6 @@ struct priq_add_class {
/* special class handles */
#define PRIQ_NULLCLASS_HANDLE 0
#ifdef ALTQ3_COMPAT
/*
 * ALTQ3-compatibility userland API for PRIQ (part 2): class and filter
 * management argument structures.
 */
struct	priq_delete_class {
    struct priq_interface	iface;
    u_int32_t	class_handle;
};

struct	priq_modify_class {
    struct priq_interface	iface;
    u_int32_t	class_handle;
    int	pri;
    int	qlimit;
    int	flags;
};

struct	priq_add_filter {
    struct priq_interface	iface;
    u_int32_t	class_handle;
    struct flow_filter	filter;

    u_long	filter_handle;	/* return value */
};

struct	priq_delete_filter {
    struct priq_interface	iface;
    u_long	filter_handle;
};
#endif /* ALTQ3_COMPAT */
#endif /* ALTQ3_COMPAT */
struct priq_classstats {
u_int32_t class_handle;
@ -118,27 +76,6 @@ struct priq_classstats {
* header.
*/
#ifdef ALTQ3_COMPAT
/*
 * ALTQ3-compatibility userland API for PRIQ (part 3): statistics
 * request structure and ioctl command numbers (group 'Q'; 6 unused).
 */
struct priq_class_stats {
    struct priq_interface	iface;
    int	maxpri;			/* in/out */

    struct priq_classstats	*stats;	/* pointer to stats array */
};

#define	PRIQ_IF_ATTACH		_IOW('Q', 1, struct priq_interface)
#define	PRIQ_IF_DETACH		_IOW('Q', 2, struct priq_interface)
#define	PRIQ_ENABLE		_IOW('Q', 3, struct priq_interface)
#define	PRIQ_DISABLE		_IOW('Q', 4, struct priq_interface)
#define	PRIQ_CLEAR		_IOW('Q', 5, struct priq_interface)
#define	PRIQ_ADD_CLASS		_IOWR('Q', 7, struct priq_add_class)
#define	PRIQ_DEL_CLASS		_IOW('Q', 8, struct priq_delete_class)
#define	PRIQ_MOD_CLASS		_IOW('Q', 9, struct priq_modify_class)
#define	PRIQ_ADD_FILTER		_IOWR('Q', 10, struct priq_add_filter)
#define	PRIQ_DEL_FILTER		_IOW('Q', 11, struct priq_delete_filter)
#define	PRIQ_GETSTATS		_IOWR('Q', 12, struct priq_class_stats)
#endif /* ALTQ3_COMPAT */
#ifdef _KERNEL

View File

@ -96,12 +96,6 @@
#include <netpfil/pf/pf_mtag.h>
#include <net/altq/altq.h>
#include <net/altq/altq_red.h>
#ifdef ALTQ3_COMPAT
#include <net/altq/altq_conf.h>
#ifdef ALTQ_FLOWVALVE
#include <net/altq/altq_flowvalve.h>
#endif
#endif
/*
* ALTQ/RED (Random Early Detection) implementation using 32-bit
@ -168,56 +162,12 @@
* to switch to the random-drop policy, define "RED_RANDOM_DROP".
*/
#ifdef ALTQ3_COMPAT
#ifdef ALTQ_FLOWVALVE
/*
* flow-valve is an extension to protect red from unresponsive flows
* and to promote end-to-end congestion control.
* flow-valve observes the average drop rates of the flows that have
* experienced packet drops in the recent past.
* when the average drop rate exceeds the threshold, the flow is
* blocked by the flow-valve. the trapped flow should back off
* exponentially to escape from the flow-valve.
*/
#ifdef RED_RANDOM_DROP
#error "random-drop can't be used with flow-valve!"
#endif
#endif /* ALTQ_FLOWVALVE */
/* red_list keeps all red_queue_t's allocated. */
static red_queue_t *red_list = NULL;
#endif /* ALTQ3_COMPAT */
/* default red parameter values */
static int default_th_min = TH_MIN;
static int default_th_max = TH_MAX;
static int default_inv_pmax = INV_P_MAX;
#ifdef ALTQ3_COMPAT
/* internal function prototypes */
static int red_enqueue(struct ifaltq *, struct mbuf *, struct altq_pktattr *);
static struct mbuf *red_dequeue(struct ifaltq *, int);
static int red_request(struct ifaltq *, int, void *);
static void red_purgeq(red_queue_t *);
static int red_detach(red_queue_t *);
#ifdef ALTQ_FLOWVALVE
static __inline struct fve *flowlist_lookup(struct flowvalve *,
struct altq_pktattr *, struct timeval *);
static __inline struct fve *flowlist_reclaim(struct flowvalve *,
struct altq_pktattr *);
static __inline void flowlist_move_to_head(struct flowvalve *, struct fve *);
static __inline int fv_p2f(struct flowvalve *, int);
#if 0 /* XXX: make the compiler happy (fv_alloc unused) */
static struct flowvalve *fv_alloc(struct red *);
#endif
static void fv_destroy(struct flowvalve *);
static int fv_checkflow(struct flowvalve *, struct altq_pktattr *,
struct fve **);
static void fv_dropbyred(struct flowvalve *fv, struct altq_pktattr *,
struct fve *);
#endif
#endif /* ALTQ3_COMPAT */
/*
* red support routines
@ -315,12 +265,6 @@ red_alloc(int weight, int inv_pmax, int th_min, int th_max, int flags,
void
red_destroy(red_t *rp)
{
#ifdef ALTQ3_COMPAT
#ifdef ALTQ_FLOWVALVE
if (rp->red_flowvalve != NULL)
fv_destroy(rp->red_flowvalve);
#endif
#endif /* ALTQ3_COMPAT */
wtab_destroy(rp->red_wtab);
free(rp, M_DEVBUF);
}
@ -342,17 +286,6 @@ red_addq(red_t *rp, class_queue_t *q, struct mbuf *m,
{
int avg, droptype;
int n;
#ifdef ALTQ3_COMPAT
#ifdef ALTQ_FLOWVALVE
struct fve *fve = NULL;
if (rp->red_flowvalve != NULL && rp->red_flowvalve->fv_flows > 0)
if (fv_checkflow(rp->red_flowvalve, pktattr, &fve)) {
m_freem(m);
return (-1);
}
#endif
#endif /* ALTQ3_COMPAT */
avg = rp->red_avg;
@ -458,12 +391,6 @@ red_addq(red_t *rp, class_queue_t *q, struct mbuf *m,
PKTCNTR_ADD(&rp->red_stats.drop_cnt, m_pktlen(m));
#endif
rp->red_count = 0;
#ifdef ALTQ3_COMPAT
#ifdef ALTQ_FLOWVALVE
if (rp->red_flowvalve != NULL)
fv_dropbyred(rp->red_flowvalve, pktattr, fve);
#endif
#endif /* ALTQ3_COMPAT */
m_freem(m);
return (-1);
}
@ -521,11 +448,6 @@ mark_ecn(struct mbuf *m, struct altq_pktattr *pktattr, int flags)
at = pf_find_mtag(m);
if (at != NULL) {
hdr = at->hdr;
#ifdef ALTQ3_COMPAT
} else if (pktattr != NULL) {
af = pktattr->pattr_af;
hdr = pktattr->pattr_hdr;
#endif /* ALTQ3_COMPAT */
} else
return (0);
@ -707,786 +629,5 @@ pow_w(struct wtab *w, int n)
return (val);
}
#ifdef ALTQ3_COMPAT
/*
* red device interface
*/
altqdev_decl(red);
/*
 * red device open: a no-op; all real setup happens when the queueing
 * scheme is attached via ioctl.
 */
int
redopen(dev, flag, fmt, p)
    dev_t dev;
    int flag, fmt;
#if (__FreeBSD_version > 500000)
    struct thread *p;
#else
    struct proc *p;
#endif
{
    /* everything will be done when the queueing scheme is attached. */
    return 0;
}
/*
 * red device close: destroy every attached RED queue instance.
 * Remembers the first error but continues tearing down the rest.
 */
int
redclose(dev, flag, fmt, p)
    dev_t dev;
    int flag, fmt;
#if (__FreeBSD_version > 500000)
    struct thread *p;
#else
    struct proc *p;
#endif
{
    red_queue_t *rqp;
    int err, error = 0;

    while ((rqp = red_list) != NULL) {
        /* destroy all */
        err = red_detach(rqp);
        if (err != 0 && error == 0)
            error = err;	/* keep the first failure */
    }

    return error;
}
int
redioctl(dev, cmd, addr, flag, p)
dev_t dev;
ioctlcmd_t cmd;
caddr_t addr;
int flag;
#if (__FreeBSD_version > 500000)
struct thread *p;
#else
struct proc *p;
#endif
{
red_queue_t *rqp;
struct red_interface *ifacep;
struct ifnet *ifp;
int error = 0;
/* check super-user privilege */
switch (cmd) {
case RED_GETSTATS:
break;
default:
#if (__FreeBSD_version > 700000)
if ((error = priv_check(p, PRIV_ALTQ_MANAGE)) != 0)
#elsif (__FreeBSD_version > 400000)
if ((error = suser(p)) != 0)
#else
if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
#endif
return (error);
break;
}
switch (cmd) {
case RED_ENABLE:
ifacep = (struct red_interface *)addr;
if ((rqp = altq_lookup(ifacep->red_ifname, ALTQT_RED)) == NULL) {
error = EBADF;
break;
}
error = altq_enable(rqp->rq_ifq);
break;
case RED_DISABLE:
ifacep = (struct red_interface *)addr;
if ((rqp = altq_lookup(ifacep->red_ifname, ALTQT_RED)) == NULL) {
error = EBADF;
break;
}
error = altq_disable(rqp->rq_ifq);
break;
case RED_IF_ATTACH:
ifp = ifunit(((struct red_interface *)addr)->red_ifname);
if (ifp == NULL) {
error = ENXIO;
break;
}
/* allocate and initialize red_queue_t */
rqp = malloc(sizeof(red_queue_t), M_DEVBUF, M_WAITOK);
if (rqp == NULL) {
error = ENOMEM;
break;
}
bzero(rqp, sizeof(red_queue_t));
rqp->rq_q = malloc(sizeof(class_queue_t),
M_DEVBUF, M_WAITOK);
if (rqp->rq_q == NULL) {
free(rqp, M_DEVBUF);
error = ENOMEM;
break;
}
bzero(rqp->rq_q, sizeof(class_queue_t));
rqp->rq_red = red_alloc(0, 0, 0, 0, 0, 0);
if (rqp->rq_red == NULL) {
free(rqp->rq_q, M_DEVBUF);
free(rqp, M_DEVBUF);
error = ENOMEM;
break;
}
rqp->rq_ifq = &ifp->if_snd;
qtail(rqp->rq_q) = NULL;
qlen(rqp->rq_q) = 0;
qlimit(rqp->rq_q) = RED_LIMIT;
qtype(rqp->rq_q) = Q_RED;
/*
* set RED to this ifnet structure.
*/
error = altq_attach(rqp->rq_ifq, ALTQT_RED, rqp,
red_enqueue, red_dequeue, red_request,
NULL, NULL);
if (error) {
red_destroy(rqp->rq_red);
free(rqp->rq_q, M_DEVBUF);
free(rqp, M_DEVBUF);
break;
}
/* add this state to the red list */
rqp->rq_next = red_list;
red_list = rqp;
break;
case RED_IF_DETACH:
ifacep = (struct red_interface *)addr;
if ((rqp = altq_lookup(ifacep->red_ifname, ALTQT_RED)) == NULL) {
error = EBADF;
break;
}
error = red_detach(rqp);
break;
case RED_GETSTATS:
do {
struct red_stats *q_stats;
red_t *rp;
q_stats = (struct red_stats *)addr;
if ((rqp = altq_lookup(q_stats->iface.red_ifname,
ALTQT_RED)) == NULL) {
error = EBADF;
break;
}
q_stats->q_len = qlen(rqp->rq_q);
q_stats->q_limit = qlimit(rqp->rq_q);
rp = rqp->rq_red;
q_stats->q_avg = rp->red_avg >> rp->red_wshift;
q_stats->xmit_cnt = rp->red_stats.xmit_cnt;
q_stats->drop_cnt = rp->red_stats.drop_cnt;
q_stats->drop_forced = rp->red_stats.drop_forced;
q_stats->drop_unforced = rp->red_stats.drop_unforced;
q_stats->marked_packets = rp->red_stats.marked_packets;
q_stats->weight = rp->red_weight;
q_stats->inv_pmax = rp->red_inv_pmax;
q_stats->th_min = rp->red_thmin;
q_stats->th_max = rp->red_thmax;
#ifdef ALTQ_FLOWVALVE
if (rp->red_flowvalve != NULL) {
struct flowvalve *fv = rp->red_flowvalve;
q_stats->fv_flows = fv->fv_flows;
q_stats->fv_pass = fv->fv_stats.pass;
q_stats->fv_predrop = fv->fv_stats.predrop;
q_stats->fv_alloc = fv->fv_stats.alloc;
q_stats->fv_escape = fv->fv_stats.escape;
} else {
#endif /* ALTQ_FLOWVALVE */
q_stats->fv_flows = 0;
q_stats->fv_pass = 0;
q_stats->fv_predrop = 0;
q_stats->fv_alloc = 0;
q_stats->fv_escape = 0;
#ifdef ALTQ_FLOWVALVE
}
#endif /* ALTQ_FLOWVALVE */
} while (/*CONSTCOND*/ 0);
break;
case RED_CONFIG:
do {
struct red_conf *fc;
red_t *new;
int s, limit;
fc = (struct red_conf *)addr;
if ((rqp = altq_lookup(fc->iface.red_ifname,
ALTQT_RED)) == NULL) {
error = EBADF;
break;
}
new = red_alloc(fc->red_weight,
fc->red_inv_pmax,
fc->red_thmin,
fc->red_thmax,
fc->red_flags,
fc->red_pkttime);
if (new == NULL) {
error = ENOMEM;
break;
}
s = splnet();
red_purgeq(rqp);
limit = fc->red_limit;
if (limit < fc->red_thmax)
limit = fc->red_thmax;
qlimit(rqp->rq_q) = limit;
fc->red_limit = limit; /* write back the new value */
red_destroy(rqp->rq_red);
rqp->rq_red = new;
splx(s);
/* write back new values */
fc->red_limit = limit;
fc->red_inv_pmax = rqp->rq_red->red_inv_pmax;
fc->red_thmin = rqp->rq_red->red_thmin;
fc->red_thmax = rqp->rq_red->red_thmax;
} while (/*CONSTCOND*/ 0);
break;
case RED_SETDEFAULTS:
do {
struct redparams *rp;
rp = (struct redparams *)addr;
default_th_min = rp->th_min;
default_th_max = rp->th_max;
default_inv_pmax = rp->inv_pmax;
} while (/*CONSTCOND*/ 0);
break;
default:
error = EINVAL;
break;
}
return error;
}
/*
 * Detach a RED instance: disable and detach it from ALTQ, unlink it
 * from the global red_list, and free all of its memory.
 */
static int
red_detach(rqp)
    red_queue_t *rqp;
{
    red_queue_t *tmp;
    int error = 0;

    if (ALTQ_IS_ENABLED(rqp->rq_ifq))
        altq_disable(rqp->rq_ifq);

    if ((error = altq_detach(rqp->rq_ifq)))
        return (error);

    /* unlink rqp from the singly-linked red_list */
    if (red_list == rqp)
        red_list = rqp->rq_next;
    else {
        for (tmp = red_list; tmp != NULL; tmp = tmp->rq_next)
            if (tmp->rq_next == rqp) {
                tmp->rq_next = rqp->rq_next;
                break;
            }
        if (tmp == NULL)
            printf("red_detach: no state found in red_list!\n");
    }

    red_destroy(rqp->rq_red);
    free(rqp->rq_q, M_DEVBUF);
    free(rqp, M_DEVBUF);
    return (error);
}
/*
* enqueue routine:
*
* returns: 0 when successfully queued.
* ENOBUFS when drop occurs.
*/
/*
 * ALTQ enqueue handler for a RED-managed queue.
 * red_addq() decides drop/mark/accept; the packet has already been
 * freed by red_addq() when it returns < 0.
 *
 * returns: 0 when successfully queued.
 *          ENOBUFS when drop occurs.
 */
static int
red_enqueue(ifq, m, pktattr)
    struct ifaltq *ifq;
    struct mbuf *m;
    struct altq_pktattr *pktattr;
{
    red_queue_t *rqp = (red_queue_t *)ifq->altq_disc;

    IFQ_LOCK_ASSERT(ifq);

    if (red_addq(rqp->rq_red, rqp->rq_q, m, pktattr) < 0)
        return ENOBUFS;
    ifq->ifq_len++;
    return 0;
}
/*
* dequeue routine:
* must be called in splimp.
*
* returns: mbuf dequeued.
* NULL when no packet is available in the queue.
*/
/*
 * ALTQ dequeue handler for a RED-managed queue.
 * ALTDQ_POLL peeks at the head without removing; ALTDQ_REMOVE takes
 * the packet off the queue and decrements the interface queue length.
 *
 * returns: mbuf dequeued.
 *          NULL when no packet is available in the queue.
 */
static struct mbuf *
red_dequeue(ifq, op)
    struct ifaltq *ifq;
    int op;
{
    red_queue_t *rqp = (red_queue_t *)ifq->altq_disc;
    struct mbuf *m;

    IFQ_LOCK_ASSERT(ifq);

    if (op == ALTDQ_POLL)
        return qhead(rqp->rq_q);

    /* op == ALTDQ_REMOVE */
    m = red_getq(rqp->rq_red, rqp->rq_q);
    if (m != NULL)
        ifq->ifq_len--;
    return (m);
}
/*
 * ALTQ request handler for a RED-managed queue.
 * Only ALTRQ_PURGE is handled (flush all queued packets); any other
 * request is silently accepted.
 */
static int
red_request(ifq, req, arg)
    struct ifaltq *ifq;
    int req;
    void *arg;
{
    red_queue_t *rqp = (red_queue_t *)ifq->altq_disc;

    IFQ_LOCK_ASSERT(ifq);

    switch (req) {
    case ALTRQ_PURGE:
        red_purgeq(rqp);
        break;
    }
    return (0);
}
/*
 * Drop every packet in the RED queue and, if the discipline is
 * enabled, reset the interface queue length to match.
 */
static void
red_purgeq(rqp)
    red_queue_t *rqp;
{
    _flushq(rqp->rq_q);
    if (ALTQ_IS_ENABLED(rqp->rq_ifq))
        rqp->rq_ifq->ifq_len = 0;
}
#ifdef ALTQ_FLOWVALVE
#define FV_PSHIFT 7 /* weight of average drop rate -- 1/128 */
#define FV_PSCALE(x) ((x) << FV_PSHIFT)
#define FV_PUNSCALE(x) ((x) >> FV_PSHIFT)
#define FV_FSHIFT 5 /* weight of average fraction -- 1/32 */
#define FV_FSCALE(x) ((x) << FV_FSHIFT)
#define FV_FUNSCALE(x) ((x) >> FV_FSHIFT)
#define FV_TIMER (3 * hz) /* timer value for garbage collector */
#define FV_FLOWLISTSIZE 64 /* how many flows in flowlist */
#define FV_N 10 /* update fve_f every FV_N packets */
#define FV_BACKOFFTHRESH 1 /* backoff threshold interval in second */
#define FV_TTHRESH 3 /* time threshold to delete fve */
#define FV_ALPHA 5 /* extra packet count */
#define FV_STATS
#if (__FreeBSD_version > 300000)
#define FV_TIMESTAMP(tp) getmicrotime(tp)
#else
#define FV_TIMESTAMP(tp) { (*(tp)) = time; }
#endif
/*
* Brtt table: 127 entry table to convert drop rate (p) to
* the corresponding bandwidth fraction (f)
* the following equation is implemented to use scaled values,
* fve_p and fve_f, in the fixed point format.
*
* Brtt(p) = 1 /(sqrt(4*p/3) + min(1,3*sqrt(p*6/8)) * p * (1+32 * p*p))
* f = Brtt(p) / (max_th + alpha)
*/
#define BRTT_SIZE 128
#define BRTT_SHIFT 12
#define BRTT_MASK 0x0007f000
#define BRTT_PMAX (1 << (FV_PSHIFT + FP_SHIFT))
const int brtt_tab[BRTT_SIZE] = {
0, 1262010, 877019, 703694, 598706, 525854, 471107, 427728,
392026, 361788, 335598, 312506, 291850, 273158, 256081, 240361,
225800, 212247, 199585, 187788, 178388, 169544, 161207, 153333,
145888, 138841, 132165, 125836, 119834, 114141, 108739, 103612,
98747, 94129, 89746, 85585, 81637, 77889, 74333, 70957,
67752, 64711, 61824, 59084, 56482, 54013, 51667, 49440,
47325, 45315, 43406, 41591, 39866, 38227, 36667, 35184,
33773, 32430, 31151, 29933, 28774, 27668, 26615, 25611,
24653, 23740, 22868, 22035, 21240, 20481, 19755, 19062,
18399, 17764, 17157, 16576, 16020, 15487, 14976, 14487,
14017, 13567, 13136, 12721, 12323, 11941, 11574, 11222,
10883, 10557, 10243, 9942, 9652, 9372, 9103, 8844,
8594, 8354, 8122, 7898, 7682, 7474, 7273, 7079,
6892, 6711, 6536, 6367, 6204, 6046, 5893, 5746,
5603, 5464, 5330, 5201, 5075, 4954, 4836, 4722,
4611, 4504, 4400, 4299, 4201, 4106, 4014, 3924
};
/*
 * Look up the flow-valve entry matching the packet's IPv4/IPv6
 * src/dst address pair.  The LRU-ordered flow list is scanned until a
 * match, an empty entry, or an entry older than FV_TTHRESH seconds is
 * found (the latter is lazily invalidated).  As a side effect the
 * number of active entries seen is saved in fv->fv_flows.
 * Returns NULL when no matching entry exists or the address family is
 * not supported.
 */
static __inline struct fve *
flowlist_lookup(fv, pktattr, now)
    struct flowvalve *fv;
    struct altq_pktattr *pktattr;
    struct timeval *now;
{
    struct fve *fve;
    int flows;
    struct ip *ip;
#ifdef INET6
    struct ip6_hdr *ip6;
#endif
    struct timeval tthresh;

    if (pktattr == NULL)
        return (NULL);

    /* entries whose last drop is older than this are considered stale */
    tthresh.tv_sec = now->tv_sec - FV_TTHRESH;
    flows = 0;
    /*
     * search the flow list
     */
    switch (pktattr->pattr_af) {
    case AF_INET:
        ip = (struct ip *)pktattr->pattr_hdr;
        TAILQ_FOREACH(fve, &fv->fv_flowlist, fve_lru){
            if (fve->fve_lastdrop.tv_sec == 0)
                break;		/* unused entry: end of live list */
            if (fve->fve_lastdrop.tv_sec < tthresh.tv_sec) {
                fve->fve_lastdrop.tv_sec = 0;	/* expire stale entry */
                break;
            }
            if (fve->fve_flow.flow_af == AF_INET &&
                fve->fve_flow.flow_ip.ip_src.s_addr ==
                ip->ip_src.s_addr &&
                fve->fve_flow.flow_ip.ip_dst.s_addr ==
                ip->ip_dst.s_addr)
                return (fve);
            flows++;
        }
        break;
#ifdef INET6
    case AF_INET6:
        ip6 = (struct ip6_hdr *)pktattr->pattr_hdr;
        TAILQ_FOREACH(fve, &fv->fv_flowlist, fve_lru){
            if (fve->fve_lastdrop.tv_sec == 0)
                break;		/* unused entry: end of live list */
            if (fve->fve_lastdrop.tv_sec < tthresh.tv_sec) {
                fve->fve_lastdrop.tv_sec = 0;	/* expire stale entry */
                break;
            }
            if (fve->fve_flow.flow_af == AF_INET6 &&
                IN6_ARE_ADDR_EQUAL(&fve->fve_flow.flow_ip6.ip6_src,
                           &ip6->ip6_src) &&
                IN6_ARE_ADDR_EQUAL(&fve->fve_flow.flow_ip6.ip6_dst,
                           &ip6->ip6_dst))
                return (fve);
            flows++;
        }
        break;
#endif /* INET6 */

    default:
        /* unknown protocol.  no drop. */
        return (NULL);
    }
    fv->fv_flows = flows;	/* save the number of active fve's */
    return (NULL);
}
/*
 * Recycle the least-recently-used flow-valve entry for the flow
 * described by pktattr, resetting its state to Green with cleared
 * drop-rate/fraction accounting.  fv_flows and the allocation
 * statistic are bumped.
 */
static __inline struct fve *
flowlist_reclaim(fv, pktattr)
    struct flowvalve *fv;
    struct altq_pktattr *pktattr;
{
    struct fve *fve;
    struct ip *ip;
#ifdef INET6
    struct ip6_hdr *ip6;
#endif

    /*
     * get an entry from the tail of the LRU list.
     */
    fve = TAILQ_LAST(&fv->fv_flowlist, fv_flowhead);

    switch (pktattr->pattr_af) {
    case AF_INET:
        ip = (struct ip *)pktattr->pattr_hdr;
        fve->fve_flow.flow_af = AF_INET;
        fve->fve_flow.flow_ip.ip_src = ip->ip_src;
        fve->fve_flow.flow_ip.ip_dst = ip->ip_dst;
        break;
#ifdef INET6
    case AF_INET6:
        ip6 = (struct ip6_hdr *)pktattr->pattr_hdr;
        fve->fve_flow.flow_af = AF_INET6;
        fve->fve_flow.flow_ip6.ip6_src = ip6->ip6_src;
        fve->fve_flow.flow_ip6.ip6_dst = ip6->ip6_dst;
        break;
#endif
    }

    /* reset per-flow accounting for the new occupant */
    fve->fve_state = Green;
    fve->fve_p = 0.0;
    fve->fve_f = 0.0;
    fve->fve_ifseq = fv->fv_ifseq - 1;
    fve->fve_count = 0;

    fv->fv_flows++;
#ifdef FV_STATS
    fv->fv_stats.alloc++;
#endif
    return (fve);
}
/*
 * Move "fve" to the head of the LRU flow list unless it already is
 * the first entry.
 */
static __inline void
flowlist_move_to_head(struct flowvalve *fv, struct fve *fve)
{

	if (TAILQ_FIRST(&fv->fv_flowlist) == fve)
		return;
	TAILQ_REMOVE(&fv->fv_flowlist, fve, fve_lru);
	TAILQ_INSERT_HEAD(&fv->fv_flowlist, fve, fve_lru);
}
#if 0 /* XXX: make the compiler happy (fv_alloc unused) */
/*
 * allocate flowvalve structure
 *
 * Allocates the flowvalve itself, an array of FV_FLOWLISTSIZE flow
 * entries chained onto the LRU list, and the drop-rate-to-fraction
 * lookup table (fv_p2ftab) derived from the RED parameters in "rp".
 * Returns NULL when any allocation fails.
 *
 * NOTE(review): malloc(9) with M_WAITOK is documented as sleeping
 * rather than returning NULL, which would make the NULL checks below
 * dead code — confirm before re-enabling this function.
 */
static struct flowvalve *
fv_alloc(rp)
	struct red *rp;
{
	struct flowvalve *fv;
	struct fve *fve;
	int i, num;

	num = FV_FLOWLISTSIZE;
	fv = malloc(sizeof(struct flowvalve),
	    M_DEVBUF, M_WAITOK);
	if (fv == NULL)
		return (NULL);
	bzero(fv, sizeof(struct flowvalve));

	fv->fv_fves = malloc(sizeof(struct fve) * num,
	    M_DEVBUF, M_WAITOK);
	if (fv->fv_fves == NULL) {
		free(fv, M_DEVBUF);
		return (NULL);
	}
	bzero(fv->fv_fves, sizeof(struct fve) * num);
	fv->fv_flows = 0;

	TAILQ_INIT(&fv->fv_flowlist);
	for (i = 0; i < num; i++) {
		fve = &fv->fv_fves[i];
		/* tv_sec == 0 marks an entry as unused (see flowlist_lookup) */
		fve->fve_lastdrop.tv_sec = 0;
		TAILQ_INSERT_TAIL(&fv->fv_flowlist, fve, fve_lru);
	}

	/* initialize drop rate threshold in scaled fixed-point */
	fv->fv_pthresh = (FV_PSCALE(1) << FP_SHIFT) / rp->red_inv_pmax;

	/* initialize drop rate to fraction table */
	fv->fv_p2ftab = malloc(sizeof(int) * BRTT_SIZE,
	    M_DEVBUF, M_WAITOK);
	if (fv->fv_p2ftab == NULL) {
		free(fv->fv_fves, M_DEVBUF);
		free(fv, M_DEVBUF);
		return (NULL);
	}
	/*
	 * create the p2f table.
	 * (shift is used to keep the precision)
	 */
	for (i = 1; i < BRTT_SIZE; i++) {
		int f;

		f = brtt_tab[i] << 8;
		fv->fv_p2ftab[i] = (f / (rp->red_thmax + FV_ALPHA)) >> 8;
	}

	return (fv);
}
#endif
/*
 * Release all memory held by a flowvalve: the drop-rate-to-fraction
 * table, the flow entry array, and the flowvalve structure itself.
 */
static void
fv_destroy(struct flowvalve *fv)
{

	free(fv->fv_p2ftab, M_DEVBUF);
	free(fv->fv_fves, M_DEVBUF);
	free(fv, M_DEVBUF);
}
/*
 * Map a scaled drop probability "p" to the corresponding fraction
 * threshold via the precomputed fv_p2ftab table.
 */
static __inline int
fv_p2f(struct flowvalve *fv, int p)
{
	int idx;

	if (p >= BRTT_PMAX)
		return (fv->fv_p2ftab[BRTT_SIZE - 1]);
	idx = p & BRTT_MASK;
	if (idx != 0)
		return (fv->fv_p2ftab[idx >> BRTT_SHIFT]);
	return (fv->fv_p2ftab[1]);
}
/*
 * check if an arriving packet should be pre-dropped.
 * called from red_addq() when a packet arrives.
 * returns 1 when the packet should be pre-dropped.
 * should be called in splimp.
 *
 * When a matching flow entry exists, *fcache is set to it so that
 * fv_dropbyred() can reuse the entry without a second lookup.
 */
static int
fv_checkflow(fv, pktattr, fcache)
	struct flowvalve *fv;
	struct altq_pktattr *pktattr;
	struct fve **fcache;
{
	struct fve *fve;
	struct timeval now;

	/* per-interface packet sequence number, used below to derive f */
	fv->fv_ifseq++;
	FV_TIMESTAMP(&now);

	if ((fve = flowlist_lookup(fv, pktattr, &now)) == NULL)
		/* no matching entry in the flowlist */
		return (0);

	*fcache = fve;

	/* update fraction f for every FV_N packets */
	if (++fve->fve_count == FV_N) {
		/*
		 * f = Wf * N / (fv_ifseq - fve_ifseq) + (1 - Wf) * f
		 * (scaled fixed-point; FV_FUNSCALE removes the Wf part)
		 */
		fve->fve_f =
			(FV_N << FP_SHIFT) / (fv->fv_ifseq - fve->fve_ifseq)
			+ fve->fve_f - FV_FUNSCALE(fve->fve_f);
		fve->fve_ifseq = fv->fv_ifseq;
		fve->fve_count = 0;
	}

	/*
	 * overpumping test: a Green flow whose drop probability exceeds
	 * the threshold is checked against its bandwidth fraction
	 */
	if (fve->fve_state == Green && fve->fve_p > fv->fv_pthresh) {
		int fthresh;

		/* calculate a threshold */
		fthresh = fv_p2f(fv, fve->fve_p);
		if (fve->fve_f > fthresh)
			fve->fve_state = Red;
	}

	if (fve->fve_state == Red) {
		/*
		 * backoff test
		 */
		if (now.tv_sec - fve->fve_lastdrop.tv_sec > FV_BACKOFFTHRESH) {
			/* no drop for at least FV_BACKOFFTHRESH sec */
			fve->fve_p = 0;
			fve->fve_state = Green;
#ifdef FV_STATS
			fv->fv_stats.escape++;
#endif
		} else {
			/* block this flow */
			flowlist_move_to_head(fv, fve);
			fve->fve_lastdrop = now;
#ifdef FV_STATS
			fv->fv_stats.predrop++;
#endif
			return (1);
		}
	}

	/*
	 * p = (1 - Wp) * p  (decay; floor at 0)
	 */
	fve->fve_p -= FV_PUNSCALE(fve->fve_p);
	if (fve->fve_p < 0)
		fve->fve_p = 0;
#ifdef FV_STATS
	fv->fv_stats.pass++;
#endif
	return (0);
}
/*
 * called from red_addq when a packet is dropped by red.
 * should be called in splimp.
 *
 * Charges the drop to the packet's flow entry: looks up (or reclaims)
 * the entry, moves it to the head of the LRU list, and increases its
 * drop probability p.  "fcache" may carry the entry already found by
 * fv_checkflow() for this packet, avoiding a second lookup.
 */
static void fv_dropbyred(fv, pktattr, fcache)
	struct flowvalve *fv;
	struct altq_pktattr *pktattr;
	struct fve *fcache;
{
	struct fve *fve;
	struct timeval now;

	if (pktattr == NULL)
		return;
	FV_TIMESTAMP(&now);

	if (fcache != NULL)
		/* the fve of this packet is already cached */
		fve = fcache;
	else if ((fve = flowlist_lookup(fv, pktattr, &now)) == NULL)
		/* no entry for this flow: evict the LRU entry for it */
		fve = flowlist_reclaim(fv, pktattr);

	flowlist_move_to_head(fv, fve);

	/*
	 * update p:  the following line cancels the update
	 * in fv_checkflow() and calculate
	 *	p = Wp + (1 - Wp) * p
	 */
	fve->fve_p = (1 << FP_SHIFT) + fve->fve_p;

	fve->fve_lastdrop = now;
}
#endif /* ALTQ_FLOWVALVE */
#ifdef KLD_MODULE

/* altq3 switch table (open/close/ioctl handlers) for the loadable RED module */
static struct altqsw red_sw =
	{"red", redopen, redclose, redioctl};

ALTQ_MODULE(altq_red, ALTQT_RED, &red_sw);
MODULE_VERSION(altq_red, 1);

#endif /* KLD_MODULE */
#endif /* ALTQ3_COMPAT */
#endif /* ALTQ_RED */

View File

@ -32,48 +32,6 @@
#include <net/altq/altq_classq.h>
#ifdef ALTQ3_COMPAT
struct red_interface {
char red_ifname[IFNAMSIZ];
};
struct red_stats {
struct red_interface iface;
int q_len;
int q_avg;
struct pktcntr xmit_cnt;
struct pktcntr drop_cnt;
u_int drop_forced;
u_int drop_unforced;
u_int marked_packets;
/* static red parameters */
int q_limit;
int weight;
int inv_pmax;
int th_min;
int th_max;
/* flowvalve related stuff */
u_int fv_flows;
u_int fv_pass;
u_int fv_predrop;
u_int fv_alloc;
u_int fv_escape;
};
struct red_conf {
struct red_interface iface;
int red_weight; /* weight for EWMA */
int red_inv_pmax; /* inverse of max drop probability */
int red_thmin; /* red min threshold */
int red_thmax; /* red max threshold */
int red_limit; /* max queue length */
int red_pkttime; /* average packet time in usec */
int red_flags; /* see below */
};
#endif /* ALTQ3_COMPAT */
/* red flags */
#define REDF_ECN4 0x01 /* use packet marking for IPv4 packets */
@ -100,24 +58,9 @@ struct redstats {
u_int marked_packets;
};
#ifdef ALTQ3_COMPAT
/*
 * IOCTLs for RED (altq3 compatibility interface)
 */
#define	RED_IF_ATTACH	_IOW('Q', 1, struct red_interface)
#define	RED_IF_DETACH	_IOW('Q', 2, struct red_interface)
#define	RED_ENABLE	_IOW('Q', 3, struct red_interface)
#define	RED_DISABLE	_IOW('Q', 4, struct red_interface)
#define	RED_CONFIG	_IOWR('Q', 6, struct red_conf)
#define	RED_GETSTATS	_IOWR('Q', 12, struct red_stats)
#define	RED_SETDEFAULTS	_IOW('Q', 30, struct redparams)
#endif /* ALTQ3_COMPAT */
#ifdef _KERNEL
#ifdef ALTQ3_COMPAT
struct flowvalve;
#endif
/* weight table structure for idle time calibration */
struct wtab {
@ -153,9 +96,6 @@ typedef struct red {
struct wtab *red_wtab; /* weight table */
struct timeval red_last; /* time when the queue becomes idle */
#ifdef ALTQ3_COMPAT
struct flowvalve *red_flowvalve; /* flowvalve state */
#endif
struct {
struct pktcntr xmit_cnt;
@ -166,16 +106,6 @@ typedef struct red {
} red_stats;
} red_t;
#ifdef ALTQ3_COMPAT
/*
 * altq3 per-interface RED state: ties the RED algorithm state and its
 * class queue to the ifaltq it is attached to, linked on a list.
 */
typedef struct red_queue {
	struct red_queue *rq_next;	/* next red_state in the list */
	struct ifaltq *rq_ifq;		/* backpointer to ifaltq */

	class_queue_t *rq_q;		/* underlying packet queue */

	red_t *rq_red;			/* RED algorithm state */
} red_queue_t;
#endif /* ALTQ3_COMPAT */
/* red drop types */
#define DTYPE_NODROP 0 /* no drop */

View File

@ -92,9 +92,6 @@
#include <net/altq/altq_cdnr.h>
#include <net/altq/altq_red.h>
#include <net/altq/altq_rio.h>
#ifdef ALTQ3_COMPAT
#include <net/altq/altq_conf.h>
#endif
/*
* RIO: RED with IN/OUT bit
@ -168,10 +165,6 @@
} \
}
#ifdef ALTQ3_COMPAT
/* rio_list keeps all rio_queue_t's allocated. */
static rio_queue_t *rio_list = NULL;
#endif
/* default rio parameter values */
static struct redparams default_rio_params[RIO_NDROPPREC] = {
/* th_min, th_max, inv_pmax */
@ -182,18 +175,6 @@ static struct redparams default_rio_params[RIO_NDROPPREC] = {
/* internal function prototypes */
static int dscp2index(u_int8_t);
#ifdef ALTQ3_COMPAT
static int rio_enqueue(struct ifaltq *, struct mbuf *, struct altq_pktattr *);
static struct mbuf *rio_dequeue(struct ifaltq *, int);
static int rio_request(struct ifaltq *, int, void *);
static int rio_detach(rio_queue_t *);
/*
* rio device interface
*/
altqdev_decl(rio);
#endif /* ALTQ3_COMPAT */
rio_t *
rio_alloc(int weight, struct redparams *params, int flags, int pkttime)
@ -466,379 +447,5 @@ rio_getq(rio_t *rp, class_queue_t *q)
return (m);
}
#ifdef ALTQ3_COMPAT
/*
 * altq3 character device open handler for RIO.  Intentionally a no-op;
 * all setup happens when the queueing scheme is attached via ioctl.
 */
int
rioopen(dev, flag, fmt, p)
	dev_t dev;
	int flag, fmt;
#if (__FreeBSD_version > 500000)
	struct thread *p;
#else
	struct proc *p;
#endif
{
	/* everything will be done when the queueing scheme is attached. */
	return 0;
}
/*
 * altq3 character device close handler for RIO: tears down every RIO
 * instance still on rio_list.  Keeps detaching the remaining instances
 * even after a failure; returns the first error encountered (0 on
 * success).
 */
int
rioclose(dev, flag, fmt, p)
	dev_t dev;
	int flag, fmt;
#if (__FreeBSD_version > 500000)
	struct thread *p;
#else
	struct proc *p;
#endif
{
	rio_queue_t *rqp;
	int err, error = 0;

	while ((rqp = rio_list) != NULL) {
		/* destroy all */
		err = rio_detach(rqp);
		if (err != 0 && error == 0)
			error = err;
	}
	return error;
}
/*
 * altq3 ioctl handler for RIO: attaches/detaches the discipline on an
 * interface, enables/disables it, reports statistics, and (re)configures
 * the per-drop-precedence RED parameters.
 *
 * All commands except RIO_GETSTATS require management privilege.
 * Returns 0 on success or an errno value (EBADF for an unknown
 * interface/discipline, ENXIO/ENOMEM on attach failures, EINVAL for an
 * unknown command).
 */
int
rioioctl(dev, cmd, addr, flag, p)
	dev_t dev;
	ioctlcmd_t cmd;
	caddr_t addr;
	int flag;
#if (__FreeBSD_version > 500000)
	struct thread *p;
#else
	struct proc *p;
#endif
{
	rio_queue_t *rqp;
	struct rio_interface *ifacep;
	struct ifnet *ifp;
	int	error = 0;

	/* check super-user privilege */
	switch (cmd) {
	case RIO_GETSTATS:
		break;
	default:
#if (__FreeBSD_version > 700000)
		if ((error = priv_check(p, PRIV_ALTQ_MANAGE)) != 0)
			return (error);
#elif (__FreeBSD_version > 400000)
		/*
		 * BUGFIX: this was "#elsif", which is not a C preprocessor
		 * directive; the invalid directive made this branch (and
		 * the #else group) fail to compile.
		 */
		if ((error = suser(p)) != 0)
			return (error);
#else
		if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
			return (error);
#endif
		break;
	}

	switch (cmd) {

	case RIO_ENABLE:
		ifacep = (struct rio_interface *)addr;
		if ((rqp = altq_lookup(ifacep->rio_ifname, ALTQT_RIO)) == NULL) {
			error = EBADF;
			break;
		}
		error = altq_enable(rqp->rq_ifq);
		break;

	case RIO_DISABLE:
		ifacep = (struct rio_interface *)addr;
		if ((rqp = altq_lookup(ifacep->rio_ifname, ALTQT_RIO)) == NULL) {
			error = EBADF;
			break;
		}
		error = altq_disable(rqp->rq_ifq);
		break;

	case RIO_IF_ATTACH:
		ifp = ifunit(((struct rio_interface *)addr)->rio_ifname);
		if (ifp == NULL) {
			error = ENXIO;
			break;
		}

		/* allocate and initialize rio_queue_t */
		rqp = malloc(sizeof(rio_queue_t), M_DEVBUF, M_WAITOK);
		if (rqp == NULL) {
			error = ENOMEM;
			break;
		}
		bzero(rqp, sizeof(rio_queue_t));

		rqp->rq_q = malloc(sizeof(class_queue_t),
		    M_DEVBUF, M_WAITOK);
		if (rqp->rq_q == NULL) {
			free(rqp, M_DEVBUF);
			error = ENOMEM;
			break;
		}
		bzero(rqp->rq_q, sizeof(class_queue_t));

		/* RIO state with all-default parameters */
		rqp->rq_rio = rio_alloc(0, NULL, 0, 0);
		if (rqp->rq_rio == NULL) {
			free(rqp->rq_q, M_DEVBUF);
			free(rqp, M_DEVBUF);
			error = ENOMEM;
			break;
		}

		rqp->rq_ifq = &ifp->if_snd;
		qtail(rqp->rq_q) = NULL;
		qlen(rqp->rq_q) = 0;
		qlimit(rqp->rq_q) = RIO_LIMIT;
		qtype(rqp->rq_q) = Q_RIO;

		/*
		 * set RIO to this ifnet structure.
		 */
		error = altq_attach(rqp->rq_ifq, ALTQT_RIO, rqp,
				    rio_enqueue, rio_dequeue, rio_request,
				    NULL, NULL);
		if (error) {
			rio_destroy(rqp->rq_rio);
			free(rqp->rq_q, M_DEVBUF);
			free(rqp, M_DEVBUF);
			break;
		}

		/* add this state to the rio list */
		rqp->rq_next = rio_list;
		rio_list = rqp;
		break;

	case RIO_IF_DETACH:
		ifacep = (struct rio_interface *)addr;
		if ((rqp = altq_lookup(ifacep->rio_ifname, ALTQT_RIO)) == NULL) {
			error = EBADF;
			break;
		}
		error = rio_detach(rqp);
		break;

	case RIO_GETSTATS:
		do {
			struct rio_stats *q_stats;
			rio_t *rp;
			int i;

			q_stats = (struct rio_stats *)addr;
			if ((rqp = altq_lookup(q_stats->iface.rio_ifname,
					       ALTQT_RIO)) == NULL) {
				error = EBADF;
				break;
			}

			rp = rqp->rq_rio;

			q_stats->q_limit = qlimit(rqp->rq_q);
			q_stats->weight = rp->rio_weight;
			q_stats->flags = rp->rio_flags;

			for (i = 0; i < RIO_NDROPPREC; i++) {
				q_stats->q_len[i] = rp->rio_precstate[i].qlen;
				bcopy(&rp->q_stats[i], &q_stats->q_stats[i],
				      sizeof(struct redstats));
				/* unscale the EWMA average for user space */
				q_stats->q_stats[i].q_avg =
				    rp->rio_precstate[i].avg >> rp->rio_wshift;

				q_stats->q_params[i].inv_pmax
					= rp->rio_precstate[i].inv_pmax;
				q_stats->q_params[i].th_min
					= rp->rio_precstate[i].th_min;
				q_stats->q_params[i].th_max
					= rp->rio_precstate[i].th_max;
			}
		} while (/*CONSTCOND*/ 0);
		break;

	case RIO_CONFIG:
		do {
			struct rio_conf *fc;
			rio_t	*new;
			int	s, limit, i;

			fc = (struct rio_conf *)addr;
			if ((rqp = altq_lookup(fc->iface.rio_ifname,
					       ALTQT_RIO)) == NULL) {
				error = EBADF;
				break;
			}

			/* build the replacement state before touching the old */
			new = rio_alloc(fc->rio_weight, &fc->q_params[0],
			    fc->rio_flags, fc->rio_pkttime);
			if (new == NULL) {
				error = ENOMEM;
				break;
			}

			s = splnet();
			_flushq(rqp->rq_q);
			/* the queue limit must cover the largest max threshold */
			limit = fc->rio_limit;
			if (limit < fc->q_params[RIO_NDROPPREC-1].th_max)
				limit = fc->q_params[RIO_NDROPPREC-1].th_max;
			qlimit(rqp->rq_q) = limit;

			rio_destroy(rqp->rq_rio);
			rqp->rq_rio = new;

			splx(s);

			/* write back new values */
			fc->rio_limit = limit;
			for (i = 0; i < RIO_NDROPPREC; i++) {
				fc->q_params[i].inv_pmax =
					rqp->rq_rio->rio_precstate[i].inv_pmax;
				fc->q_params[i].th_min =
					rqp->rq_rio->rio_precstate[i].th_min;
				fc->q_params[i].th_max =
					rqp->rq_rio->rio_precstate[i].th_max;
			}
		} while (/*CONSTCOND*/ 0);
		break;

	case RIO_SETDEFAULTS:
		do {
			struct redparams *rp;
			int i;

			rp = (struct redparams *)addr;
			for (i = 0; i < RIO_NDROPPREC; i++)
				default_rio_params[i] = rp[i];
		} while (/*CONSTCOND*/ 0);
		break;

	default:
		error = EINVAL;
		break;
	}

	return error;
}
/*
 * Detach a RIO instance from its interface: disable it if enabled,
 * detach from the ifaltq, unlink it from the global rio_list, and free
 * the discipline state and its class queue.  Returns 0 or the error
 * from altq_detach().
 */
static int
rio_detach(rqp)
	rio_queue_t *rqp;
{
	rio_queue_t *tmp;
	int error = 0;

	if (ALTQ_IS_ENABLED(rqp->rq_ifq))
		altq_disable(rqp->rq_ifq);

	if ((error = altq_detach(rqp->rq_ifq)))
		return (error);

	/* unlink rqp from the singly-linked rio_list */
	if (rio_list == rqp)
		rio_list = rqp->rq_next;
	else {
		for (tmp = rio_list; tmp != NULL; tmp = tmp->rq_next)
			if (tmp->rq_next == rqp) {
				tmp->rq_next = rqp->rq_next;
				break;
			}
		if (tmp == NULL)
			printf("rio_detach: no state found in rio_list!\n");
	}

	rio_destroy(rqp->rq_rio);
	free(rqp->rq_q, M_DEVBUF);
	free(rqp, M_DEVBUF);
	return (error);
}
/*
 * rio support routines
 */
/*
 * Discipline request handler: on ALTRQ_PURGE, flush the class queue
 * and zero the interface queue length if the discipline is enabled.
 * Always returns 0.
 */
static int
rio_request(struct ifaltq *ifq, int req, void *arg)
{
	rio_queue_t *rqp = (rio_queue_t *)ifq->altq_disc;

	IFQ_LOCK_ASSERT(ifq);

	if (req == ALTRQ_PURGE) {
		_flushq(rqp->rq_q);
		if (ALTQ_IS_ENABLED(ifq))
			ifq->ifq_len = 0;
	}
	return (0);
}
/*
 * enqueue routine:
 *
 *	returns: 0 when successfully queued.
 *		 ENOBUFS when drop occurs.
 */
static int
rio_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
{
	rio_queue_t *rqp = (rio_queue_t *)ifq->altq_disc;

	IFQ_LOCK_ASSERT(ifq);

	if (rio_addq(rqp->rq_rio, rqp->rq_q, m, pktattr) != 0)
		return ENOBUFS;

	ifq->ifq_len++;
	return 0;
}
/*
 * dequeue routine:
 *	must be called in splimp.
 *
 *	returns: mbuf dequeued.
 *		 NULL when no packet is available in the queue.
 */
static struct mbuf *
rio_dequeue(struct ifaltq *ifq, int op)
{
	rio_queue_t *rqp = (rio_queue_t *)ifq->altq_disc;
	struct mbuf *pkt;

	IFQ_LOCK_ASSERT(ifq);

	/* a poll peeks at the head without removing it */
	if (op == ALTDQ_POLL)
		return qhead(rqp->rq_q);

	pkt = rio_getq(rqp->rq_rio, rqp->rq_q);
	if (pkt != NULL)
		ifq->ifq_len--;
	return pkt;
}
#ifdef KLD_MODULE

/* altq3 switch table (open/close/ioctl handlers) for the loadable RIO module */
static struct altqsw rio_sw =
	{"rio", rioopen, rioclose, rioioctl};

ALTQ_MODULE(altq_rio, ALTQT_RIO, &rio_sw);
MODULE_VERSION(altq_rio, 1);
/* RIO builds on the RED implementation, hence the module dependency */
MODULE_DEPEND(altq_rio, altq_red, 1, 1, 1);

#endif /* KLD_MODULE */
#endif /* ALTQ3_COMPAT */
#endif /* ALTQ_RIO */

View File

@ -38,32 +38,6 @@
*/
#define RIO_NDROPPREC 3 /* number of drop precedence values */
#ifdef ALTQ3_COMPAT
/* interface name argument for the altq3 RIO ioctls */
struct rio_interface {
	char	rio_ifname[IFNAMSIZ];
};

/* statistics returned by the RIO_GETSTATS ioctl */
struct rio_stats {
	struct rio_interface iface;
	int q_len[RIO_NDROPPREC];
	struct redstats q_stats[RIO_NDROPPREC];

	/* static red parameters */
	int q_limit;
	int weight;
	int flags;
	struct redparams q_params[RIO_NDROPPREC];
};

/* configuration passed with the RIO_CONFIG ioctl */
struct rio_conf {
	struct rio_interface iface;
	struct redparams q_params[RIO_NDROPPREC];
	int rio_weight;		/* weight for EWMA */
	int rio_limit;		/* max queue length */
	int rio_pkttime;	/* average packet time in usec */
	int rio_flags;		/* see below */
};
#endif /* ALTQ3_COMPAT */
/* rio flags */
#define RIOF_ECN4 0x01 /* use packet marking for IPv4 packets */
@ -71,18 +45,6 @@ struct rio_conf {
#define RIOF_ECN (RIOF_ECN4 | RIOF_ECN6)
#define RIOF_CLEARDSCP 0x200 /* clear diffserv codepoint */
#ifdef ALTQ3_COMPAT
/*
 * IOCTLs for RIO
 *
 * NOTE(review): the command numbers overlap those of the RED ioctls
 * ('Q', 1..30); presumably each discipline is reached through its own
 * character device — confirm before reusing these numbers.
 */
#define	RIO_IF_ATTACH	_IOW('Q', 1, struct rio_interface)
#define	RIO_IF_DETACH	_IOW('Q', 2, struct rio_interface)
#define	RIO_ENABLE	_IOW('Q', 3, struct rio_interface)
#define	RIO_DISABLE	_IOW('Q', 4, struct rio_interface)
#define	RIO_CONFIG	_IOWR('Q', 6, struct rio_conf)
#define	RIO_GETSTATS	_IOWR('Q', 12, struct rio_stats)
#define	RIO_SETDEFAULTS	_IOW('Q', 30, struct redparams[RIO_NDROPPREC])
#endif /* ALTQ3_COMPAT */
#ifdef _KERNEL
@ -122,16 +84,6 @@ typedef struct rio {
struct redstats q_stats[RIO_NDROPPREC]; /* statistics */
} rio_t;
#ifdef ALTQ3_COMPAT
/*
 * altq3 per-interface RIO state: ties the RIO algorithm state and its
 * class queue to the ifaltq it is attached to, linked on rio_list.
 */
typedef struct rio_queue {
	struct rio_queue *rq_next;	/* next red_state in the list */
	struct ifaltq *rq_ifq;		/* backpointer to ifaltq */
	class_queue_t *rq_q;		/* underlying packet queue */
	rio_t *rq_rio;			/* RIO algorithm state */
} rio_queue_t;
#endif /* ALTQ3_COMPAT */
extern rio_t *rio_alloc(int, struct redparams *, int, int);
extern void rio_destroy(rio_t *);

View File

@ -49,17 +49,9 @@
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/time.h>
#ifdef ALTQ3_COMPAT
#include <sys/kernel.h>
#endif
#include <net/if.h>
#include <net/if_var.h>
#ifdef ALTQ3_COMPAT
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#endif
#include <net/altq/if_altq.h>
#include <net/altq/altq.h>

View File

@ -62,9 +62,6 @@
#include <netpfil/pf/pf.h>
#include <netpfil/pf/pf_altq.h>
#include <net/altq/altq.h>
#ifdef ALTQ3_COMPAT
#include <net/altq/altq_conf.h>
#endif
/* machine dependent clock related includes */
#include <sys/bus.h>
@ -155,22 +152,6 @@ altq_attach(ifq, type, discipline, enqueue, dequeue, request, clfier, classify)
return ENXIO;
}
#ifdef ALTQ3_COMPAT
/*
* pfaltq can override the existing discipline, but altq3 cannot.
* check these if clfier is not NULL (which implies altq3).
*/
if (clfier != NULL) {
if (ALTQ_IS_ENABLED(ifq)) {
IFQ_UNLOCK(ifq);
return EBUSY;
}
if (ALTQ_IS_ATTACHED(ifq)) {
IFQ_UNLOCK(ifq);
return EEXIST;
}
}
#endif
ifq->altq_type = type;
ifq->altq_disc = discipline;
ifq->altq_enqueue = enqueue;
@ -179,11 +160,6 @@ altq_attach(ifq, type, discipline, enqueue, dequeue, request, clfier, classify)
ifq->altq_clfier = clfier;
ifq->altq_classify = classify;
ifq->altq_flags &= (ALTQF_CANTCHANGE|ALTQF_ENABLED);
#ifdef ALTQ3_COMPAT
#ifdef ALTQ_KLD
altq_module_incref(type);
#endif
#endif
IFQ_UNLOCK(ifq);
return 0;
}
@ -206,11 +182,6 @@ altq_detach(ifq)
IFQ_UNLOCK(ifq);
return (0);
}
#ifdef ALTQ3_COMPAT
#ifdef ALTQ_KLD
altq_module_declref(ifq->altq_type);
#endif
#endif
ifq->altq_type = ALTQT_NONE;
ifq->altq_disc = NULL;