T4 packet filtering/steering.

- Enable 5-tuple and every-packet lookup.

- Set up the default filter mode to allow filtering/steering based on
  IP protocol, ingress port, inner VLAN ID, IP fragmentation, FCoE, and
  MPS match type, all combined together.  You can also filter on MAC
  index, Ethernet type, IP TOS/IPv6 Traffic Class, and outer VLAN ID,
  but you'll have to modify the default filter mode and exclude some of
  its match-fields to make room.  (A sketch of composing such a mode
  follows this list.)

  IPv4 and IPv6 SIP/DIP/SPORT/DPORT are always available in all filter
  rules.

- Add driver ioctls to get/set the global filter mode.  (Usage is
  sketched after this list.)

- Add driver ioctls to program and delete hardware filters.  A couple
  of the "switch" actions, which rewrite Ethernet and VLAN information
  and switch the packet out of another port, may not work yet because
  the L2 code is not in place.  Everything else, including all "drop"
  and "pass" rules with an RSS or absolute qid, should work.  (See the
  filter-programming sketch after this list.)
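
  As an illustration of the filter-mode points above, here is a minimal
  sketch (not part of the commit) of composing an alternate global
  filter mode from the T4_FILTER_* flags defined in t4_ioctl.h below.
  The selected match-fields must fit in the 36 bits of TP_VLAN_PRI_MAP:

	/*
	 * IP protocol (8) + TOS (8) + Ethernet type (16) + ingress
	 * port (3) + FCoE (1) = 36 bits, the maximum allowed.  The
	 * 5-tuple fields are always available on top of these.
	 */
	uint32_t mode = T4_FILTER_IP_PROTO | T4_FILTER_IP_TOS |
	    T4_FILTER_ETH_TYPE | T4_FILTER_PORT | T4_FILTER_FCoE;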
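  A sketch of reading and changing the mode from userland via the new
  ioctls.  The /dev/t4nex0 device node name and the t4_ioctl.h include
  path are assumptions here, not taken from this commit:

	#include <sys/types.h>
	#include <sys/ioctl.h>
	#include <err.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include "t4_ioctl.h"

	int
	main(void)
	{
		uint32_t mode;
		int fd;

		fd = open("/dev/t4nex0", O_RDWR);
		if (fd < 0)
			err(1, "open");

		if (ioctl(fd, CHELSIO_T4_GET_FILTER_MODE, &mode) < 0)
			err(1, "CHELSIO_T4_GET_FILTER_MODE");
		printf("filter mode: 0x%x\n", mode);

		/* Changing the mode fails with EBUSY while any filter
		 * is in use (see set_filter_mode() below). */
		mode = T4_FILTER_IP_PROTO | T4_FILTER_PORT;
		if (ioctl(fd, CHELSIO_T4_SET_FILTER_MODE, &mode) < 0)
			err(1, "CHELSIO_T4_SET_FILTER_MODE");

		return (0);
	}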
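  And a sketch of programming, then deleting, one filter: an IPv4
  "drop" rule for all TCP packets arriving on ingress port 0, with hit
  counts kept in the TCB.  It relies on the default filter mode (which
  includes T4_FILTER_IP_PROTO and T4_FILTER_PORT); fd is the descriptor
  from the previous sketch, memset() comes from <string.h>, and
  IPPROTO_TCP from <netinet/in.h>:

	struct t4_filter t;

	memset(&t, 0, sizeof(t));
	t.idx = 0;			/* filter index to program */
	t.fs.hitcnts = 1;		/* count hits in the TCB */
	t.fs.action = FILTER_DROP;
	t.fs.type = 0;			/* IPv4 rule */
	t.fs.val.proto = IPPROTO_TCP;
	t.fs.mask.proto = 0xff;		/* match the full protocol field */
	t.fs.val.iport = 0;
	t.fs.mask.iport = 0x7;		/* match the full 3-bit port field */
	if (ioctl(fd, CHELSIO_T4_SET_FILTER, &t) < 0)
		err(1, "CHELSIO_T4_SET_FILTER");

	/* Filter setup completes asynchronously; deleting it before
	 * the firmware reply arrives returns EBUSY. */
	if (ioctl(fd, CHELSIO_T4_DEL_FILTER, &t) < 0)
		err(1, "CHELSIO_T4_DEL_FILTER");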

Obtained from:	 Chelsio Communications
Navdeep Parhar 2011-05-05 02:04:56 +00:00
parent 3cea29603d
commit 8820ce5fe7
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=221474
4 changed files with 671 additions and 4 deletions


@@ -61,8 +61,10 @@ struct tid_info {
union aopen_entry *atid_tab;
unsigned int natids;
struct filter_entry *ftid_tab;
unsigned int nftids;
unsigned int ftid_base;
unsigned int ftids_in_use;
union aopen_entry *afree;
unsigned int atids_in_use;


@@ -31,6 +31,9 @@
#ifndef __T4_IOCTL_H__
#define __T4_IOCTL_H__
#include <sys/types.h>
#include <net/ethernet.h>
/*
* Ioctl commands specific to this driver.
*/
@@ -38,6 +41,11 @@ enum {
T4_GETREG = 0x40, /* read register */
T4_SETREG, /* write register */
T4_REGDUMP, /* dump of all registers */
T4_GET_FILTER_MODE, /* get global filter mode */
T4_SET_FILTER_MODE, /* set global filter mode */
T4_GET_FILTER, /* get information about a filter */
T4_SET_FILTER, /* program a filter */
T4_DEL_FILTER, /* delete a filter */
};
struct t4_reg {
@@ -53,7 +61,133 @@ struct t4_regdump {
uint32_t *data;
};
/*
* A hardware filter is some valid combination of these.
*/
#define T4_FILTER_IPv4 0x1 /* IPv4 packet */
#define T4_FILTER_IPv6 0x2 /* IPv6 packet */
#define T4_FILTER_IP_SADDR 0x4 /* Source IP address or network */
#define T4_FILTER_IP_DADDR 0x8 /* Destination IP address or network */
#define T4_FILTER_IP_SPORT 0x10 /* Source IP port */
#define T4_FILTER_IP_DPORT 0x20 /* Destination IP port */
#define T4_FILTER_FCoE 0x40 /* Fibre Channel over Ethernet packet */
#define T4_FILTER_PORT 0x80 /* Physical ingress port */
#define T4_FILTER_OVLAN 0x100 /* Outer VLAN ID */
#define T4_FILTER_IVLAN 0x200 /* Inner VLAN ID */
#define T4_FILTER_IP_TOS 0x400 /* IPv4 TOS/IPv6 Traffic Class */
#define T4_FILTER_IP_PROTO 0x800 /* IP protocol */
#define T4_FILTER_ETH_TYPE 0x1000 /* Ethernet Type */
#define T4_FILTER_MAC_IDX 0x2000 /* MPS MAC address match index */
#define T4_FILTER_MPS_HIT_TYPE 0x4000 /* MPS match type */
#define T4_FILTER_IP_FRAGMENT 0x8000 /* IP fragment */
/* Filter action */
enum {
FILTER_PASS = 0, /* default */
FILTER_DROP,
FILTER_SWITCH
};
/* 802.1q manipulation on FILTER_SWITCH */
enum {
VLAN_NOCHANGE = 0, /* default */
VLAN_REMOVE,
VLAN_INSERT,
VLAN_REWRITE
};
/* MPS match type */
enum {
UCAST_EXACT = 0, /* exact unicast match */
UCAST_HASH = 1, /* inexact (hashed) unicast match */
MCAST_EXACT = 2, /* exact multicast match */
MCAST_HASH = 3, /* inexact (hashed) multicast match */
PROMISC = 4, /* no match but port is promiscuous */
HYPPROMISC = 5, /* port is hypervisor-promiscuous + not bcast */
BCAST = 6, /* broadcast packet */
};
/* Rx steering */
enum {
DST_MODE_QUEUE, /* queue is directly specified by filter */
DST_MODE_RSS_QUEUE, /* filter specifies RSS entry containing queue */
DST_MODE_RSS, /* queue selected by default RSS hash lookup */
DST_MODE_FILT_RSS /* queue selected by hashing in filter-specified
RSS subtable */
};
struct t4_filter_tuple {
/*
* These are always available.
*/
uint8_t sip[16]; /* source IP address (IPv4 in [3:0]) */
uint8_t dip[16]; /* destination IP address (IPv4 in [3:0]) */
uint16_t sport; /* source port */
uint16_t dport; /* destination port */
/*
* A combination of these (up to 36 bits) is available.  TP_VLAN_PRI_MAP
* is used to select the global mode and all filters are limited to the
* set of fields allowed by the global mode.
*/
uint16_t ovlan; /* outer VLAN */
uint16_t ivlan; /* inner VLAN */
uint16_t ethtype; /* Ethernet type */
uint8_t tos; /* TOS/Traffic Class */
uint8_t proto; /* protocol type */
uint32_t fcoe:1; /* FCoE packet */
uint32_t iport:3; /* ingress port */
uint32_t matchtype:3; /* MPS match type */
uint32_t frag:1; /* fragmentation extension header */
uint32_t macidx:9; /* exact match MAC index */
uint32_t ivlan_vld:1; /* inner VLAN valid */
uint32_t ovlan_vld:1; /* outer VLAN valid */
};
struct t4_filter_specification {
uint32_t hitcnts:1; /* count filter hits in TCB */
uint32_t prio:1; /* filter has priority over active/server */
uint32_t type:1; /* 0 => IPv4, 1 => IPv6 */
uint32_t action:2; /* drop, pass, switch */
uint32_t rpttid:1; /* report TID in RSS hash field */
uint32_t dirsteer:1; /* 0 => RSS, 1 => steer to iq */
uint32_t iq:10; /* ingress queue */
uint32_t maskhash:1; /* dirsteer=0: store RSS hash in TCB */
uint32_t dirsteerhash:1;/* dirsteer=1: 0 => TCB contains RSS hash */
/* 1 => TCB contains IQ ID */
/*
* Switch proxy/rewrite fields. An ingress packet which matches a
* filter with "switch" set will be looped back out as an egress
* packet -- potentially with some Ethernet header rewriting.
*/
uint32_t eport:2; /* egress port to switch packet out */
uint32_t newdmac:1; /* rewrite destination MAC address */
uint32_t newsmac:1; /* rewrite source MAC address */
uint32_t newvlan:2; /* rewrite VLAN Tag */
uint8_t dmac[ETHER_ADDR_LEN]; /* new destination MAC address */
uint8_t smac[ETHER_ADDR_LEN]; /* new source MAC address */
uint16_t vlan; /* VLAN Tag to insert */
/*
* Filter rule value/mask pairs.
*/
struct t4_filter_tuple val;
struct t4_filter_tuple mask;
};
struct t4_filter {
uint32_t idx;
uint64_t hits;
struct t4_filter_specification fs;
};
#define CHELSIO_T4_GETREG _IOWR('f', T4_GETREG, struct t4_reg)
#define CHELSIO_T4_SETREG _IOW('f', T4_SETREG, struct t4_reg)
#define CHELSIO_T4_REGDUMP _IOWR('f', T4_REGDUMP, struct t4_regdump)
#define CHELSIO_T4_GET_FILTER_MODE _IOWR('f', T4_GET_FILTER_MODE, uint32_t)
#define CHELSIO_T4_SET_FILTER_MODE _IOW('f', T4_SET_FILTER_MODE, uint32_t)
#define CHELSIO_T4_GET_FILTER _IOWR('f', T4_GET_FILTER, struct t4_filter)
#define CHELSIO_T4_SET_FILTER _IOW('f', T4_SET_FILTER, struct t4_filter)
#define CHELSIO_T4_DEL_FILTER _IOW('f', T4_DEL_FILTER, struct t4_filter)
#endif


@@ -56,6 +56,7 @@ __FBSDID("$FreeBSD$");
#include "common/t4_hw.h"
#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "common/t4fw_interface.h"
@@ -218,6 +219,11 @@ TUNABLE_INT("hw.cxgbe.interrupt_forwarding", &intr_fwd);
SYSCTL_UINT(_hw_cxgbe, OID_AUTO, interrupt_forwarding, CTLFLAG_RDTUN,
&intr_fwd, 0, "always use forwarded interrupts");
static unsigned int filter_mode = HW_TPL_FR_MT_PR_IV_P_FC;
TUNABLE_INT("hw.cxgbe.filter_mode", &filter_mode);
SYSCTL_UINT(_hw_cxgbe, OID_AUTO, filter_mode, CTLFLAG_RDTUN,
&filter_mode, 0, "default global filter mode.");
struct intrs_and_queues {
int intr_type; /* INTx, MSI, or MSI-X */
int nirq; /* Number of vectors */
@@ -228,6 +234,15 @@ struct intrs_and_queues {
int nrxq1g; /* # of NIC rxq's for each 1G port */
};
struct filter_entry {
uint32_t valid:1; /* filter allocated and valid */
uint32_t locked:1; /* filter is administratively locked */
uint32_t pending:1; /* filter action is pending firmware reply */
uint32_t smtidx:8; /* Source MAC Table index for smac */
struct t4_filter_specification fs;
};
enum {
MEMWIN0_APERTURE = 2048,
MEMWIN0_BASE = 0x1b800,
@@ -280,6 +295,18 @@ static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static inline void txq_start(struct ifnet *, struct sge_txq *);
static uint32_t fconf_to_mode(uint32_t);
static uint32_t mode_to_fconf(uint32_t);
static uint32_t fspec_to_fconf(struct t4_filter_specification *);
static int get_filter_mode(struct adapter *, uint32_t *);
static int set_filter_mode(struct adapter *, uint32_t);
static int get_filter(struct adapter *, struct t4_filter *);
static int set_filter(struct adapter *, struct t4_filter *);
static int del_filter(struct adapter *, struct t4_filter *);
static void clear_filter(struct adapter *, struct filter_entry *);
static int set_filter_wr(struct adapter *, int);
static int del_filter_wr(struct adapter *, int);
void filter_rpl(struct adapter *, const struct cpl_set_tcb_rpl *);
static int t4_mod_event(module_t, int, void *);
struct t4_pciids {
@@ -421,9 +448,12 @@ t4_attach(device_t dev)
t4_sge_init(sc);
/*
* XXX: This is the place to call t4_set_filter_mode()
*/
t4_set_filter_mode(sc, filter_mode);
t4_set_reg_field(sc, A_TP_GLOBAL_CONFIG,
V_FIVETUPLELOOKUP(M_FIVETUPLELOOKUP),
V_FIVETUPLELOOKUP(M_FIVETUPLELOOKUP));
t4_tp_wr_bits_indirect(sc, A_TP_INGRESS_CONFIG, F_CSUM_HAS_PSEUDO_HDR,
F_LOOKUPEVERYPKT);
/* get basic stuff going */
rc = -t4_early_init(sc, sc->mbox);
@@ -661,6 +691,7 @@ t4_detach(device_t dev)
free(sc->sge.fiq, M_CXGBE);
free(sc->sge.iqmap, M_CXGBE);
free(sc->sge.eqmap, M_CXGBE);
free(sc->tids.ftid_tab, M_CXGBE);
t4_destroy_dma_tag(sc);
mtx_destroy(&sc->sc_lock);
@@ -2699,6 +2730,481 @@ cxgbe_txq_start(void *arg, int count)
TXQ_UNLOCK(txq);
}
static uint32_t
fconf_to_mode(uint32_t fconf)
{
uint32_t mode;
mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
if (fconf & F_FRAGMENTATION)
mode |= T4_FILTER_IP_FRAGMENT;
if (fconf & F_MPSHITTYPE)
mode |= T4_FILTER_MPS_HIT_TYPE;
if (fconf & F_MACMATCH)
mode |= T4_FILTER_MAC_IDX;
if (fconf & F_ETHERTYPE)
mode |= T4_FILTER_ETH_TYPE;
if (fconf & F_PROTOCOL)
mode |= T4_FILTER_IP_PROTO;
if (fconf & F_TOS)
mode |= T4_FILTER_IP_TOS;
if (fconf & F_VLAN)
mode |= T4_FILTER_IVLAN;
if (fconf & F_VNIC_ID)
mode |= T4_FILTER_OVLAN;
if (fconf & F_PORT)
mode |= T4_FILTER_PORT;
if (fconf & F_FCOE)
mode |= T4_FILTER_FCoE;
return (mode);
}
static uint32_t
mode_to_fconf(uint32_t mode)
{
uint32_t fconf = 0;
if (mode & T4_FILTER_IP_FRAGMENT)
fconf |= F_FRAGMENTATION;
if (mode & T4_FILTER_MPS_HIT_TYPE)
fconf |= F_MPSHITTYPE;
if (mode & T4_FILTER_MAC_IDX)
fconf |= F_MACMATCH;
if (mode & T4_FILTER_ETH_TYPE)
fconf |= F_ETHERTYPE;
if (mode & T4_FILTER_IP_PROTO)
fconf |= F_PROTOCOL;
if (mode & T4_FILTER_IP_TOS)
fconf |= F_TOS;
if (mode & T4_FILTER_IVLAN)
fconf |= F_VLAN;
if (mode & T4_FILTER_OVLAN)
fconf |= F_VNIC_ID;
if (mode & T4_FILTER_PORT)
fconf |= F_PORT;
if (mode & T4_FILTER_FCoE)
fconf |= F_FCOE;
return (fconf);
}
static uint32_t
fspec_to_fconf(struct t4_filter_specification *fs)
{
uint32_t fconf = 0;
if (fs->val.frag || fs->mask.frag)
fconf |= F_FRAGMENTATION;
if (fs->val.matchtype || fs->mask.matchtype)
fconf |= F_MPSHITTYPE;
if (fs->val.macidx || fs->mask.macidx)
fconf |= F_MACMATCH;
if (fs->val.ethtype || fs->mask.ethtype)
fconf |= F_ETHERTYPE;
if (fs->val.proto || fs->mask.proto)
fconf |= F_PROTOCOL;
if (fs->val.tos || fs->mask.tos)
fconf |= F_TOS;
if (fs->val.ivlan_vld || fs->mask.ivlan_vld)
fconf |= F_VLAN;
if (fs->val.ovlan_vld || fs->mask.ovlan_vld)
fconf |= F_VNIC_ID;
if (fs->val.iport || fs->mask.iport)
fconf |= F_PORT;
if (fs->val.fcoe || fs->mask.fcoe)
fconf |= F_FCOE;
return (fconf);
}
static int
get_filter_mode(struct adapter *sc, uint32_t *mode)
{
uint32_t fconf;
t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
A_TP_VLAN_PRI_MAP);
*mode = fconf_to_mode(fconf);
return (0);
}
static int
set_filter_mode(struct adapter *sc, uint32_t mode)
{
uint32_t fconf;
int rc;
fconf = mode_to_fconf(mode);
ADAPTER_LOCK(sc);
if (IS_BUSY(sc)) {
rc = EAGAIN;
goto done;
}
if (sc->tids.ftids_in_use > 0) {
rc = EBUSY;
goto done;
}
rc = -t4_set_filter_mode(sc, fconf);
done:
ADAPTER_UNLOCK(sc);
return (rc);
}
static int
get_filter(struct adapter *sc, struct t4_filter *t)
{
int i, nfilters = sc->tids.nftids;
struct filter_entry *f;
ADAPTER_LOCK_ASSERT_OWNED(sc);
if (IS_BUSY(sc))
return (EAGAIN);
if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
t->idx >= nfilters) {
t->idx = 0xffffffff;
return (0);
}
f = &sc->tids.ftid_tab[t->idx];
for (i = t->idx; i < nfilters; i++, f++) {
if (f->valid) {
t->idx = i;
t->fs = f->fs;
t->hits = 0; /* XXX implement */
return (0);
}
}
t->idx = 0xffffffff;
return (0);
}
static int
set_filter(struct adapter *sc, struct t4_filter *t)
{
uint32_t fconf;
unsigned int nfilters, nports;
struct filter_entry *f;
int i;
ADAPTER_LOCK_ASSERT_OWNED(sc);
nfilters = sc->tids.nftids;
nports = sc->params.nports;
if (nfilters == 0)
return (ENOTSUP);
if (!(sc->flags & FULL_INIT_DONE))
return (EAGAIN);
if (t->idx >= nfilters)
return (EINVAL);
/* Validate against the global filter mode */
t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
A_TP_VLAN_PRI_MAP);
if ((fconf | fspec_to_fconf(&t->fs)) != fconf)
return (E2BIG);
if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports)
return (EINVAL);
if (t->fs.val.iport >= nports)
return (EINVAL);
/* Can't specify an iq if not steering to it */
if (!t->fs.dirsteer && t->fs.iq)
return (EINVAL);
/* IPv6 filter idx must be 4 aligned */
if (t->fs.type == 1 &&
((t->idx & 0x3) || t->idx + 4 >= nfilters))
return (EINVAL);
if (sc->tids.ftid_tab == NULL) {
KASSERT(sc->tids.ftids_in_use == 0,
("%s: no memory allocated but filters_in_use > 0",
__func__));
sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
if (sc->tids.ftid_tab == NULL)
return (ENOMEM);
}
for (i = 0; i < 4; i++) {
f = &sc->tids.ftid_tab[t->idx + i];
if (f->pending || f->valid)
return (EBUSY);
if (f->locked)
return (EPERM);
if (t->fs.type == 0)
break;
}
f = &sc->tids.ftid_tab[t->idx];
f->fs = t->fs;
return set_filter_wr(sc, t->idx);
}
static int
del_filter(struct adapter *sc, struct t4_filter *t)
{
unsigned int nfilters;
struct filter_entry *f;
ADAPTER_LOCK_ASSERT_OWNED(sc);
if (IS_BUSY(sc))
return (EAGAIN);
nfilters = sc->tids.nftids;
if (nfilters == 0)
return (ENOTSUP);
if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
t->idx >= nfilters)
return (EINVAL);
if (!(sc->flags & FULL_INIT_DONE))
return (EAGAIN);
f = &sc->tids.ftid_tab[t->idx];
if (f->pending)
return (EBUSY);
if (f->locked)
return (EPERM);
if (f->valid) {
t->fs = f->fs; /* extra info for the caller */
return del_filter_wr(sc, t->idx);
}
return (0);
}
/* XXX: L2T */
static void
clear_filter(struct adapter *sc, struct filter_entry *f)
{
(void) sc;
bzero(f, sizeof (*f));
}
static int
set_filter_wr(struct adapter *sc, int fidx)
{
int rc;
struct filter_entry *f = &sc->tids.ftid_tab[fidx];
struct mbuf *m;
struct fw_filter_wr *fwr;
unsigned int ftid;
ADAPTER_LOCK_ASSERT_OWNED(sc);
if (f->fs.newdmac || f->fs.newvlan)
return (ENOTSUP); /* XXX: fix after L2T code */
ftid = sc->tids.ftid_base + fidx;
m = m_gethdr(M_NOWAIT, MT_DATA);
if (m == NULL)
return (ENOMEM);
fwr = mtod(m, struct fw_filter_wr *);
m->m_len = m->m_pkthdr.len = sizeof(*fwr);
bzero(fwr, sizeof (*fwr));
fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
fwr->tid_to_iq =
htobe32(V_FW_FILTER_WR_TID(ftid) |
V_FW_FILTER_WR_RQTYPE(f->fs.type) |
V_FW_FILTER_WR_NOREPLY(0) |
V_FW_FILTER_WR_IQ(f->fs.iq));
fwr->del_filter_to_l2tix =
htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
f->fs.newvlan == VLAN_REWRITE) |
V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
f->fs.newvlan == VLAN_REWRITE) |
V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
V_FW_FILTER_WR_PRIO(f->fs.prio) |
V_FW_FILTER_WR_L2TIX(0)); /* XXX: L2T */
fwr->ethtype = htobe16(f->fs.val.ethtype);
fwr->ethtypem = htobe16(f->fs.mask.ethtype);
fwr->frag_to_ovlan_vldm =
(V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
fwr->smac_sel = 0;
fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
fwr->maci_to_matchtypem =
htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
V_FW_FILTER_WR_PORT(f->fs.val.iport) |
V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
fwr->ptcl = f->fs.val.proto;
fwr->ptclm = f->fs.mask.proto;
fwr->ttyp = f->fs.val.tos;
fwr->ttypm = f->fs.mask.tos;
fwr->ivlan = htobe16(f->fs.val.ivlan);
fwr->ivlanm = htobe16(f->fs.mask.ivlan);
fwr->ovlan = htobe16(f->fs.val.ovlan);
fwr->ovlanm = htobe16(f->fs.mask.ovlan);
bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
fwr->lp = htobe16(f->fs.val.dport);
fwr->lpm = htobe16(f->fs.mask.dport);
fwr->fp = htobe16(f->fs.val.sport);
fwr->fpm = htobe16(f->fs.mask.sport);
if (f->fs.newsmac)
bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));
f->pending = 1;
sc->tids.ftids_in_use++;
rc = t4_mgmt_tx(sc, m);
if (rc != 0) {
sc->tids.ftids_in_use--;
m_freem(m);
clear_filter(sc, f);
}
return (rc);
}
static int
del_filter_wr(struct adapter *sc, int fidx)
{
struct filter_entry *f = &sc->tids.ftid_tab[fidx];
struct mbuf *m;
struct fw_filter_wr *fwr;
unsigned int rc, ftid;
ADAPTER_LOCK_ASSERT_OWNED(sc);
ftid = sc->tids.ftid_base + fidx;
m = m_gethdr(M_NOWAIT, MT_DATA);
if (m == NULL)
return (ENOMEM);
fwr = mtod(m, struct fw_filter_wr *);
m->m_len = m->m_pkthdr.len = sizeof(*fwr);
bzero(fwr, sizeof (*fwr));
t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
f->pending = 1;
rc = t4_mgmt_tx(sc, m);
if (rc != 0) {
f->pending = 0;
m_freem(m);
}
return (rc);
}
/* XXX move intr handlers to main.c and make this static */
void
filter_rpl(struct adapter *sc, const struct cpl_set_tcb_rpl *rpl)
{
unsigned int idx = GET_TID(rpl);
if (idx >= sc->tids.ftid_base &&
(idx -= sc->tids.ftid_base) < sc->tids.nftids) {
unsigned int rc = G_COOKIE(rpl->cookie);
struct filter_entry *f = &sc->tids.ftid_tab[idx];
if (rc == FW_FILTER_WR_FLT_DELETED) {
/*
* Clear the filter when we get confirmation from the
* hardware that the filter has been deleted.
*/
clear_filter(sc, f);
sc->tids.ftids_in_use--;
} else if (rc == FW_FILTER_WR_SMT_TBL_FULL) {
device_printf(sc->dev,
"filter %u setup failed due to full SMT\n", idx);
clear_filter(sc, f);
sc->tids.ftids_in_use--;
} else if (rc == FW_FILTER_WR_FLT_ADDED) {
f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
f->pending = 0; /* asynchronous setup completed */
f->valid = 1;
} else {
/*
* Something went wrong. Issue a warning about the
* problem and clear everything out.
*/
device_printf(sc->dev,
"filter %u setup failed with error %u\n", idx, rc);
clear_filter(sc, f);
sc->tids.ftids_in_use--;
}
}
}
int
t4_os_find_pci_capability(struct adapter *sc, int cap)
{
@@ -2873,6 +3379,27 @@ t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
free(buf, M_CXGBE);
break;
}
case CHELSIO_T4_GET_FILTER_MODE:
rc = get_filter_mode(sc, (uint32_t *)data);
break;
case CHELSIO_T4_SET_FILTER_MODE:
rc = set_filter_mode(sc, *(uint32_t *)data);
break;
case CHELSIO_T4_GET_FILTER:
ADAPTER_LOCK(sc);
rc = get_filter(sc, (struct t4_filter *)data);
ADAPTER_UNLOCK(sc);
break;
case CHELSIO_T4_SET_FILTER:
ADAPTER_LOCK(sc);
rc = set_filter(sc, (struct t4_filter *)data);
ADAPTER_UNLOCK(sc);
break;
case CHELSIO_T4_DEL_FILTER:
ADAPTER_LOCK(sc);
rc = del_filter(sc, (struct t4_filter *)data);
ADAPTER_UNLOCK(sc);
break;
default:
rc = EINVAL;
}


@@ -142,6 +142,8 @@ static int handle_sge_egr_update(struct adapter *,
static int ctrl_tx(struct adapter *, struct sge_ctrlq *, struct mbuf *);
extern void filter_rpl(struct adapter *, const struct cpl_set_tcb_rpl *);
/*
* Called on MOD_LOAD and fills up fl_buf_info[].
*/
@@ -580,7 +582,9 @@ t4_evt_rx(void *arg)
case CPL_SGE_EGR_UPDATE:
handle_sge_egr_update(sc, (const void *)(rss + 1));
break;
case CPL_SET_TCB_RPL:
filter_rpl(sc, (const void *) (rss + 1));
break;
default:
device_printf(sc->dev,
"can't handle CPL opcode %d.", rss->opcode);