cxgbe(4): significant rx rework.

- More flexible cluster size selection, including the ability to fall
  back to a safe cluster size (PAGE_SIZE from zone_jumbop by default) in
  case an allocation of a larger size fails.
- A single get_fl_payload() function that assembles the payload into an
  mbuf chain for any kind of freelist.  This replaces two variants: one
  for freelists with buffer packing enabled and another for those without.
- Buffer packing with any sized cluster.  It was limited to 4K clusters
  only before this change.
- Enable buffer packing for TOE rx queues as well.
- Statistics and tunables to go with all these changes.  The driver's
  man page will be updated separately.

MFC after:	5 weeks
This commit is contained in:
Navdeep Parhar 2014-03-18 20:14:13 +00:00
parent 3ca4ead75d
commit 38035ed6dc
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=263317
4 changed files with 645 additions and 588 deletions

View File

@@ -134,10 +134,11 @@ enum {
RX_FL_ESIZE = EQ_ESIZE, /* 8 64bit addresses */
#if MJUMPAGESIZE != MCLBYTES
FL_BUF_SIZES_MAX = 5, /* cluster, jumbop, jumbo9k, jumbo16k, extra */
SW_ZONE_SIZES = 4, /* cluster, jumbop, jumbo9k, jumbo16k */
#else
FL_BUF_SIZES_MAX = 4, /* cluster, jumbo9k, jumbo16k, extra */
SW_ZONE_SIZES = 3, /* cluster, jumbo9k, jumbo16k */
#endif
CL_METADATA_SIZE = CACHE_LINE_SIZE,
CTRL_EQ_QSIZE = 128,
@@ -241,15 +242,28 @@ struct port_info {
uint8_t hw_addr[ETHER_ADDR_LEN]; /* factory MAC address, won't change */
};
struct fl_sdesc {
bus_dmamap_t map;
caddr_t cl;
uint8_t tag_idx; /* the fl->tag entry this map comes from */
/* Where the cluster came from, how it has been carved up. */
struct cluster_layout {
int8_t zidx;	/* NOTE(review): presumably an index into sge.sw_zone_info[] -- confirm */
int8_t hwidx;	/* NOTE(review): presumably an index into sge.hw_buf_info[] -- confirm */
uint16_t region1; /* mbufs laid out within this region */
/* region2 is the DMA region */
uint16_t region3; /* cluster_metadata within this region */
};
/*
 * Per-cluster bookkeeping.  Per cluster_layout.region3 above, this lives
 * inside the cluster itself; it must fit within CL_METADATA_SIZE (there is
 * a CTASSERT for this elsewhere in the driver).
 */
struct cluster_metadata {
u_int refcount;	/* reference count on the cluster */
#ifdef INVARIANTS
__be64 ba_hwtag;	/* NOTE(review): looks like bus address + hw tag, debug-only -- confirm */
struct fl_sdesc *sd; /* For debug only. Could easily be stale */
#endif
};
/* Software descriptor for one freelist buffer. */
struct fl_sdesc {
caddr_t cl;	/* the cluster backing this fl entry */
uint8_t nmbuf;	/* NOTE(review): presumably # of mbufs carved out of cl -- confirm */
struct cluster_layout cll;	/* where cl came from / how it is carved up */
};
/* One hardware tx descriptor: eight big-endian 64-bit flits (64 bytes). */
struct tx_desc {
__be64 flit[8];
};
@@ -368,17 +382,19 @@ struct sge_eq {
uint32_t unstalled; /* recovered from stall */
};
struct fl_buf_info {
u_int size;
int type;
int hwtag:4; /* tag in low 4 bits of the pa. */
uma_zone_t zone;
/*
 * One entry per UMA zone that rx clusters can be allocated from
 * (cluster, jumbop, jumbo9k, jumbo16k -- see SW_ZONE_SIZES).
 */
struct sw_zone_info {
uma_zone_t zone; /* zone that this cluster comes from */
int size; /* size of cluster: 2K, 4K, 9K, 16K, etc. */
int type; /* EXT_xxx type of the cluster */
int8_t head_hwidx;	/* NOTE(review): presumably head of this zone's hwidx list (see hw_buf_info.next) -- confirm */
int8_t tail_hwidx;	/* NOTE(review): presumably tail of that same list -- confirm */
};
/* One entry per hardware buffer size (indexed by hwidx, up to SGE_FLBUF_SIZES). */
struct hw_buf_info {
int8_t zidx; /* backpointer to zone; -ve means unused */
int8_t next; /* next hwidx for this zone; -1 means no more */
int size;	/* buffer size in bytes */
};
#define FL_BUF_SIZES(sc) (sc->sge.fl_buf_sizes)
#define FL_BUF_SIZE(sc, x) (sc->sge.fl_buf_info[x].size)
#define FL_BUF_TYPE(sc, x) (sc->sge.fl_buf_info[x].type)
#define FL_BUF_HWTAG(sc, x) (sc->sge.fl_buf_info[x].hwtag)
#define FL_BUF_ZONE(sc, x) (sc->sge.fl_buf_info[x].zone)
enum {
FL_STARVING = (1 << 0), /* on the adapter's list of starving fl's */
@@ -392,9 +408,8 @@ enum {
struct sge_fl {
bus_dma_tag_t desc_tag;
bus_dmamap_t desc_map;
bus_dma_tag_t tag[FL_BUF_SIZES_MAX]; /* only first FL_BUF_SIZES(sc) are
valid */
uint8_t tag_idx;
struct cluster_layout cll_def; /* default refill zone, layout */
struct cluster_layout cll_alt; /* alternate refill zone, layout */
struct mtx fl_lock;
char lockname[16];
int flags;
@@ -411,9 +426,17 @@ struct sge_fl {
uint32_t needed; /* # of buffers needed to fill up fl. */
uint32_t lowat; /* # of buffers <= this means fl needs help */
uint32_t pending; /* # of bufs allocated since last doorbell */
u_int dmamap_failed;
struct mbuf *mstash[8];
TAILQ_ENTRY(sge_fl) link; /* All starving freelists */
struct mbuf *m0;
struct mbuf **pnext;
u_int remaining;
uint64_t mbuf_allocated;/* # of mbuf allocated from zone_mbuf */
uint64_t mbuf_inlined; /* # of mbuf created within clusters */
uint64_t cl_allocated; /* # of clusters allocated */
uint64_t cl_recycled; /* # of clusters recycled */
uint64_t cl_fast_recycled; /* # of clusters recycled (fast) */
};
/* txq: SGE egress queue + what's needed for Ethernet NIC */
@@ -547,8 +570,11 @@ struct sge {
struct sge_iq **iqmap; /* iq->cntxt_id to iq mapping */
struct sge_eq **eqmap; /* eq->cntxt_id to eq mapping */
u_int fl_buf_sizes __aligned(CACHE_LINE_SIZE);
struct fl_buf_info fl_buf_info[FL_BUF_SIZES_MAX];
int pack_boundary;
int8_t safe_hwidx1; /* may not have room for metadata */
int8_t safe_hwidx2; /* with room for metadata and maybe more */
struct sw_zone_info sw_zone_info[SW_ZONE_SIZES];
struct hw_buf_info hw_buf_info[SGE_FLBUF_SIZES];
};
struct rss_header;

View File

@@ -87,6 +87,7 @@ enum {
SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */
SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */
SGE_MAX_IQ_SIZE = 65520,
SGE_FLBUF_SIZES = 16,
};
struct sge_qstat { /* data written to SGE queue status entries */

View File

@@ -494,6 +494,8 @@ CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
/* Compile-time sanity checks: handler-table sizes and metadata layout. */
CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);
/* cluster_metadata is stored inside the cluster; it must fit in its region. */
CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);
static int
t4_probe(device_t dev)
{

File diff suppressed because it is too large Load Diff