Limit the size of the posted receive buffers in the Rx rings to MJUMPAGESIZE. Previously, for jumbo MTUs, the Rx ring buffers were sized to the MTU plus any required pad. Now, when that size is greater than MJUMPAGESIZE, the packet is spanned across multiple buffers and the mbufs are stitched together.

Submitted by:	gary.zambrano@qlogic.com
Approved by:	davidcs@freebsd.org
commit 71bbce5b72
parent c06534502b
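For illustration, here is a minimal userland sketch of the new buffer-sizing policy (not part of the commit). In the driver, MCLBYTES and MJUMPAGESIZE come from the kernel headers, while IP_HEADER_ALIGNMENT_PADDING and ETH_OVERHEAD are driver-defined; the values hardcoded below are typical amd64 defaults and assumptions used only for this example.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the kernel/driver constants (assumed values). */
    #define MCLBYTES                    2048  /* standard mbuf cluster */
    #define MJUMPAGESIZE                4096  /* page-sized jumbo cluster */
    #define IP_HEADER_ALIGNMENT_PADDING 2     /* assumed alignment pad */
    #define ETH_OVERHEAD                24    /* assumed L2 overhead */

    /* Mirror of the tiering in the new bxe_set_fp_rx_buf_size(). */
    static void
    pick_rx_sizes(uint32_t mtu, uint32_t *rx_buf_size, uint32_t *mbuf_alloc_size)
    {
        uint32_t sz = IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + mtu;

        if (sz <= MCLBYTES) {                        /* fits one cluster */
            *rx_buf_size = sz;           *mbuf_alloc_size = MCLBYTES;
        } else if (sz <= MJUMPAGESIZE) {             /* fits one page cluster */
            *rx_buf_size = sz;           *mbuf_alloc_size = MJUMPAGESIZE;
        } else if (sz <= MJUMPAGESIZE + MCLBYTES) {  /* head + SGE fragments */
            *rx_buf_size = MCLBYTES;     *mbuf_alloc_size = MCLBYTES;
        } else if (sz <= 2 * MJUMPAGESIZE) {
            *rx_buf_size = MJUMPAGESIZE; *mbuf_alloc_size = MJUMPAGESIZE;
        } else {                                     /* span many small buffers */
            *rx_buf_size = MCLBYTES;     *mbuf_alloc_size = MCLBYTES;
        }
    }

    int
    main(void)
    {
        uint32_t mtus[] = { 1500, 4000, 6000, 9000 };
        uint32_t rx, alloc;

        for (int i = 0; i < 4; i++) {
            pick_rx_sizes(mtus[i], &rx, &alloc);
            printf("mtu %5u -> rx_buf_size %5u, mbuf_alloc_size %5u\n",
                   mtus[i], rx, alloc);
        }
        return (0);
    }

Note that for a 9000-byte MTU the frame no longer gets a single MJUM9BYTES buffer; it is received into several smaller buffers and reassembled, which is exactly what the new bxe_service_rxsgl() below does.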
@@ -3100,7 +3100,7 @@ static inline void
 bxe_update_sge_prod(struct bxe_softc *sc,
                     struct bxe_fastpath *fp,
                     uint16_t sge_len,
-                    struct eth_end_agg_rx_cqe *cqe)
+                    union eth_sgl_or_raw_data *cqe)
 {
     uint16_t last_max, last_elem, first_elem;
     uint16_t delta = 0;
@@ -3113,17 +3113,17 @@ bxe_update_sge_prod(struct bxe_softc *sc,
     /* first mark all used pages */
     for (i = 0; i < sge_len; i++) {
         BIT_VEC64_CLEAR_BIT(fp->sge_mask,
-                            RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[i])));
+                            RX_SGE(le16toh(cqe->sgl[i])));
     }
 
     BLOGD(sc, DBG_LRO,
           "fp[%02d] fp_cqe->sgl[%d] = %d\n",
           fp->index, sge_len - 1,
-          le16toh(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
+          le16toh(cqe->sgl[sge_len - 1]));
 
     /* assume that the last SGE index is the biggest */
     bxe_update_last_max_sge(fp,
-                            le16toh(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
+                            le16toh(cqe->sgl[sge_len - 1]));
 
     last_max = RX_SGE(fp->last_max_sge);
     last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
@@ -3238,6 +3238,53 @@ bxe_tpa_stop_exit:
     fp->rx_tpa_queue_used &= ~(1 << queue);
 }
 
+static uint8_t
+bxe_service_rxsgl(
+                 struct bxe_fastpath *fp,
+                 uint16_t len,
+                 uint16_t lenonbd,
+                 struct mbuf *m,
+                 struct eth_fast_path_rx_cqe *cqe_fp)
+{
+    struct mbuf *m_frag;
+    uint16_t frags, frag_len;
+    uint16_t sge_idx = 0;
+    uint16_t j;
+    uint8_t i, rc = 0;
+    uint32_t frag_size;
+
+    /* adjust the mbuf */
+    m->m_len = lenonbd;
+
+    frag_size = len - lenonbd;
+    frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
+
+    for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) {
+        sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j]));
+
+        m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
+        frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE));
+        m_frag->m_len = frag_len;
+
+        /* allocate a new mbuf for the SGE */
+        rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
+        if (rc) {
+            /* Leave all remaining SGEs in the ring! */
+            return (rc);
+        }
+        fp->eth_q_stats.mbuf_alloc_sge--;
+
+        /* concatenate the fragment to the head mbuf */
+        m_cat(m, m_frag);
+
+        frag_size -= frag_len;
+    }
+
+    bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data);
+
+    return rc;
+}
+
 static uint8_t
 bxe_rxeof(struct bxe_softc *sc,
           struct bxe_fastpath *fp)
@@ -3278,7 +3325,7 @@ bxe_rxeof(struct bxe_softc *sc,
         struct eth_fast_path_rx_cqe *cqe_fp;
         uint8_t cqe_fp_flags;
         enum eth_rx_cqe_type cqe_fp_type;
-        uint16_t len, pad;
+        uint16_t len, lenonbd, pad;
         struct mbuf *m = NULL;
 
         comp_ring_cons = RCQ(sw_cq_cons);
@@ -3293,7 +3340,7 @@ bxe_rxeof(struct bxe_softc *sc,
         BLOGD(sc, DBG_RX,
               "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d "
               "BD prod=%d cons=%d CQE type=0x%x err=0x%x "
-              "status=0x%x rss_hash=0x%x vlan=0x%x len=%u\n",
+              "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n",
               fp->index,
               hw_cq_cons,
               sw_cq_cons,
@@ -3304,7 +3351,8 @@ bxe_rxeof(struct bxe_softc *sc,
               cqe_fp->status_flags,
               le32toh(cqe_fp->rss_hash_result),
               le16toh(cqe_fp->vlan_tag),
-              le16toh(cqe_fp->pkt_len_or_gro_seg_len));
+              le16toh(cqe_fp->pkt_len_or_gro_seg_len),
+              le16toh(cqe_fp->len_on_bd));
 
         /* is this a slowpath msg? */
         if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) {
@@ -3351,7 +3399,7 @@ bxe_rxeof(struct bxe_softc *sc,
                 bxe_tpa_stop(sc, fp, tpa_info, queue, pages,
                              &cqe->end_agg_cqe, comp_ring_cons);
 
-                bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe);
+                bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data);
 
                 goto next_cqe;
             }
@@ -3367,6 +3415,7 @@ bxe_rxeof(struct bxe_softc *sc,
         }
 
         len = le16toh(cqe_fp->pkt_len_or_gro_seg_len);
+        lenonbd = le16toh(cqe_fp->len_on_bd);
         pad = cqe_fp->placement_offset;
 
         m = rx_buf->m;
@@ -3413,6 +3462,12 @@ bxe_rxeof(struct bxe_softc *sc,
         m_adj(m, pad);
         m->m_pkthdr.len = m->m_len = len;
 
+        if (len != lenonbd){
+            rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp);
+            if (rc)
+                break;
+        }
+
         /* assign packet to this interface interface */
         if_setrcvif(m, ifp);
 
@@ -6210,30 +6265,27 @@ static void
 bxe_set_fp_rx_buf_size(struct bxe_softc *sc)
 {
     int i;
+    uint32_t rx_buf_size;
 
-    BLOGD(sc, DBG_LOAD, "mtu = %d\n", sc->mtu);
+    rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu);
 
     for (i = 0; i < sc->num_queues; i++) {
-        /* get the Rx buffer size for RX frames */
-        sc->fp[i].rx_buf_size =
-            (IP_HEADER_ALIGNMENT_PADDING +
-             ETH_OVERHEAD +
-             sc->mtu);
-
-        BLOGD(sc, DBG_LOAD, "rx_buf_size for fp[%02d] = %d\n",
-              i, sc->fp[i].rx_buf_size);
-
-        /* get the mbuf allocation size for RX frames */
-        if (sc->fp[i].rx_buf_size <= MCLBYTES) {
+        if(rx_buf_size <= MCLBYTES){
+            sc->fp[i].rx_buf_size = rx_buf_size;
             sc->fp[i].mbuf_alloc_size = MCLBYTES;
-        } else if (sc->fp[i].rx_buf_size <= BCM_PAGE_SIZE) {
-            sc->fp[i].mbuf_alloc_size = PAGE_SIZE;
-        } else {
-            sc->fp[i].mbuf_alloc_size = MJUM9BYTES;
+        }else if (rx_buf_size <= MJUMPAGESIZE){
+            sc->fp[i].rx_buf_size = rx_buf_size;
+            sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
+        }else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)){
+            sc->fp[i].rx_buf_size = MCLBYTES;
+            sc->fp[i].mbuf_alloc_size = MCLBYTES;
+        }else if (rx_buf_size <= (2 * MJUMPAGESIZE)){
+            sc->fp[i].rx_buf_size = MJUMPAGESIZE;
+            sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
+        }else {
+            sc->fp[i].rx_buf_size = MCLBYTES;
+            sc->fp[i].mbuf_alloc_size = MCLBYTES;
         }
-
-        BLOGD(sc, DBG_LOAD, "mbuf_alloc_size for fp[%02d] = %d\n",
-              i, sc->fp[i].mbuf_alloc_size);
     }
 }
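The stitching performed by bxe_service_rxsgl() above can be sketched in a standalone way: the head buffer keeps lenonbd bytes, and page-sized fragments are chained on until all len bytes are accounted for. This toy analogue is an illustration only; it uses a minimal struct instead of real mbufs and a hardcoded 4 KB SGE size (an assumption), and only the length arithmetic mirrors the driver.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define SGE_PAGE_SIZE 4096  /* assumed SGE buffer size */

    /* Toy stand-in for an mbuf; only lengths matter here. */
    struct frag {
        uint32_t len;
        struct frag *next;
    };

    /*
     * Mirror bxe_service_rxsgl()'s bookkeeping: the head holds lenonbd
     * bytes, and fragments of up to one SGE page are chained on (like
     * m_cat(m, m_frag)) until the remaining len - lenonbd bytes are used.
     */
    static struct frag *
    stitch(uint32_t len, uint32_t lenonbd)
    {
        struct frag *head = calloc(1, sizeof(*head)); /* error check elided */
        struct frag *tail = head;
        uint32_t frag_size = len - lenonbd;

        head->len = lenonbd;
        while (frag_size > 0) {
            uint32_t frag_len = frag_size < SGE_PAGE_SIZE ? frag_size
                                                          : SGE_PAGE_SIZE;
            struct frag *f = calloc(1, sizeof(*f));

            f->len = frag_len;
            tail->next = f;          /* concatenate fragment to the chain */
            tail = f;
            frag_size -= frag_len;
        }
        return (head);
    }

    int
    main(void)
    {
        /* e.g. a 9014-byte frame whose first buffer holds 2048 bytes */
        struct frag *f = stitch(9014, 2048);
        uint32_t total = 0, nfrags = 0;

        while (f != NULL) {
            struct frag *next = f->next;
            total += f->len;
            nfrags++;
            free(f);
            f = next;
        }
        printf("%u buffers, %u bytes total\n", nfrags, total);
        return (0);
    }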