MFC r207628,207635:
r207628: Enable multi-descriptor transmission for fragmented mbufs. There is no more need to defragment mbufs. After transmitting a multi-fragmented frame, the controller updates only the first descriptor of the multi-descriptor transmission, so it is the driver's responsibility to clear the OWN bits of the remaining descriptors of the multi-descriptor transmission. It seems the controller behaves much like jme(4) controllers in descriptor handling. r207635: Free the entire mbuf chain instead of only the first mbuf.
This commit is contained in:
parent
c4879659eb
commit
862e7d9d59
@ -756,6 +756,8 @@ sge_dma_alloc(struct sge_softc *sc)
|
|||||||
{
|
{
|
||||||
struct sge_chain_data *cd;
|
struct sge_chain_data *cd;
|
||||||
struct sge_list_data *ld;
|
struct sge_list_data *ld;
|
||||||
|
struct sge_rxdesc *rxd;
|
||||||
|
struct sge_txdesc *txd;
|
||||||
int error, i;
|
int error, i;
|
||||||
|
|
||||||
cd = &sc->sge_cdata;
|
cd = &sc->sge_cdata;
|
||||||
@ -869,8 +871,12 @@ sge_dma_alloc(struct sge_softc *sc)
|
|||||||
|
|
||||||
/* Create DMA maps for Tx buffers. */
|
/* Create DMA maps for Tx buffers. */
|
||||||
for (i = 0; i < SGE_TX_RING_CNT; i++) {
|
for (i = 0; i < SGE_TX_RING_CNT; i++) {
|
||||||
|
txd = &cd->sge_txdesc[i];
|
||||||
|
txd->tx_m = NULL;
|
||||||
|
txd->tx_dmamap = NULL;
|
||||||
|
txd->tx_ndesc = 0;
|
||||||
error = bus_dmamap_create(cd->sge_txmbuf_tag, 0,
|
error = bus_dmamap_create(cd->sge_txmbuf_tag, 0,
|
||||||
&cd->sge_tx_map[i]);
|
&txd->tx_dmamap);
|
||||||
if (error != 0) {
|
if (error != 0) {
|
||||||
device_printf(sc->sge_dev,
|
device_printf(sc->sge_dev,
|
||||||
"could not create Tx DMA map.\n");
|
"could not create Tx DMA map.\n");
|
||||||
@ -886,8 +892,11 @@ sge_dma_alloc(struct sge_softc *sc)
|
|||||||
}
|
}
|
||||||
/* Create DMA maps for Rx buffers. */
|
/* Create DMA maps for Rx buffers. */
|
||||||
for (i = 0; i < SGE_RX_RING_CNT; i++) {
|
for (i = 0; i < SGE_RX_RING_CNT; i++) {
|
||||||
|
rxd = &cd->sge_rxdesc[i];
|
||||||
|
rxd->rx_m = NULL;
|
||||||
|
rxd->rx_dmamap = NULL;
|
||||||
error = bus_dmamap_create(cd->sge_rxmbuf_tag, 0,
|
error = bus_dmamap_create(cd->sge_rxmbuf_tag, 0,
|
||||||
&cd->sge_rx_map[i]);
|
&rxd->rx_dmamap);
|
||||||
if (error) {
|
if (error) {
|
||||||
device_printf(sc->sge_dev,
|
device_printf(sc->sge_dev,
|
||||||
"could not create Rx DMA map.\n");
|
"could not create Rx DMA map.\n");
|
||||||
@ -903,6 +912,8 @@ sge_dma_free(struct sge_softc *sc)
|
|||||||
{
|
{
|
||||||
struct sge_chain_data *cd;
|
struct sge_chain_data *cd;
|
||||||
struct sge_list_data *ld;
|
struct sge_list_data *ld;
|
||||||
|
struct sge_rxdesc *rxd;
|
||||||
|
struct sge_txdesc *txd;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
cd = &sc->sge_cdata;
|
cd = &sc->sge_cdata;
|
||||||
@ -934,10 +945,11 @@ sge_dma_free(struct sge_softc *sc)
|
|||||||
/* Rx buffers. */
|
/* Rx buffers. */
|
||||||
if (cd->sge_rxmbuf_tag != NULL) {
|
if (cd->sge_rxmbuf_tag != NULL) {
|
||||||
for (i = 0; i < SGE_RX_RING_CNT; i++) {
|
for (i = 0; i < SGE_RX_RING_CNT; i++) {
|
||||||
if (cd->sge_rx_map[i] != NULL) {
|
rxd = &cd->sge_rxdesc[i];
|
||||||
|
if (rxd->rx_dmamap != NULL) {
|
||||||
bus_dmamap_destroy(cd->sge_rxmbuf_tag,
|
bus_dmamap_destroy(cd->sge_rxmbuf_tag,
|
||||||
cd->sge_rx_map[i]);
|
rxd->rx_dmamap);
|
||||||
cd->sge_rx_map[i] = NULL;
|
rxd->rx_dmamap = NULL;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (cd->sge_rx_spare_map != NULL) {
|
if (cd->sge_rx_spare_map != NULL) {
|
||||||
@ -951,10 +963,11 @@ sge_dma_free(struct sge_softc *sc)
|
|||||||
/* Tx buffers. */
|
/* Tx buffers. */
|
||||||
if (cd->sge_txmbuf_tag != NULL) {
|
if (cd->sge_txmbuf_tag != NULL) {
|
||||||
for (i = 0; i < SGE_TX_RING_CNT; i++) {
|
for (i = 0; i < SGE_TX_RING_CNT; i++) {
|
||||||
if (cd->sge_tx_map[i] != NULL) {
|
txd = &cd->sge_txdesc[i];
|
||||||
|
if (txd->tx_dmamap != NULL) {
|
||||||
bus_dmamap_destroy(cd->sge_txmbuf_tag,
|
bus_dmamap_destroy(cd->sge_txmbuf_tag,
|
||||||
cd->sge_tx_map[i]);
|
txd->tx_dmamap);
|
||||||
cd->sge_tx_map[i] = NULL;
|
txd->tx_dmamap = NULL;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
bus_dma_tag_destroy(cd->sge_txmbuf_tag);
|
bus_dma_tag_destroy(cd->sge_txmbuf_tag);
|
||||||
@ -991,18 +1004,20 @@ static int
|
|||||||
sge_list_tx_free(struct sge_softc *sc)
|
sge_list_tx_free(struct sge_softc *sc)
|
||||||
{
|
{
|
||||||
struct sge_chain_data *cd;
|
struct sge_chain_data *cd;
|
||||||
|
struct sge_txdesc *txd;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
SGE_LOCK_ASSERT(sc);
|
SGE_LOCK_ASSERT(sc);
|
||||||
cd = &sc->sge_cdata;
|
cd = &sc->sge_cdata;
|
||||||
for (i = 0; i < SGE_TX_RING_CNT; i++) {
|
for (i = 0; i < SGE_TX_RING_CNT; i++) {
|
||||||
if (cd->sge_tx_mbuf[i] != NULL) {
|
txd = &cd->sge_txdesc[i];
|
||||||
bus_dmamap_sync(cd->sge_txmbuf_tag,
|
if (txd->tx_m != NULL) {
|
||||||
cd->sge_tx_map[i], BUS_DMASYNC_POSTWRITE);
|
bus_dmamap_sync(cd->sge_txmbuf_tag, txd->tx_dmamap,
|
||||||
bus_dmamap_unload(cd->sge_txmbuf_tag,
|
BUS_DMASYNC_POSTWRITE);
|
||||||
cd->sge_tx_map[i]);
|
bus_dmamap_unload(cd->sge_txmbuf_tag, txd->tx_dmamap);
|
||||||
m_free(cd->sge_tx_mbuf[i]);
|
m_freem(txd->tx_m);
|
||||||
cd->sge_tx_mbuf[i] = NULL;
|
txd->tx_m = NULL;
|
||||||
|
txd->tx_ndesc = 0;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1037,18 +1052,20 @@ static int
|
|||||||
sge_list_rx_free(struct sge_softc *sc)
|
sge_list_rx_free(struct sge_softc *sc)
|
||||||
{
|
{
|
||||||
struct sge_chain_data *cd;
|
struct sge_chain_data *cd;
|
||||||
|
struct sge_rxdesc *rxd;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
SGE_LOCK_ASSERT(sc);
|
SGE_LOCK_ASSERT(sc);
|
||||||
cd = &sc->sge_cdata;
|
cd = &sc->sge_cdata;
|
||||||
for (i = 0; i < SGE_RX_RING_CNT; i++) {
|
for (i = 0; i < SGE_RX_RING_CNT; i++) {
|
||||||
if (cd->sge_rx_mbuf[i] != NULL) {
|
rxd = &cd->sge_rxdesc[i];
|
||||||
bus_dmamap_sync(cd->sge_rxmbuf_tag, cd->sge_rx_map[i],
|
if (rxd->rx_m != NULL) {
|
||||||
|
bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap,
|
||||||
BUS_DMASYNC_POSTREAD);
|
BUS_DMASYNC_POSTREAD);
|
||||||
bus_dmamap_unload(cd->sge_rxmbuf_tag,
|
bus_dmamap_unload(cd->sge_rxmbuf_tag,
|
||||||
cd->sge_rx_map[i]);
|
rxd->rx_dmamap);
|
||||||
m_free(cd->sge_rx_mbuf[i]);
|
m_freem(rxd->rx_m);
|
||||||
cd->sge_rx_mbuf[i] = NULL;
|
rxd->rx_m = NULL;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return (0);
|
return (0);
|
||||||
@ -1063,6 +1080,7 @@ sge_newbuf(struct sge_softc *sc, int prod)
|
|||||||
struct mbuf *m;
|
struct mbuf *m;
|
||||||
struct sge_desc *desc;
|
struct sge_desc *desc;
|
||||||
struct sge_chain_data *cd;
|
struct sge_chain_data *cd;
|
||||||
|
struct sge_rxdesc *rxd;
|
||||||
bus_dma_segment_t segs[1];
|
bus_dma_segment_t segs[1];
|
||||||
bus_dmamap_t map;
|
bus_dmamap_t map;
|
||||||
int error, nsegs;
|
int error, nsegs;
|
||||||
@ -1082,17 +1100,18 @@ sge_newbuf(struct sge_softc *sc, int prod)
|
|||||||
return (error);
|
return (error);
|
||||||
}
|
}
|
||||||
KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
|
KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
|
||||||
if (cd->sge_rx_mbuf[prod] != NULL) {
|
rxd = &cd->sge_rxdesc[prod];
|
||||||
bus_dmamap_sync(cd->sge_rxmbuf_tag, cd->sge_rx_map[prod],
|
if (rxd->rx_m != NULL) {
|
||||||
|
bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap,
|
||||||
BUS_DMASYNC_POSTREAD);
|
BUS_DMASYNC_POSTREAD);
|
||||||
bus_dmamap_unload(cd->sge_rxmbuf_tag, cd->sge_rx_map[prod]);
|
bus_dmamap_unload(cd->sge_rxmbuf_tag, rxd->rx_dmamap);
|
||||||
}
|
}
|
||||||
map = cd->sge_rx_map[prod];
|
map = rxd->rx_dmamap;
|
||||||
cd->sge_rx_map[prod] = cd->sge_rx_spare_map;
|
rxd->rx_dmamap = cd->sge_rx_spare_map;
|
||||||
cd->sge_rx_spare_map = map;
|
cd->sge_rx_spare_map = map;
|
||||||
bus_dmamap_sync(cd->sge_rxmbuf_tag, cd->sge_rx_map[prod],
|
bus_dmamap_sync(cd->sge_rxmbuf_tag, rxd->rx_dmamap,
|
||||||
BUS_DMASYNC_PREREAD);
|
BUS_DMASYNC_PREREAD);
|
||||||
cd->sge_rx_mbuf[prod] = m;
|
rxd->rx_m = m;
|
||||||
|
|
||||||
desc = &sc->sge_ldata.sge_rx_ring[prod];
|
desc = &sc->sge_ldata.sge_rx_ring[prod];
|
||||||
desc->sge_sts_size = 0;
|
desc->sge_sts_size = 0;
|
||||||
@ -1178,7 +1197,7 @@ sge_rxeof(struct sge_softc *sc)
|
|||||||
ifp->if_ierrors++;
|
ifp->if_ierrors++;
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
m = cd->sge_rx_mbuf[cons];
|
m = cd->sge_rxdesc[cons].rx_m;
|
||||||
if (sge_newbuf(sc, cons) != 0) {
|
if (sge_newbuf(sc, cons) != 0) {
|
||||||
sge_discard_rxbuf(sc, cons);
|
sge_discard_rxbuf(sc, cons);
|
||||||
ifp->if_iqdrops++;
|
ifp->if_iqdrops++;
|
||||||
@ -1245,8 +1264,9 @@ sge_txeof(struct sge_softc *sc)
|
|||||||
struct ifnet *ifp;
|
struct ifnet *ifp;
|
||||||
struct sge_list_data *ld;
|
struct sge_list_data *ld;
|
||||||
struct sge_chain_data *cd;
|
struct sge_chain_data *cd;
|
||||||
|
struct sge_txdesc *txd;
|
||||||
uint32_t txstat;
|
uint32_t txstat;
|
||||||
int cons, prod;
|
int cons, nsegs, prod;
|
||||||
|
|
||||||
SGE_LOCK_ASSERT(sc);
|
SGE_LOCK_ASSERT(sc);
|
||||||
|
|
||||||
@ -1260,33 +1280,47 @@ sge_txeof(struct sge_softc *sc)
|
|||||||
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
|
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
|
||||||
cons = cd->sge_tx_cons;
|
cons = cd->sge_tx_cons;
|
||||||
prod = cd->sge_tx_prod;
|
prod = cd->sge_tx_prod;
|
||||||
for (; cons != prod; SGE_INC(cons, SGE_TX_RING_CNT)) {
|
for (; cons != prod;) {
|
||||||
txstat = le32toh(ld->sge_tx_ring[cons].sge_cmdsts);
|
txstat = le32toh(ld->sge_tx_ring[cons].sge_cmdsts);
|
||||||
if ((txstat & TDC_OWN) != 0)
|
if ((txstat & TDC_OWN) != 0)
|
||||||
break;
|
break;
|
||||||
cd->sge_tx_cnt--;
|
/*
|
||||||
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
|
* Only the first descriptor of multi-descriptor transmission
|
||||||
if (cd->sge_tx_mbuf[cons] != NULL) {
|
* is updated by controller. Driver should skip entire
|
||||||
bus_dmamap_sync(cd->sge_txmbuf_tag,
|
* chained buffers for the transmitted frame. In other words
|
||||||
cd->sge_tx_map[cons], BUS_DMASYNC_POSTWRITE);
|
* TDC_OWN bit is valid only at the first descriptor of a
|
||||||
bus_dmamap_unload(cd->sge_txmbuf_tag,
|
* multi-descriptor transmission.
|
||||||
cd->sge_tx_map[cons]);
|
*/
|
||||||
m_freem(cd->sge_tx_mbuf[cons]);
|
if (SGE_TX_ERROR(txstat) != 0) {
|
||||||
cd->sge_tx_mbuf[cons] = NULL;
|
|
||||||
if (SGE_TX_ERROR(txstat) != 0) {
|
|
||||||
#ifdef SGE_SHOW_ERRORS
|
#ifdef SGE_SHOW_ERRORS
|
||||||
device_printf(sc->sge_dev, "Tx error : 0x%b\n",
|
device_printf(sc->sge_dev, "Tx error : 0x%b\n",
|
||||||
txstat, TX_ERR_BITS);
|
txstat, TX_ERR_BITS);
|
||||||
#endif
|
#endif
|
||||||
ifp->if_oerrors++;
|
ifp->if_oerrors++;
|
||||||
} else {
|
} else {
|
||||||
#ifdef notyet
|
#ifdef notyet
|
||||||
ifp->if_collisions += (txstat & 0xFFFF) - 1;
|
ifp->if_collisions += (txstat & 0xFFFF) - 1;
|
||||||
#endif
|
#endif
|
||||||
ifp->if_opackets++;
|
ifp->if_opackets++;
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
txd = &cd->sge_txdesc[cons];
|
||||||
|
for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
|
||||||
|
ld->sge_tx_ring[cons].sge_cmdsts = 0;
|
||||||
|
SGE_INC(cons, SGE_TX_RING_CNT);
|
||||||
|
}
|
||||||
|
/* Reclaim transmitted mbuf. */
|
||||||
|
KASSERT(txd->tx_m != NULL,
|
||||||
|
("%s: freeing NULL mbuf\n", __func__));
|
||||||
|
bus_dmamap_sync(cd->sge_txmbuf_tag, txd->tx_dmamap,
|
||||||
|
BUS_DMASYNC_POSTWRITE);
|
||||||
|
bus_dmamap_unload(cd->sge_txmbuf_tag, txd->tx_dmamap);
|
||||||
|
m_freem(txd->tx_m);
|
||||||
|
txd->tx_m = NULL;
|
||||||
|
cd->sge_tx_cnt -= txd->tx_ndesc;
|
||||||
|
KASSERT(cd->sge_tx_cnt >= 0,
|
||||||
|
("%s: Active Tx desc counter was garbled\n", __func__));
|
||||||
|
txd->tx_ndesc = 0;
|
||||||
|
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
|
||||||
}
|
}
|
||||||
cd->sge_tx_cons = cons;
|
cd->sge_tx_cons = cons;
|
||||||
if (cd->sge_tx_cnt == 0)
|
if (cd->sge_tx_cnt == 0)
|
||||||
@ -1388,73 +1422,78 @@ sge_encap(struct sge_softc *sc, struct mbuf **m_head)
|
|||||||
{
|
{
|
||||||
struct mbuf *m;
|
struct mbuf *m;
|
||||||
struct sge_desc *desc;
|
struct sge_desc *desc;
|
||||||
|
struct sge_txdesc *txd;
|
||||||
bus_dma_segment_t txsegs[SGE_MAXTXSEGS];
|
bus_dma_segment_t txsegs[SGE_MAXTXSEGS];
|
||||||
bus_dmamap_t map;
|
|
||||||
uint32_t cflags;
|
uint32_t cflags;
|
||||||
int error, nsegs, prod;
|
int error, i, nsegs, prod, si;
|
||||||
|
|
||||||
SGE_LOCK_ASSERT(sc);
|
SGE_LOCK_ASSERT(sc);
|
||||||
|
|
||||||
prod = sc->sge_cdata.sge_tx_prod;
|
si = prod = sc->sge_cdata.sge_tx_prod;
|
||||||
map = sc->sge_cdata.sge_tx_map[prod];
|
txd = &sc->sge_cdata.sge_txdesc[prod];
|
||||||
/*
|
error = bus_dmamap_load_mbuf_sg(sc->sge_cdata.sge_txmbuf_tag,
|
||||||
* Reading Windows inf file indicates SiS controller supports
|
txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
|
||||||
* TSO, VLAN hardware tag insertion/stripping, interrupt
|
if (error == EFBIG) {
|
||||||
* moderation and Tx/Rx checksum offloading. Unfortunately
|
m = m_collapse(*m_head, M_DONTWAIT, SGE_MAXTXSEGS);
|
||||||
* vendor didn't release these information so we're guessing
|
|
||||||
* descriptor usage with trial and errors.
|
|
||||||
*
|
|
||||||
* Controller seems to support multi-fragmented buffers but
|
|
||||||
* don't know how to enable that feature so limit number of
|
|
||||||
* fragmented Tx buffers to single buffer until we understand
|
|
||||||
* the controller internals.
|
|
||||||
* I assume the controller can pad zero bytes if frame length
|
|
||||||
* is less than 60 bytes and I also think the controller has
|
|
||||||
* no Tx buffer alignment limitation. - Need testing!
|
|
||||||
*/
|
|
||||||
if ((*m_head)->m_next != NULL) {
|
|
||||||
m = m_defrag(*m_head, M_DONTWAIT);
|
|
||||||
if (m == NULL) {
|
if (m == NULL) {
|
||||||
m_freem(*m_head);
|
m_freem(*m_head);
|
||||||
*m_head = NULL;
|
*m_head = NULL;
|
||||||
return (ENOBUFS);
|
return (ENOBUFS);
|
||||||
}
|
}
|
||||||
*m_head = m;
|
*m_head = m;
|
||||||
}
|
error = bus_dmamap_load_mbuf_sg(sc->sge_cdata.sge_txmbuf_tag,
|
||||||
error = bus_dmamap_load_mbuf_sg(sc->sge_cdata.sge_txmbuf_tag, map,
|
txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
|
||||||
*m_head, txsegs, &nsegs, 0);
|
if (error != 0) {
|
||||||
if (error != 0) {
|
m_freem(*m_head);
|
||||||
m_freem(*m_head);
|
*m_head = NULL;
|
||||||
*m_head = NULL;
|
return (error);
|
||||||
|
}
|
||||||
|
} else if (error != 0)
|
||||||
return (error);
|
return (error);
|
||||||
}
|
|
||||||
|
KASSERT(nsegs != 0, ("zero segment returned"));
|
||||||
/* Check descriptor overrun. */
|
/* Check descriptor overrun. */
|
||||||
if (sc->sge_cdata.sge_tx_cnt + nsegs >= SGE_TX_RING_CNT) {
|
if (sc->sge_cdata.sge_tx_cnt + nsegs >= SGE_TX_RING_CNT) {
|
||||||
bus_dmamap_unload(sc->sge_cdata.sge_txmbuf_tag, map);
|
bus_dmamap_unload(sc->sge_cdata.sge_txmbuf_tag, txd->tx_dmamap);
|
||||||
return (ENOBUFS);
|
return (ENOBUFS);
|
||||||
}
|
}
|
||||||
bus_dmamap_sync(sc->sge_cdata.sge_txmbuf_tag, map,
|
bus_dmamap_sync(sc->sge_cdata.sge_txmbuf_tag, txd->tx_dmamap,
|
||||||
BUS_DMASYNC_PREWRITE);
|
BUS_DMASYNC_PREWRITE);
|
||||||
|
|
||||||
|
m = *m_head;
|
||||||
cflags = 0;
|
cflags = 0;
|
||||||
if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP)
|
if (m->m_pkthdr.csum_flags & CSUM_IP)
|
||||||
cflags |= TDC_IP_CSUM;
|
cflags |= TDC_IP_CSUM;
|
||||||
if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP)
|
if (m->m_pkthdr.csum_flags & CSUM_TCP)
|
||||||
cflags |= TDC_TCP_CSUM;
|
cflags |= TDC_TCP_CSUM;
|
||||||
if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP)
|
if (m->m_pkthdr.csum_flags & CSUM_UDP)
|
||||||
cflags |= TDC_UDP_CSUM;
|
cflags |= TDC_UDP_CSUM;
|
||||||
desc = &sc->sge_ldata.sge_tx_ring[prod];
|
for (i = 0; i < nsegs; i++) {
|
||||||
desc->sge_sts_size = htole32((*m_head)->m_pkthdr.len);
|
desc = &sc->sge_ldata.sge_tx_ring[prod];
|
||||||
desc->sge_ptr = htole32(SGE_ADDR_LO(txsegs[0].ds_addr));
|
if (i == 0) {
|
||||||
desc->sge_flags = htole32(txsegs[0].ds_len);
|
desc->sge_sts_size = htole32(m->m_pkthdr.len);
|
||||||
if (prod == SGE_TX_RING_CNT - 1)
|
desc->sge_cmdsts = 0;
|
||||||
desc->sge_flags |= htole32(RING_END);
|
} else {
|
||||||
|
desc->sge_sts_size = 0;
|
||||||
|
desc->sge_cmdsts = htole32(TDC_OWN);
|
||||||
|
}
|
||||||
|
desc->sge_ptr = htole32(SGE_ADDR_LO(txsegs[i].ds_addr));
|
||||||
|
desc->sge_flags = htole32(txsegs[i].ds_len);
|
||||||
|
if (prod == SGE_TX_RING_CNT - 1)
|
||||||
|
desc->sge_flags |= htole32(RING_END);
|
||||||
|
sc->sge_cdata.sge_tx_cnt++;
|
||||||
|
SGE_INC(prod, SGE_TX_RING_CNT);
|
||||||
|
}
|
||||||
|
/* Update producer index. */
|
||||||
|
sc->sge_cdata.sge_tx_prod = prod;
|
||||||
|
|
||||||
|
desc = &sc->sge_ldata.sge_tx_ring[si];
|
||||||
/* Configure VLAN. */
|
/* Configure VLAN. */
|
||||||
if(((*m_head)->m_flags & M_VLANTAG) != 0) {
|
if((m->m_flags & M_VLANTAG) != 0) {
|
||||||
cflags |= (*m_head)->m_pkthdr.ether_vtag;
|
cflags |= m->m_pkthdr.ether_vtag;
|
||||||
desc->sge_sts_size |= htole32(TDS_INS_VLAN);
|
desc->sge_sts_size |= htole32(TDS_INS_VLAN);
|
||||||
}
|
}
|
||||||
desc->sge_cmdsts = htole32(TDC_DEF | TDC_CRC | TDC_PAD | cflags);
|
desc->sge_cmdsts |= htole32(TDC_DEF | TDC_CRC | TDC_PAD | cflags);
|
||||||
#if 1
|
#if 1
|
||||||
if ((sc->sge_flags & SGE_FLAG_SPEED_1000) != 0)
|
if ((sc->sge_flags & SGE_FLAG_SPEED_1000) != 0)
|
||||||
desc->sge_cmdsts |= htole32(TDC_BST);
|
desc->sge_cmdsts |= htole32(TDC_BST);
|
||||||
@ -1466,13 +1505,9 @@ sge_encap(struct sge_softc *sc, struct mbuf **m_head)
|
|||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
/* Request interrupt and give ownership to controller. */
|
/* Request interrupt and give ownership to controller. */
|
||||||
if ((prod % SGE_TX_INTR_FRAMES) == 0)
|
desc->sge_cmdsts |= htole32(TDC_OWN | TDC_INTR);
|
||||||
desc->sge_cmdsts |= htole32(TDC_OWN | TDC_INTR);
|
txd->tx_m = m;
|
||||||
else
|
txd->tx_ndesc = nsegs;
|
||||||
desc->sge_cmdsts |= htole32(TDC_OWN);
|
|
||||||
sc->sge_cdata.sge_tx_mbuf[prod] = *m_head;
|
|
||||||
sc->sge_cdata.sge_tx_cnt++;
|
|
||||||
SGE_INC(sc->sge_cdata.sge_tx_prod, SGE_TX_RING_CNT);
|
|
||||||
return (0);
|
return (0);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1503,7 +1538,8 @@ sge_start_locked(struct ifnet *ifp)
|
|||||||
return;
|
return;
|
||||||
|
|
||||||
for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
|
for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
|
||||||
if (sc->sge_cdata.sge_tx_cnt == SGE_TX_RING_CNT - 1) {
|
if (sc->sge_cdata.sge_tx_cnt > (SGE_TX_RING_CNT -
|
||||||
|
SGE_MAXTXSEGS)) {
|
||||||
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
|
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -283,7 +283,7 @@ struct sge_desc {
|
|||||||
#define SGE_RX_RING_CNT 256 /* [8, 1024] */
|
#define SGE_RX_RING_CNT 256 /* [8, 1024] */
|
||||||
#define SGE_TX_RING_CNT 256 /* [8, 8192] */
|
#define SGE_TX_RING_CNT 256 /* [8, 8192] */
|
||||||
#define SGE_DESC_ALIGN 16
|
#define SGE_DESC_ALIGN 16
|
||||||
#define SGE_MAXTXSEGS 1
|
#define SGE_MAXTXSEGS 16
|
||||||
#define SGE_RX_BUF_ALIGN sizeof(uint64_t)
|
#define SGE_RX_BUF_ALIGN sizeof(uint64_t)
|
||||||
|
|
||||||
#define SGE_RX_RING_SZ (SGE_RX_RING_CNT * sizeof(struct sge_desc))
|
#define SGE_RX_RING_SZ (SGE_RX_RING_CNT * sizeof(struct sge_desc))
|
||||||
@ -298,6 +298,17 @@ struct sge_list_data {
|
|||||||
bus_addr_t sge_tx_paddr;
|
bus_addr_t sge_tx_paddr;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
struct sge_txdesc {
|
||||||
|
struct mbuf *tx_m;
|
||||||
|
bus_dmamap_t tx_dmamap;
|
||||||
|
int tx_ndesc;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct sge_rxdesc {
|
||||||
|
struct mbuf *rx_m;
|
||||||
|
bus_dmamap_t rx_dmamap;
|
||||||
|
};
|
||||||
|
|
||||||
struct sge_chain_data {
|
struct sge_chain_data {
|
||||||
bus_dma_tag_t sge_tag;
|
bus_dma_tag_t sge_tag;
|
||||||
bus_dma_tag_t sge_rx_tag;
|
bus_dma_tag_t sge_rx_tag;
|
||||||
@ -306,11 +317,9 @@ struct sge_chain_data {
|
|||||||
bus_dmamap_t sge_tx_dmamap;
|
bus_dmamap_t sge_tx_dmamap;
|
||||||
bus_dma_tag_t sge_txmbuf_tag;
|
bus_dma_tag_t sge_txmbuf_tag;
|
||||||
bus_dma_tag_t sge_rxmbuf_tag;
|
bus_dma_tag_t sge_rxmbuf_tag;
|
||||||
struct mbuf *sge_rx_mbuf[SGE_RX_RING_CNT];
|
struct sge_txdesc sge_txdesc[SGE_TX_RING_CNT];
|
||||||
struct mbuf *sge_tx_mbuf[SGE_TX_RING_CNT];
|
struct sge_rxdesc sge_rxdesc[SGE_RX_RING_CNT];
|
||||||
bus_dmamap_t sge_rx_map[SGE_RX_RING_CNT];
|
|
||||||
bus_dmamap_t sge_rx_spare_map;
|
bus_dmamap_t sge_rx_spare_map;
|
||||||
bus_dmamap_t sge_tx_map[SGE_TX_RING_CNT];
|
|
||||||
int sge_rx_cons;
|
int sge_rx_cons;
|
||||||
int sge_tx_prod;
|
int sge_tx_prod;
|
||||||
int sge_tx_cons;
|
int sge_tx_cons;
|
||||||
|
Loading…
x
Reference in New Issue
Block a user