- move WR_LEN into cxgb_adapter.h and add PIO_LEN to make the intent clearer (see the sketch below the commit metadata)
- move cxgb_tx_common into cxgb_multiq.c and rename it to cxgb_tx
- move cxgb_tx_common's dependencies
- further simplify cxgb_dequeue_packet for the non-multiqueue case
- only launch one service thread per port in the non-multiqueue case
- remove dead cleaning code from cxgb_sge.c
- simplify the PIO case substantially by returning directly from mbuf collapse and just using m_copydata
- remove a gratuitous m_gethdr in the rx path
- clarify the freeing of mbufs in collapse
commit 1287cf02b7 (parent 9696aa5a5b)
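For context on the WR_LEN/PIO_LEN item: a tunnel-TX work request is WR_FLITS 8-byte flits, so a packet can be pushed by programmed I/O straight into the descriptor only when it fits behind the struct cpl_tx_pkt header. The fragment below is a minimal sketch distilled from the cxgb_adapter.h and t3_encap hunks in this diff (m0, mlen and txd are the locals t3_encap already uses); it is illustration only, not an additional change:

        /* a work request is WR_FLITS 8-byte flits... */
        #define WR_LEN  (WR_FLITS * 8)
        /* ...and PIO_LEN is the payload that fits in one WR behind the CPL header */
        #define PIO_LEN (WR_LEN - sizeof(struct cpl_tx_pkt))

        if (mlen <= PIO_LEN) {
                /* small packet: copy it whole into the descriptor flits and
                 * free the chain right away; no DMA mapping, no mbuf_iovec */
                m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[2]);
                m_freem(m0);
                m0 = NULL;
        }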
@@ -149,13 +149,24 @@ enum { /* adapter flags */
 #define FL_Q_SIZE       4096
 #define JUMBO_Q_SIZE    1024
 #define RSPQ_Q_SIZE     1024
+#if 0
 #define TX_ETH_Q_SIZE   1024
+#else
+#define TX_ETH_Q_SIZE   64
+#endif
+
 
 enum { TXQ_ETH = 0,
        TXQ_OFLD = 1,
        TXQ_CTRL = 2, };
 
 
+/*
+ * work request size in bytes
+ */
+#define WR_LEN (WR_FLITS * 8)
+#define PIO_LEN (WR_LEN - sizeof(struct cpl_tx_pkt))
+
 /* careful, the following are set on priv_flags and must not collide with
  * IFF_ flags!
  */
@@ -288,7 +299,6 @@ struct sge_txq {
        struct mtx      lock;
        struct sg_ent  txq_sgl[TX_MAX_SEGS / 2 + 1];
        bus_dma_segment_t txq_segs[TX_MAX_SEGS];
-       struct mbuf *txq_m_vec[TX_WR_COUNT_MAX];
 #define TXQ_NAME_LEN  32
        char lockbuf[TXQ_NAME_LEN];
 };
@@ -610,10 +620,7 @@ void cxgb_pcpu_shutdown_threads(struct adapter *sc);
 void cxgb_pcpu_startup_threads(struct adapter *sc);
 
 int process_responses(adapter_t *adap, struct sge_qset *qs, int budget);
-int cxgb_tx_common(struct ifnet *ifp, struct sge_qset *qs, uint32_t txmax);
 void t3_free_qset(adapter_t *sc, struct sge_qset *q);
-int cxgb_dequeue_packet(struct ifnet *, struct sge_txq *, struct mbuf **);
 void cxgb_start(struct ifnet *ifp);
 void refill_fl_service(adapter_t *adap, struct sge_fl *fl);
-int reclaim_completed_tx(struct sge_txq *q, int reclaim_min);
 #endif
@@ -281,21 +281,6 @@ struct cxgb_ident {
 
 static int set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset);
 
-static __inline void
-check_pkt_coalesce(struct sge_qset *qs)
-{
-       struct adapter *sc;
-       struct sge_txq *txq;
-
-       txq = &qs->txq[TXQ_ETH];
-       sc = qs->port->adapter;
-
-       if (sc->tunq_fill[qs->idx] && (txq->in_use < (txq->size - (txq->size>>2))))
-               sc->tunq_fill[qs->idx] = 0;
-       else if (!sc->tunq_fill[qs->idx] && (txq->in_use > (txq->size - (txq->size>>2))))
-               sc->tunq_fill[qs->idx] = 1;
-}
-
 static __inline char
 t3rev2char(struct adapter *adapter)
 {
@@ -1865,64 +1850,6 @@ cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
        return (error);
 }
 
-int
-cxgb_tx_common(struct ifnet *ifp, struct sge_qset *qs, uint32_t txmax)
-{
-       struct sge_txq *txq;
-       int err, in_use_init, count;
-       struct mbuf **m_vec;
-
-       txq = &qs->txq[TXQ_ETH];
-       m_vec = txq->txq_m_vec;
-       in_use_init = txq->in_use;
-       err = 0;
-       while ((txq->in_use - in_use_init < txmax) &&
-           (txq->size > txq->in_use + TX_MAX_DESC)) {
-               check_pkt_coalesce(qs);
-               count = cxgb_dequeue_packet(ifp, txq, m_vec);
-               if (count == 0)
-                       break;
-               ETHER_BPF_MTAP(ifp, m_vec[0]);
-
-               if ((err = t3_encap(qs, m_vec, count)) != 0)
-                       break;
-               txq->txq_enqueued += count;
-       }
-#if 0 /* !MULTIQ */
-       if (__predict_false(err)) {
-               if (err == ENOMEM) {
-                       ifp->if_drv_flags |= IFF_DRV_OACTIVE;
-                       IFQ_LOCK(&ifp->if_snd);
-                       IFQ_DRV_PREPEND(&ifp->if_snd, m_vec[0]);
-                       IFQ_UNLOCK(&ifp->if_snd);
-               }
-       }
-       if (err == 0 && m_vec[0] == NULL) {
-               err = ENOBUFS;
-       }
-       else if ((err == 0) && (txq->size <= txq->in_use + TX_MAX_DESC) &&
-           (ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
-               setbit(&qs->txq_stopped, TXQ_ETH);
-               ifp->if_drv_flags |= IFF_DRV_OACTIVE;
-               err = ENOSPC;
-       }
-#else
-       if ((err == 0) && (txq->size <= txq->in_use + TX_MAX_DESC)) {
-               err = ENOSPC;
-               setbit(&qs->txq_stopped, TXQ_ETH);
-       }
-       if (err == ENOMEM) {
-               int i;
-               /*
-                * Sub-optimal :-/
-                */
-               for (i = 0; i < count; i++)
-                       m_freem(m_vec[i]);
-       }
-#endif
-       return (err);
-}
-
 static int
 cxgb_media_change(struct ifnet *ifp)
 {
@@ -115,6 +115,9 @@ static void cxgb_pcpu_start_proc(void *arg);
 #ifdef IFNET_MULTIQUEUE
 static int cxgb_pcpu_cookie_to_qidx(struct port_info *, uint32_t cookie);
 #endif
+static int cxgb_tx(struct sge_qset *qs, uint32_t txmax);
+
+
 static inline int
 cxgb_pcpu_enqueue_packet_(struct sge_qset *qs, struct mbuf *m)
 {
@@ -124,7 +127,6 @@ cxgb_pcpu_enqueue_packet_(struct sge_qset *qs, struct mbuf *m)
 #ifndef IFNET_MULTIQUEUE
        panic("not expecting enqueue without multiqueue");
 #endif
-
        KASSERT(m != NULL, ("null mbuf"));
        KASSERT(m->m_type == MT_DATA, ("bad mbuf type %d", m->m_type));
        if (qs->qs_flags & QS_EXITING) {
@@ -164,8 +166,8 @@ cxgb_pcpu_enqueue_packet(struct ifnet *ifp, struct mbuf *m)
        return (err);
 }
 
-int
-cxgb_dequeue_packet(struct ifnet *unused, struct sge_txq *txq, struct mbuf **m_vec)
+static int
+cxgb_dequeue_packet(struct sge_txq *txq, struct mbuf **m_vec)
 {
        struct mbuf *m;
        struct sge_qset *qs;
@@ -176,10 +178,16 @@ cxgb_dequeue_packet(struct ifnet *unused, struct sge_txq *txq, struct mbuf **m_vec)
 
        if (txq->immpkt != NULL)
                panic("immediate packet set");
-#endif
 
        mtx_assert(&txq->lock, MA_OWNED);
 
+       IFQ_DRV_DEQUEUE(&pi->ifp->if_snd, m);
+       if (m == NULL)
+               return (0);
+
+       m_vec[0] = m;
+       return (1);
+#endif
        coalesced = count = size = 0;
        qs = txq_to_qset(txq, TXQ_ETH);
        if (qs->qs_flags & QS_EXITING)
@@ -193,39 +201,20 @@ cxgb_dequeue_packet(struct ifnet *unused, struct sge_txq *txq, struct mbuf **m_vec)
        }
        sc = qs->port->adapter;
 
-#ifndef IFNET_MULTIQUEUE
-       /*
-        * This is terrible from a cache and locking efficiency standpoint
-        * but then again ... so is ifnet.
-        */
-       while (((qs->qs_flags & QS_EXITING) == 0) && !IFQ_DRV_IS_EMPTY(&pi->ifp->if_snd) && !buf_ring_full(&txq->txq_mr)) {
-
-               struct mbuf *m = NULL;
-
-               IFQ_DRV_DEQUEUE(&pi->ifp->if_snd, m);
-               if (m) {
-                       KASSERT(m->m_type == MT_DATA, ("bad mbuf type %d", m->m_type));
-                       if (buf_ring_enqueue(&txq->txq_mr, m))
-                               panic("ring full");
-               } else
-                       break;
-       }
-#endif
        m = buf_ring_dequeue(&txq->txq_mr);
        if (m == NULL)
                return (0);
 
-       buf_ring_scan(&txq->txq_mr, m, __FILE__, __LINE__);
-       KASSERT(m->m_type == MT_DATA, ("bad mbuf type %d", m->m_type));
-       m_vec[0] = m;
-       if (m->m_pkthdr.tso_segsz > 0 || m->m_pkthdr.len > TX_WR_SIZE_MAX || m->m_next != NULL ||
-           (cxgb_pcpu_tx_coalesce == 0)) {
-               return (1);
-       }
-#ifndef IFNET_MULTIQUEUE
-       panic("coalesce not supported yet");
-#endif
        count = 1;
+       KASSERT(m->m_type == MT_DATA,
+           ("m=%p is bad mbuf type %d from ring cons=%d prod=%d", m,
+               m->m_type, txq->txq_mr.br_cons, txq->txq_mr.br_prod));
+       m_vec[0] = m;
+       if (m->m_pkthdr.tso_segsz > 0 || m->m_pkthdr.len > TX_WR_SIZE_MAX ||
+           m->m_next != NULL || (cxgb_pcpu_tx_coalesce == 0)) {
+               return (count);
+       }
+
        size = m->m_pkthdr.len;
        for (m = buf_ring_peek(&txq->txq_mr); m != NULL;
            m = buf_ring_peek(&txq->txq_mr)) {
@@ -381,13 +370,15 @@ cxgb_pcpu_free(struct sge_qset *qs)
 {
        struct mbuf *m;
        struct sge_txq *txq = &qs->txq[TXQ_ETH];
 
+       mtx_lock(&txq->lock);
        while ((m = mbufq_dequeue(&txq->sendq)) != NULL)
                m_freem(m);
        while ((m = buf_ring_dequeue(&txq->txq_mr)) != NULL)
                m_freem(m);
 
        t3_free_tx_desc_all(txq);
+       mtx_unlock(&txq->lock);
 }
 
 static int
@@ -400,6 +391,7 @@ cxgb_pcpu_reclaim_tx(struct sge_txq *txq)
        KASSERT(qs->qs_cpuid == curcpu, ("cpu qset mismatch cpuid=%d curcpu=%d",
                qs->qs_cpuid, curcpu));
 #endif
+       mtx_assert(&txq->lock, MA_OWNED);
 
        reclaimable = desc_reclaimable(txq);
        if (reclaimable == 0)
@@ -429,6 +421,8 @@ cxgb_pcpu_start_(struct sge_qset *qs, struct mbuf *immpkt, int tx_flush)
        sc = pi->adapter;
        txq = &qs->txq[TXQ_ETH];
 
+       mtx_assert(&txq->lock, MA_OWNED);
+
 retry:
        if (!pi->link_config.link_ok)
                initerr = ENXIO;
@@ -474,10 +468,12 @@ cxgb_pcpu_start_(struct sge_qset *qs, struct mbuf *immpkt, int tx_flush)
        DPRINTF("stopped=%d flush=%d max_desc=%d\n",
            stopped, flush, max_desc);
 
-       err = flush ? cxgb_tx_common(qs->port->ifp, qs, max_desc) : ENOSPC;
+       err = flush ? cxgb_tx(qs, max_desc) : ENOSPC;
 
 
-       if ((tx_flush && flush && err == 0) && !buf_ring_empty(&txq->txq_mr)) {
+       if ((tx_flush && flush && err == 0) &&
+           (!buf_ring_empty(&txq->txq_mr) ||
+           !IFQ_DRV_IS_EMPTY(&pi->ifp->if_snd))) {
                struct thread *td = curthread;
 
                if (++i > 1) {
@@ -525,7 +521,8 @@ cxgb_pcpu_start(struct ifnet *ifp, struct mbuf *immpkt)
 
        txq = &qs->txq[TXQ_ETH];
 
-       if (((sc->tunq_coalesce == 0) || (buf_ring_count(&txq->txq_mr) >= TX_WR_COUNT_MAX) ||
+       if (((sc->tunq_coalesce == 0) ||
+           (buf_ring_count(&txq->txq_mr) >= TX_WR_COUNT_MAX) ||
            (cxgb_pcpu_tx_coalesce == 0)) && mtx_trylock(&txq->lock)) {
                if (cxgb_debug)
                        printf("doing immediate transmit\n");
@@ -658,12 +655,18 @@ cxgb_pcpu_cookie_to_qidx(struct port_info *pi, uint32_t cookie)
 void
 cxgb_pcpu_startup_threads(struct adapter *sc)
 {
-       int i, j;
+       int i, j, nqsets;
        struct proc *p;
 
+
        for (i = 0; i < (sc)->params.nports; ++i) {
                struct port_info *pi = adap2pinfo(sc, i);
 
+#ifdef IFNET_MULTIQUEUE
+               nqsets = pi->nqsets;
+#else
+               nqsets = 1;
+#endif
                for (j = 0; j < pi->nqsets; ++j) {
                        struct sge_qset *qs;
 
@@ -701,3 +704,84 @@ cxgb_pcpu_shutdown_threads(struct adapter *sc)
                }
        }
 }
+
+static __inline void
+check_pkt_coalesce(struct sge_qset *qs)
+{
+       struct adapter *sc;
+       struct sge_txq *txq;
+
+       txq = &qs->txq[TXQ_ETH];
+       sc = qs->port->adapter;
+
+       if (sc->tunq_fill[qs->idx] && (txq->in_use < (txq->size - (txq->size>>2))))
+               sc->tunq_fill[qs->idx] = 0;
+       else if (!sc->tunq_fill[qs->idx] && (txq->in_use > (txq->size - (txq->size>>2))))
+               sc->tunq_fill[qs->idx] = 1;
+}
+
+static int
+cxgb_tx(struct sge_qset *qs, uint32_t txmax)
+{
+       struct sge_txq *txq;
+       struct ifnet *ifp = qs->port->ifp;
+       int i, err, in_use_init, count;
+       struct mbuf *m_vec[TX_WR_COUNT_MAX];
+
+       txq = &qs->txq[TXQ_ETH];
+       ifp = qs->port->ifp;
+       in_use_init = txq->in_use;
+       err = 0;
+
+       for (i = 0; i < TX_WR_COUNT_MAX; i++)
+               m_vec[i] = NULL;
+
+       mtx_assert(&txq->lock, MA_OWNED);
+       while ((txq->in_use - in_use_init < txmax) &&
+           (txq->size > txq->in_use + TX_MAX_DESC)) {
+               check_pkt_coalesce(qs);
+               count = cxgb_dequeue_packet(txq, m_vec);
+               if (count == 0) {
+                       err = ENOBUFS;
+                       break;
+               }
+               ETHER_BPF_MTAP(ifp, m_vec[0]);
+
+               if ((err = t3_encap(qs, m_vec, count)) != 0)
+                       break;
+               txq->txq_enqueued += count;
+               m_vec[0] = NULL;
+       }
+#if 0 /* !MULTIQ */
+       if (__predict_false(err)) {
+               if (err == ENOMEM) {
+                       ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+                       IFQ_LOCK(&ifp->if_snd);
+                       IFQ_DRV_PREPEND(&ifp->if_snd, m_vec[0]);
+                       IFQ_UNLOCK(&ifp->if_snd);
+               }
+       }
+       else if ((err == 0) && (txq->size <= txq->in_use + TX_MAX_DESC) &&
+           (ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
+               setbit(&qs->txq_stopped, TXQ_ETH);
+               ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+               err = ENOSPC;
+       }
+#else
+       if ((err == 0) && (txq->size <= txq->in_use + TX_MAX_DESC)) {
+               err = ENOSPC;
+               setbit(&qs->txq_stopped, TXQ_ETH);
+       }
+       if (err == ENOMEM) {
+               int i;
+               /*
+                * Sub-optimal :-/
+                */
+               printf("ENOMEM!!!");
+               for (i = 0; i < count; i++)
+                       m_freem(m_vec[i]);
+       }
+#endif
+       return (err);
+}
+
@@ -26,6 +26,8 @@ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 POSSIBILITY OF SUCH DAMAGE.
 
 ***************************************************************************/
+#define DEBUG_BUFRING
+
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
@@ -72,7 +74,7 @@ __FBSDID("$FreeBSD$");
 
 int      txq_fills = 0;
 static int bogus_imm = 0;
-static int recycle_enable = 1;
+static int recycle_enable = 0;
 extern int cxgb_txq_buf_ring_size;
 int cxgb_cached_allocations;
 int cxgb_cached;
@@ -92,10 +94,6 @@ extern int cxgb_use_16k_clusters;
 */
 #define TX_RECLAIM_PERIOD       (hz >> 1)
 
-/*
- * work request size in bytes
- */
-#define WR_LEN (WR_FLITS * 8)
 
 /*
  * Values for sge_txq.flags
@@ -218,13 +216,6 @@ reclaim_completed_tx_(struct sge_txq *q, int reclaim_min)
        return (reclaim);
 }
 
-int
-reclaim_completed_tx(struct sge_txq *q, int reclaim_min)
-{
-       return reclaim_completed_tx_(q, reclaim_min);
-}
-
-
 /**
  *     should_restart_tx - are there enough resources to restart a Tx queue?
  *     @q: the Tx queue
@@ -672,8 +663,7 @@ alloc_ring(adapter_t *sc, size_t nelem, size_t elem_size, size_t sw_size,
 
        if (sw_size) {
                len = nelem * sw_size;
-               s = malloc(len, M_DEVBUF, M_WAITOK);
-               bzero(s, len);
+               s = malloc(len, M_DEVBUF, M_WAITOK|M_ZERO);
                *(void **)sdesc = s;
        }
        if (parent_entry_tag == NULL)
@@ -736,17 +726,16 @@ sge_timer_cb(void *arg)
        struct sge_qset *qs;
        struct sge_txq  *txq;
        int i, j;
-       int reclaim_eth, reclaim_ofl, refill_rx;
+       int reclaim_ofl, refill_rx;
 
        for (i = 0; i < sc->params.nports; i++)
                for (j = 0; j < sc->port[i].nqsets; j++) {
                        qs = &sc->sge.qs[i + j];
                        txq = &qs->txq[0];
-                       reclaim_eth = txq[TXQ_ETH].processed - txq[TXQ_ETH].cleaned;
                        reclaim_ofl = txq[TXQ_OFLD].processed - txq[TXQ_OFLD].cleaned;
                        refill_rx = ((qs->fl[0].credits < qs->fl[0].size) ||
                            (qs->fl[1].credits < qs->fl[1].size));
-                       if (reclaim_eth || reclaim_ofl || refill_rx) {
+                       if (reclaim_ofl || refill_rx) {
                                pi = &sc->port[i];
                                taskqueue_enqueue(pi->tq, &pi->timer_reclaim_task);
                                break;
@@ -834,31 +823,14 @@ refill_rspq(adapter_t *sc, const struct sge_rspq *q, u_int credits)
 static __inline void
 sge_txq_reclaim_(struct sge_txq *txq, int force)
 {
-       int reclaimable, n;
-       struct port_info *pi;
-
-       pi = txq->port;
-reclaim_more:
-       n = 0;
-       if ((reclaimable = desc_reclaimable(txq)) < 16)
+
+       if (desc_reclaimable(txq) < 16)
                return;
        if (mtx_trylock(&txq->lock) == 0)
                return;
-       n = reclaim_completed_tx_(txq, 16);
+       reclaim_completed_tx_(txq, 16);
        mtx_unlock(&txq->lock);
 
-       if (pi && pi->ifp->if_drv_flags & IFF_DRV_OACTIVE &&
-           txq->size - txq->in_use >= TX_START_MAX_DESC) {
-               struct sge_qset *qs = txq_to_qset(txq, TXQ_ETH);
-
-               txq_fills++;
-               pi->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
-               clrbit(&qs->txq_stopped, TXQ_ETH);
-               taskqueue_enqueue(pi->tq, &pi->start_task);
-       }
-
-       if (n)
-               goto reclaim_more;
 }
 
 static void
@@ -886,8 +858,6 @@ sge_timer_reclaim(void *arg, int ncount)
 #endif
        for (i = 0; i < nqsets; i++) {
                qs = &sc->sge.qs[i];
-               txq = &qs->txq[TXQ_ETH];
-               sge_txq_reclaim_(txq, FALSE);
 
                txq = &qs->txq[TXQ_OFLD];
                sge_txq_reclaim_(txq, FALSE);
@@ -1184,7 +1154,7 @@ write_wr_hdr_sgl(unsigned int ndesc, struct tx_desc *txd, struct txq_state *txqs
                 * is freed all clusters will be freed
                 * with it
                 */
-               txsd->mi.mi_base = NULL;
+               KASSERT(txsd->mi.mi_base == NULL, ("overwrting valid entry mi_base==%p", txsd->mi.mi_base));
                wrp = (struct work_request_hdr *)txd;
                wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
                    V_WR_SGLSFLT(1)) | wr_hi;
@@ -1244,6 +1214,7 @@ t3_encap(struct sge_qset *qs, struct mbuf **m, int count)
 
        DPRINTF("t3_encap cpu=%d ", curcpu);
 
+       mi = NULL;
        pi = qs->port;
        sc = pi->adapter;
        txq = &qs->txq[TXQ_ETH];
@@ -1252,9 +1223,13 @@ t3_encap(struct sge_qset *qs, struct mbuf **m, int count)
        sgl = txq->txq_sgl;
        segs = txq->txq_segs;
        m0 = *m;
 
        DPRINTF("t3_encap port_id=%d qsidx=%d ", pi->port_id, pi->first_qset);
        DPRINTF("mlen=%d txpkt_intf=%d tx_chan=%d\n", m[0]->m_pkthdr.len, pi->txpkt_intf, pi->tx_chan);
+       if (cxgb_debug)
+               printf("mi_base=%p cidx=%d pidx=%d\n\n", txsd->mi.mi_base, txq->cidx, txq->pidx);
+
+       mtx_assert(&txq->lock, MA_OWNED);
        cntrl = V_TXPKT_INTF(pi->txpkt_intf);
        /*
         * XXX need to add VLAN support for 6.x
@@ -1263,7 +1238,9 @@ t3_encap(struct sge_qset *qs, struct mbuf **m, int count)
        if (m0->m_pkthdr.csum_flags & (CSUM_TSO))
                tso_info = V_LSO_MSS(m0->m_pkthdr.tso_segsz);
 #endif
+       KASSERT(txsd->mi.mi_base == NULL, ("overwrting valid entry mi_base==%p",
+               txsd->mi.mi_base));
 
        if (count > 1) {
                panic("count > 1 not support in CVS\n");
                if ((err = busdma_map_sg_vec(m, &m0, segs, count)))
@@ -1276,13 +1253,10 @@ t3_encap(struct sge_qset *qs, struct mbuf **m, int count)
        }
        KASSERT(m0->m_pkthdr.len, ("empty packet nsegs=%d count=%d", nsegs, count));
 
-       if (m0->m_type == MT_DATA)
-               DPRINTF("mbuf type=%d tags:%d head=%p", m0->m_type, !SLIST_EMPTY(&m0->m_pkthdr.tags),
-                   SLIST_FIRST(&m0->m_pkthdr.tags));
-
-       mi_collapse_mbuf(&txsd->mi, m0);
-       mi = &txsd->mi;
-
+       if (m0->m_pkthdr.len > PIO_LEN) {
+               mi_collapse_mbuf(&txsd->mi, m0);
+               mi = &txsd->mi;
+       }
        if (count > 1) {
                struct cpl_tx_pkt_batch *cpl_batch = (struct cpl_tx_pkt_batch *)txd;
                int i, fidx;
@@ -1373,25 +1347,11 @@ t3_encap(struct sge_qset *qs, struct mbuf **m, int count)
                mlen = m0->m_pkthdr.len;
                cpl->len = htonl(mlen | 0x80000000);
 
-               if (mlen <= WR_LEN - sizeof(*cpl)) {
+               if (mlen <= PIO_LEN) {
                        txq_prod(txq, 1, &txqs);
-                       DPRINTF("mlen==%d max=%ld\n", mlen, (WR_LEN - sizeof(*cpl)));
-                       if (mi->mi_type != MT_IOVEC &&
-                           mi->mi_type != MT_CLIOVEC)
-                               memcpy(&txd->flit[2], mi->mi_data, mlen);
-                       else {
-                               /*
-                                * XXX mbuf_iovec
-                                */
-#if 0
-                               m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[2]);
-#endif
-                               printf("bailing on m_copydata\n");
-                       }
-
-                       m_freem_iovec(&txsd->mi);
-                       txsd->mi.mi_base = NULL;
-
+                       m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[2]);
+                       m_freem(m0);
+                       m0 = NULL;
                        flits = (mlen + 7) / 8 + 2;
                        cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) |
                            V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) |
@@ -1797,13 +1757,18 @@ t3_sge_stop(adapter_t *sc)
 
        for (nqsets = i = 0; i < (sc)->params.nports; i++)
                nqsets += sc->port[i].nqsets;
-
+#ifdef notyet
+       /*
+        *
+        * XXX
+        */
        for (i = 0; i < nqsets; ++i) {
                struct sge_qset *qs = &sc->sge.qs[i];
 
                taskqueue_drain(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
                taskqueue_drain(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
        }
+#endif
 }
 
 /**
@@ -1839,7 +1804,8 @@ t3_free_tx_desc(struct sge_txq *q, int reclaimable)
                        bus_dmamap_unload(q->entry_tag, txsd->map);
                        txsd->flags &= ~TX_SW_DESC_MAPPED;
                }
                m_freem_iovec(&txsd->mi);
+               buf_ring_scan(&q->txq_mr, txsd->mi.mi_base, __FILE__, __LINE__);
                txsd->mi.mi_base = NULL;
 
 #if defined(DIAGNOSTIC) && 0
@@ -2513,7 +2479,7 @@ init_cluster_mbuf(caddr_t cl, int flags, int type, uma_zone_t zone)
 
 static int
 get_packet(adapter_t *adap, unsigned int drop_thres, struct sge_qset *qs,
-    struct t3_mbuf_hdr *mh, struct rsp_desc *r, struct mbuf *m)
+    struct t3_mbuf_hdr *mh, struct rsp_desc *r)
 {
 
        unsigned int len_cq =  ntohl(r->len_cq);
@@ -2522,6 +2488,7 @@ get_packet(adapter_t *adap, unsigned int drop_thres, struct sge_qset *qs,
        uint32_t len = G_RSPD_LEN(len_cq);
        uint32_t flags = ntohl(r->flags);
        uint8_t sopeop = G_RSPD_SOP_EOP(flags);
+       struct mbuf *m;
        uint32_t *ref;
        int ret = 0;
 
@@ -2536,13 +2503,13 @@ get_packet(adapter_t *adap, unsigned int drop_thres, struct sge_qset *qs,
                        cl = mtod(m0, void *);
                        memcpy(cl, sd->data, len);
                        recycle_rx_buf(adap, fl, fl->cidx);
-                       *m = m0;
+                       m = m0;
                } else {
                skip_recycle:
                        int flags = 0;
                        bus_dmamap_unload(fl->entry_tag, sd->map);
                        cl = sd->rxsd_cl;
-                       *m = m0 = (struct mbuf *)cl;
+                       m = m0 = (struct mbuf *)cl;
 
                m0->m_len = len;
                if ((sopeop == RSPQ_SOP_EOP) ||
@@ -2561,8 +2528,7 @@ get_packet(adapter_t *adap, unsigned int drop_thres, struct sge_qset *qs,
        case RSPQ_NSOP_NEOP:
                DBG(DBG_RX, ("get_packet: NO_SOP-NO_EOP m %p\n", m));
                if (mh->mh_tail == NULL) {
-                       if (cxgb_debug)
-                               printf("discarding intermediate descriptor entry\n");
+                       printf("discarding intermediate descriptor entry\n");
                        m_freem(m);
                        break;
                }
@@ -2798,18 +2764,7 @@ process_responses(adapter_t *adap, struct sge_qset *qs, int budget)
                        int drop_thresh = eth ? SGE_RX_DROP_THRES : 0;
 
 #ifdef DISABLE_MBUF_IOVEC
-                       struct mbuf *m;
-                       m = m_gethdr(M_DONTWAIT, MT_DATA);
-
-                       if (m == NULL) {
-                               log(LOG_WARNING, "failed to get mbuf for packet\n");
-                               budget_left--;
-                               break;
-                       } else {
-                               m->m_next = m->m_nextpkt = NULL;
-                       }
-
-                       eop = get_packet(adap, drop_thresh, qs, &rspq->rspq_mh, r, m);
+                       eop = get_packet(adap, drop_thresh, qs, &rspq->rspq_mh, r);
 #else
                        eop = get_packet(adap, drop_thresh, qs, &rspq->rspq_mbuf, r);
 #ifdef IFNET_MULTIQUEUE
@@ -232,13 +232,15 @@ m_freem_iovec(struct mbuf_iovec *mi)
        struct mbuf *m;
 
        switch (mi->mi_type) {
+       case EXT_MBUF:
+               m_free_fast((struct mbuf *)mi->mi_base);
+               break;
        case EXT_IOVEC:
        case EXT_CLIOVEC:
        case EXT_JMPIOVEC:
                m = (struct mbuf *)mi->mi_base;
                m_free_iovec(m, mi->mi_type);
                break;
-       case EXT_MBUF:
        case EXT_CLUSTER:
        case EXT_JUMBOP:
        case EXT_JUMBO9:
@@ -162,7 +162,7 @@ _mcl_collapse_mbuf(struct mbuf_iovec *mi, struct mbuf *m)
        }
        KASSERT(mi->mi_len != 0, ("miov has len 0"));
        KASSERT(mi->mi_type > 0, ("mi_type is invalid"));
-
+       KASSERT(mi->mi_base, ("mi_base is invalid"));
        return (n);
 }
 
@@ -204,40 +204,15 @@ busdma_map_sg_collapse(struct mbuf **m, bus_dma_segment_t *segs, int *nsegs)
        if (n->m_flags & M_PKTHDR && !SLIST_EMPTY(&n->m_pkthdr.tags))
                m_tag_delete_chain(n, NULL);
 
+       if (n->m_pkthdr.len <= PIO_LEN)
+               return (0);
 retry:
        seg_count = 0;
        if (n->m_next == NULL) {
                busdma_map_mbuf_fast(n, segs);
                *nsegs = 1;
-
                return (0);
        }
-
-       if (n->m_pkthdr.len <= 104) {
-               caddr_t data;
-
-               if ((m0 = m_gethdr(M_DONTWAIT, MT_DATA)) == NULL)
-                       return (ENOMEM);
-
-               data = m0->m_data;
-               memcpy(m0, n, sizeof(struct m_hdr) + sizeof(struct pkthdr));
-               m0->m_data = data;
-               m0->m_len = n->m_pkthdr.len;
-               m0->m_flags &= ~M_EXT;
-               m0->m_next = NULL;
-               m0->m_type = n->m_type;
-               n->m_flags &= ~M_PKTHDR;
-               while (n) {
-                       memcpy(data, n->m_data, n->m_len);
-                       data += n->m_len;
-                       n = n->m_next;
-               }
-               m_freem(*m);
-               n = m0;
-               *m = n;
-               goto retry;
-       }
-
        while (n && seg_count < TX_MAX_SEGS) {
                marray[seg_count] = n;
 
@@ -249,18 +224,6 @@ retry:
 
                n = n->m_next;
        }
-#if 0
-       /*
-        * XXX needs more careful consideration
-        */
-       if (__predict_false(seg_count == 1)) {
-               n = marray[0];
-               if (n != *m)
-
-                       /* XXX */
-                       goto retry;
-       }
-#endif
        if (seg_count == 0) {
                if (cxgb_debug)
                        printf("empty segment chain\n");
@@ -302,16 +265,20 @@ retry:
        }
        n = *m;
        while (n) {
-               if (((n->m_flags & (M_EXT|M_NOFREE)) == M_EXT) &&
-                   (n->m_len > 0) && (n->m_ext.ext_type != EXT_PACKET) )
+               if (n->m_ext.ext_type == EXT_PACKET)
+                       goto skip;
+               else if (n->m_len == 0)
+                       /* do nothing */;
+               else if ((n->m_flags & (M_EXT|M_NOFREE)) == M_EXT)
                        n->m_flags &= ~M_EXT;
-               else if ((n->m_len > 0) || (n->m_ext.ext_type == EXT_PACKET)) {
-                       n = n->m_next;
-                       continue;
-               }
+               else
+                       goto skip;
                mhead = n->m_next;
                m_free(n);
                n = mhead;
+               continue;
+       skip:
+               n = n->m_next;
        }
        *nsegs = seg_count;
        *m = m0;