- fix dma map handling for !x86 case

- fix allocation failure handling in refill_fl
This commit is contained in:
kmacy 2009-06-20 18:57:14 +00:00
parent 4cdb86f203
commit 4f52e22a6e
3 changed files with 15 additions and 13 deletions

View File

@ -714,6 +714,7 @@ refill_fl(adapter_t *sc, struct sge_fl *q, int n)
if (q->zone == zone_pack)
uma_zfree(q->zone, cl);
m_free(m);
goto done;
}
#else
cb_arg.seg.ds_addr = pmap_kextract((vm_offset_t)cl);
@ -1157,7 +1158,7 @@ busdma_map_mbufs(struct mbuf **m, struct sge_txq *txq,
m0 = *m;
pktlen = m0->m_pkthdr.len;
#if defined(__i386__) || defined(__amd64__)
if (busdma_map_sg_collapse(txq, txsd, m, segs, nsegs) == 0) {
if (busdma_map_sg_collapse(txq, txsd->map, m, segs, nsegs) == 0) {
goto done;
} else
#endif
@ -1411,11 +1412,11 @@ t3_encap(struct sge_qset *qs, struct mbuf **m)
tso_info = V_LSO_MSS(m0->m_pkthdr.tso_segsz);
#endif
if (m0->m_nextpkt != NULL) {
busdma_map_sg_vec(txq, txsd, m0, segs, &nsegs);
busdma_map_sg_vec(txq, txsd->map, m0, segs, &nsegs);
ndesc = 1;
mlen = 0;
} else {
if ((err = busdma_map_sg_collapse(txq, txsd, &m0, segs, &nsegs))) {
if ((err = busdma_map_sg_collapse(txq, txsd->map, &m0, segs, &nsegs))) {
if (cxgb_debug)
printf("failed ... err=%d\n", err);
return (err);

View File

@ -43,7 +43,7 @@
#define m_ulp_mode m_pkthdr.tso_segsz /* upper level protocol */
static __inline void
busdma_map_mbuf_fast(struct sge_txq *txq, struct tx_sw_desc *txsd,
busdma_map_mbuf_fast(struct sge_txq *txq, bus_dmamap_t map,
struct mbuf *m, bus_dma_segment_t *seg)
{
#if defined(__i386__) || defined(__amd64__)
@ -52,14 +52,15 @@ busdma_map_mbuf_fast(struct sge_txq *txq, struct tx_sw_desc *txsd,
#else
int nsegstmp;
bus_dmamap_load_mbuf_sg(txq->entry_tag, txsd->map, m, seg,
bus_dmamap_load_mbuf_sg(txq->entry_tag, map, m, seg,
&nsegstmp, 0);
#endif
}
int busdma_map_sg_collapse(struct sge_txq *txq, struct tx_sw_desc *txsd,
int busdma_map_sg_collapse(struct sge_txq *txq, bus_dmamap_t map,
struct mbuf **m, bus_dma_segment_t *segs, int *nsegs);
void busdma_map_sg_vec(struct sge_txq *txq, struct tx_sw_desc *txsd, struct mbuf *m, bus_dma_segment_t *segs, int *nsegs);
void busdma_map_sg_vec(struct sge_txq *txq, bus_dmamap_t map,
struct mbuf *m, bus_dma_segment_t *segs, int *nsegs);
static __inline int
busdma_map_sgl(bus_dma_segment_t *vsegs, bus_dma_segment_t *segs, int count)
{

View File

@ -59,7 +59,7 @@ __FBSDID("$FreeBSD$");
#endif
int
busdma_map_sg_collapse(struct sge_txq *txq, struct tx_sw_desc *txsd,
busdma_map_sg_collapse(struct sge_txq *txq, bus_dmamap_t map,
struct mbuf **m, bus_dma_segment_t *segs, int *nsegs)
{
struct mbuf *n = *m;
@ -73,7 +73,7 @@ busdma_map_sg_collapse(struct sge_txq *txq, struct tx_sw_desc *txsd,
psegs = segs;
seg_count = 0;
if (n->m_next == NULL) {
busdma_map_mbuf_fast(txq, txsd, n, segs);
busdma_map_mbuf_fast(txq, map, n, segs);
*nsegs = 1;
return (0);
}
@ -84,13 +84,13 @@ busdma_map_sg_collapse(struct sge_txq *txq, struct tx_sw_desc *txsd,
*/
if (__predict_true(n->m_len != 0)) {
seg_count++;
busdma_map_mbuf_fast(txq, txsd, n, psegs);
busdma_map_mbuf_fast(txq, map, n, psegs);
psegs++;
}
n = n->m_next;
}
#else
err = bus_dmamap_load_mbuf_sg(txq->entry_tag, txsd->map, m, segs,
err = bus_dmamap_load_mbuf_sg(txq->entry_tag, map, m, segs,
&seg_count, 0);
#endif
if (seg_count == 0) {
@ -122,11 +122,11 @@ busdma_map_sg_collapse(struct sge_txq *txq, struct tx_sw_desc *txsd,
}
void
busdma_map_sg_vec(struct sge_txq *txq, struct tx_sw_desc *txsd,
busdma_map_sg_vec(struct sge_txq *txq, bus_dmamap_t map,
struct mbuf *m, bus_dma_segment_t *segs, int *nsegs)
{
for (*nsegs = 0; m != NULL ; segs++, *nsegs += 1, m = m->m_nextpkt)
busdma_map_mbuf_fast(txq, txsd, m, segs);
busdma_map_mbuf_fast(txq, map, m, segs);
}