Add the bus_dmamap_load_mbuf_sg() function to sparc64.

This commit is contained in:
scottl 2005-01-15 09:20:47 +00:00
parent f4c183f0ba
commit c1d168f9a4
3 changed files with 99 additions and 15 deletions

View File

@@ -988,6 +988,8 @@ struct bus_dma_methods {
bus_size_t, bus_dmamap_callback_t *, void *, int);
int (*dm_dmamap_load_mbuf)(bus_dma_tag_t, bus_dmamap_t,
struct mbuf *, bus_dmamap_callback2_t *, void *, int);
int (*dm_dmamap_load_mbuf_sg)(bus_dma_tag_t, bus_dmamap_t,
struct mbuf *, bus_dma_segment_t *segs, int *nsegs, int);
int (*dm_dmamap_load_uio)(bus_dma_tag_t, bus_dmamap_t, struct uio *,
bus_dmamap_callback2_t *, void *, int);
void (*dm_dmamap_unload)(bus_dma_tag_t, bus_dmamap_t);
@@ -1039,6 +1041,8 @@ int bus_dma_tag_destroy(bus_dma_tag_t);
((t)->dt_mt->dm_dmamap_load((t), (m), (p), (s), (cb), (cba), (f)))
#define bus_dmamap_load_mbuf(t, m, mb, cb, cba, f) \
((t)->dt_mt->dm_dmamap_load_mbuf((t), (m), (mb), (cb), (cba), (f)))
#define bus_dmamap_load_mbuf_sg(t, m, mb, segs, nsegs, f) \
((t)->dt_mt->dm_dmamap_load_mbuf_sg((t), (m), (mb), (segs), (nsegs), (f)))
#define bus_dmamap_load_uio(t, m, ui, cb, cba, f) \
((t)->dt_mt->dm_dmamap_load_uio((t), (m), (ui), (cb), (cba), (f)))
#define bus_dmamap_unload(t, p) \

View File

@@ -351,17 +351,15 @@ nexus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
*/
static int
_nexus_dmamap_load_buffer(bus_dma_tag_t dmat, void *buf, bus_size_t buflen,
struct thread *td, int flags, bus_addr_t *lastaddrp, int *segp, int first)
struct thread *td, int flags, bus_addr_t *lastaddrp,
bus_dma_segment_t *segs, int *segp, int first)
{
bus_dma_segment_t *segs;
bus_size_t sgsize;
bus_addr_t curaddr, lastaddr, baddr, bmask;
vm_offset_t vaddr = (vm_offset_t)buf;
int seg;
pmap_t pmap;
segs = dmat->dt_segments;
if (td != NULL)
pmap = vmspace_pmap(td->td_proc->p_vmspace);
else
@@ -450,7 +448,7 @@ nexus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
int error, nsegs;
error = _nexus_dmamap_load_buffer(dmat, buf, buflen, NULL, flags,
&lastaddr, &nsegs, 1);
&lastaddr, dmat->dt_segments, &nsegs, 1);
if (error == 0) {
(*callback)(callback_arg, dmat->dt_segments, nsegs + 1, 0);
@@ -483,7 +481,7 @@ nexus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
if (m->m_len > 0) {
error = _nexus_dmamap_load_buffer(dmat,
m->m_data, m->m_len,NULL, flags, &lastaddr,
&nsegs, first);
dmat->dt_segments, &nsegs, first);
first = 0;
}
}
@@ -502,6 +500,37 @@ nexus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
return (error);
}
/*
 * Like nexus_dmamap_load_mbuf(), but instead of invoking a callback the
 * resulting segment list is handed back to the caller through 'segs' and
 * the segment count through 'nsegs'.
 */
static int
nexus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
bus_dma_segment_t *segs, int *nsegs, int flags)
{
int error;
M_ASSERTPKTHDR(m0);
/*
 * _nexus_dmamap_load_buffer() uses *nsegs as the index of the segment
 * currently being filled; it is converted to a count by the increment
 * at the end of this function.
 */
*nsegs = 0;
error = 0;
/* Refuse packets larger than the tag allows in a single mapping. */
if (m0->m_pkthdr.len <= dmat->dt_maxsize) {
int first = 1;
bus_addr_t lastaddr = 0;
struct mbuf *m;
/* Walk the chain, mapping each non-empty mbuf; stop on first error. */
for (m = m0; m != NULL && error == 0; m = m->m_next) {
if (m->m_len > 0) {
error = _nexus_dmamap_load_buffer(dmat,
m->m_data, m->m_len,NULL, flags, &lastaddr,
segs, nsegs, first);
first = 0;
}
}
} else {
error = EINVAL;
}
/*
 * NOTE(review): *nsegs is incremented even on the EINVAL path, matching
 * the index-to-count convention used by nexus_dmamap_load() above;
 * callers should only consult *nsegs when error == 0.
 */
++*nsegs;
return (error);
}
/*
* Like nexus_dmamap_load(), but for uios.
*/
@@ -538,7 +567,8 @@ nexus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
if (minlen > 0) {
error = _nexus_dmamap_load_buffer(dmat, addr, minlen,
td, flags, &lastaddr, &nsegs, first);
td, flags, &lastaddr, dmat->dt_segments, &nsegs,
first);
first = 0;
resid -= minlen;
@@ -654,6 +684,7 @@ struct bus_dma_methods nexus_dma_methods = {
nexus_dmamap_destroy,
nexus_dmamap_load,
nexus_dmamap_load_mbuf,
nexus_dmamap_load_mbuf_sg,
nexus_dmamap_load_uio,
nexus_dmamap_unload,
nexus_dmamap_sync,

View File

@@ -891,7 +891,7 @@ iommu_dvmamap_destroy(bus_dma_tag_t dt, bus_dmamap_t map)
static int
iommu_dvmamap_load_buffer(bus_dma_tag_t dt, struct iommu_state *is,
bus_dmamap_t map, void *buf, bus_size_t buflen, struct thread *td,
int flags, int *segp, int align)
int flags, bus_dma_segment_t *segs, int *segp, int align)
{
bus_addr_t amask, dvmaddr;
bus_size_t sgsize, esize;
@@ -948,8 +948,8 @@ iommu_dvmamap_load_buffer(bus_dma_tag_t dt, struct iommu_state *is,
*/
if (!firstpg) {
esize = ulmin(sgsize,
dt->dt_maxsegsz - dt->dt_segments[sgcnt].ds_len);
dt->dt_segments[sgcnt].ds_len += esize;
dt->dt_maxsegsz - segs[sgcnt].ds_len);
segs[sgcnt].ds_len += esize;
sgsize -= esize;
dvmaddr += esize;
}
@@ -965,8 +965,8 @@ iommu_dvmamap_load_buffer(bus_dma_tag_t dt, struct iommu_state *is,
* that such tags have maxsegsize >= maxsize.
*/
esize = ulmin(sgsize, dt->dt_maxsegsz);
dt->dt_segments[sgcnt].ds_addr = dvmaddr;
dt->dt_segments[sgcnt].ds_len = esize;
segs[sgcnt].ds_addr = dvmaddr;
segs[sgcnt].ds_len = esize;
sgsize -= esize;
dvmaddr += esize;
}
@@ -1002,7 +1002,7 @@ iommu_dvmamap_load(bus_dma_tag_t dt, bus_dmamap_t map, void *buf,
IS_UNLOCK(is);
error = iommu_dvmamap_load_buffer(dt, is, map, buf, buflen, NULL,
flags, &seg, 1);
flags, dt->dt_segments, &seg, 1);
IS_LOCK(is);
iommu_map_insq(is, map);
@@ -1045,7 +1045,8 @@ iommu_dvmamap_load_mbuf(bus_dma_tag_t dt, bus_dmamap_t map, struct mbuf *m0,
if (m->m_len == 0)
continue;
error = iommu_dvmamap_load_buffer(dt, is, map,
m->m_data, m->m_len, NULL, flags, &nsegs, first);
m->m_data, m->m_len, NULL, flags, dt->dt_segments,
&nsegs, first);
first = 0;
}
} else
@@ -1066,6 +1067,52 @@ iommu_dvmamap_load_mbuf(bus_dma_tag_t dt, bus_dmamap_t map, struct mbuf *m0,
return (error);
}
/*
 * Like iommu_dvmamap_load_mbuf(), but the resulting segment list is
 * returned to the caller through 'segs' and the segment count through
 * 'nsegs' instead of being delivered via a callback.
 *
 * NOTE(review): the identifier contains a typo ("dvamamp" instead of
 * "dvmamap").  It is kept as-is here because the iommu_dma_methods table
 * at the end of this file references this exact spelling; rename both
 * together in a follow-up.
 */
static int
iommu_dvamamp_load_mbuf_sg(bus_dma_tag_t dt, bus_dmamap_t map, struct mbuf *m0,
    bus_dma_segment_t *segs, int *nsegs, int flags)
{
	struct iommu_state *is = dt->dt_cookie;
	struct mbuf *m;
	int error = 0, first = 1;

	M_ASSERTPKTHDR(m0);

	/*
	 * iommu_dvmamap_load_buffer() uses *nsegs as the index of the
	 * segment being filled; start at -1 so the first segment lands at
	 * index 0, and convert back to a count on success below.
	 */
	*nsegs = -1;

	if ((map->dm_flags & DMF_LOADED) != 0) {
#ifdef DIAGNOSTIC
		/* Fixed: message previously named iommu_dvmamap_load_mbuf. */
		printf("iommu_dvamamp_load_mbuf_sg: map still in use\n");
#endif
		bus_dmamap_unload(dt, map);
	}

	/* Take the map off the IOMMU queue while it is being reloaded. */
	IS_LOCK(is);
	iommu_map_remq(is, map);
	IS_UNLOCK(is);

	if (m0->m_pkthdr.len <= dt->dt_maxsize) {
		/* Map each non-empty mbuf in the chain; stop on error. */
		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len == 0)
				continue;
			error = iommu_dvmamap_load_buffer(dt, is, map,
			    m->m_data, m->m_len, NULL, flags, segs,
			    nsegs, first);
			first = 0;
		}
	} else
		error = EINVAL;

	IS_LOCK(is);
	iommu_map_insq(is, map);
	if (error != 0) {
		/* Tear down any partial mapping on failure. */
		iommu_dvmamap_vunload(is, map);
	} else {
		map->dm_flags |= DMF_LOADED;
		++*nsegs;	/* last segment index -> segment count */
	}
	IS_UNLOCK(is);
	return (error);
}
static int
iommu_dvmamap_load_uio(bus_dma_tag_t dt, bus_dmamap_t map, struct uio *uio,
bus_dmamap_callback2_t *cb, void *cba, int flags)
@@ -1106,7 +1153,8 @@ iommu_dvmamap_load_uio(bus_dma_tag_t dt, bus_dmamap_t map, struct uio *uio,
continue;
error = iommu_dvmamap_load_buffer(dt, is, map,
iov[i].iov_base, minlen, td, flags, &nsegs, first);
iov[i].iov_base, minlen, td, flags, dt->dt_segments,
&nsegs, first);
first = 0;
resid -= minlen;
@@ -1219,6 +1267,7 @@ struct bus_dma_methods iommu_dma_methods = {
iommu_dvmamap_destroy,
iommu_dvmamap_load,
iommu_dvmamap_load_mbuf,
iommu_dvamamp_load_mbuf_sg,
iommu_dvmamap_load_uio,
iommu_dvmamap_unload,
iommu_dvmamap_sync,