Switch sparc64 busdma to use a dynamically allocated segment list rather
than a stack-limited list.  This removes the artificial limit on s/g list
size.
Scott Long 2004-06-28 03:49:13 +00:00
parent 29b95d5a7e
commit 8e0bfc6b32
3 changed files with 45 additions and 60 deletions
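
The heart of the change is in sparc64_dma_alloc_map() below: each load path
used to carve a variable-length segment array off the kernel stack, so
dt_nsegments was capped by what a stack frame could hold; now the array is
allocated once per tag, sized by dt_nsegments, the first time a map is
created.  A condensed userland model of the two approaches (identifiers
mirror the diff; kernel malloc(9) flags and the busdma glue are replaced
with stdlib equivalents):

#include <errno.h>
#include <stdlib.h>

typedef struct {
        unsigned long ds_addr;          /* segment bus address */
        unsigned long ds_len;           /* segment length */
} bus_dma_segment_t;

struct bus_dma_tag {
        int nsegments;                  /* max s/g entries per load */
        bus_dma_segment_t *segments;    /* lazily allocated, tag-owned */
};

/*
 * Before: every load routine declared, per call, on the kernel stack:
 *         bus_dma_segment_t dm_segments[dmat->dt_nsegments];  (GNU C VLA)
 * which artificially limits nsegments to what the stack can hold.
 *
 * After: allocate the list once per tag, on first map creation, and
 * free it when the tag's last reference goes away.
 */
static int
dma_alloc_segments(struct bus_dma_tag *dmat)
{
        if (dmat->segments == NULL) {
                dmat->segments = malloc(sizeof(bus_dma_segment_t) *
                    dmat->nsegments);
                if (dmat->segments == NULL)
                        return (ENOMEM);
        }
        return (0);
}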


@@ -1020,6 +1020,7 @@ struct bus_dma_tag {
         int             dt_map_count;
         bus_dma_lock_t  *dt_lockfunc;
         void *          *dt_lockfuncarg;
+        bus_dma_segment_t *dt_segments;
 
         struct bus_dma_methods *dt_mt;
 };


@@ -246,7 +246,9 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
                 newtag->dt_lockfunc = dflt_lock;
                 newtag->dt_lockfuncarg = NULL;
         }
 
+        newtag->dt_segments = NULL;
+
         /* Take into account any restrictions imposed by our parent tag */
         if (parent != NULL) {
                 newtag->dt_lowaddr = ulmin(parent->dt_lowaddr,
@@ -278,6 +280,8 @@ bus_dma_tag_destroy(bus_dma_tag_t dmat)
                 parent = dmat->dt_parent;
                 atomic_subtract_int(&dmat->dt_ref_count, 1);
                 if (dmat->dt_ref_count == 0) {
+                        if (dmat->dt_segments != NULL)
+                                free(dmat->dt_segments, M_DEVBUF);
                         free(dmat, M_DEVBUF);
                         /*
                          * Last reference count, so
@@ -297,6 +301,13 @@ int
 sparc64_dma_alloc_map(bus_dma_tag_t dmat, bus_dmamap_t *mapp)
 {
 
+        if (dmat->dt_segments == NULL) {
+                dmat->dt_segments = (bus_dma_segment_t *)malloc(
+                    sizeof(bus_dma_segment_t) * dmat->dt_nsegments, M_DEVBUF,
+                    M_NOWAIT);
+                if (dmat->dt_segments == NULL)
+                        return (ENOMEM);
+        }
         *mapp = malloc(sizeof(**mapp), M_DEVBUF, M_NOWAIT | M_ZERO);
         if (*mapp == NULL)
                 return (ENOMEM);
@@ -336,16 +347,18 @@ nexus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
  * first indicates if this is the first invocation of this function.
  */
 static int
-_nexus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
-    void *buf, bus_size_t buflen, struct thread *td, int flags,
-    bus_addr_t *lastaddrp, int *segp, int first)
+_nexus_dmamap_load_buffer(bus_dma_tag_t dmat, void *buf, bus_size_t buflen,
+    struct thread *td, int flags, bus_addr_t *lastaddrp, int *segp, int first)
 {
+        bus_dma_segment_t *segs;
         bus_size_t sgsize;
         bus_addr_t curaddr, lastaddr, baddr, bmask;
         vm_offset_t vaddr = (vm_offset_t)buf;
         int seg;
         pmap_t pmap;
 
+        segs = dmat->dt_segments;
+
         if (td != NULL)
                 pmap = vmspace_pmap(td->td_proc->p_vmspace);
         else
@@ -430,19 +443,14 @@ nexus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
     bus_size_t buflen, bus_dmamap_callback_t *callback, void *callback_arg,
     int flags)
 {
-#ifdef __GNUC__
-        bus_dma_segment_t dm_segments[dmat->dt_nsegments];
-#else
-        bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
-#endif
         bus_addr_t lastaddr;
         int error, nsegs;
 
-        error = _nexus_dmamap_load_buffer(dmat, dm_segments, buf, buflen,
-            NULL, flags, &lastaddr, &nsegs, 1);
+        error = _nexus_dmamap_load_buffer(dmat, buf, buflen, NULL, flags,
+            &lastaddr, &nsegs, 1);
 
         if (error == 0) {
-                (*callback)(callback_arg, dm_segments, nsegs + 1, 0);
+                (*callback)(callback_arg, dmat->dt_segments, nsegs + 1, 0);
                 map->dm_flags |= DMF_LOADED;
         } else
                 (*callback)(callback_arg, NULL, 0, error);
@@ -457,11 +465,6 @@ static int
 nexus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
     bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
 {
-#ifdef __GNUC__
-        bus_dma_segment_t dm_segments[dmat->dt_nsegments];
-#else
-        bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
-#endif
         int nsegs, error;
 
         M_ASSERTPKTHDR(m0);
@@ -476,8 +479,8 @@ nexus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
                 for (m = m0; m != NULL && error == 0; m = m->m_next) {
                         if (m->m_len > 0) {
                                 error = _nexus_dmamap_load_buffer(dmat,
-                                    dm_segments, m->m_data, m->m_len, NULL,
-                                    flags, &lastaddr, &nsegs, first);
+                                    m->m_data, m->m_len, NULL, flags, &lastaddr,
+                                    &nsegs, first);
                                 first = 0;
                         }
                 }
@@ -487,10 +490,10 @@ nexus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
 
         if (error) {
                 /* force "no valid mappings" in callback */
-                (*callback)(callback_arg, dm_segments, 0, 0, error);
+                (*callback)(callback_arg, dmat->dt_segments, 0, 0, error);
         } else {
                 map->dm_flags |= DMF_LOADED;
-                (*callback)(callback_arg, dm_segments, nsegs + 1,
+                (*callback)(callback_arg, dmat->dt_segments, nsegs + 1,
                     m0->m_pkthdr.len, error);
         }
         return (error);
@@ -504,11 +507,6 @@ nexus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
     bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
 {
         bus_addr_t lastaddr;
-#ifdef __GNUC__
-        bus_dma_segment_t dm_segments[dmat->dt_nsegments];
-#else
-        bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
-#endif
         int nsegs, error, first, i;
         bus_size_t resid;
         struct iovec *iov;
@@ -536,8 +534,8 @@ nexus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
                 caddr_t addr = (caddr_t) iov[i].iov_base;
 
                 if (minlen > 0) {
-                        error = _nexus_dmamap_load_buffer(dmat, dm_segments,
-                            addr, minlen, td, flags, &lastaddr, &nsegs, first);
+                        error = _nexus_dmamap_load_buffer(dmat, addr, minlen,
+                            td, flags, &lastaddr, &nsegs, first);
                         first = 0;
 
                         resid -= minlen;
@@ -546,10 +544,10 @@ nexus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
 
         if (error) {
                 /* force "no valid mappings" in callback */
-                (*callback)(callback_arg, dm_segments, 0, 0, error);
+                (*callback)(callback_arg, dmat->dt_segments, 0, 0, error);
         } else {
                 map->dm_flags |= DMF_LOADED;
-                (*callback)(callback_arg, dm_segments, nsegs + 1,
+                (*callback)(callback_arg, dmat->dt_segments, nsegs + 1,
                     uio->uio_resid, error);
         }
         return (error);
@@ -677,6 +675,7 @@ struct bus_dma_tag nexus_dmatag = {
         0,
         NULL,
         NULL,
+        NULL,
         &nexus_dma_methods,
 };
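
One consequence of moving the list into the tag, visible in the
nexus_dmamap_load* hunks above: the segment array handed to the callback is
now tag-owned storage that the next load on the same tag will overwrite,
rather than a stack array private to this call.  The usual
bus_dmamap_callback_t discipline already covers this; a sketch, where
example_softc and its fields are hypothetical driver state:

#include <machine/bus.h>

struct example_softc {
        struct {
                bus_addr_t addr;
                bus_size_t len;
        } sg[64];                       /* hypothetical copied-out s/g list */
        int nsg;
};

static void
example_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
        struct example_softc *sc = arg;
        int i;

        if (error != 0)
                return;
        /* Copy the segments out; do not cache the segs pointer itself. */
        for (i = 0; i < nseg; i++) {
                sc->sg[i].addr = segs[i].ds_addr;
                sc->sg[i].len = segs[i].ds_len;
        }
        sc->nsg = nseg;
}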


@@ -889,8 +889,8 @@ iommu_dvmamap_destroy(bus_dma_tag_t dt, bus_dmamap_t map)
  */
 static int
 iommu_dvmamap_load_buffer(bus_dma_tag_t dt, struct iommu_state *is,
-    bus_dmamap_t map, bus_dma_segment_t sgs[], void *buf,
-    bus_size_t buflen, struct thread *td, int flags, int *segp, int align)
+    bus_dmamap_t map, void *buf, bus_size_t buflen, struct thread *td,
+    int flags, int *segp, int align)
 {
         bus_addr_t amask, dvmaddr;
         bus_size_t sgsize, esize;
@@ -947,8 +947,8 @@ iommu_dvmamap_load_buffer(bus_dma_tag_t dt, struct iommu_state *is,
                  */
                 if (!firstpg) {
                         esize = ulmin(sgsize,
-                            dt->dt_maxsegsz - sgs[sgcnt].ds_len);
-                        sgs[sgcnt].ds_len += esize;
+                            dt->dt_maxsegsz - dt->dt_segments[sgcnt].ds_len);
+                        dt->dt_segments[sgcnt].ds_len += esize;
                         sgsize -= esize;
                         dvmaddr += esize;
                 }
@@ -965,8 +965,8 @@ iommu_dvmamap_load_buffer(bus_dma_tag_t dt, struct iommu_state *is,
                          * that such tags have maxsegsize >= maxsize.
                          */
                         esize = ulmin(sgsize, dt->dt_maxsegsz);
-                        sgs[sgcnt].ds_addr = dvmaddr;
-                        sgs[sgcnt].ds_len = esize;
+                        dt->dt_segments[sgcnt].ds_addr = dvmaddr;
+                        dt->dt_segments[sgcnt].ds_len = esize;
                         sgsize -= esize;
                         dvmaddr += esize;
                 }
@@ -983,11 +983,6 @@ iommu_dvmamap_load(bus_dma_tag_t dt, bus_dmamap_t map, void *buf,
     int flags)
 {
         struct iommu_state *is = dt->dt_cookie;
-#ifdef __GNUC__
-        bus_dma_segment_t sgs[dt->dt_nsegments];
-#else
-        bus_dma_segment_t sgs[BUS_DMAMAP_NSEGS];
-#endif
         int error, seg = -1;
 
         if ((map->dm_flags & DMF_LOADED) != 0) {
@@ -1006,7 +1001,7 @@ iommu_dvmamap_load(bus_dma_tag_t dt, bus_dmamap_t map, void *buf,
         iommu_map_remq(is, map);
         IS_UNLOCK(is);
 
-        error = iommu_dvmamap_load_buffer(dt, is, map, sgs, buf, buflen, NULL,
+        error = iommu_dvmamap_load_buffer(dt, is, map, buf, buflen, NULL,
             flags, &seg, 1);
 
         IS_LOCK(is);
@@ -1014,11 +1009,11 @@ iommu_dvmamap_load(bus_dma_tag_t dt, bus_dmamap_t map, void *buf,
         if (error != 0) {
                 iommu_dvmamap_vunload(is, map);
                 IS_UNLOCK(is);
-                (*cb)(cba, sgs, 0, error);
+                (*cb)(cba, dt->dt_segments, 0, error);
         } else {
                 IS_UNLOCK(is);
                 map->dm_flags |= DMF_LOADED;
-                (*cb)(cba, sgs, seg + 1, 0);
+                (*cb)(cba, dt->dt_segments, seg + 1, 0);
         }
 
         return (error);
@@ -1029,11 +1024,6 @@ iommu_dvmamap_load_mbuf(bus_dma_tag_t dt, bus_dmamap_t map, struct mbuf *m0,
     bus_dmamap_callback2_t *cb, void *cba, int flags)
 {
         struct iommu_state *is = dt->dt_cookie;
-#ifdef __GNUC__
-        bus_dma_segment_t sgs[dt->dt_nsegments];
-#else
-        bus_dma_segment_t sgs[BUS_DMAMAP_NSEGS];
-#endif
         struct mbuf *m;
         int error = 0, first = 1, nsegs = -1;
@@ -1054,7 +1044,7 @@ iommu_dvmamap_load_mbuf(bus_dma_tag_t dt, bus_dmamap_t map, struct mbuf *m0,
         for (m = m0; m != NULL && error == 0; m = m->m_next) {
                 if (m->m_len == 0)
                         continue;
-                error = iommu_dvmamap_load_buffer(dt, is, map, sgs,
+                error = iommu_dvmamap_load_buffer(dt, is, map,
                     m->m_data, m->m_len, NULL, flags, &nsegs, first);
                 first = 0;
         }
@@ -1067,11 +1057,11 @@ iommu_dvmamap_load_mbuf(bus_dma_tag_t dt, bus_dmamap_t map, struct mbuf *m0,
                 iommu_dvmamap_vunload(is, map);
                 IS_UNLOCK(is);
                 /* force "no valid mappings" in callback */
-                (*cb)(cba, sgs, 0, 0, error);
+                (*cb)(cba, dt->dt_segments, 0, 0, error);
         } else {
                 IS_UNLOCK(is);
                 map->dm_flags |= DMF_LOADED;
-                (*cb)(cba, sgs, nsegs + 1, m0->m_pkthdr.len, 0);
+                (*cb)(cba, dt->dt_segments, nsegs + 1, m0->m_pkthdr.len, 0);
         }
         return (error);
 }
@@ -1081,11 +1071,6 @@ iommu_dvmamap_load_uio(bus_dma_tag_t dt, bus_dmamap_t map, struct uio *uio,
     bus_dmamap_callback2_t *cb, void *cba, int flags)
 {
         struct iommu_state *is = dt->dt_cookie;
-#ifdef __GNUC__
-        bus_dma_segment_t sgs[dt->dt_nsegments];
-#else
-        bus_dma_segment_t sgs[BUS_DMAMAP_NSEGS];
-#endif
         struct iovec *iov;
         struct thread *td = NULL;
         bus_size_t minlen, resid;
@@ -1120,7 +1105,7 @@ iommu_dvmamap_load_uio(bus_dma_tag_t dt, bus_dmamap_t map, struct uio *uio,
                 if (minlen == 0)
                         continue;
 
-                error = iommu_dvmamap_load_buffer(dt, is, map, sgs,
+                error = iommu_dvmamap_load_buffer(dt, is, map,
                     iov[i].iov_base, minlen, td, flags, &nsegs, first);
                 first = 0;
@@ -1133,11 +1118,11 @@ iommu_dvmamap_load_uio(bus_dma_tag_t dt, bus_dmamap_t map, struct uio *uio,
                 iommu_dvmamap_vunload(is, map);
                 IS_UNLOCK(is);
                 /* force "no valid mappings" in callback */
-                (*cb)(cba, sgs, 0, 0, error);
+                (*cb)(cba, dt->dt_segments, 0, 0, error);
         } else {
                 IS_UNLOCK(is);
                 map->dm_flags |= DMF_LOADED;
-                (*cb)(cba, sgs, nsegs + 1, uio->uio_resid, 0);
+                (*cb)(cba, dt->dt_segments, nsegs + 1, uio->uio_resid, 0);
         }
         return (error);
 }
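
With the per-tag list, nsegments is bounded only by the malloc done at first
map creation, not by BUS_DMAMAP_NSEGS or by what a stack frame can hold.  A
hedged sketch of a tag that takes advantage of this, assuming the
bus_dma_tag_create(9) signature of this era; EXAMPLE_NSEG is a hypothetical
driver constant:

#include <sys/param.h>
#include <machine/bus.h>

#define EXAMPLE_NSEG    128             /* no longer limited by the stack */

static int
example_create_tag(bus_dma_tag_t *tagp)
{
        /*
         * The EXAMPLE_NSEG-entry segment list costs one M_DEVBUF
         * allocation the first time a map is created on this tag, and
         * is freed with the tag's last reference.
         */
        return (bus_dma_tag_create(NULL,        /* parent */
            1, 0,                               /* alignment, boundary */
            BUS_SPACE_MAXADDR,                  /* lowaddr */
            BUS_SPACE_MAXADDR,                  /* highaddr */
            NULL, NULL,                         /* filter, filterarg */
            MAXPHYS,                            /* maxsize */
            EXAMPLE_NSEG,                       /* nsegments */
            BUS_SPACE_MAXSIZE,                  /* maxsegsz */
            0,                                  /* flags */
            NULL, NULL,                         /* lockfunc, lockfuncarg */
            tagp));
}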