Change the preallocation of the busdma segment mapping array from per-tag to
per-map. The per-tag scheme is not safe, and a mutex can't be used to protect
it because the mapping routines aren't allowed to sleep. This code is brought
in from the armv6 implementation.
This commit is contained in:
parent c9ec6256cb
commit ba4e597710
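
In outline, the commit moves the segment array from the tag to the map. A
minimal before/after sketch, with simplified struct and field names invented
for illustration (the real definitions are in the diff below):

/*
 * Before: one segments array owned by the tag and shared by every map
 * created from it.  Two concurrent loads on different maps scribble on
 * the same array, and the load path can't sleep, so a mutex can't fix it.
 */
struct old_style_tag {
	bus_dma_segment_t	*segments;	/* shared by all maps: racy */
	bus_dma_segment_t	tagsegs[2];	/* small inline fast path */
};

/*
 * After: every map carries its own array, co-allocated with the map, so
 * loads on different maps never touch shared storage.
 */
struct new_style_map {
	bus_dma_segment_t	*segments;	/* private to this map */
	struct sync_list	slist[];	/* C99 flexible array member */
};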
@@ -81,6 +81,7 @@ __FBSDID("$FreeBSD$");
 #include <machine/md_var.h>
 
 #define MAX_BPAGES 64
+#define MAX_DMA_SEGMENTS 4096
 #define BUS_DMA_COULD_BOUNCE BUS_DMA_BUS3
 #define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
 
@@ -111,13 +112,6 @@ struct bus_dma_tag {
 	 */
 	struct arm32_dma_range	*ranges;
 	int			_nranges;
-	/*
-	 * Most tags need one or two segments, and can use the local tagsegs
-	 * array.  For tags with a larger limit, we'll allocate a bigger array
-	 * on first use.
-	 */
-	bus_dma_segment_t	*segments;
-	bus_dma_segment_t	tagsegs[2];
 };
 
 struct bounce_page {
@@ -175,20 +169,19 @@ struct bus_dmamap {
 	bus_dmamap_callback_t	*callback;
 	void			*callback_arg;
 	int			flags;
-#define	DMAMAP_COHERENT		0x8
-#define	DMAMAP_CACHE_ALIGNED	0x10
+#define	DMAMAP_COHERENT		(1 << 0)
+#define	DMAMAP_DMAMEM_ALLOC	(1 << 1)
+#define	DMAMAP_MBUF		(1 << 2)
+#define	DMAMAP_CACHE_ALIGNED	(1 << 3)
 	STAILQ_ENTRY(bus_dmamap) links;
+	bus_dma_segment_t	*segments;
 	int			sync_count;
-	struct sync_list	*slist;
+	struct sync_list	slist[];
 };
 
 static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
 static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
 
-static struct mtx busdma_mtx;
-
-MTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF);
-
 static void init_bounce_pages(void *dummy);
 static int alloc_bounce_zone(bus_dma_tag_t dmat);
 static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
@@ -208,57 +201,16 @@ bus_dma_tag_t arm_root_dma_tag;
  * Begin block of code useful to transplant to other implementations.
  */
 
-static uma_zone_t dmamap_zone;	/* Cache of struct bus_dmamap items */
-
 static busdma_bufalloc_t coherent_allocator;	/* Cache of coherent buffers */
 static busdma_bufalloc_t standard_allocator;	/* Cache of standard buffers */
 
 MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");
 MALLOC_DEFINE(M_BOUNCE, "bounce", "busdma bounce pages");
 
-/*
- * This is the ctor function passed to uma_zcreate() for the pool of dma maps.
- * It'll need platform-specific changes if this code is copied.
- */
-static int
-dmamap_ctor(void *mem, int size, void *arg, int flags)
-{
-	bus_dmamap_t map;
-	bus_dma_tag_t dmat;
-
-	map = (bus_dmamap_t)mem;
-	dmat = (bus_dma_tag_t)arg;
-
-	dmat->map_count++;
-
-	map->dmat = dmat;
-	map->flags = 0;
-	STAILQ_INIT(&map->bpages);
-
-	return (0);
-}
-
-/*
- * This is the dtor function passed to uma_zcreate() for the pool of dma maps.
- * It may need platform-specific changes if this code is copied .
- */
-static void
-dmamap_dtor(void *mem, int size, void *arg)
-{
-	bus_dmamap_t map;
-
-	map = (bus_dmamap_t)mem;
-
-	map->dmat->map_count--;
-}
-
 static void
 busdma_init(void *dummy)
 {
-
-	/* Create a cache of maps for bus_dmamap_create(). */
-	dmamap_zone = uma_zcreate("dma maps", sizeof(struct bus_dmamap),
-	    dmamap_ctor, dmamap_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);
-
 	/* Create a cache of buffers in standard (cacheable) memory. */
 	standard_allocator = busdma_bufalloc_create("buffer",
@@ -280,11 +232,11 @@ busdma_init(void *dummy)
 
 /*
  * This init historically used SI_SUB_VM, but now the init code requires
- * malloc(9) using M_BUSDMA memory, which is set up later than SI_SUB_VM, by
- * SI_SUB_KMEM and SI_ORDER_THIRD, so we'll go right after that by using
- * SI_SUB_KMEM and SI_ORDER_FOURTH.
+ * malloc(9) using M_BUSDMA memory and the pcpu zones for counter(9), which get
+ * set up by SI_SUB_KMEM and SI_ORDER_LAST, so we'll go right after that by
+ * using SI_SUB_KMEM+1.
  */
-SYSINIT(busdma, SI_SUB_KMEM, SI_ORDER_FOURTH, busdma_init, NULL);
+SYSINIT(busdma, SI_SUB_KMEM+1, SI_ORDER_FIRST, busdma_init, NULL);
 
 /*
  * End block of code useful to transplant to other implementations.
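
For readers unfamiliar with the ordering machinery the comment above refers
to: SYSINIT(9) entries run at boot sorted by (subsystem, order), and subsystem
values are plain integers, so SI_SUB_KMEM+1 sorts after everything registered
at SI_SUB_KMEM. A minimal sketch with hypothetical names, mirroring the
pattern busdma_init() uses:

#include <sys/param.h>
#include <sys/kernel.h>

static void
example_init(void *dummy)
{
	/* By this point malloc(9) and the counter(9) pcpu zones exist. */
}
SYSINIT(example_init_id, SI_SUB_KMEM + 1, SI_ORDER_FIRST, example_init, NULL);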
@@ -406,8 +358,6 @@ dflt_lock(void *arg, bus_dma_lock_op_t op)
 /*
  * Allocate a device specific dma_tag.
  */
-#define SEG_NB 1024
-
 int
 bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
     bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
@@ -451,21 +401,8 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
 		newtag->lockfunc = dflt_lock;
 		newtag->lockfuncarg = NULL;
 	}
-	/*
-	 * If all the segments we need fit into the local tagsegs array, set the
-	 * pointer now.  Otherwise NULL the pointer and an array of segments
-	 * will be allocated later, on first use.  We don't pre-allocate now
-	 * because some tags exist just to pass contraints to children in the
-	 * device hierarchy, and they tend to use BUS_SPACE_UNRESTRICTED and we
-	 * sure don't want to try to allocate an array for that.
-	 */
-	if (newtag->nsegments <= nitems(newtag->tagsegs))
-		newtag->segments = newtag->tagsegs;
-	else
-		newtag->segments = NULL;
-	/*
-	 * Take into account any restrictions imposed by our parent tag
-	 */
+
+	/* Take into account any restrictions imposed by our parent tag */
 	if (parent != NULL) {
 		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
 		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
@@ -546,9 +483,6 @@ bus_dma_tag_destroy(bus_dma_tag_t dmat)
 		parent = dmat->parent;
 		atomic_subtract_int(&dmat->ref_count, 1);
 		if (dmat->ref_count == 0) {
-			if (dmat->segments != NULL &&
-			    dmat->segments != dmat->tagsegs)
-				free(dmat->segments, M_BUSDMA);
 			free(dmat, M_BUSDMA);
 			/*
 			 * Last reference count, so
@@ -565,7 +499,31 @@ bus_dma_tag_destroy(bus_dma_tag_t dmat)
 	return (0);
 }
 
+#include <sys/kdb.h>
+static bus_dmamap_t
+allocate_map(bus_dma_tag_t dmat, int mflags)
+{
+	int mapsize, segsize;
+	bus_dmamap_t map;
+
+	/*
+	 * Allocate the map.  The map structure ends with an embedded
+	 * variable-sized array of sync_list structures.  Following that
+	 * we allocate enough extra space to hold the array of bus_dma_segments.
+	 */
+	KASSERT(dmat->nsegments <= MAX_DMA_SEGMENTS,
+	    ("cannot allocate %u dma segments (max is %u)",
+	    dmat->nsegments, MAX_DMA_SEGMENTS));
+	segsize = sizeof(struct bus_dma_segment) * dmat->nsegments;
+	mapsize = sizeof(*map) + sizeof(struct sync_list) * dmat->nsegments;
+	map = malloc(mapsize + segsize, M_BUSDMA, mflags | M_ZERO);
+	if (map == NULL) {
+		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
+		return (NULL);
+	}
+	map->segments = (bus_dma_segment_t *)((uintptr_t)map + mapsize);
+	return (map);
+}
+
 /*
  * Allocate a handle for mapping from kva/uva/physical
  * address space into bus device space.
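
allocate_map() above carves a single malloc(9) block into three regions: the
fixed part of the map, the variable-length slist[] array, and the segments
array, with map->segments pointed just past the first two. A self-contained
userland sketch of the same layout arithmetic; all names here are
illustrative, not kernel API:

#include <stdint.h>
#include <stdlib.h>

struct seg  { uintptr_t ds_addr; size_t ds_len; };
struct sync { void *vaddr; size_t datacount; };

struct map {
	struct seg	*segments;	/* points into this same allocation */
	int		sync_count;
	struct sync	slist[];	/* flexible array member */
};

static struct map *
alloc_map(int nsegments)
{
	/* sizeof(struct map) excludes slist[], so add it in explicitly. */
	size_t mapsize = sizeof(struct map) +
	    (size_t)nsegments * sizeof(struct sync);
	size_t segsize = (size_t)nsegments * sizeof(struct seg);
	struct map *m = calloc(1, mapsize + segsize);

	if (m != NULL)
		m->segments = (struct seg *)((uintptr_t)m + mapsize);
	return (m);
}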
@@ -573,40 +531,21 @@ bus_dma_tag_destroy(bus_dma_tag_t dmat)
 int
 bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
 {
-	struct sync_list *slist;
 	bus_dmamap_t map;
 	int error = 0;
 
-	slist = malloc(sizeof(*slist) * dmat->nsegments, M_BUSDMA, M_NOWAIT);
-	if (slist == NULL)
-		return (ENOMEM);
-
-	map = uma_zalloc_arg(dmamap_zone, dmat, M_NOWAIT);
-	*mapp = map;
+	*mapp = map = allocate_map(dmat, M_NOWAIT);
 	if (map == NULL) {
-		free(slist, M_BUSDMA);
 		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
 		return (ENOMEM);
 	}
 
 	/*
-	 * If the tag's segments haven't been allocated yet we need to do it
-	 * now, because we can't sleep for resources at map load time.
-	 */
-	if (dmat->segments == NULL) {
-		dmat->segments = malloc(dmat->nsegments *
-		    sizeof(*dmat->segments), M_BUSDMA, M_NOWAIT);
-		if (dmat->segments == NULL) {
-			free(slist, M_BUSDMA);
-			uma_zfree(dmamap_zone, map);
-			*mapp = NULL;
-			return (ENOMEM);
-		}
-	}
-
-	/*
-	 * Bouncing might be required if the driver asks for an active
-	 * exclusion region, a data alignment that is stricter than 1, and/or
-	 * an active address boundary.
+	 * Bouncing might be required if the driver asks for an exclusion
+	 * region, a data alignment that is stricter than 1, or DMA that begins
+	 * or ends with a partial cacheline.  Whether bouncing will actually
+	 * happen can't be known until mapping time, but we need to pre-allocate
+	 * resources now because we might not be allowed to at mapping time.
 	 */
 	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
 
@@ -616,8 +555,7 @@ bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
 
 	if (dmat->bounce_zone == NULL) {
 		if ((error = alloc_bounce_zone(dmat)) != 0) {
-			free(slist, M_BUSDMA);
-			uma_zfree(dmamap_zone, map);
+			free(map, M_BUSDMA);
 			*mapp = NULL;
 			return (error);
 		}
@@ -652,7 +590,6 @@ bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
 		bz->map_count++;
 	}
 	map->sync_count = 0;
-	map->slist = slist;
 	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
 	    __func__, dmat, dmat->flags, error);
 
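For context, the consumer side of the function changed above: a driver
typically creates a constrained tag once at attach time and then creates maps
from it. A hedged sketch of conventional bus_dma(9) usage; the softc fields
and the size/segment numbers are invented for the example:

error = bus_dma_tag_create(
    bus_get_dma_tag(dev),	/* parent: inherit platform restrictions */
    1, 0,			/* alignment, boundary */
    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr: device only does 32-bit DMA */
    BUS_SPACE_MAXADDR,		/* highaddr */
    NULL, NULL,			/* no filter function */
    4096, 8, 4096,		/* maxsize, nsegments, maxsegsz */
    0, NULL, NULL,		/* flags, lockfunc, lockfuncarg */
    &sc->dma_tag);
if (error == 0)
	/* With this commit, the 8-entry segment array is allocated here,
	   privately for this map, rather than hanging off the tag. */
	error = bus_dmamap_create(sc->dma_tag, 0, &sc->dma_map);
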
@@ -672,12 +609,12 @@ bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
 		    __func__, dmat, EBUSY);
 		return (EBUSY);
 	}
-	free(map->slist, M_BUSDMA);
-	uma_zfree(dmamap_zone, map);
 	if (dmat->bounce_zone)
 		dmat->bounce_zone->map_count--;
+	free(map, M_BUSDMA);
+	dmat->map_count--;
 	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
-	return (0);
+	return (0);
 }
 
 /*
@@ -686,37 +623,31 @@
  * the allocated memory, and a pointer to an associated bus_dmamap.
  */
 int
-bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddrp, int flags,
+bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
     bus_dmamap_t *mapp)
 {
-	struct sync_list *slist;
-	void * vaddr;
-	struct busdma_bufzone *bufzone;
 	busdma_bufalloc_t ba;
+	struct busdma_bufzone *bufzone;
 	bus_dmamap_t map;
-	int mflags;
 	vm_memattr_t memattr;
+	int mflags;
 
 	if (flags & BUS_DMA_NOWAIT)
 		mflags = M_NOWAIT;
 	else
 		mflags = M_WAITOK;
-	/*
-	 * If the tag's segments haven't been allocated yet we need to do it
-	 * now, because we can't sleep for resources at map load time.
-	 */
-	if (dmat->segments == NULL)
-		dmat->segments = malloc(dmat->nsegments *
-		    sizeof(*dmat->segments), M_BUSDMA, mflags);
+	if (flags & BUS_DMA_ZERO)
+		mflags |= M_ZERO;
 
-	slist = malloc(sizeof(*slist) * dmat->nsegments, M_BUSDMA, M_NOWAIT);
-	if (slist == NULL)
-		return (ENOMEM);
-	map = uma_zalloc_arg(dmamap_zone, dmat, mflags);
+	*mapp = map = allocate_map(dmat, mflags);
 	if (map == NULL) {
-		free(slist, M_BUSDMA);
 		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
 		    __func__, dmat, dmat->flags, ENOMEM);
 		return (ENOMEM);
 	}
+	map->flags = DMAMAP_DMAMEM_ALLOC;
 
 	/* Choose a busdma buffer allocator based on memory type flags. */
 	if (flags & BUS_DMA_COHERENT) {
 		memattr = VM_MEMATTR_UNCACHEABLE;
 		ba = coherent_allocator;
@@ -725,11 +656,6 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddrp, int flags,
 		memattr = VM_MEMATTR_DEFAULT;
 		ba = standard_allocator;
 	}
-	/* All buffers we allocate are cache-aligned. */
-	map->flags |= DMAMAP_CACHE_ALIGNED;
-
-	if (flags & BUS_DMA_ZERO)
-		mflags |= M_ZERO;
 
 	/*
 	 * Try to find a bufzone in the allocator that holds a cache of buffers
@@ -753,28 +679,28 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddrp, int flags,
 	 */
 	if (bufzone != NULL && dmat->alignment <= bufzone->size &&
 	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) {
-		vaddr = uma_zalloc(bufzone->umazone, mflags);
+		*vaddr = uma_zalloc(bufzone->umazone, mflags);
 	} else if (dmat->nsegments >= btoc(dmat->maxsize) &&
 	    dmat->alignment <= PAGE_SIZE && dmat->boundary == 0) {
-		vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
+		*vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
 		    mflags, 0, dmat->lowaddr, memattr);
 	} else {
-		vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize,
+		*vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize,
 		    mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary,
 		    memattr);
 	}
-	if (vaddr == NULL) {
-		free(slist, M_BUSDMA);
-		uma_zfree(dmamap_zone, map);
-		map = NULL;
-	} else {
-		map->slist = slist;
-		map->sync_count = 0;
+	if (*vaddr == NULL) {
+		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
+		    __func__, dmat, dmat->flags, ENOMEM);
+		free(map, M_BUSDMA);
+		*mapp = NULL;
+		return (ENOMEM);
 	}
-	*vaddrp = vaddr;
-	*mapp = map;
+	dmat->map_count++;
 
-	return (vaddr == NULL ? ENOMEM : 0);
+	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
+	    __func__, dmat, dmat->flags, 0);
+	return (0);
 }
 
 /*
@@ -792,9 +718,6 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
 	else
 		ba = standard_allocator;
 
-	free(map->slist, M_BUSDMA);
-	uma_zfree(dmamap_zone, map);
-
 	bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);
 
 	if (bufzone != NULL && dmat->alignment <= bufzone->size &&
@@ -802,6 +725,10 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
 		uma_zfree(bufzone->umazone, vaddr);
 	else
 		kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize);
 
+	dmat->map_count--;
+	free(map, M_BUSDMA);
 	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
 }
 
 static void
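The matching consumer calls for the bus_dmamem_alloc()/bus_dmamem_free() pair
changed above, as a short sketch (everything except the bus_dma(9) API names
is hypothetical):

void *ring;			/* CPU-visible address of the DMA buffer */
bus_dmamap_t ring_map;

error = bus_dmamem_alloc(sc->dma_tag, &ring,
    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &ring_map);
if (error != 0)
	return (error);
/* ... bus_dmamap_load(), DMA, bus_dmamap_unload() ... */
bus_dmamem_free(sc->dma_tag, ring, ring_map);
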
@@ -962,7 +889,7 @@ _bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
 	int error;
 
 	if (segs == NULL)
-		segs = dmat->segments;
+		segs = map->segments;
 
 	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
 		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
@@ -1046,7 +973,7 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
 	int error = 0;
 
 	if (segs == NULL)
-		segs = dmat->segments;
+		segs = map->segments;
 	if ((flags & BUS_DMA_LOAD_MBUF) != 0)
 		map->flags |= DMAMAP_CACHE_ALIGNED;
 
@@ -1147,7 +1074,7 @@ _bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
 {
 
 	if (segs == NULL)
-		segs = dmat->segments;
+		segs = map->segments;
 	return (segs);
 }
 
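The three hunks above retarget the default segment array from dmat->segments
to map->segments. Drivers that pass segs == NULL only ever see the array
through the load callback, so the change is invisible to them; a hedged
sketch of that callback side, with invented driver names:

static void
example_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct example_softc *sc = arg;

	if (error != 0)
		return;
	/* segs now points at the per-map array rather than the tag's. */
	sc->ring_busaddr = segs[0].ds_addr;
}

error = bus_dmamap_load(sc->dma_tag, sc->dma_map, sc->ring, 4096,
    example_dma_callback, sc, BUS_DMA_NOWAIT);
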
@@ -1158,12 +1085,22 @@ void
 _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
 {
 	struct bounce_page *bpage;
+	struct bounce_zone *bz;
 
-	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
-		STAILQ_REMOVE_HEAD(&map->bpages, links);
-		free_bounce_page(dmat, bpage);
+	if ((bz = dmat->bounce_zone) != NULL) {
+		while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
+			STAILQ_REMOVE_HEAD(&map->bpages, links);
+			free_bounce_page(dmat, bpage);
+		}
+
+		bz = dmat->bounce_zone;
+		bz->free_bpages += map->pagesreserved;
+		bz->reserved_bpages -= map->pagesreserved;
+		map->pagesreserved = 0;
+		map->pagesneeded = 0;
 	}
 	map->sync_count = 0;
+	map->flags &= ~DMAMAP_MBUF;
 }
 
 static void