Add support for NUMA domains to bus dma tags.  This causes all memory
allocated with a tag to come from the specified domain if it meets the
other constraints provided by the tag.  Automatically create a tag at
the root of each bus specifying the domain local to that bus if
available.

Reviewed by:	jhb, kib
Tested by:	pho
Sponsored by:	Netflix, Dell/EMC Isilon
Differential Revision:	https://reviews.freebsd.org/D13545
Author:		Jeff Roberson
Date:		2018-01-12 23:34:16 +00:00
Commit:		6f4acaf4c9
Parent:		ab3185d15e
Notes:		svn2git 2020-12-20 02:59:44 +00:00
		svn path=/head/; revision=327901
13 changed files with 223 additions and 72 deletions
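
For a sense of the consumer-side API, here is a minimal sketch of a driver
attach routine (the "foo" driver, its softc, and the tag parameters are
hypothetical; bus_get_dma_tag(), bus_get_domain(), bus_dma_tag_create(), and
the new bus_dma_tag_set_domain() are the interfaces this commit adds or uses):

    #include <sys/param.h>
    #include <sys/bus.h>
    #include <machine/bus.h>

    struct foo_softc {
            bus_dma_tag_t foo_dma_tag;      /* hypothetical softc member */
    };

    static int
    foo_attach(device_t dev)
    {
            struct foo_softc *sc = device_get_softc(dev);
            int domain, error;

            /* A tag derived from the bus root tag inherits its domain. */
            error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
                BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
                BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED,
                BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->foo_dma_tag);
            if (error != 0)
                    return (error);
            /* Optionally pin allocations to the device-local domain. */
            if (bus_get_domain(dev, &domain) == 0)
                    (void)bus_dma_tag_set_domain(sc->foo_dma_tag, domain);
            return (0);
    }

This mirrors what acpi_pcib_acpi_attach() does below for the ACPI host-PCI
bridge, where a failure to query or set the domain is deliberately non-fatal.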


@@ -499,6 +499,13 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
return (error);
}
int
bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain)
{
return (0);
}
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{


@@ -562,6 +562,13 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
return (error);
}
int
bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain)
{
return (0);
}
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{


@@ -223,3 +223,9 @@ bus_dma_tag_destroy(bus_dma_tag_t dmat)
return (tc->impl->tag_destroy(dmat));
}
int
bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain)
{
return (0);
}


@@ -60,6 +60,7 @@ ACPI_MODULE_NAME("PCI_ACPI")
struct acpi_hpcib_softc {
device_t ap_dev;
ACPI_HANDLE ap_handle;
bus_dma_tag_t ap_dma_tag;
int ap_flags;
uint32_t ap_osc_ctl;
@@ -108,6 +109,7 @@ static int acpi_pcib_acpi_release_resource(device_t dev,
#endif
static int acpi_pcib_request_feature(device_t pcib, device_t dev,
enum pci_feature feature);
static bus_dma_tag_t acpi_pcib_get_dma_tag(device_t bus, device_t child);
static device_method_t acpi_pcib_acpi_methods[] = {
/* Device interface */
@@ -136,6 +138,7 @@ static device_method_t acpi_pcib_acpi_methods[] = {
DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
DEVMETHOD(bus_get_cpus, acpi_pcib_get_cpus),
DEVMETHOD(bus_get_dma_tag, acpi_pcib_get_dma_tag),
/* pcib interface */
DEVMETHOD(pcib_maxslots, pcib_maxslots),
@@ -366,6 +369,7 @@ acpi_pcib_acpi_attach(device_t dev)
rman_res_t start;
int rid;
#endif
int error, domain;
uint8_t busno;
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
@@ -537,15 +541,33 @@ acpi_pcib_acpi_attach(device_t dev)
acpi_pcib_fetch_prt(dev, &sc->ap_prt);
error = bus_dma_tag_create(bus_get_dma_tag(dev), 1,
0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED,
BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->ap_dma_tag);
if (error != 0)
goto errout;
error = bus_get_domain(dev, &domain);
if (error == 0)
error = bus_dma_tag_set_domain(sc->ap_dma_tag, domain);
/* Don't fail to attach if the domain can't be queried or set. */
error = 0;
bus_generic_probe(dev);
if (device_add_child(dev, "pci", -1) == NULL) {
device_printf(device_get_parent(dev), "couldn't attach pci bus\n");
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
pcib_host_res_free(dev, &sc->ap_host_res);
#endif
return (ENXIO);
bus_dma_tag_destroy(sc->ap_dma_tag);
sc->ap_dma_tag = NULL;
error = ENXIO;
goto errout;
}
return (bus_generic_attach(dev));
errout:
device_printf(device_get_parent(dev), "couldn't attach pci bus\n");
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
pcib_host_res_free(dev, &sc->ap_host_res);
#endif
return (error);
}
/*
@@ -753,3 +775,13 @@ acpi_pcib_request_feature(device_t pcib, device_t dev, enum pci_feature feature)
return (acpi_pcib_osc(sc, osc_ctl));
}
static bus_dma_tag_t
acpi_pcib_get_dma_tag(device_t bus, device_t child)
{
struct acpi_hpcib_softc *sc;
sc = device_get_softc(bus);
return (sc->ap_dma_tag);
}


@@ -480,6 +480,13 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
return (error);
}
int
bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain)
{
return (0);
}
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{


@@ -340,6 +340,13 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
return (error);
}
int
bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain)
{
return (0);
}
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{


@@ -176,6 +176,14 @@ int bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
void *lockfuncarg, bus_dma_tag_t *dmat);
/*
* Set the memory domain to be used for allocations.
*
* Automatic for PCI devices. Must be set prior to creating maps or
* allocating memory.
*/
int bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain);
int bus_dma_tag_destroy(bus_dma_tag_t dmat);
/*
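
Because the domain must be set before any maps or memory exist, the call
order matters; a hedged sketch (the tag parameters are arbitrary, and the
map_count == 0 requirement is KASSERTed by the bounce implementation later
in this commit):

    bus_dma_tag_t tag;
    bus_dmamap_t map;
    int error;

    error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
        BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
        PAGE_SIZE, 1, PAGE_SIZE, 0, NULL, NULL, &tag);
    if (error == 0)
        error = bus_dma_tag_set_domain(tag, domain);  /* domain first */
    if (error == 0)
        error = bus_dmamap_create(tag, 0, &map);      /* maps only after */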


@@ -198,20 +198,32 @@ vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *p1,
(uintmax_t)p1->end, (uintmax_t)p2->start, (uintmax_t)p2->end);
}
boolean_t
vm_phys_domain_intersects(long mask, vm_paddr_t low, vm_paddr_t high)
int
vm_phys_domain_match(int prefer, vm_paddr_t low, vm_paddr_t high)
{
struct vm_phys_seg *s;
int idx;
#ifdef VM_NUMA_ALLOC
domainset_t mask;
int i;
while ((idx = ffsl(mask)) != 0) {
idx--; /* ffsl counts from 1 */
mask &= ~(1UL << idx);
s = &vm_phys_segs[idx];
if (low < s->end && high > s->start)
return (TRUE);
}
return (FALSE);
if (vm_ndomains == 1 || mem_affinity == NULL)
return (0);
DOMAINSET_ZERO(&mask);
/*
* Check for any memory that overlaps low, high.
*/
for (i = 0; mem_affinity[i].end != 0; i++)
if (mem_affinity[i].start <= high &&
mem_affinity[i].end >= low)
DOMAINSET_SET(mem_affinity[i].domain, &mask);
if (prefer != -1 && DOMAINSET_ISSET(prefer, &mask))
return (prefer);
if (DOMAINSET_EMPTY(&mask))
panic("vm_phys_domain_match: Impossible constraint");
return (DOMAINSET_FFS(&mask) - 1);
#else
return (0);
#endif
}
/*
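
To make the selection policy concrete, here is a small user-space
re-implementation of the logic above (an illustrative sketch only: a plain
bitmask and a toy affinity table stand in for domainset_t and mem_affinity):

    #include <assert.h>
    #include <stdio.h>
    #include <strings.h>        /* ffs() */

    struct affinity { unsigned long start, end; int domain; };

    /* Toy table; a zero 'end' terminates it, as with mem_affinity. */
    static const struct affinity table[] = {
            { 0x0UL,        0x80000000UL, 0 },
            { 0x80000000UL, 0xffffffffUL, 1 },
            { 0, 0, 0 },
    };

    static int
    domain_match(int prefer, unsigned long low, unsigned long high)
    {
            unsigned mask = 0;
            int i;

            /* Collect every domain whose memory overlaps [low, high]. */
            for (i = 0; table[i].end != 0; i++)
                    if (table[i].start <= high && table[i].end >= low)
                            mask |= 1u << table[i].domain;
            if (prefer != -1 && (mask & (1u << prefer)) != 0)
                    return (prefer);
            assert(mask != 0);  /* the kernel version panics here */
            return (ffs((int)mask) - 1);
    }

    int
    main(void)
    {
            /* A high limit below 2GB can only be satisfied by domain 0. */
            printf("%d\n", domain_match(1, 0, 0x7fffffffUL));
            /* An unconstrained range keeps the preferred domain. */
            printf("%d\n", domain_match(1, 0, ~0UL));
            return (0);
    }

busdma uses the function in exactly this shape: common_bus_dma_tag_create()
later in this commit calls vm_phys_domain_match(domain, 0ul, lowaddr), so a
tag is never assigned a domain that cannot satisfy its lowaddr constraint.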


@@ -77,7 +77,7 @@ vm_page_t vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low,
vm_page_t vm_phys_alloc_freelist_pages(int domain, int freelist, int pool,
int order);
vm_page_t vm_phys_alloc_pages(int domain, int pool, int order);
boolean_t vm_phys_domain_intersects(long mask, vm_paddr_t low, vm_paddr_t high);
int vm_phys_domain_match(int prefer, vm_paddr_t low, vm_paddr_t high);
int vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
vm_memattr_t memattr);
void vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end);


@@ -50,6 +50,7 @@ struct bus_dma_tag_common {
bus_dma_lock_t *lockfunc;
void *lockfuncarg;
int ref_count;
int domain;
};
struct bus_dma_impl {
@@ -60,6 +61,7 @@ struct bus_dma_impl {
bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
void *lockfuncarg, bus_dma_tag_t *dmat);
int (*tag_destroy)(bus_dma_tag_t dmat);
int (*tag_set_domain)(bus_dma_tag_t);
int (*map_create)(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp);
int (*map_destroy)(bus_dma_tag_t dmat, bus_dmamap_t map);
int (*mem_alloc)(bus_dma_tag_t dmat, void** vaddr, int flags,


@@ -325,6 +325,13 @@ dmar_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
return (error);
}
static int
dmar_bus_dma_tag_set_domain(bus_dma_tag_t dmat)
{
return (0);
}
static int
dmar_bus_dma_tag_destroy(bus_dma_tag_t dmat1)
{
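(Note on the no-op dmar_bus_dma_tag_set_domain() above: for DMAR-backed tags
there is no bounce zone to rebuild, and the allocation paths below already
honor tag->common.domain via malloc_domain() and friends, so updating the
common field in bus_dma_tag_set_domain() is all that is required.)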
@@ -345,7 +352,7 @@ dmar_bus_dma_tag_destroy(bus_dma_tag_t dmat1)
1) {
if (dmat == &dmat->ctx->ctx_tag)
dmar_free_ctx(dmat->ctx);
free(dmat->segments, M_DMAR_DMAMAP);
free_domain(dmat->segments, M_DMAR_DMAMAP);
free(dmat, M_DEVBUF);
dmat = parent;
} else
@@ -366,16 +373,18 @@ dmar_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "%s", __func__);
tag = (struct bus_dma_tag_dmar *)dmat;
map = malloc(sizeof(*map), M_DMAR_DMAMAP, M_NOWAIT | M_ZERO);
map = malloc_domain(sizeof(*map), M_DMAR_DMAMAP,
tag->common.domain, M_NOWAIT | M_ZERO);
if (map == NULL) {
*mapp = NULL;
return (ENOMEM);
}
if (tag->segments == NULL) {
tag->segments = malloc(sizeof(bus_dma_segment_t) *
tag->common.nsegments, M_DMAR_DMAMAP, M_NOWAIT);
tag->segments = malloc_domain(sizeof(bus_dma_segment_t) *
tag->common.nsegments, M_DMAR_DMAMAP,
tag->common.domain, M_NOWAIT);
if (tag->segments == NULL) {
free(map, M_DMAR_DMAMAP);
free_domain(map, M_DMAR_DMAMAP);
*mapp = NULL;
return (ENOMEM);
}
@@ -407,7 +416,7 @@ dmar_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map1)
return (EBUSY);
}
DMAR_DOMAIN_UNLOCK(domain);
free(map, M_DMAR_DMAMAP);
free_domain(map, M_DMAR_DMAMAP);
}
tag->map_count--;
return (0);
@@ -438,10 +447,11 @@ dmar_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
if (tag->common.maxsize < PAGE_SIZE &&
tag->common.alignment <= tag->common.maxsize &&
attr == VM_MEMATTR_DEFAULT) {
*vaddr = malloc(tag->common.maxsize, M_DEVBUF, mflags);
*vaddr = malloc_domain(tag->common.maxsize, M_DEVBUF,
tag->common.domain, mflags);
map->flags |= BUS_DMAMAP_DMAR_MALLOC;
} else {
*vaddr = (void *)kmem_alloc_attr(kernel_arena,
*vaddr = (void *)kmem_alloc_attr_domain(tag->common.domain,
tag->common.maxsize, mflags, 0ul, BUS_SPACE_MAXADDR,
attr);
map->flags |= BUS_DMAMAP_DMAR_KMEM_ALLOC;
@@ -464,7 +474,7 @@ dmar_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map1)
map = (struct bus_dmamap_dmar *)map1;
if ((map->flags & BUS_DMAMAP_DMAR_MALLOC) != 0) {
free(vaddr, M_DEVBUF);
free_domain(vaddr, M_DEVBUF);
map->flags &= ~BUS_DMAMAP_DMAR_MALLOC;
} else {
KASSERT((map->flags & BUS_DMAMAP_DMAR_KMEM_ALLOC) != 0,
@@ -832,6 +842,7 @@ dmar_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
struct bus_dma_impl bus_dma_dmar_impl = {
.tag_create = dmar_bus_dma_tag_create,
.tag_destroy = dmar_bus_dma_tag_destroy,
.tag_set_domain = dmar_bus_dma_tag_set_domain,
.map_create = dmar_bus_dmamap_create,
.map_destroy = dmar_bus_dmamap_destroy,
.mem_alloc = dmar_bus_dmamem_alloc,
@@ -842,7 +853,7 @@ struct bus_dma_impl bus_dma_dmar_impl = {
.map_waitok = dmar_bus_dmamap_waitok,
.map_complete = dmar_bus_dmamap_complete,
.map_unload = dmar_bus_dmamap_unload,
.map_sync = dmar_bus_dmamap_sync
.map_sync = dmar_bus_dmamap_sync,
};
static void


@@ -99,6 +99,7 @@ struct bounce_zone {
int total_bounced;
int total_deferred;
int map_count;
int domain;
bus_size_t alignment;
bus_addr_t lowaddr;
char zoneid[8];
@@ -150,6 +151,32 @@ static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
int flags);
static int
bounce_bus_dma_zone_setup(bus_dma_tag_t dmat)
{
struct bounce_zone *bz;
int error;
/* Must bounce */
if ((error = alloc_bounce_zone(dmat)) != 0)
return (error);
bz = dmat->bounce_zone;
if (ptoa(bz->total_bpages) < dmat->common.maxsize) {
int pages;
pages = atop(dmat->common.maxsize) - bz->total_bpages;
/* Add pages to our bounce pool */
if (alloc_bounce_pages(dmat, pages) < pages)
return (ENOMEM);
}
/* Performed initial allocation */
dmat->bounce_flags |= BUS_DMA_MIN_ALLOC_COMP;
return (0);
}
/*
* Allocate a device specific dma_tag.
*/
@@ -184,28 +211,9 @@ bounce_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
newtag->bounce_flags |= BUS_DMA_COULD_BOUNCE;
if (((newtag->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) &&
(flags & BUS_DMA_ALLOCNOW) != 0) {
struct bounce_zone *bz;
/* Must bounce */
if ((error = alloc_bounce_zone(newtag)) != 0) {
free(newtag, M_DEVBUF);
return (error);
}
bz = newtag->bounce_zone;
if (ptoa(bz->total_bpages) < maxsize) {
int pages;
pages = atop(maxsize) - bz->total_bpages;
/* Add pages to our bounce pool */
if (alloc_bounce_pages(newtag, pages) < pages)
error = ENOMEM;
}
/* Performed initial allocation */
newtag->bounce_flags |= BUS_DMA_MIN_ALLOC_COMP;
} else
(flags & BUS_DMA_ALLOCNOW) != 0)
error = bounce_bus_dma_zone_setup(newtag);
else
error = 0;
if (error != 0)
@@ -218,6 +226,23 @@ bounce_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
return (error);
}
/*
* Update the domain for the tag. We may need to reallocate the zone and
* bounce pages.
*/
static int
bounce_bus_dma_tag_set_domain(bus_dma_tag_t dmat)
{
KASSERT(dmat->map_count == 0,
("bounce_bus_dma_tag_set_domain: Domain set after use.\n"));
if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) == 0 ||
dmat->bounce_zone == NULL)
return (0);
dmat->bounce_flags &= ~BUS_DMA_MIN_ALLOC_COMP;
return (bounce_bus_dma_zone_setup(dmat));
}
static int
bounce_bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
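(The rerun above works because bounce_bus_dma_zone_setup() goes back through
alloc_bounce_zone(), which with this change also matches zones on domain:
after a domain switch the tag attaches to a zone whose bounce pages come from
the new domain, and clearing BUS_DMA_MIN_ALLOC_COMP forces that zone to be
topped up again.)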
@@ -237,7 +262,7 @@ bounce_bus_dma_tag_destroy(bus_dma_tag_t dmat)
atomic_subtract_int(&dmat->common.ref_count, 1);
if (dmat->common.ref_count == 0) {
if (dmat->segments != NULL)
free(dmat->segments, M_DEVBUF);
free_domain(dmat->segments, M_DEVBUF);
free(dmat, M_DEVBUF);
/*
* Last reference count, so
@@ -269,9 +294,9 @@ bounce_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
error = 0;
if (dmat->segments == NULL) {
dmat->segments = (bus_dma_segment_t *)malloc(
dmat->segments = (bus_dma_segment_t *)malloc_domain(
sizeof(bus_dma_segment_t) * dmat->common.nsegments,
M_DEVBUF, M_NOWAIT);
M_DEVBUF, dmat->common.domain, M_NOWAIT);
if (dmat->segments == NULL) {
CTR3(KTR_BUSDMA, "%s: tag %p error %d",
__func__, dmat, ENOMEM);
@@ -292,8 +317,8 @@ bounce_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
}
bz = dmat->bounce_zone;
*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
M_NOWAIT | M_ZERO);
*mapp = (bus_dmamap_t)malloc_domain(sizeof(**mapp), M_DEVBUF,
dmat->common.domain, M_NOWAIT | M_ZERO);
if (*mapp == NULL) {
CTR3(KTR_BUSDMA, "%s: tag %p error %d",
__func__, dmat, ENOMEM);
@@ -355,7 +380,7 @@ bounce_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
}
if (dmat->bounce_zone)
dmat->bounce_zone->map_count--;
free(map, M_DEVBUF);
free_domain(map, M_DEVBUF);
}
dmat->map_count--;
CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
@@ -386,9 +411,9 @@ bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
*mapp = NULL;
if (dmat->segments == NULL) {
dmat->segments = (bus_dma_segment_t *)malloc(
dmat->segments = (bus_dma_segment_t *)malloc_domain(
sizeof(bus_dma_segment_t) * dmat->common.nsegments,
M_DEVBUF, mflags);
M_DEVBUF, dmat->common.domain, mflags);
if (dmat->segments == NULL) {
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
__func__, dmat, dmat->common.flags, ENOMEM);
@@ -427,18 +452,19 @@ bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
(dmat->common.alignment <= dmat->common.maxsize) &&
dmat->common.lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
attr == VM_MEMATTR_DEFAULT) {
*vaddr = malloc(dmat->common.maxsize, M_DEVBUF, mflags);
*vaddr = malloc_domain(dmat->common.maxsize, M_DEVBUF,
dmat->common.domain, mflags);
} else if (dmat->common.nsegments >=
howmany(dmat->common.maxsize, MIN(dmat->common.maxsegsz, PAGE_SIZE)) &&
dmat->common.alignment <= PAGE_SIZE &&
(dmat->common.boundary % PAGE_SIZE) == 0) {
/* Page-based multi-segment allocations allowed */
*vaddr = (void *)kmem_alloc_attr(kernel_arena,
*vaddr = (void *)kmem_alloc_attr_domain(dmat->common.domain,
dmat->common.maxsize, mflags, 0ul, dmat->common.lowaddr,
attr);
dmat->bounce_flags |= BUS_DMA_KMEM_ALLOC;
} else {
*vaddr = (void *)kmem_alloc_contig(kernel_arena,
*vaddr = (void *)kmem_alloc_contig_domain(dmat->common.domain,
dmat->common.maxsize, mflags, 0ul, dmat->common.lowaddr,
dmat->common.alignment != 0 ? dmat->common.alignment : 1ul,
dmat->common.boundary, attr);
@@ -471,7 +497,7 @@ bounce_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
if (map != NULL)
panic("bus_dmamem_free: Invalid map freed\n");
if ((dmat->bounce_flags & BUS_DMA_KMEM_ALLOC) == 0)
free(vaddr, M_DEVBUF);
free_domain(vaddr, M_DEVBUF);
else
kmem_free(kernel_arena, (vm_offset_t)vaddr,
dmat->common.maxsize);
@@ -1041,7 +1067,8 @@ alloc_bounce_zone(bus_dma_tag_t dmat)
/* Check to see if we already have a suitable zone */
STAILQ_FOREACH(bz, &bounce_zone_list, links) {
if ((dmat->common.alignment <= bz->alignment) &&
(dmat->common.lowaddr >= bz->lowaddr)) {
(dmat->common.lowaddr >= bz->lowaddr) &&
(dmat->common.domain == bz->domain)) {
dmat->bounce_zone = bz;
return (0);
}
@@ -1058,6 +1085,7 @@ alloc_bounce_zone(bus_dma_tag_t dmat)
bz->lowaddr = dmat->common.lowaddr;
bz->alignment = MAX(dmat->common.alignment, PAGE_SIZE);
bz->map_count = 0;
bz->domain = dmat->common.domain;
snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
busdma_zonecount++;
snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
@@ -1103,6 +1131,10 @@ alloc_bounce_zone(bus_dma_tag_t dmat)
SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz),
SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
"alignment", CTLFLAG_RD, &bz->alignment, "");
SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
"domain", CTLFLAG_RD, &bz->domain, 0,
"memory domain");
return (0);
}
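(Assuming the existing hw.busdma sysctl tree that these per-zone nodes live
under, the new read-only leaf should surface as something like
hw.busdma.zone0.domain, making it easy to verify where a zone's bounce pages
reside.)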
@@ -1118,18 +1150,16 @@ alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
while (numpages > 0) {
struct bounce_page *bpage;
bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
M_NOWAIT | M_ZERO);
bpage = (struct bounce_page *)malloc_domain(sizeof(*bpage),
M_DEVBUF, dmat->common.domain, M_NOWAIT | M_ZERO);
if (bpage == NULL)
break;
bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
M_NOWAIT, 0ul,
bz->lowaddr,
PAGE_SIZE,
0);
bpage->vaddr = (vm_offset_t)contigmalloc_domain(PAGE_SIZE,
M_DEVBUF, dmat->common.domain, M_NOWAIT, 0ul,
bz->lowaddr, PAGE_SIZE, 0);
if (bpage->vaddr == 0) {
free(bpage, M_DEVBUF);
free_domain(bpage, M_DEVBUF);
break;
}
bpage->busaddr = pmap_kextract(bpage->vaddr);
@@ -1271,6 +1301,7 @@ busdma_swi(void)
struct bus_dma_impl bus_dma_bounce_impl = {
.tag_create = bounce_bus_dma_tag_create,
.tag_destroy = bounce_bus_dma_tag_destroy,
.tag_set_domain = bounce_bus_dma_tag_set_domain,
.map_create = bounce_bus_dmamap_create,
.map_destroy = bounce_bus_dmamap_destroy,
.mem_alloc = bounce_bus_dmamem_alloc,
@@ -1281,5 +1312,5 @@ struct bus_dma_impl bus_dma_bounce_impl = {
.map_waitok = bounce_bus_dmamap_waitok,
.map_complete = bounce_bus_dmamap_complete,
.map_unload = bounce_bus_dmamap_unload,
.map_sync = bounce_bus_dmamap_sync
.map_sync = bounce_bus_dmamap_sync,
};


@@ -43,8 +43,12 @@ __FBSDID("$FreeBSD$");
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/pmap.h>
#include <machine/bus.h>
#include <x86/include/busdma_impl.h>
@@ -180,12 +184,29 @@ common_bus_dma_tag_create(struct bus_dma_tag_common *parent,
common->filterarg = parent->filterarg;
common->parent = parent->parent;
}
common->domain = parent->domain;
atomic_add_int(&parent->ref_count, 1);
}
common->domain = vm_phys_domain_match(common->domain, 0ul,
common->lowaddr);
*dmat = common;
return (0);
}
int
bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain)
{
struct bus_dma_tag_common *tc;
tc = (struct bus_dma_tag_common *)dmat;
domain = vm_phys_domain_match(domain, 0ul, tc->lowaddr);
/* Only call the callback if it changes. */
if (domain == tc->domain)
return (0);
tc->domain = domain;
return (tc->impl->tag_set_domain(dmat));
}
/*
* Allocate a device specific dma_tag.
*/