From 85b46073242d4666e1c9037d52220422449f9584 Mon Sep 17 00:00:00 2001 From: John Baldwin Date: Wed, 5 Jan 2022 13:50:40 -0800 Subject: [PATCH] Deduplicate bus_dma bounce code. Move mostly duplicated code in various MD bus_dma backends to support bounce pages into sys/kern/subr_busdma_bounce.c. This file is currently #include'd into the backends rather than compiled standalone since it requires access to internal members of opaque bus_dma structures such as bus_dmamap_t and bus_dma_tag_t. Reviewed by: kib Sponsored by: Netflix Differential Revision: https://reviews.freebsd.org/D33684 --- sys/arm/arm/busdma_machdep.c | 351 +-------------------- sys/arm64/arm64/busdma_bounce.c | 351 +-------------------- sys/kern/subr_busdma_bounce.c | 438 +++++++++++++++++++++++++++ sys/powerpc/powerpc/busdma_machdep.c | 350 +-------------------- sys/riscv/riscv/busdma_bounce.c | 353 +-------------------- sys/x86/x86/busdma_bounce.c | 365 +--------------------- 6 files changed, 496 insertions(+), 1712 deletions(-) create mode 100644 sys/kern/subr_busdma_bounce.c diff --git a/sys/arm/arm/busdma_machdep.c b/sys/arm/arm/busdma_machdep.c index d2abb0d46dbd..758517323ff1 100644 --- a/sys/arm/arm/busdma_machdep.c +++ b/sys/arm/arm/busdma_machdep.c @@ -76,6 +76,7 @@ __FBSDID("$FreeBSD$"); #define BUS_DMA_COULD_BOUNCE (BUS_DMA_EXCL_BOUNCE | BUS_DMA_ALIGN_BOUNCE) #define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 +struct bounce_page; struct bounce_zone; struct bus_dma_tag { @@ -97,16 +98,6 @@ struct bus_dma_tag { struct bounce_zone *bounce_zone; }; -struct bounce_page { - vm_offset_t vaddr; /* kva of bounce buffer */ - bus_addr_t busaddr; /* Physical address */ - vm_offset_t datavaddr; /* kva of client data */ - vm_page_t datapage; /* physical page of client data */ - vm_offset_t dataoffs; /* page offset of client data */ - bus_size_t datacount; /* client data count */ - STAILQ_ENTRY(bounce_page) links; -}; - struct sync_list { vm_offset_t vaddr; /* kva of client data */ bus_addr_t paddr; /* physical address */ @@ -114,27 +105,6 @@ struct sync_list { bus_size_t datacount; /* client data count */ }; -struct bounce_zone { - STAILQ_ENTRY(bounce_zone) links; - STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; - int total_bpages; - int free_bpages; - int reserved_bpages; - int active_bpages; - int total_bounced; - int total_deferred; - int map_count; - bus_size_t alignment; - bus_addr_t lowaddr; - char zoneid[8]; - char lowaddrid[20]; - struct sysctl_ctx_list sysctl_tree; - struct sysctl_oid *sysctl_tree_top; -}; - -static struct mtx bounce_lock; -static int total_bpages; -static int busdma_zonecount; static uint32_t tags_total; static uint32_t maps_total; static uint32_t maps_dmamem; @@ -148,9 +118,6 @@ static counter_u64_t maploads_mbuf; static counter_u64_t maploads_physmem; #endif -static STAILQ_HEAD(, bounce_zone) bounce_zone_list; -static void *busdma_ih; - SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Busdma parameters"); SYSCTL_UINT(_hw_busdma, OID_AUTO, tags_total, CTLFLAG_RD, &tags_total, 0, @@ -175,11 +142,9 @@ SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_mbuf, CTLFLAG_RD, SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_physmem, CTLFLAG_RD, &maploads_physmem, "Number of load operations on physical buffers"); #endif -SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0, - "Total bounce pages"); struct bus_dmamap { - struct bp_list bpages; + STAILQ_HEAD(, bounce_page) bpages; int pagesneeded; int pagesreserved; bus_dma_tag_t dmat; @@ -196,23 +161,10 @@ struct 
bus_dmamap { struct sync_list slist[]; }; -static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; -static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; - -static void init_bounce_pages(void *dummy); -static int alloc_bounce_zone(bus_dma_tag_t dmat); -static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); -static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, - int commit); -static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, - vm_offset_t vaddr, bus_addr_t addr, bus_size_t size); -static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, pmap_t pmap, bus_dmamap_t map, void *buf, bus_size_t buflen, int flags); static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, bus_size_t buflen, int flags); -static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, - int flags); static void dma_preread_safe(vm_offset_t va, vm_paddr_t pa, vm_size_t size); static void dma_dcache_sync(struct sync_list *sl, bus_dmasync_op_t op); @@ -220,7 +172,14 @@ static busdma_bufalloc_t coherent_allocator; /* Cache of coherent buffers */ static busdma_bufalloc_t standard_allocator; /* Cache of standard buffers */ MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata"); -MALLOC_DEFINE(M_BOUNCE, "bounce", "busdma bounce pages"); + +#define dmat_alignment(dmat) ((dmat)->alignment) +#define dmat_flags(dmat) ((dmat)->flags) +#define dmat_lowaddr(dmat) ((dmat)->lowaddr) +#define dmat_lockfunc(dmat) ((dmat)->lockfunc) +#define dmat_lockfuncarg(dmat) ((dmat)->lockfuncarg) + +#include "../../kern/subr_busdma_bounce.c" static void busdma_init(void *dummy) @@ -975,31 +934,6 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, pmap_t pmap, bus_dmamap_t map, } } -static int -_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags) -{ - - /* Reserve Necessary Bounce Pages */ - mtx_lock(&bounce_lock); - if (flags & BUS_DMA_NOWAIT) { - if (reserve_bounce_pages(dmat, map, 0) != 0) { - map->pagesneeded = 0; - mtx_unlock(&bounce_lock); - return (ENOMEM); - } - } else { - if (reserve_bounce_pages(dmat, map, 1) != 0) { - /* Queue us for resources */ - STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links); - mtx_unlock(&bounce_lock); - return (EINPROGRESS); - } - } - mtx_unlock(&bounce_lock); - - return (0); -} - /* * Add a single contiguous physical range to the segment list. 
*/ @@ -1506,268 +1440,3 @@ bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) dma_dcache_sync(sl, op); } } - -static void -init_bounce_pages(void *dummy __unused) -{ - - total_bpages = 0; - STAILQ_INIT(&bounce_zone_list); - STAILQ_INIT(&bounce_map_waitinglist); - STAILQ_INIT(&bounce_map_callbacklist); - mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF); -} -SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL); - -static struct sysctl_ctx_list * -busdma_sysctl_tree(struct bounce_zone *bz) -{ - - return (&bz->sysctl_tree); -} - -static struct sysctl_oid * -busdma_sysctl_tree_top(struct bounce_zone *bz) -{ - - return (bz->sysctl_tree_top); -} - -static int -alloc_bounce_zone(bus_dma_tag_t dmat) -{ - struct bounce_zone *bz; - - /* Check to see if we already have a suitable zone */ - STAILQ_FOREACH(bz, &bounce_zone_list, links) { - if ((dmat->alignment <= bz->alignment) && - (dmat->lowaddr >= bz->lowaddr)) { - dmat->bounce_zone = bz; - return (0); - } - } - - if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_BUSDMA, - M_NOWAIT | M_ZERO)) == NULL) - return (ENOMEM); - - STAILQ_INIT(&bz->bounce_page_list); - bz->free_bpages = 0; - bz->reserved_bpages = 0; - bz->active_bpages = 0; - bz->lowaddr = dmat->lowaddr; - bz->alignment = MAX(dmat->alignment, PAGE_SIZE); - bz->map_count = 0; - snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount); - busdma_zonecount++; - snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr); - STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links); - dmat->bounce_zone = bz; - - sysctl_ctx_init(&bz->sysctl_tree); - bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree, - SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid, - CTLFLAG_RD | CTLFLAG_MPSAFE, 0, ""); - if (bz->sysctl_tree_top == NULL) { - sysctl_ctx_free(&bz->sysctl_tree); - return (0); /* XXX error code? 
*/ - } - - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0, - "Total bounce pages"); - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0, - "Free bounce pages"); - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0, - "Reserved bounce pages"); - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0, - "Active bounce pages"); - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0, - "Total bounce requests (pages bounced)"); - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0, - "Total bounce requests that were deferred"); - SYSCTL_ADD_STRING(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, ""); - SYSCTL_ADD_ULONG(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "alignment", CTLFLAG_RD, &bz->alignment, ""); - - return (0); -} - -static int -alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) -{ - struct bounce_zone *bz; - int count; - - bz = dmat->bounce_zone; - count = 0; - while (numpages > 0) { - struct bounce_page *bpage; - - bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_BUSDMA, - M_NOWAIT | M_ZERO); - - if (bpage == NULL) - break; - bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_BOUNCE, - M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0); - if (bpage->vaddr == 0) { - free(bpage, M_BUSDMA); - break; - } - bpage->busaddr = pmap_kextract(bpage->vaddr); - mtx_lock(&bounce_lock); - STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links); - total_bpages++; - bz->total_bpages++; - bz->free_bpages++; - mtx_unlock(&bounce_lock); - count++; - numpages--; - } - return (count); -} - -static int -reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) -{ - struct bounce_zone *bz; - int pages; - - mtx_assert(&bounce_lock, MA_OWNED); - bz = dmat->bounce_zone; - pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved); - if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages)) - return (map->pagesneeded - (map->pagesreserved + pages)); - bz->free_bpages -= pages; - bz->reserved_bpages += pages; - map->pagesreserved += pages; - pages = map->pagesneeded - map->pagesreserved; - - return (pages); -} - -static bus_addr_t -add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, - bus_addr_t addr, bus_size_t size) -{ - struct bounce_zone *bz; - struct bounce_page *bpage; - - KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag")); - KASSERT(map != NULL, ("add_bounce_page: bad map %p", map)); - - bz = dmat->bounce_zone; - if (map->pagesneeded == 0) - panic("add_bounce_page: map doesn't need any pages"); - map->pagesneeded--; - - if (map->pagesreserved == 0) - panic("add_bounce_page: map doesn't need any pages"); - map->pagesreserved--; - - mtx_lock(&bounce_lock); - bpage = STAILQ_FIRST(&bz->bounce_page_list); - if (bpage == NULL) - panic("add_bounce_page: free page list is empty"); - - STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); - 
bz->reserved_bpages--; - bz->active_bpages++; - mtx_unlock(&bounce_lock); - - if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { - /* Page offset needs to be preserved. */ - bpage->vaddr |= addr & PAGE_MASK; - bpage->busaddr |= addr & PAGE_MASK; - } - bpage->datavaddr = vaddr; - bpage->datapage = PHYS_TO_VM_PAGE(addr); - bpage->dataoffs = addr & PAGE_MASK; - bpage->datacount = size; - STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); - return (bpage->busaddr); -} - -static void -free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) -{ - struct bus_dmamap *map; - struct bounce_zone *bz; - bool schedule_swi; - - bz = dmat->bounce_zone; - bpage->datavaddr = 0; - bpage->datacount = 0; - if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { - /* - * Reset the bounce page to start at offset 0. Other uses - * of this bounce page may need to store a full page of - * data and/or assume it starts on a page boundary. - */ - bpage->vaddr &= ~PAGE_MASK; - bpage->busaddr &= ~PAGE_MASK; - } - - schedule_swi = false; - mtx_lock(&bounce_lock); - STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); - bz->free_bpages++; - bz->active_bpages--; - if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { - if (reserve_bounce_pages(map->dmat, map, 1) == 0) { - STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); - STAILQ_INSERT_TAIL(&bounce_map_callbacklist, - map, links); - bz->total_deferred++; - schedule_swi = true; - } - } - mtx_unlock(&bounce_lock); - if (schedule_swi) - swi_sched(busdma_ih, 0); -} - -static void -busdma_swi(void *dummy __unused) -{ - bus_dma_tag_t dmat; - struct bus_dmamap *map; - - mtx_lock(&bounce_lock); - while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { - STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); - mtx_unlock(&bounce_lock); - dmat = map->dmat; - dmat->lockfunc(dmat->lockfuncarg, BUS_DMA_LOCK); - bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback, - map->callback_arg, BUS_DMA_WAITOK); - dmat->lockfunc(dmat->lockfuncarg, BUS_DMA_UNLOCK); - mtx_lock(&bounce_lock); - } - mtx_unlock(&bounce_lock); -} - -static void -start_busdma_swi(void *dummy __unused) -{ - if (swi_add(NULL, "busdma", busdma_swi, NULL, SWI_BUSDMA, INTR_MPSAFE, - &busdma_ih)) - panic("died while creating busdma swi ithread"); -} -SYSINIT(start_busdma_swi, SI_SUB_SOFTINTR, SI_ORDER_ANY, start_busdma_swi, - NULL); diff --git a/sys/arm64/arm64/busdma_bounce.c b/sys/arm64/arm64/busdma_bounce.c index 701411ba5b74..0f17fdb9bffc 100644 --- a/sys/arm64/arm64/busdma_bounce.c +++ b/sys/arm64/arm64/busdma_bounce.c @@ -68,6 +68,7 @@ enum { BF_COHERENT = 0x10, }; +struct bounce_page; struct bounce_zone; struct bus_dma_tag { @@ -80,44 +81,8 @@ struct bus_dma_tag { struct bounce_zone *bounce_zone; }; -struct bounce_page { - vm_offset_t vaddr; /* kva of bounce buffer */ - bus_addr_t busaddr; /* Physical address */ - vm_offset_t datavaddr; /* kva of client data */ - vm_page_t datapage; /* physical page of client data */ - vm_offset_t dataoffs; /* page offset of client data */ - bus_size_t datacount; /* client data count */ - STAILQ_ENTRY(bounce_page) links; -}; - -struct bounce_zone { - STAILQ_ENTRY(bounce_zone) links; - STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; - int total_bpages; - int free_bpages; - int reserved_bpages; - int active_bpages; - int total_bounced; - int total_deferred; - int map_count; - bus_size_t alignment; - bus_addr_t lowaddr; - char zoneid[8]; - char lowaddrid[20]; - struct sysctl_ctx_list sysctl_tree; - struct sysctl_oid *sysctl_tree_top; -}; - -static struct mtx 
bounce_lock; -static int total_bpages; -static int busdma_zonecount; -static STAILQ_HEAD(, bounce_zone) bounce_zone_list; -static void *busdma_ih; - static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Busdma parameters"); -SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0, - "Total bounce pages"); struct sync_list { vm_offset_t vaddr; /* kva of client data */ @@ -127,7 +92,7 @@ struct sync_list { }; struct bus_dmamap { - struct bp_list bpages; + STAILQ_HEAD(, bounce_page) bpages; int pagesneeded; int pagesreserved; bus_dma_tag_t dmat; @@ -143,17 +108,6 @@ struct bus_dmamap { struct sync_list slist[]; }; -static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; -static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; - -static void init_bounce_pages(void *dummy); -static int alloc_bounce_zone(bus_dma_tag_t dmat); -static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); -static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, - int commit); -static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, - vm_offset_t vaddr, bus_addr_t addr, bus_size_t size); -static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr); static bool _bus_dmamap_pagesneeded(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, bus_size_t buflen, int *pagesneeded); @@ -161,8 +115,16 @@ static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap, void *buf, bus_size_t buflen, int flags); static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, bus_size_t buflen, int flags); -static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, - int flags); + +static MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata"); + +#define dmat_alignment(dmat) ((dmat)->common.alignment) +#define dmat_flags(dmat) ((dmat)->common.flags) +#define dmat_lowaddr(dmat) ((dmat)->common.lowaddr) +#define dmat_lockfunc(dmat) ((dmat)->common.lockfunc) +#define dmat_lockfuncarg(dmat) ((dmat)->common.lockfuncarg) + +#include "../../kern/subr_busdma_bounce.c" /* * Return true if the DMA should bounce because the start or end does not fall @@ -736,30 +698,6 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap, } } -static int -_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags) -{ - - /* Reserve Necessary Bounce Pages */ - mtx_lock(&bounce_lock); - if (flags & BUS_DMA_NOWAIT) { - if (reserve_bounce_pages(dmat, map, 0) != 0) { - mtx_unlock(&bounce_lock); - return (ENOMEM); - } - } else { - if (reserve_bounce_pages(dmat, map, 1) != 0) { - /* Queue us for resources */ - STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links); - mtx_unlock(&bounce_lock); - return (EINPROGRESS); - } - } - mtx_unlock(&bounce_lock); - - return (0); -} - /* * Add a single contiguous physical range to the segment list. 
*/ @@ -1211,271 +1149,6 @@ bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, } } -static void -init_bounce_pages(void *dummy __unused) -{ - - total_bpages = 0; - STAILQ_INIT(&bounce_zone_list); - STAILQ_INIT(&bounce_map_waitinglist); - STAILQ_INIT(&bounce_map_callbacklist); - mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF); -} -SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL); - -static struct sysctl_ctx_list * -busdma_sysctl_tree(struct bounce_zone *bz) -{ - - return (&bz->sysctl_tree); -} - -static struct sysctl_oid * -busdma_sysctl_tree_top(struct bounce_zone *bz) -{ - - return (bz->sysctl_tree_top); -} - -static int -alloc_bounce_zone(bus_dma_tag_t dmat) -{ - struct bounce_zone *bz; - - /* Check to see if we already have a suitable zone */ - STAILQ_FOREACH(bz, &bounce_zone_list, links) { - if ((dmat->common.alignment <= bz->alignment) && - (dmat->common.lowaddr >= bz->lowaddr)) { - dmat->bounce_zone = bz; - return (0); - } - } - - if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF, - M_NOWAIT | M_ZERO)) == NULL) - return (ENOMEM); - - STAILQ_INIT(&bz->bounce_page_list); - bz->free_bpages = 0; - bz->reserved_bpages = 0; - bz->active_bpages = 0; - bz->lowaddr = dmat->common.lowaddr; - bz->alignment = MAX(dmat->common.alignment, PAGE_SIZE); - bz->map_count = 0; - snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount); - busdma_zonecount++; - snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr); - STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links); - dmat->bounce_zone = bz; - - sysctl_ctx_init(&bz->sysctl_tree); - bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree, - SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid, - CTLFLAG_RD | CTLFLAG_MPSAFE, 0, ""); - if (bz->sysctl_tree_top == NULL) { - sysctl_ctx_free(&bz->sysctl_tree); - return (0); /* XXX error code? 
*/ - } - - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0, - "Total bounce pages"); - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0, - "Free bounce pages"); - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0, - "Reserved bounce pages"); - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0, - "Active bounce pages"); - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0, - "Total bounce requests"); - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0, - "Total bounce requests that were deferred"); - SYSCTL_ADD_STRING(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, ""); - SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "alignment", CTLFLAG_RD, &bz->alignment, ""); - - return (0); -} - -static int -alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) -{ - struct bounce_zone *bz; - int count; - - bz = dmat->bounce_zone; - count = 0; - while (numpages > 0) { - struct bounce_page *bpage; - - bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF, - M_NOWAIT | M_ZERO); - - if (bpage == NULL) - break; - bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF, - M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0); - if (bpage->vaddr == 0) { - free(bpage, M_DEVBUF); - break; - } - bpage->busaddr = pmap_kextract(bpage->vaddr); - mtx_lock(&bounce_lock); - STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links); - total_bpages++; - bz->total_bpages++; - bz->free_bpages++; - mtx_unlock(&bounce_lock); - count++; - numpages--; - } - return (count); -} - -static int -reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) -{ - struct bounce_zone *bz; - int pages; - - mtx_assert(&bounce_lock, MA_OWNED); - bz = dmat->bounce_zone; - pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved); - if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages)) - return (map->pagesneeded - (map->pagesreserved + pages)); - bz->free_bpages -= pages; - bz->reserved_bpages += pages; - map->pagesreserved += pages; - pages = map->pagesneeded - map->pagesreserved; - - return (pages); -} - -static bus_addr_t -add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, - bus_addr_t addr, bus_size_t size) -{ - struct bounce_zone *bz; - struct bounce_page *bpage; - - KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag")); - - bz = dmat->bounce_zone; - if (map->pagesneeded == 0) - panic("add_bounce_page: map doesn't need any pages"); - map->pagesneeded--; - - if (map->pagesreserved == 0) - panic("add_bounce_page: map doesn't need any pages"); - map->pagesreserved--; - - mtx_lock(&bounce_lock); - bpage = STAILQ_FIRST(&bz->bounce_page_list); - if (bpage == NULL) - panic("add_bounce_page: free page list is empty"); - - STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); - bz->reserved_bpages--; - bz->active_bpages++; - mtx_unlock(&bounce_lock); - - if 
(dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) { - /* Page offset needs to be preserved. */ - bpage->vaddr |= addr & PAGE_MASK; - bpage->busaddr |= addr & PAGE_MASK; - } - bpage->datavaddr = vaddr; - bpage->datapage = PHYS_TO_VM_PAGE(addr); - bpage->dataoffs = addr & PAGE_MASK; - bpage->datacount = size; - STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); - return (bpage->busaddr); -} - -static void -free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) -{ - struct bus_dmamap *map; - struct bounce_zone *bz; - bool schedule_swi; - - bz = dmat->bounce_zone; - bpage->datavaddr = 0; - bpage->datacount = 0; - if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) { - /* - * Reset the bounce page to start at offset 0. Other uses - * of this bounce page may need to store a full page of - * data and/or assume it starts on a page boundary. - */ - bpage->vaddr &= ~PAGE_MASK; - bpage->busaddr &= ~PAGE_MASK; - } - - schedule_swi = false; - mtx_lock(&bounce_lock); - STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); - bz->free_bpages++; - bz->active_bpages--; - if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { - if (reserve_bounce_pages(map->dmat, map, 1) == 0) { - STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); - STAILQ_INSERT_TAIL(&bounce_map_callbacklist, - map, links); - bz->total_deferred++; - schedule_swi = true; - } - } - mtx_unlock(&bounce_lock); - if (schedule_swi) - swi_sched(busdma_ih, 0); -} - -static void -busdma_swi(void *dummy __unused) -{ - bus_dma_tag_t dmat; - struct bus_dmamap *map; - - mtx_lock(&bounce_lock); - while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { - STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); - mtx_unlock(&bounce_lock); - dmat = map->dmat; - (dmat->common.lockfunc)(dmat->common.lockfuncarg, BUS_DMA_LOCK); - bus_dmamap_load_mem(map->dmat, map, &map->mem, - map->callback, map->callback_arg, BUS_DMA_WAITOK); - (dmat->common.lockfunc)(dmat->common.lockfuncarg, - BUS_DMA_UNLOCK); - mtx_lock(&bounce_lock); - } - mtx_unlock(&bounce_lock); -} - -static void -start_busdma_swi(void *dummy __unused) -{ - if (swi_add(NULL, "busdma", busdma_swi, NULL, SWI_BUSDMA, INTR_MPSAFE, - &busdma_ih)) - panic("died while creating busdma swi ithread"); -} -SYSINIT(start_busdma_swi, SI_SUB_SOFTINTR, SI_ORDER_ANY, start_busdma_swi, - NULL); - struct bus_dma_impl bus_dma_bounce_impl = { .tag_create = bounce_bus_dma_tag_create, .tag_destroy = bounce_bus_dma_tag_destroy, diff --git a/sys/kern/subr_busdma_bounce.c b/sys/kern/subr_busdma_bounce.c new file mode 100644 index 000000000000..e7a387ffd71d --- /dev/null +++ b/sys/kern/subr_busdma_bounce.c @@ -0,0 +1,438 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * + * Copyright (c) 1997, 1998 Justin T. Gibbs. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification, immediately at the beginning of the file. + * 2. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Common code for managing bounce pages for bus_dma backends.  As
+ * this code currently assumes it can access internal members of
+ * opaque types like bus_dma_tag_t and bus_dmamap_t, it is #include'd
+ * in backends rather than being compiled standalone.
+ *
+ * Prerequisites:
+ *
+ * - M_BUSDMA malloc type
+ * - struct bus_dmamap
+ * - hw_busdma SYSCTL_NODE
+ * - macros to access the following fields of bus_dma_tag_t:
+ *   - dmat_alignment()
+ *   - dmat_flags()
+ *   - dmat_lowaddr()
+ *   - dmat_lockfunc()
+ *   - dmat_lockfuncarg()
+ *
+ * An illustrative sketch of this backend glue appears below, after
+ * init_bounce_pages().
+ */
+
+struct bounce_page {
+	vm_offset_t	vaddr;		/* kva of bounce buffer */
+	bus_addr_t	busaddr;	/* Physical address */
+	vm_offset_t	datavaddr;	/* kva of client data */
+#if defined(__amd64__) || defined(__i386__)
+	vm_page_t	datapage[2];	/* physical page(s) of client data */
+#else
+	vm_page_t	datapage;	/* physical page of client data */
+#endif
+	vm_offset_t	dataoffs;	/* page offset of client data */
+	bus_size_t	datacount;	/* client data count */
+	STAILQ_ENTRY(bounce_page) links;
+};
+
+struct bounce_zone {
+	STAILQ_ENTRY(bounce_zone) links;
+	STAILQ_HEAD(, bounce_page) bounce_page_list;
+	int		total_bpages;
+	int		free_bpages;
+	int		reserved_bpages;
+	int		active_bpages;
+	int		total_bounced;
+	int		total_deferred;
+	int		map_count;
+#ifdef dmat_domain
+	int		domain;
+#endif
+	bus_size_t	alignment;
+	bus_addr_t	lowaddr;
+	char		zoneid[8];
+	char		lowaddrid[20];
+	struct sysctl_ctx_list sysctl_tree;
+	struct sysctl_oid *sysctl_tree_top;
+};
+
+static struct mtx bounce_lock;
+static int total_bpages;
+static int busdma_zonecount;
+
+static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
+static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
+static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
+static void *busdma_ih;
+
+static MALLOC_DEFINE(M_BOUNCE, "bounce", "busdma bounce pages");
+
+SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
+    "Total bounce pages");
+
+static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
+    int commit);
+
+static int
+_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
+{
+
+	/* Reserve Necessary Bounce Pages */
+	mtx_lock(&bounce_lock);
+	if (flags & BUS_DMA_NOWAIT) {
+		if (reserve_bounce_pages(dmat, map, 0) != 0) {
+			map->pagesneeded = 0;
+			mtx_unlock(&bounce_lock);
+			return (ENOMEM);
+		}
+	} else {
+		if (reserve_bounce_pages(dmat, map, 1) != 0) {
+			/* Queue us for resources */
+			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
+			mtx_unlock(&bounce_lock);
+			return (EINPROGRESS);
+		}
+	}
+	mtx_unlock(&bounce_lock);
+
+	return (0);
+}
+
+static void
+init_bounce_pages(void *dummy __unused)
+{
+
+	total_bpages = 0;
+	STAILQ_INIT(&bounce_zone_list);
+	STAILQ_INIT(&bounce_map_waitinglist);
+	STAILQ_INIT(&bounce_map_callbacklist);
+	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
+}
+SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);
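+
+/*
+ * Illustrative sketch only (no such backend exists): a hypothetical
+ * backend with a flat tag layout, modeled on the arm and powerpc
+ * backends in this commit, supplies roughly this glue before
+ * including this file:
+ *
+ *	static MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");
+ *
+ *	#define	dmat_alignment(dmat)	((dmat)->alignment)
+ *	#define	dmat_flags(dmat)	((dmat)->flags)
+ *	#define	dmat_lowaddr(dmat)	((dmat)->lowaddr)
+ *	#define	dmat_lockfunc(dmat)	((dmat)->lockfunc)
+ *	#define	dmat_lockfuncarg(dmat)	((dmat)->lockfuncarg)
+ *
+ *	#include "../../kern/subr_busdma_bounce.c"
+ *
+ * Backends whose tag embeds a struct bus_dma_tag_common (arm64,
+ * riscv, x86) point these macros at the corresponding common.*
+ * fields instead, and x86 additionally defines dmat_domain(), which
+ * switches this file to the NUMA-aware malloc_domainset() and
+ * contigmalloc_domainset() allocation paths.
+ */
+
+static struct sysctl_ctx_list *
+busdma_sysctl_tree(struct bounce_zone 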
*bz) +{ + + return (&bz->sysctl_tree); +} + +static struct sysctl_oid * +busdma_sysctl_tree_top(struct bounce_zone *bz) +{ + + return (bz->sysctl_tree_top); +} + +static int +alloc_bounce_zone(bus_dma_tag_t dmat) +{ + struct bounce_zone *bz; + + /* Check to see if we already have a suitable zone */ + STAILQ_FOREACH(bz, &bounce_zone_list, links) { + if ((dmat_alignment(dmat) <= bz->alignment) && +#ifdef dmat_domain + dmat_domain(dmat) == bz->domain && +#endif + (dmat_lowaddr(dmat) >= bz->lowaddr)) { + dmat->bounce_zone = bz; + return (0); + } + } + + if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_BUSDMA, + M_NOWAIT | M_ZERO)) == NULL) + return (ENOMEM); + + STAILQ_INIT(&bz->bounce_page_list); + bz->free_bpages = 0; + bz->reserved_bpages = 0; + bz->active_bpages = 0; + bz->lowaddr = dmat_lowaddr(dmat); + bz->alignment = MAX(dmat_alignment(dmat), PAGE_SIZE); + bz->map_count = 0; +#ifdef dmat_domain + bz->domain = dmat_domain(dmat); +#endif + snprintf(bz->zoneid, sizeof(bz->zoneid), "zone%d", busdma_zonecount); + busdma_zonecount++; + snprintf(bz->lowaddrid, sizeof(bz->lowaddrid), "%#jx", + (uintmax_t)bz->lowaddr); + STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links); + dmat->bounce_zone = bz; + + sysctl_ctx_init(&bz->sysctl_tree); + bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree, + SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid, + CTLFLAG_RD | CTLFLAG_MPSAFE, 0, ""); + if (bz->sysctl_tree_top == NULL) { + sysctl_ctx_free(&bz->sysctl_tree); + return (0); /* XXX error code? */ + } + + SYSCTL_ADD_INT(busdma_sysctl_tree(bz), + SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, + "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0, + "Total bounce pages"); + SYSCTL_ADD_INT(busdma_sysctl_tree(bz), + SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, + "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0, + "Free bounce pages"); + SYSCTL_ADD_INT(busdma_sysctl_tree(bz), + SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, + "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0, + "Reserved bounce pages"); + SYSCTL_ADD_INT(busdma_sysctl_tree(bz), + SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, + "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0, + "Active bounce pages"); + SYSCTL_ADD_INT(busdma_sysctl_tree(bz), + SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, + "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0, + "Total bounce requests (pages bounced)"); + SYSCTL_ADD_INT(busdma_sysctl_tree(bz), + SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, + "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0, + "Total bounce requests that were deferred"); + SYSCTL_ADD_STRING(busdma_sysctl_tree(bz), + SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, + "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, ""); + SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz), + SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, + "alignment", CTLFLAG_RD, &bz->alignment, ""); +#ifdef dmat_domain + SYSCTL_ADD_INT(busdma_sysctl_tree(bz), + SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, + "domain", CTLFLAG_RD, &bz->domain, 0, + "memory domain"); +#endif + + return (0); +} + +static int +alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) +{ + struct bounce_zone *bz; + int count; + + bz = dmat->bounce_zone; + count = 0; + while (numpages > 0) { + struct bounce_page *bpage; + +#ifdef dmat_domain + bpage = malloc_domainset(sizeof(*bpage), M_BUSDMA, + DOMAINSET_PREF(bz->domain), M_NOWAIT | M_ZERO); +#else + bpage = malloc(sizeof(*bpage), M_BUSDMA, M_NOWAIT | M_ZERO); +#endif + + if 
(bpage == NULL) + break; +#ifdef dmat_domain + bpage->vaddr = (vm_offset_t)contigmalloc_domainset(PAGE_SIZE, + M_BOUNCE, DOMAINSET_PREF(bz->domain), M_NOWAIT, + 0ul, bz->lowaddr, PAGE_SIZE, 0); +#else + bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_BOUNCE, + M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0); +#endif + if (bpage->vaddr == 0) { + free(bpage, M_BUSDMA); + break; + } + bpage->busaddr = pmap_kextract(bpage->vaddr); + mtx_lock(&bounce_lock); + STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links); + total_bpages++; + bz->total_bpages++; + bz->free_bpages++; + mtx_unlock(&bounce_lock); + count++; + numpages--; + } + return (count); +} + +static int +reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) +{ + struct bounce_zone *bz; + int pages; + + mtx_assert(&bounce_lock, MA_OWNED); + bz = dmat->bounce_zone; + pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved); + if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages)) + return (map->pagesneeded - (map->pagesreserved + pages)); + bz->free_bpages -= pages; + bz->reserved_bpages += pages; + map->pagesreserved += pages; + pages = map->pagesneeded - map->pagesreserved; + + return (pages); +} + +#if defined(__amd64__) || defined(__i386__) +static bus_addr_t +add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, + vm_paddr_t addr1, vm_paddr_t addr2, bus_size_t size) +#else +static bus_addr_t +add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, + bus_addr_t addr, bus_size_t size) +#endif +{ + struct bounce_zone *bz; + struct bounce_page *bpage; + + KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag")); + KASSERT(map != NULL, ("add_bounce_page: bad map %p", map)); +#if defined(__amd64__) || defined(__i386__) + KASSERT(map != &nobounce_dmamap, ("add_bounce_page: bad map %p", map)); +#endif +#ifdef __riscv + KASSERT((map->flags & DMAMAP_COULD_BOUNCE) != 0, + ("add_bounce_page: bad map %p", map)); +#endif + + bz = dmat->bounce_zone; + if (map->pagesneeded == 0) + panic("add_bounce_page: map doesn't need any pages"); + map->pagesneeded--; + + if (map->pagesreserved == 0) + panic("add_bounce_page: map doesn't need any pages"); + map->pagesreserved--; + + mtx_lock(&bounce_lock); + bpage = STAILQ_FIRST(&bz->bounce_page_list); + if (bpage == NULL) + panic("add_bounce_page: free page list is empty"); + + STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); + bz->reserved_bpages--; + bz->active_bpages++; + mtx_unlock(&bounce_lock); + + if (dmat_flags(dmat) & BUS_DMA_KEEP_PG_OFFSET) { + /* Page offset needs to be preserved. 
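A tag sets BUS_DMA_KEEP_PG_OFFSET when its device depends on the buffer's offset within a page, so the bounced copy must occupy the same page offset as the client data. 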
*/ +#if defined(__amd64__) || defined(__i386__) + bpage->vaddr |= addr1 & PAGE_MASK; + bpage->busaddr |= addr1 & PAGE_MASK; + KASSERT(addr2 == 0, + ("Trying to bounce multiple pages with BUS_DMA_KEEP_PG_OFFSET")); +#else + bpage->vaddr |= addr & PAGE_MASK; + bpage->busaddr |= addr & PAGE_MASK; +#endif + } + bpage->datavaddr = vaddr; +#if defined(__amd64__) || defined(__i386__) + bpage->datapage[0] = PHYS_TO_VM_PAGE(addr1); + KASSERT((addr2 & PAGE_MASK) == 0, ("Second page is not aligned")); + bpage->datapage[1] = PHYS_TO_VM_PAGE(addr2); + bpage->dataoffs = addr1 & PAGE_MASK; +#else + bpage->datapage = PHYS_TO_VM_PAGE(addr); + bpage->dataoffs = addr & PAGE_MASK; +#endif + bpage->datacount = size; + STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); + return (bpage->busaddr); +} + +static void +free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) +{ + struct bus_dmamap *map; + struct bounce_zone *bz; + bool schedule_swi; + + bz = dmat->bounce_zone; + bpage->datavaddr = 0; + bpage->datacount = 0; + if (dmat_flags(dmat) & BUS_DMA_KEEP_PG_OFFSET) { + /* + * Reset the bounce page to start at offset 0. Other uses + * of this bounce page may need to store a full page of + * data and/or assume it starts on a page boundary. + */ + bpage->vaddr &= ~PAGE_MASK; + bpage->busaddr &= ~PAGE_MASK; + } + + schedule_swi = false; + mtx_lock(&bounce_lock); + STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); + bz->free_bpages++; + bz->active_bpages--; + if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { + if (reserve_bounce_pages(map->dmat, map, 1) == 0) { + STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); + STAILQ_INSERT_TAIL(&bounce_map_callbacklist, + map, links); + bz->total_deferred++; + schedule_swi = true; + } + } + mtx_unlock(&bounce_lock); + if (schedule_swi) + swi_sched(busdma_ih, 0); +} + +static void +busdma_swi(void *dummy __unused) +{ + bus_dma_tag_t dmat; + struct bus_dmamap *map; + + mtx_lock(&bounce_lock); + while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { + STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); + mtx_unlock(&bounce_lock); + dmat = map->dmat; + dmat_lockfunc(dmat)(dmat_lockfuncarg(dmat), BUS_DMA_LOCK); + bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback, + map->callback_arg, BUS_DMA_WAITOK); + dmat_lockfunc(dmat)(dmat_lockfuncarg(dmat), BUS_DMA_UNLOCK); + mtx_lock(&bounce_lock); + } + mtx_unlock(&bounce_lock); +} + +static void +start_busdma_swi(void *dummy __unused) +{ + if (swi_add(NULL, "busdma", busdma_swi, NULL, SWI_BUSDMA, INTR_MPSAFE, + &busdma_ih)) + panic("died while creating busdma swi ithread"); +} +SYSINIT(start_busdma_swi, SI_SUB_SOFTINTR, SI_ORDER_ANY, start_busdma_swi, + NULL); diff --git a/sys/powerpc/powerpc/busdma_machdep.c b/sys/powerpc/powerpc/busdma_machdep.c index 95eb98a5cbab..ea3ab3dc7315 100644 --- a/sys/powerpc/powerpc/busdma_machdep.c +++ b/sys/powerpc/powerpc/busdma_machdep.c @@ -62,6 +62,7 @@ __FBSDID("$FreeBSD$"); #define MAX_BPAGES MIN(8192, physmem/40) +struct bounce_page; struct bounce_zone; struct bus_dma_tag { @@ -85,47 +86,11 @@ struct bus_dma_tag { void *iommu_cookie; }; -struct bounce_page { - vm_offset_t vaddr; /* kva of bounce buffer */ - bus_addr_t busaddr; /* Physical address */ - vm_offset_t datavaddr; /* kva of client data */ - vm_page_t datapage; /* physical page of client data */ - vm_offset_t dataoffs; /* page offset of client data */ - bus_size_t datacount; /* client data count */ - STAILQ_ENTRY(bounce_page) links; -}; - -struct bounce_zone { - STAILQ_ENTRY(bounce_zone) links; 
- STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; - int total_bpages; - int free_bpages; - int reserved_bpages; - int active_bpages; - int total_bounced; - int total_deferred; - int map_count; - bus_size_t alignment; - bus_addr_t lowaddr; - char zoneid[8]; - char lowaddrid[20]; - struct sysctl_ctx_list sysctl_tree; - struct sysctl_oid *sysctl_tree_top; -}; - -static struct mtx bounce_lock; -static int total_bpages; -static int busdma_zonecount; -static STAILQ_HEAD(, bounce_zone) bounce_zone_list; -static void *busdma_ih; - static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Busdma parameters"); -SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0, - "Total bounce pages"); struct bus_dmamap { - struct bp_list bpages; + STAILQ_HEAD(, bounce_page) bpages; int pagesneeded; int pagesreserved; bus_dma_tag_t dmat; @@ -138,20 +103,18 @@ struct bus_dmamap { int contigalloc; }; -static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; -static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; +static MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata"); -static void init_bounce_pages(void *dummy); -static int alloc_bounce_zone(bus_dma_tag_t dmat); -static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); -static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, - int commit); -static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, - vm_offset_t vaddr, bus_addr_t addr, - bus_size_t size); -static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr); +#define dmat_alignment(dmat) ((dmat)->alignment) +#define dmat_flags(dmat) ((dmat)->flags) +#define dmat_lowaddr(dmat) ((dmat)->lowaddr) +#define dmat_lockfunc(dmat) ((dmat)->lockfunc) +#define dmat_lockfuncarg(dmat) ((dmat)->lockfuncarg) + +#include "../../kern/subr_busdma_bounce.c" + /* * Return true if a match is made. * @@ -656,31 +619,6 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap, } } -static int -_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags) -{ - - /* Reserve Necessary Bounce Pages */ - mtx_lock(&bounce_lock); - if (flags & BUS_DMA_NOWAIT) { - if (reserve_bounce_pages(dmat, map, 0) != 0) { - mtx_unlock(&bounce_lock); - return (ENOMEM); - } - } else { - if (reserve_bounce_pages(dmat, map, 1) != 0) { - /* Queue us for resources */ - STAILQ_INSERT_TAIL(&bounce_map_waitinglist, - map, links); - mtx_unlock(&bounce_lock); - return (EINPROGRESS); - } - } - mtx_unlock(&bounce_lock); - - return (0); -} - /* * Add a single contiguous physical range to the segment list. 
*/ @@ -971,272 +909,6 @@ bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) powerpc_sync(); } -static void -init_bounce_pages(void *dummy __unused) -{ - - total_bpages = 0; - STAILQ_INIT(&bounce_zone_list); - STAILQ_INIT(&bounce_map_waitinglist); - STAILQ_INIT(&bounce_map_callbacklist); - mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF); -} -SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL); - -static struct sysctl_ctx_list * -busdma_sysctl_tree(struct bounce_zone *bz) -{ - return (&bz->sysctl_tree); -} - -static struct sysctl_oid * -busdma_sysctl_tree_top(struct bounce_zone *bz) -{ - return (bz->sysctl_tree_top); -} - -static int -alloc_bounce_zone(bus_dma_tag_t dmat) -{ - struct bounce_zone *bz; - - /* Check to see if we already have a suitable zone */ - STAILQ_FOREACH(bz, &bounce_zone_list, links) { - if ((dmat->alignment <= bz->alignment) - && (dmat->lowaddr >= bz->lowaddr)) { - dmat->bounce_zone = bz; - return (0); - } - } - - if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF, - M_NOWAIT | M_ZERO)) == NULL) - return (ENOMEM); - - STAILQ_INIT(&bz->bounce_page_list); - bz->free_bpages = 0; - bz->reserved_bpages = 0; - bz->active_bpages = 0; - bz->lowaddr = dmat->lowaddr; - bz->alignment = MAX(dmat->alignment, PAGE_SIZE); - bz->map_count = 0; - snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount); - busdma_zonecount++; - snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr); - STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links); - dmat->bounce_zone = bz; - - sysctl_ctx_init(&bz->sysctl_tree); - bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree, - SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid, - CTLFLAG_RD | CTLFLAG_MPSAFE, 0, ""); - if (bz->sysctl_tree_top == NULL) { - sysctl_ctx_free(&bz->sysctl_tree); - return (0); /* XXX error code? 
*/ - } - - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0, - "Total bounce pages"); - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0, - "Free bounce pages"); - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0, - "Reserved bounce pages"); - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0, - "Active bounce pages"); - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0, - "Total bounce requests"); - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0, - "Total bounce requests that were deferred"); - SYSCTL_ADD_STRING(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, ""); - SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "alignment", CTLFLAG_RD, &bz->alignment, ""); - - return (0); -} - -static int -alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) -{ - struct bounce_zone *bz; - int count; - - bz = dmat->bounce_zone; - count = 0; - while (numpages > 0) { - struct bounce_page *bpage; - - bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF, - M_NOWAIT | M_ZERO); - - if (bpage == NULL) - break; - bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF, - M_NOWAIT, 0ul, - bz->lowaddr, - PAGE_SIZE, - 0); - if (bpage->vaddr == 0) { - free(bpage, M_DEVBUF); - break; - } - bpage->busaddr = pmap_kextract(bpage->vaddr); - mtx_lock(&bounce_lock); - STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links); - total_bpages++; - bz->total_bpages++; - bz->free_bpages++; - mtx_unlock(&bounce_lock); - count++; - numpages--; - } - return (count); -} - -static int -reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) -{ - struct bounce_zone *bz; - int pages; - - mtx_assert(&bounce_lock, MA_OWNED); - bz = dmat->bounce_zone; - pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved); - if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages)) - return (map->pagesneeded - (map->pagesreserved + pages)); - bz->free_bpages -= pages; - bz->reserved_bpages += pages; - map->pagesreserved += pages; - pages = map->pagesneeded - map->pagesreserved; - - return (pages); -} - -static bus_addr_t -add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, - bus_addr_t addr, bus_size_t size) -{ - struct bounce_zone *bz; - struct bounce_page *bpage; - - KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag")); - - bz = dmat->bounce_zone; - if (map->pagesneeded == 0) - panic("add_bounce_page: map doesn't need any pages"); - map->pagesneeded--; - - if (map->pagesreserved == 0) - panic("add_bounce_page: map doesn't need any pages"); - map->pagesreserved--; - - mtx_lock(&bounce_lock); - bpage = STAILQ_FIRST(&bz->bounce_page_list); - if (bpage == NULL) - panic("add_bounce_page: free page list is empty"); - - STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); - bz->reserved_bpages--; - bz->active_bpages++; - mtx_unlock(&bounce_lock); - - if 
(dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { - /* Page offset needs to be preserved. */ - bpage->vaddr |= addr & PAGE_MASK; - bpage->busaddr |= addr & PAGE_MASK; - } - bpage->datavaddr = vaddr; - bpage->datapage = PHYS_TO_VM_PAGE(addr); - bpage->dataoffs = addr & PAGE_MASK; - bpage->datacount = size; - STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); - return (bpage->busaddr); -} - -static void -free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) -{ - struct bus_dmamap *map; - struct bounce_zone *bz; - bool schedule_swi; - - bz = dmat->bounce_zone; - bpage->datavaddr = 0; - bpage->datacount = 0; - if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { - /* - * Reset the bounce page to start at offset 0. Other uses - * of this bounce page may need to store a full page of - * data and/or assume it starts on a page boundary. - */ - bpage->vaddr &= ~PAGE_MASK; - bpage->busaddr &= ~PAGE_MASK; - } - - schedule_swi = false; - mtx_lock(&bounce_lock); - STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); - bz->free_bpages++; - bz->active_bpages--; - if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { - if (reserve_bounce_pages(map->dmat, map, 1) == 0) { - STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); - STAILQ_INSERT_TAIL(&bounce_map_callbacklist, - map, links); - bz->total_deferred++; - schedule_swi = true; - } - } - mtx_unlock(&bounce_lock); - if (schedule_swi) - swi_sched(busdma_ih, 0); -} - -static void -busdma_swi(void *dummy __unused) -{ - bus_dma_tag_t dmat; - struct bus_dmamap *map; - - mtx_lock(&bounce_lock); - while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { - STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); - mtx_unlock(&bounce_lock); - dmat = map->dmat; - (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK); - bus_dmamap_load_mem(map->dmat, map, &map->mem, - map->callback, map->callback_arg, - BUS_DMA_WAITOK); - (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK); - mtx_lock(&bounce_lock); - } - mtx_unlock(&bounce_lock); -} - -static void -start_busdma_swi(void *dummy __unused) -{ - if (swi_add(NULL, "busdma", busdma_swi, NULL, SWI_BUSDMA, INTR_MPSAFE, - &busdma_ih)) - panic("died while creating busdma swi ithread"); -} -SYSINIT(start_busdma_swi, SI_SUB_SOFTINTR, SI_ORDER_ANY, start_busdma_swi, - NULL); - int bus_dma_tag_set_iommu(bus_dma_tag_t tag, device_t iommu, void *cookie) { diff --git a/sys/riscv/riscv/busdma_bounce.c b/sys/riscv/riscv/busdma_bounce.c index ffc05e20517f..c3452c605390 100644 --- a/sys/riscv/riscv/busdma_bounce.c +++ b/sys/riscv/riscv/busdma_bounce.c @@ -68,6 +68,7 @@ enum { BF_COHERENT = 0x10, }; +struct bounce_page; struct bounce_zone; struct bus_dma_tag { @@ -78,44 +79,8 @@ struct bus_dma_tag { struct bounce_zone *bounce_zone; }; -struct bounce_page { - vm_offset_t vaddr; /* kva of bounce buffer */ - bus_addr_t busaddr; /* Physical address */ - vm_offset_t datavaddr; /* kva of client data */ - vm_page_t datapage; /* physical page of client data */ - vm_offset_t dataoffs; /* page offset of client data */ - bus_size_t datacount; /* client data count */ - STAILQ_ENTRY(bounce_page) links; -}; - -struct bounce_zone { - STAILQ_ENTRY(bounce_zone) links; - STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; - int total_bpages; - int free_bpages; - int reserved_bpages; - int active_bpages; - int total_bounced; - int total_deferred; - int map_count; - bus_size_t alignment; - bus_addr_t lowaddr; - char zoneid[8]; - char lowaddrid[20]; - struct sysctl_ctx_list sysctl_tree; - struct sysctl_oid *sysctl_tree_top; -}; - -static struct mtx 
bounce_lock; -static int total_bpages; -static int busdma_zonecount; -static STAILQ_HEAD(, bounce_zone) bounce_zone_list; -static void *busdma_ih; - static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Busdma parameters"); -SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0, - "Total bounce pages"); struct sync_list { vm_offset_t vaddr; /* kva of client data */ @@ -125,7 +90,7 @@ struct sync_list { }; struct bus_dmamap { - struct bp_list bpages; + STAILQ_HEAD(, bounce_page) bpages; int pagesneeded; int pagesreserved; bus_dma_tag_t dmat; @@ -140,24 +105,21 @@ struct bus_dmamap { struct sync_list slist[]; }; -static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; -static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; - -static void init_bounce_pages(void *dummy); -static int alloc_bounce_zone(bus_dma_tag_t dmat); -static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); -static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, - int commit); -static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, - vm_offset_t vaddr, bus_addr_t addr, bus_size_t size); -static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr); static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap, void *buf, bus_size_t buflen, int flags); static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, bus_size_t buflen, int flags); -static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, - int flags); + +static MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata"); + +#define dmat_alignment(dmat) ((dmat)->common.alignment) +#define dmat_flags(dmat) ((dmat)->common.flags) +#define dmat_lowaddr(dmat) ((dmat)->common.lowaddr) +#define dmat_lockfunc(dmat) ((dmat)->common.lockfunc) +#define dmat_lockfuncarg(dmat) ((dmat)->common.lockfuncarg) + +#include "../../kern/subr_busdma_bounce.c" /* * Allocate a device specific dma_tag. @@ -605,30 +567,6 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap, } } -static int -_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags) -{ - - /* Reserve Necessary Bounce Pages */ - mtx_lock(&bounce_lock); - if (flags & BUS_DMA_NOWAIT) { - if (reserve_bounce_pages(dmat, map, 0) != 0) { - mtx_unlock(&bounce_lock); - return (ENOMEM); - } - } else { - if (reserve_bounce_pages(dmat, map, 1) != 0) { - /* Queue us for resources */ - STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links); - mtx_unlock(&bounce_lock); - return (EINPROGRESS); - } - } - mtx_unlock(&bounce_lock); - - return (0); -} - /* * Add a single contiguous physical range to the segment list. 
*/ @@ -1052,273 +990,6 @@ bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, } } -static void -init_bounce_pages(void *dummy __unused) -{ - - total_bpages = 0; - STAILQ_INIT(&bounce_zone_list); - STAILQ_INIT(&bounce_map_waitinglist); - STAILQ_INIT(&bounce_map_callbacklist); - mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF); -} -SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL); - -static struct sysctl_ctx_list * -busdma_sysctl_tree(struct bounce_zone *bz) -{ - - return (&bz->sysctl_tree); -} - -static struct sysctl_oid * -busdma_sysctl_tree_top(struct bounce_zone *bz) -{ - - return (bz->sysctl_tree_top); -} - -static int -alloc_bounce_zone(bus_dma_tag_t dmat) -{ - struct bounce_zone *bz; - - /* Check to see if we already have a suitable zone */ - STAILQ_FOREACH(bz, &bounce_zone_list, links) { - if ((dmat->common.alignment <= bz->alignment) && - (dmat->common.lowaddr >= bz->lowaddr)) { - dmat->bounce_zone = bz; - return (0); - } - } - - if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF, - M_NOWAIT | M_ZERO)) == NULL) - return (ENOMEM); - - STAILQ_INIT(&bz->bounce_page_list); - bz->free_bpages = 0; - bz->reserved_bpages = 0; - bz->active_bpages = 0; - bz->lowaddr = dmat->common.lowaddr; - bz->alignment = MAX(dmat->common.alignment, PAGE_SIZE); - bz->map_count = 0; - snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount); - busdma_zonecount++; - snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr); - STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links); - dmat->bounce_zone = bz; - - sysctl_ctx_init(&bz->sysctl_tree); - bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree, - SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid, - CTLFLAG_RD | CTLFLAG_MPSAFE, 0, ""); - if (bz->sysctl_tree_top == NULL) { - sysctl_ctx_free(&bz->sysctl_tree); - return (0); /* XXX error code? 
*/ - } - - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0, - "Total bounce pages"); - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0, - "Free bounce pages"); - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0, - "Reserved bounce pages"); - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0, - "Active bounce pages"); - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0, - "Total bounce requests"); - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0, - "Total bounce requests that were deferred"); - SYSCTL_ADD_STRING(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, ""); - SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "alignment", CTLFLAG_RD, &bz->alignment, ""); - - return (0); -} - -static int -alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) -{ - struct bounce_zone *bz; - int count; - - bz = dmat->bounce_zone; - count = 0; - while (numpages > 0) { - struct bounce_page *bpage; - - bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF, - M_NOWAIT | M_ZERO); - - if (bpage == NULL) - break; - bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF, - M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0); - if (bpage->vaddr == 0) { - free(bpage, M_DEVBUF); - break; - } - bpage->busaddr = pmap_kextract(bpage->vaddr); - mtx_lock(&bounce_lock); - STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links); - total_bpages++; - bz->total_bpages++; - bz->free_bpages++; - mtx_unlock(&bounce_lock); - count++; - numpages--; - } - return (count); -} - -static int -reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) -{ - struct bounce_zone *bz; - int pages; - - mtx_assert(&bounce_lock, MA_OWNED); - bz = dmat->bounce_zone; - pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved); - if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages)) - return (map->pagesneeded - (map->pagesreserved + pages)); - bz->free_bpages -= pages; - bz->reserved_bpages += pages; - map->pagesreserved += pages; - pages = map->pagesneeded - map->pagesreserved; - - return (pages); -} - -static bus_addr_t -add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, - bus_addr_t addr, bus_size_t size) -{ - struct bounce_zone *bz; - struct bounce_page *bpage; - - KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag")); - KASSERT((map->flags & DMAMAP_COULD_BOUNCE) != 0, - ("add_bounce_page: bad map %p", map)); - - bz = dmat->bounce_zone; - if (map->pagesneeded == 0) - panic("add_bounce_page: map doesn't need any pages"); - map->pagesneeded--; - - if (map->pagesreserved == 0) - panic("add_bounce_page: map doesn't need any pages"); - map->pagesreserved--; - - mtx_lock(&bounce_lock); - bpage = STAILQ_FIRST(&bz->bounce_page_list); - if (bpage == NULL) - panic("add_bounce_page: free page list is empty"); - - STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); - 
-
-static bus_addr_t
-add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
-    bus_addr_t addr, bus_size_t size)
-{
-        struct bounce_zone *bz;
-        struct bounce_page *bpage;
-
-        KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
-        KASSERT((map->flags & DMAMAP_COULD_BOUNCE) != 0,
-            ("add_bounce_page: bad map %p", map));
-
-        bz = dmat->bounce_zone;
-        if (map->pagesneeded == 0)
-                panic("add_bounce_page: map doesn't need any pages");
-        map->pagesneeded--;
-
-        if (map->pagesreserved == 0)
-                panic("add_bounce_page: map doesn't need any pages");
-        map->pagesreserved--;
-
-        mtx_lock(&bounce_lock);
-        bpage = STAILQ_FIRST(&bz->bounce_page_list);
-        if (bpage == NULL)
-                panic("add_bounce_page: free page list is empty");
-
-        STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
-        bz->reserved_bpages--;
-        bz->active_bpages++;
-        mtx_unlock(&bounce_lock);
-
-        if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
-                /* Page offset needs to be preserved. */
-                bpage->vaddr |= addr & PAGE_MASK;
-                bpage->busaddr |= addr & PAGE_MASK;
-        }
-        bpage->datavaddr = vaddr;
-        bpage->datapage = PHYS_TO_VM_PAGE(addr);
-        bpage->dataoffs = addr & PAGE_MASK;
-        bpage->datacount = size;
-        STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
-        return (bpage->busaddr);
-}
-
-static void
-free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
-{
-        struct bus_dmamap *map;
-        struct bounce_zone *bz;
-        bool schedule_swi;
-
-        bz = dmat->bounce_zone;
-        bpage->datavaddr = 0;
-        bpage->datacount = 0;
-        if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
-                /*
-                 * Reset the bounce page to start at offset 0. Other uses
-                 * of this bounce page may need to store a full page of
-                 * data and/or assume it starts on a page boundary.
-                 */
-                bpage->vaddr &= ~PAGE_MASK;
-                bpage->busaddr &= ~PAGE_MASK;
-        }
-
-        schedule_swi = false;
-        mtx_lock(&bounce_lock);
-        STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
-        bz->free_bpages++;
-        bz->active_bpages--;
-        if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
-                if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
-                        STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
-                        STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
-                            map, links);
-                        bz->total_deferred++;
-                        schedule_swi = true;
-                }
-        }
-        mtx_unlock(&bounce_lock);
-        if (schedule_swi)
-                swi_sched(busdma_ih, 0);
-}
-
-static void
-busdma_swi(void *dummy __unused)
-{
-        bus_dma_tag_t dmat;
-        struct bus_dmamap *map;
-
-        mtx_lock(&bounce_lock);
-        while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
-                STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
-                mtx_unlock(&bounce_lock);
-                dmat = map->dmat;
-                (dmat->common.lockfunc)(dmat->common.lockfuncarg, BUS_DMA_LOCK);
-                bus_dmamap_load_mem(map->dmat, map, &map->mem,
-                    map->callback, map->callback_arg, BUS_DMA_WAITOK);
-                (dmat->common.lockfunc)(dmat->common.lockfuncarg,
-                    BUS_DMA_UNLOCK);
-                mtx_lock(&bounce_lock);
-        }
-        mtx_unlock(&bounce_lock);
-}
-
-static void
-start_busdma_swi(void *dummy __unused)
-{
-        if (swi_add(NULL, "busdma", busdma_swi, NULL, SWI_BUSDMA, INTR_MPSAFE,
-            &busdma_ih))
-                panic("died while creating busdma swi ithread");
-}
-SYSINIT(start_busdma_swi, SI_SUB_SOFTINTR, SI_ORDER_ANY, start_busdma_swi,
-    NULL);
-
 struct bus_dma_impl bus_dma_bounce_impl = {
         .tag_create = bounce_bus_dma_tag_create,
         .tag_destroy = bounce_bus_dma_tag_destroy,
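Worth noting before the next file: the deferred-load machinery deleted above is repeated verbatim in every backend. A map that cannot reserve enough pages parks on bounce_map_waitinglist; when pages come back, free_bounce_page() promotes the first satisfiable waiter to bounce_map_callbacklist and schedules a software interrupt, and busdma_swi() replays each deferred load with BUS_DMA_WAITOK under the tag's lock function. The drain loop's locking pattern, as a self-contained toy with hypothetical names (the kernel uses STAILQ and mtx_lock/mtx_unlock on bounce_lock):

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy work item standing in for a deferred bus_dmamap. */
    struct work {
            int id;
            struct work *next;
    };

    static struct work *callback_list;  /* models bounce_map_callbacklist */

    /* Stubs; in the kernel these are mtx_lock/mtx_unlock(&bounce_lock). */
    static void lock(void) { }
    static void unlock(void) { }

    /*
     * Drain pattern used by busdma_swi(): the list lock is dropped
     * around each callback, because replaying the load may block or
     * re-enter the bounce-page allocator.
     */
    static void
    drain(void)
    {
            struct work *w;

            lock();
            while ((w = callback_list) != NULL) {
                    callback_list = w->next;
                    unlock();
                    /* Models bus_dmamap_load_mem() under the tag lock. */
                    printf("replaying deferred load %d\n", w->id);
                    free(w);
                    lock();
            }
            unlock();
    }

    int
    main(void)
    {
            for (int i = 0; i < 3; i++) {
                    struct work *w = malloc(sizeof(*w));

                    if (w == NULL)
                            return (1);
                    w->id = i;
                    w->next = callback_list;  /* enqueue; order not modeled */
                    callback_list = w;
            }
            drain();
            return (0);
    }
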
diff --git a/sys/x86/x86/busdma_bounce.c b/sys/x86/x86/busdma_bounce.c
index 0d17869f8e17..258441cf2070 100644
--- a/sys/x86/x86/busdma_bounce.c
+++ b/sys/x86/x86/busdma_bounce.c
@@ -70,6 +70,7 @@ enum {
         BUS_DMA_FORCE_MAP       = 0x08,
 };
 
+struct bounce_page;
 struct bounce_zone;
 
 struct bus_dma_tag {
@@ -80,48 +81,11 @@ struct bus_dma_tag {
         struct bounce_zone *bounce_zone;
 };
 
-struct bounce_page {
-        vm_offset_t     vaddr;          /* kva of bounce buffer */
-        bus_addr_t      busaddr;        /* Physical address */
-        vm_offset_t     datavaddr;      /* kva of client data */
-        vm_offset_t     dataoffs;       /* page offset of client data */
-        vm_page_t       datapage[2];    /* physical page(s) of client data */
-        bus_size_t      datacount;      /* client data count */
-        STAILQ_ENTRY(bounce_page) links;
-};
-
-struct bounce_zone {
-        STAILQ_ENTRY(bounce_zone) links;
-        STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
-        int             total_bpages;
-        int             free_bpages;
-        int             reserved_bpages;
-        int             active_bpages;
-        int             total_bounced;
-        int             total_deferred;
-        int             map_count;
-        int             domain;
-        bus_size_t      alignment;
-        bus_addr_t      lowaddr;
-        char            zoneid[8];
-        char            lowaddrid[20];
-        struct sysctl_ctx_list sysctl_tree;
-        struct sysctl_oid *sysctl_tree_top;
-};
-
-static struct mtx bounce_lock;
-static int total_bpages;
-static int busdma_zonecount;
-static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
-static void *busdma_ih;
-
 static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
     "Busdma parameters");
-SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
-    "Total bounce pages");
 
 struct bus_dmamap {
-        struct bp_list bpages;
+        STAILQ_HEAD(, bounce_page) bpages;
         int            pagesneeded;
         int            pagesreserved;
         bus_dma_tag_t  dmat;
@@ -134,26 +98,25 @@ struct bus_dmamap {
 #endif
 };
 
-static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
-static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
 static struct bus_dmamap nobounce_dmamap;
 
-static void init_bounce_pages(void *dummy);
-static int alloc_bounce_zone(bus_dma_tag_t dmat);
-static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
-static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
-    int commit);
-static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
-    vm_offset_t vaddr, vm_paddr_t addr1, vm_paddr_t addr2, bus_size_t size);
-static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
 static bool _bus_dmamap_pagesneeded(bus_dma_tag_t dmat, vm_paddr_t buf,
     bus_size_t buflen, int *pagesneeded);
 static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
     pmap_t pmap, void *buf, bus_size_t buflen, int flags);
 static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
     vm_paddr_t buf, bus_size_t buflen, int flags);
-static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
-    int flags);
+
+static MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");
+
+#define dmat_alignment(dmat)    ((dmat)->common.alignment)
+#define dmat_domain(dmat)       ((dmat)->common.domain)
+#define dmat_flags(dmat)        ((dmat)->common.flags)
+#define dmat_lowaddr(dmat)      ((dmat)->common.lowaddr)
+#define dmat_lockfunc(dmat)     ((dmat)->common.lockfunc)
+#define dmat_lockfuncarg(dmat)  ((dmat)->common.lockfuncarg)
+
+#include "../../kern/subr_busdma_bounce.c"
 
 static int
 bounce_bus_dma_zone_setup(bus_dma_tag_t dmat)
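The hunk above is the entire integration contract for a backend: forward-declare the opaque bounce structures, define dmat_*() accessor macros that translate the shared code's field accesses into this backend's tag layout, and #include the common file so it compiles with full visibility of the local structs. The same trick in miniature, with hypothetical names and the "shared" function inlined into one file so the sketch stands alone:

    #include <stdio.h>

    /* Backend-private tag layout; each backend may differ. */
    struct tag {
            struct { unsigned alignment; } common;
    };

    /* Adapter macro: the only interface the shared code relies on. */
    #define tag_alignment(t)    ((t)->common.alignment)

    /*
     * In the kernel the shared functions live in a separate file,
     * subr_busdma_bounce.c, pulled in with #include precisely so
     * they can see the backend's struct definitions and macros.
     * Inlining one "shared" helper here keeps the sketch standalone.
     */
    static unsigned
    shared_round_up(struct tag *t, unsigned addr)
    {
            unsigned a = tag_alignment(t);

            return ((addr + a - 1) / a * a);
    }

    int
    main(void)
    {
            struct tag t = { .common = { .alignment = 8 } };

            printf("%u\n", shared_round_up(&t, 13));    /* prints 16 */
            return (0);
    }

The design trade-off is stated in the commit itself: the shared file cannot be compiled standalone because the structures it manipulates are opaque outside each backend, so textual inclusion plus macros stands in for a real module boundary.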
@@ -660,30 +623,6 @@ _bus_dmamap_count_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma,
         }
 }
 
-static int
-_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
-{
-
-        /* Reserve Necessary Bounce Pages */
-        mtx_lock(&bounce_lock);
-        if (flags & BUS_DMA_NOWAIT) {
-                if (reserve_bounce_pages(dmat, map, 0) != 0) {
-                        mtx_unlock(&bounce_lock);
-                        return (ENOMEM);
-                }
-        } else {
-                if (reserve_bounce_pages(dmat, map, 1) != 0) {
-                        /* Queue us for resources */
-                        STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
-                        mtx_unlock(&bounce_lock);
-                        return (EINPROGRESS);
-                }
-        }
-        mtx_unlock(&bounce_lock);
-
-        return (0);
-}
-
 /*
  * Add a single contiguous physical range to the segment list.
  */
@@ -1094,284 +1033,6 @@ bounce_bus_dmamap_load_kmsan(bus_dmamap_t map, struct memdesc *mem)
 }
 #endif
 
-static void
-init_bounce_pages(void *dummy __unused)
-{
-
-        total_bpages = 0;
-        STAILQ_INIT(&bounce_zone_list);
-        STAILQ_INIT(&bounce_map_waitinglist);
-        STAILQ_INIT(&bounce_map_callbacklist);
-        mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
-}
-SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);
-
-static struct sysctl_ctx_list *
-busdma_sysctl_tree(struct bounce_zone *bz)
-{
-
-        return (&bz->sysctl_tree);
-}
-
-static struct sysctl_oid *
-busdma_sysctl_tree_top(struct bounce_zone *bz)
-{
-
-        return (bz->sysctl_tree_top);
-}
-
-static int
-alloc_bounce_zone(bus_dma_tag_t dmat)
-{
-        struct bounce_zone *bz;
-
-        /* Check to see if we already have a suitable zone */
-        STAILQ_FOREACH(bz, &bounce_zone_list, links) {
-                if (dmat->common.alignment <= bz->alignment &&
-                    dmat->common.lowaddr >= bz->lowaddr &&
-                    dmat->common.domain == bz->domain) {
-                        dmat->bounce_zone = bz;
-                        return (0);
-                }
-        }
-
-        if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
-            M_NOWAIT | M_ZERO)) == NULL)
-                return (ENOMEM);
-
-        STAILQ_INIT(&bz->bounce_page_list);
-        bz->free_bpages = 0;
-        bz->reserved_bpages = 0;
-        bz->active_bpages = 0;
-        bz->lowaddr = dmat->common.lowaddr;
-        bz->alignment = MAX(dmat->common.alignment, PAGE_SIZE);
-        bz->map_count = 0;
-        bz->domain = dmat->common.domain;
-        snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
-        busdma_zonecount++;
-        snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
-        STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
-        dmat->bounce_zone = bz;
-
-        sysctl_ctx_init(&bz->sysctl_tree);
-        bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
-            SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
-            CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
-        if (bz->sysctl_tree_top == NULL) {
-                sysctl_ctx_free(&bz->sysctl_tree);
-                return (0);     /* XXX error code? */
-        }
-
-        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
-            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
-            "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
-            "Total bounce pages");
-        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
-            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
-            "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
-            "Free bounce pages");
-        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
-            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
-            "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
-            "Reserved bounce pages");
-        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
-            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
-            "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
-            "Active bounce pages");
-        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
-            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
-            "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
-            "Total bounce requests");
-        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
-            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
-            "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
-            "Total bounce requests that were deferred");
-        SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
-            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
-            "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
-        SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz),
-            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
-            "alignment", CTLFLAG_RD, &bz->alignment, "");
-        SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
-            SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
-            "domain", CTLFLAG_RD, &bz->domain, 0,
-            "memory domain");
-
-        return (0);
-}
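Unlike the riscv copy earlier, the x86 variant is NUMA-aware: a zone only matches a tag created for the same memory domain, and, in the allocator deleted just below, the pages themselves come from the tag's preferred domain via DOMAINSET_PREF(), so bounce copies stay domain-local. Extending the earlier hypothetical predicate with that one extra condition:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical stand-ins; the x86 tag also records a NUMA domain. */
    struct tag { uint64_t alignment, lowaddr; int domain; };
    struct zone { uint64_t alignment, lowaddr; int domain; };

    /* Same test as before, plus domain equality, as in the code above. */
    static bool
    zone_compatible_numa(const struct tag *t, const struct zone *z)
    {
            return (t->alignment <= z->alignment &&
                t->lowaddr >= z->lowaddr &&
                t->domain == z->domain);
    }

    int
    main(void)
    {
            struct zone z = { 4096, 0xffffffffUL, 0 };
            struct tag t = { 64, 0xffffffffffUL, 1 };

            /* Exits 1: constraints fit, but the domain differs. */
            return (zone_compatible_numa(&t, &z) ? 0 : 1);
    }
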
-
-static int
-alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
-{
-        struct bounce_zone *bz;
-        int count;
-
-        bz = dmat->bounce_zone;
-        count = 0;
-        while (numpages > 0) {
-                struct bounce_page *bpage;
-
-                bpage = malloc_domainset(sizeof(*bpage), M_DEVBUF,
-                    DOMAINSET_PREF(dmat->common.domain), M_NOWAIT | M_ZERO);
-
-                if (bpage == NULL)
-                        break;
-                bpage->vaddr = (vm_offset_t)contigmalloc_domainset(PAGE_SIZE,
-                    M_DEVBUF, DOMAINSET_PREF(dmat->common.domain), M_NOWAIT,
-                    0ul, bz->lowaddr, PAGE_SIZE, 0);
-                if (bpage->vaddr == 0) {
-                        free(bpage, M_DEVBUF);
-                        break;
-                }
-                bpage->busaddr = pmap_kextract(bpage->vaddr);
-                mtx_lock(&bounce_lock);
-                STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
-                total_bpages++;
-                bz->total_bpages++;
-                bz->free_bpages++;
-                mtx_unlock(&bounce_lock);
-                count++;
-                numpages--;
-        }
-        return (count);
-}
-
-static int
-reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
-{
-        struct bounce_zone *bz;
-        int pages;
-
-        mtx_assert(&bounce_lock, MA_OWNED);
-        bz = dmat->bounce_zone;
-        pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
-        if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
-                return (map->pagesneeded - (map->pagesreserved + pages));
-        bz->free_bpages -= pages;
-        bz->reserved_bpages += pages;
-        map->pagesreserved += pages;
-        pages = map->pagesneeded - map->pagesreserved;
-
-        return (pages);
-}
-
-static bus_addr_t
-add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
-    vm_paddr_t addr1, vm_paddr_t addr2, bus_size_t size)
-{
-        struct bounce_zone *bz;
-        struct bounce_page *bpage;
-
-        KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
-        KASSERT(map != NULL && map != &nobounce_dmamap,
-            ("add_bounce_page: bad map %p", map));
-
-        bz = dmat->bounce_zone;
-        if (map->pagesneeded == 0)
-                panic("add_bounce_page: map doesn't need any pages");
-        map->pagesneeded--;
-
-        if (map->pagesreserved == 0)
-                panic("add_bounce_page: map doesn't need any pages");
-        map->pagesreserved--;
-
-        mtx_lock(&bounce_lock);
-        bpage = STAILQ_FIRST(&bz->bounce_page_list);
-        if (bpage == NULL)
-                panic("add_bounce_page: free page list is empty");
-
-        STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
-        bz->reserved_bpages--;
-        bz->active_bpages++;
-        mtx_unlock(&bounce_lock);
-
-        if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
-                /* Page offset needs to be preserved. */
-                bpage->vaddr |= addr1 & PAGE_MASK;
-                bpage->busaddr |= addr1 & PAGE_MASK;
-                KASSERT(addr2 == 0,
-            ("Trying to bounce multiple pages with BUS_DMA_KEEP_PG_OFFSET"));
-        }
-        bpage->datavaddr = vaddr;
-        bpage->datapage[0] = PHYS_TO_VM_PAGE(addr1);
-        KASSERT((addr2 & PAGE_MASK) == 0, ("Second page is not aligned"));
-        bpage->datapage[1] = PHYS_TO_VM_PAGE(addr2);
-        bpage->dataoffs = addr1 & PAGE_MASK;
-        bpage->datacount = size;
-        STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
-        return (bpage->busaddr);
-}
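Two details distinguish this add_bounce_page() from the riscv one earlier: it can bounce a buffer that straddles two physical pages (datapage[2], addr1/addr2), and BUS_DMA_KEEP_PG_OFFSET is consequently restricted to single-page bounces. The offset preservation itself is plain bit arithmetic on a page-aligned bounce buffer; a small demo with made-up addresses (the kernel operates on vm_offset_t/bus_addr_t, not uint64_t):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_MASK 0xfffULL          /* assuming 4 KiB pages */

    int
    main(void)
    {
            /* Hypothetical page-aligned bounce KVA and client address. */
            uint64_t bounce_va = 0xffff0000d0000000ULL;
            uint64_t client_pa = 0x000000012345abcdULL;

            /* add_bounce_page(): carry the client's page offset over. */
            bounce_va |= client_pa & PAGE_MASK;
            printf("with offset: %#jx\n", (uintmax_t)bounce_va); /* ...bcd */

            /* free_bounce_page(): reset to a page boundary for reuse. */
            bounce_va &= ~PAGE_MASK;
            printf("reset:       %#jx\n", (uintmax_t)bounce_va);
            return (0);
    }
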
need any pages"); - map->pagesreserved--; - - mtx_lock(&bounce_lock); - bpage = STAILQ_FIRST(&bz->bounce_page_list); - if (bpage == NULL) - panic("add_bounce_page: free page list is empty"); - - STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); - bz->reserved_bpages--; - bz->active_bpages++; - mtx_unlock(&bounce_lock); - - if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) { - /* Page offset needs to be preserved. */ - bpage->vaddr |= addr1 & PAGE_MASK; - bpage->busaddr |= addr1 & PAGE_MASK; - KASSERT(addr2 == 0, - ("Trying to bounce multiple pages with BUS_DMA_KEEP_PG_OFFSET")); - } - bpage->datavaddr = vaddr; - bpage->datapage[0] = PHYS_TO_VM_PAGE(addr1); - KASSERT((addr2 & PAGE_MASK) == 0, ("Second page is not aligned")); - bpage->datapage[1] = PHYS_TO_VM_PAGE(addr2); - bpage->dataoffs = addr1 & PAGE_MASK; - bpage->datacount = size; - STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); - return (bpage->busaddr); -} - -static void -free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) -{ - struct bus_dmamap *map; - struct bounce_zone *bz; - bool schedule_swi; - - bz = dmat->bounce_zone; - bpage->datavaddr = 0; - bpage->datacount = 0; - if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) { - /* - * Reset the bounce page to start at offset 0. Other uses - * of this bounce page may need to store a full page of - * data and/or assume it starts on a page boundary. - */ - bpage->vaddr &= ~PAGE_MASK; - bpage->busaddr &= ~PAGE_MASK; - } - - schedule_swi = false; - mtx_lock(&bounce_lock); - STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); - bz->free_bpages++; - bz->active_bpages--; - if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { - if (reserve_bounce_pages(map->dmat, map, 1) == 0) { - STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); - STAILQ_INSERT_TAIL(&bounce_map_callbacklist, - map, links); - bz->total_deferred++; - schedule_swi = true; - } - } - mtx_unlock(&bounce_lock); - if (schedule_swi) - swi_sched(busdma_ih, 0); -} - -static void -busdma_swi(void *dummy __unused) -{ - bus_dma_tag_t dmat; - struct bus_dmamap *map; - - mtx_lock(&bounce_lock); - while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { - STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); - mtx_unlock(&bounce_lock); - dmat = map->dmat; - (dmat->common.lockfunc)(dmat->common.lockfuncarg, BUS_DMA_LOCK); - bus_dmamap_load_mem(map->dmat, map, &map->mem, - map->callback, map->callback_arg, BUS_DMA_WAITOK); - (dmat->common.lockfunc)(dmat->common.lockfuncarg, - BUS_DMA_UNLOCK); - mtx_lock(&bounce_lock); - } - mtx_unlock(&bounce_lock); -} - -static void -start_busdma_swi(void *dummy __unused) -{ - if (swi_add(NULL, "busdma", busdma_swi, NULL, SWI_BUSDMA, INTR_MPSAFE, - &busdma_ih)) - panic("died while creating busdma swi ithread"); -} -SYSINIT(start_busdma_swi, SI_SUB_SOFTINTR, SI_ORDER_ANY, start_busdma_swi, - NULL); - struct bus_dma_impl bus_dma_bounce_impl = { .tag_create = bounce_bus_dma_tag_create, .tag_destroy = bounce_bus_dma_tag_destroy,