Allow loading the same DMA address multiple times without any prior
unload for the LinuxKPI.

Reviewed by:	kib, zeising
Sponsored by:	Dell EMC Isilon
Differential Revision:	https://reviews.freebsd.org/D20181
This commit is contained in:
Tycho Nightingale 2019-05-16 17:41:16 +00:00
parent f4ab98c597
commit b961c0f244
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=347836
10 changed files with 155 additions and 35 deletions

View File

@ -152,6 +152,8 @@ static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
vm_offset_t vaddr, bus_addr_t addr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
static bool _bus_dmamap_pagesneeded(bus_dma_tag_t dmat, vm_paddr_t buf,
bus_size_t buflen, int *pagesneeded);
static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
pmap_t pmap, void *buf, bus_size_t buflen, int flags);
static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
@ -271,6 +273,15 @@ bounce_bus_dma_tag_destroy(bus_dma_tag_t dmat)
return (error);
}
/*
 * Return true iff DMA covering [buf, buf + buflen) would be a pure
 * identity mapping, i.e. no part of the transfer needs a bounce page.
 */
static bool
bounce_bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
{

	/* A tag that can never bounce always maps 1:1. */
	if ((dmat->bounce_flags & BF_COULD_BOUNCE) != 0)
		return (!_bus_dmamap_pagesneeded(dmat, buf, buflen, NULL));
	return (true);
}
static bus_dmamap_t
alloc_dmamap(bus_dma_tag_t dmat, int flags)
{
@ -539,29 +550,45 @@ bounce_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
dmat->bounce_flags);
}
/*
 * Walk the physical range [buf, buf + buflen) in maxsegsz-bounded steps
 * and determine how many bounce pages the transfer would require.
 *
 * If pagesneeded is NULL the caller only wants a yes/no answer, so bail
 * out true on the first segment that the filter says must bounce.
 * Otherwise the full count is stored through pagesneeded.  Returns true
 * when at least one bounce page is needed.
 */
static bool
_bus_dmamap_pagesneeded(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen,
    int *pagesneeded)
{
	bus_addr_t paddr;
	bus_size_t seglen;
	int npages;

	npages = 0;
	for (paddr = buf; buflen != 0; paddr += seglen, buflen -= seglen) {
		seglen = MIN(buflen, dmat->common.maxsegsz);
		if (bus_dma_run_filter(&dmat->common, paddr)) {
			/* Bounced segments never cross a page boundary. */
			seglen = MIN(seglen, PAGE_SIZE - (paddr & PAGE_MASK));
			if (pagesneeded == NULL)
				return (true);
			npages++;
		}
	}
	if (pagesneeded != NULL)
		*pagesneeded = npages;
	return (npages != 0);
}
/*
 * Compute (once per map) the number of bounce pages needed to transfer
 * the physical range [buf, buf + buflen), caching the result in
 * map->pagesneeded.  Maps that cannot bounce are left untouched.
 *
 * Fix: the block contained both the superseded inline counting loop and
 * the _bus_dmamap_pagesneeded() call that replaced it; the helper call
 * immediately overwrote the loop's result, leaving the loop (and the
 * curaddr/sgsize locals) as dead code.  The duplicate loop is removed.
 */
static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags)
{

	if ((map->flags & DMAMAP_COULD_BOUNCE) != 0 && map->pagesneeded == 0) {
		_bus_dmamap_pagesneeded(dmat, buf, buflen, &map->pagesneeded);
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}
@ -1316,6 +1343,7 @@ busdma_swi(void)
struct bus_dma_impl bus_dma_bounce_impl = {
.tag_create = bounce_bus_dma_tag_create,
.tag_destroy = bounce_bus_dma_tag_destroy,
.id_mapped = bounce_bus_dma_id_mapped,
.map_create = bounce_bus_dmamap_create,
.map_destroy = bounce_bus_dmamap_destroy,
.mem_alloc = bounce_bus_dmamem_alloc,

View File

@ -8,6 +8,18 @@
#include <machine/bus_dma_impl.h>
/*
 * Query the tag's backing implementation: does DMA for the physical
 * range [buf, buf + buflen) use the physical address unchanged
 * (an identity, 1:1 mapping)?
 */
static inline bool
bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
{
	struct bus_dma_tag_common *common;

	common = (struct bus_dma_tag_common *)dmat;
	return (common->impl->id_mapped(dmat, buf, buflen));
}
/*
* Allocate a handle for mapping from kva/uva/physical
* address space into bus device space.

View File

@ -58,6 +58,7 @@ struct bus_dma_impl {
bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
void *lockfuncarg, bus_dma_tag_t *dmat);
int (*tag_destroy)(bus_dma_tag_t dmat);
bool (*id_mapped)(bus_dma_tag_t, vm_paddr_t, bus_size_t);
int (*map_create)(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp);
int (*map_destroy)(bus_dma_tag_t dmat, bus_dmamap_t map);
int (*mem_alloc)(bus_dma_tag_t dmat, void** vaddr, int flags,

View File

@ -520,6 +520,7 @@ linux_dma_alloc_coherent(struct device *dev, size_t size,
return (mem);
}
#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
@ -530,6 +531,15 @@ linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
priv = dev->dma_priv;
/*
* If the resultant mapping will be entirely 1:1 with the
* physical address, short-circuit the remainder of the
* bus_dma API. This avoids tracking collisions in the pctrie
* with the additional benefit of reducing overhead.
*/
if (bus_dma_id_mapped(priv->dmat, phys, len))
return (phys);
obj = uma_zalloc(linux_dma_obj_zone, 0);
DMA_PRIV_LOCK(priv);
@ -562,7 +572,15 @@ linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
DMA_PRIV_UNLOCK(priv);
return (obj->dma_addr);
}
#else
/*
 * Architectures without the busdma-backed LinuxKPI DMA path (anything
 * other than i386/amd64/aarch64 per the #if above): DMA addresses are
 * the physical address itself, so mapping is the identity function.
 */
dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
return (phys);
}
#endif
#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
@ -571,6 +589,9 @@ linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
priv = dev->dma_priv;
if (pctrie_is_empty(&priv->ptree))
return;
DMA_PRIV_LOCK(priv);
obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
if (obj == NULL) {
@ -584,6 +605,12 @@ linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
uma_zfree(linux_dma_obj_zone, obj);
}
#else
/*
 * Counterpart to the identity linux_dma_map_phys() above: nothing was
 * allocated or tracked at map time, so unmap is a no-op.
 */
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
}
#endif
int
linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,

View File

@ -67,7 +67,9 @@
#ifndef _BUS_DMA_H_
#define _BUS_DMA_H_
#ifdef _KERNEL
#include <sys/_bus_dma.h>
#endif
/*
* Machine independent interface for mapping physical addresses to peripheral
@ -133,6 +135,7 @@ typedef struct bus_dma_segment {
bus_size_t ds_len; /* length of transfer */
} bus_dma_segment_t;
#ifdef _KERNEL
/*
* A function that returns 1 if the address cannot be accessed by
* a device and 0 if it can be.
@ -302,5 +305,6 @@ BUS_DMAMAP_OP void bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t dmamap, bus_
BUS_DMAMAP_OP void bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t dmamap);
#undef BUS_DMAMAP_OP
#endif /* _KERNEL */
#endif /* _BUS_DMA_H_ */

View File

@ -35,6 +35,18 @@
#include <x86/busdma_impl.h>
/*
 * Ask the tag's implementation whether DMA for the physical range
 * [buf, buf + buflen) is an identity (1:1) mapping of the physical
 * address.
 */
static inline bool
bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
{
	struct bus_dma_tag_common *common;

	common = (struct bus_dma_tag_common *)dmat;
	return (common->impl->id_mapped(dmat, buf, buflen));
}
/*
* Allocate a handle for mapping from kva/uva/physical
* address space into bus device space.

View File

@ -62,6 +62,7 @@ struct bus_dma_impl {
void *lockfuncarg, bus_dma_tag_t *dmat);
int (*tag_destroy)(bus_dma_tag_t dmat);
int (*tag_set_domain)(bus_dma_tag_t);
bool (*id_mapped)(bus_dma_tag_t, vm_paddr_t, bus_size_t);
int (*map_create)(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp);
int (*map_destroy)(bus_dma_tag_t dmat, bus_dmamap_t map);
int (*mem_alloc)(bus_dma_tag_t dmat, void** vaddr, int flags,

View File

@ -365,6 +365,13 @@ dmar_bus_dma_tag_destroy(bus_dma_tag_t dmat1)
return (error);
}
/*
 * id_mapped hook for the DMAR (IOMMU) busdma implementation: device
 * addresses are never reported as a 1:1 mapping of physical memory,
 * so callers always take the full map/unmap path.  Arguments unused.
 */
static bool
dmar_bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
{
return (false);
}
static int
dmar_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
@ -857,6 +864,7 @@ struct bus_dma_impl bus_dma_dmar_impl = {
.tag_create = dmar_bus_dma_tag_create,
.tag_destroy = dmar_bus_dma_tag_destroy,
.tag_set_domain = dmar_bus_dma_tag_set_domain,
.id_mapped = dmar_bus_dma_id_mapped,
.map_create = dmar_bus_dmamap_create,
.map_destroy = dmar_bus_dmamap_destroy,
.mem_alloc = dmar_bus_dmamem_alloc,

View File

@ -141,6 +141,8 @@ static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
vm_offset_t vaddr, vm_paddr_t addr1, vm_paddr_t addr2, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static bool _bus_dmamap_pagesneeded(bus_dma_tag_t dmat, vm_paddr_t buf,
bus_size_t buflen, int *pagesneeded);
static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
pmap_t pmap, void *buf, bus_size_t buflen, int flags);
static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
@ -223,6 +225,15 @@ bounce_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
return (error);
}
/*
 * Return true iff DMA covering [buf, buf + buflen) would be a pure
 * identity mapping, i.e. no part of the transfer needs a bounce page.
 */
static bool
bounce_bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
{

	/* A tag that can never bounce always maps 1:1. */
	if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0)
		return (!_bus_dmamap_pagesneeded(dmat, buf, buflen, NULL));
	return (true);
}
/*
* Update the domain for the tag. We may need to reallocate the zone and
* bounce pages.
@ -501,29 +512,45 @@ bounce_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
dmat->bounce_flags);
}
/*
 * Walk the physical range [buf, buf + buflen) in maxsegsz-bounded steps
 * and determine how many bounce pages the transfer would require.
 *
 * If pagesneeded is NULL the caller only wants a yes/no answer, so bail
 * out true on the first segment that the filter says must bounce.
 * Otherwise the full count is stored through pagesneeded.  Returns true
 * when at least one bounce page is needed.
 */
static bool
_bus_dmamap_pagesneeded(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen,
    int *pagesneeded)
{
	vm_paddr_t paddr;
	bus_size_t seglen;
	int npages;

	npages = 0;
	for (paddr = buf; buflen != 0; paddr += seglen, buflen -= seglen) {
		seglen = MIN(buflen, dmat->common.maxsegsz);
		if (bus_dma_run_filter(&dmat->common, paddr)) {
			/* Bounced segments never cross a page boundary. */
			seglen = MIN(seglen, PAGE_SIZE - (paddr & PAGE_MASK));
			if (pagesneeded == NULL)
				return (true);
			npages++;
		}
	}
	if (pagesneeded != NULL)
		*pagesneeded = npages;
	return (npages != 0);
}
/*
 * Compute (once per map) the number of bounce pages needed to transfer
 * the physical range [buf, buf + buflen), caching the result in
 * map->pagesneeded.  The static nobounce_dmamap never bounces and is
 * left untouched.
 *
 * Fix: the block contained both the superseded inline counting loop and
 * the _bus_dmamap_pagesneeded() call that replaced it; the helper call
 * immediately overwrote the loop's result, leaving the loop (and the
 * curaddr/sgsize locals) as dead code.  The duplicate loop is removed.
 */
static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags)
{

	if (map != &nobounce_dmamap && map->pagesneeded == 0) {
		_bus_dmamap_pagesneeded(dmat, buf, buflen, &map->pagesneeded);
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}
@ -1305,6 +1332,7 @@ struct bus_dma_impl bus_dma_bounce_impl = {
.tag_create = bounce_bus_dma_tag_create,
.tag_destroy = bounce_bus_dma_tag_destroy,
.tag_set_domain = bounce_bus_dma_tag_set_domain,
.id_mapped = bounce_bus_dma_id_mapped,
.map_create = bounce_bus_dmamap_create,
.map_destroy = bounce_bus_dmamap_destroy,
.mem_alloc = bounce_bus_dmamem_alloc,

View File

@ -51,7 +51,6 @@ __FBSDID("$FreeBSD$");
#include <sys/time.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <machine/bus.h>
#include <sys/bus.h>
#include <sys/bus_dma.h>
#include <sys/mtio.h>