Reform the busdma API so that new types may be added without modifying
every architecture's busdma_machdep.c.  This is done by unifying the
bus_dmamap_load_buffer() routines so that they may be called from MI
code.  The MD busdma is then given a chance to do any final processing
in the complete() callback.
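
Roughly, the unified MI-side load path now has the shape sketched below.
This is a hedged reconstruction based on the prototypes visible in the
diff (__bus_dmamap_waitok(), _bus_dmamap_load_buffer(),
_bus_dmamap_complete()); the actual code in the new kern/subr_bus_dma.c
may differ in detail.

/* Sketch only; assumes <sys/param.h>, <sys/memdesc.h>, <machine/bus.h>. */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	bus_dma_segment_t *segs;
	struct memdesc mem;
	int error, nsegs;

	if ((flags & BUS_DMA_NOWAIT) == 0) {
		/* Record enough state for busdma_swi() to retry a deferred load. */
		mem = memdesc_vaddr(buf, buflen);
		__bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);
	}

	nsegs = -1;
	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, kernel_pmap,
	    flags, NULL, &nsegs);
	nsegs++;
	if (error == EINPROGRESS)
		return (error);

	/* Let the MD layer post-process and pick the segment array. */
	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error != 0)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);

	/* ENOMEM only happens with NOWAIT set, so deferral is disabled. */
	return (error == ENOMEM ? error : 0);
}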

The cam changes unify the bus_dmamap_load* handling in cam drivers.
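
On the driver side the converged pattern looks roughly like the sketch
below, condensed from the adv(4)/adw(4) hunks further down; "xxx" stands
in for the per-driver softc and callback names and is not a real driver.

/* Sketch only; assumes <cam/cam.h>, <cam/cam_ccb.h>, <cam/cam_xpt_sim.h>,
 * <machine/bus.h>, and a driver softc with a buffer_dmat tag. */
static void
xxx_start_ccb(struct xxx_softc *sc, struct cam_sim *sim, union ccb *ccb,
    bus_dmamap_t dmamap)
{
	int error;

	/*
	 * One bus_dmamap_load_ccb() call now covers every CAM_DATA_* type
	 * (virtual, physical, S/G, physical S/G, bio) instead of each SIM
	 * open-coding a switch on CAM_DATA_PHYS/CAM_SG_LIST_PHYS.
	 */
	error = bus_dmamap_load_ccb(sc->buffer_dmat, dmamap, ccb,
	    xxx_execute_ccb, ccb, /*flags*/0);
	if (error == EINPROGRESS) {
		/*
		 * Maintain ordering: freeze the SIM queue until the
		 * deferred mapping callback runs.
		 */
		xpt_freeze_simq(sim, 1);
	}
}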

The arm and mips implementations are updated to track virtual
addresses for sync().  Previously this was done in a type-specific
way.  Now it is done generically by recording the list of virtual
addresses in the map.
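
A hedged sketch of that generic tracking, shaped after the arm hunks
below (the real BUS_DMASYNC_PREREAD path also writes back partially
aligned cache-line edges before invalidating, which is omitted here):

/* Sketch only; sync_list and the map fields follow the layout shown in
 * the busdma_machdep hunks below. */
struct sync_list {
	vm_offset_t	vaddr;		/* KVA of a loaded chunk */
	bus_addr_t	busaddr;	/* its physical address */
	bus_size_t	datacount;	/* chunk length */
};

/*
 * Load appends each non-bounced chunk to map->slist[]/map->sync_count;
 * sync() then walks that list instead of re-deriving addresses from the
 * original buffer type (linear, mbuf, uio, ...).
 */
static void
sync_recorded_ranges(bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct sync_list *sl, *end;

	end = &map->slist[map->sync_count];
	for (sl = &map->slist[0]; sl != end; sl++) {
		if (op & BUS_DMASYNC_PREWRITE)
			cpu_dcache_wb_range(sl->vaddr, sl->datacount);
		if (op & BUS_DMASYNC_PREREAD)
			cpu_dcache_inv_range(sl->vaddr, sl->datacount);
	}
}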

Submitted by:	jeff (sponsored by EMC/Isilon)
Reviewed by:	kan (previous version), scottl,
	mjacob (isp(4), no objections for target mode changes)
Discussed with:	ian (arm changes)
Tested by:	marius (sparc64), mips (jmallet), isci(4) on x86 (jharris),
	amd64 (Fabian Keil <freebsd-listen@fabiankeil.de>)
Konstantin Belousov 2013-02-12 16:57:20 +00:00
parent 44c169253d
commit dd0b4fb6d5
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=246713
68 changed files with 3108 additions and 3767 deletions


@ -46,11 +46,11 @@ __FBSDID("$FreeBSD$");
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/memdesc.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
@ -111,6 +111,7 @@ struct bounce_page {
vm_offset_t vaddr; /* kva of bounce buffer */
bus_addr_t busaddr; /* Physical address */
vm_offset_t datavaddr; /* kva of client data */
bus_addr_t dataaddr; /* client physical address */
bus_size_t datacount; /* client data count */
STAILQ_ENTRY(bounce_page) links;
};
@ -119,7 +120,6 @@ struct sync_list {
vm_offset_t vaddr; /* kva of bounce buffer */
bus_addr_t busaddr; /* Physical address */
bus_size_t datacount; /* client data count */
STAILQ_ENTRY(sync_list) slinks;
};
int busdma_swi_pending;
@ -156,15 +156,15 @@ struct bus_dmamap {
int pagesneeded;
int pagesreserved;
bus_dma_tag_t dmat;
void *buf; /* unmapped buffer pointer */
bus_size_t buflen; /* unmapped buffer length */
struct memdesc mem;
pmap_t pmap;
bus_dmamap_callback_t *callback;
void *callback_arg;
int flags;
#define DMAMAP_COHERENT (1 << 0)
STAILQ_ENTRY(bus_dmamap) links;
STAILQ_HEAD(,sync_list) slist;
int sync_count;
struct sync_list slist[];
};
static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
@ -176,11 +176,16 @@ static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
vm_offset_t vaddr, bus_size_t size);
vm_offset_t vaddr, bus_addr_t addr,
bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
static int _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
void *buf, bus_size_t buflen, int flags);
static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
vm_paddr_t buf, bus_size_t buflen, int flags);
static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
int flags);
static busdma_bufalloc_t coherent_allocator; /* Cache of coherent buffers */
static busdma_bufalloc_t standard_allocator; /* Cache of standard buffers */
@ -493,17 +498,18 @@ bus_dma_tag_destroy(bus_dma_tag_t dmat)
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
int mapsize;
int error;
error = 0;
*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
M_NOWAIT | M_ZERO);
mapsize = sizeof(**mapp) + (sizeof(struct sync_list) * dmat->nsegments);
*mapp = (bus_dmamap_t)malloc(mapsize, M_DEVBUF, M_NOWAIT | M_ZERO);
if (*mapp == NULL) {
CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
return (ENOMEM);
}
STAILQ_INIT(&((*mapp)->slist));
(*mapp)->sync_count = 0;
if (dmat->segments == NULL) {
dmat->segments = (bus_dma_segment_t *)malloc(
@ -578,8 +584,7 @@ bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
if (STAILQ_FIRST(&map->bpages) != NULL ||
STAILQ_FIRST(&map->slist) != NULL) {
if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
CTR3(KTR_BUSDMA, "%s: tag %p error %d",
__func__, dmat, EBUSY);
return (EBUSY);
@ -606,6 +611,7 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
struct busdma_bufzone *bufzone;
vm_memattr_t memattr;
int mflags;
int mapsize;
if (flags & BUS_DMA_NOWAIT)
mflags = M_NOWAIT;
@ -614,15 +620,15 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
/* ARM non-snooping caches need a map for the VA cache sync structure */
*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
M_NOWAIT | M_ZERO);
mapsize = sizeof(**mapp) + (sizeof(struct sync_list) * dmat->nsegments);
*mapp = (bus_dmamap_t)malloc(mapsize, M_DEVBUF, M_NOWAIT | M_ZERO);
if (*mapp == NULL) {
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
__func__, dmat, dmat->flags, ENOMEM);
return (ENOMEM);
}
STAILQ_INIT(&((*mapp)->slist));
(*mapp)->sync_count = 0;
if (dmat->segments == NULL) {
dmat->segments = (bus_dma_segment_t *)malloc(
@ -733,7 +739,37 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}
static int
static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
bus_size_t buflen, int flags)
{
bus_addr_t curaddr;
bus_size_t sgsize;
if (map->pagesneeded == 0) {
CTR5(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d"
" map= %p, pagesneeded= %d",
dmat->lowaddr, dmat->boundary, dmat->alignment,
map, map->pagesneeded);
/*
* Count the number of bounce pages
* needed in order to complete this transfer
*/
curaddr = buf;
while (buflen != 0) {
sgsize = MIN(buflen, dmat->maxsegsz);
if (run_filter(dmat, curaddr) != 0) {
sgsize = MIN(sgsize, PAGE_SIZE);
map->pagesneeded++;
}
curaddr += sgsize;
buflen -= sgsize;
}
CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded);
}
}
static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
void *buf, bus_size_t buflen, int flags)
{
@ -754,12 +790,11 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
vendaddr = (vm_offset_t)buf + buflen;
while (vaddr < vendaddr) {
if (__predict_true(map->pmap == pmap_kernel()))
if (__predict_true(map->pmap == kernel_pmap))
paddr = pmap_kextract(vaddr);
else
paddr = pmap_extract(map->pmap, vaddr);
if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
run_filter(dmat, paddr) != 0) {
if (run_filter(dmat, paddr) != 0) {
map->pagesneeded++;
}
vaddr += (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK));
@ -767,72 +802,190 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
}
CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded);
}
}
static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{
/* Reserve Necessary Bounce Pages */
if (map->pagesneeded != 0) {
mtx_lock(&bounce_lock);
if (flags & BUS_DMA_NOWAIT) {
if (reserve_bounce_pages(dmat, map, 0) != 0) {
map->pagesneeded = 0;
mtx_unlock(&bounce_lock);
return (ENOMEM);
}
} else {
if (reserve_bounce_pages(dmat, map, 1) != 0) {
/* Queue us for resources */
map->dmat = dmat;
map->buf = buf;
map->buflen = buflen;
STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
map, links);
mtx_unlock(&bounce_lock);
return (EINPROGRESS);
}
mtx_lock(&bounce_lock);
if (flags & BUS_DMA_NOWAIT) {
if (reserve_bounce_pages(dmat, map, 0) != 0) {
map->pagesneeded = 0;
mtx_unlock(&bounce_lock);
return (ENOMEM);
}
} else {
if (reserve_bounce_pages(dmat, map, 1) != 0) {
/* Queue us for resources */
STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
mtx_unlock(&bounce_lock);
return (EINPROGRESS);
}
mtx_unlock(&bounce_lock);
}
mtx_unlock(&bounce_lock);
return (0);
}
/*
* Utility function to load a linear buffer. lastaddrp holds state
* between invocations (for multiple-buffer loads). segp contains
* the starting segment on entrace, and the ending segment on exit.
* first indicates if this is the first invocation of this function.
* Add a single contiguous physical range to the segment list.
*/
static __inline int
static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
bus_addr_t baddr, bmask;
int seg;
/*
* Make sure we don't cross any boundaries.
*/
bmask = ~(dmat->boundary - 1);
if (dmat->boundary > 0) {
baddr = (curaddr + dmat->boundary) & bmask;
if (sgsize > (baddr - curaddr))
sgsize = (baddr - curaddr);
}
if (dmat->ranges) {
struct arm32_dma_range *dr;
dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
curaddr);
if (dr == NULL) {
_bus_dmamap_unload(dmat, map);
return (EINVAL);
}
/*
* In a valid DMA range. Translate the physical
* memory address to an address in the DMA window.
*/
curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;
}
/*
* Insert chunk into a segment, coalescing with
* previous segment if possible.
*/
seg = *segp;
if (seg == -1) {
seg = 0;
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
} else {
if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
(segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
(dmat->boundary == 0 ||
(segs[seg].ds_addr & bmask) == (curaddr & bmask)))
segs[seg].ds_len += sgsize;
else {
if (++seg >= dmat->nsegments)
return (0);
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
}
}
*segp = seg;
return (sgsize);
}
/*
* Utility function to load a physical buffer. segp contains
* the starting segment on entrace, and the ending segment on exit.
*/
int
_bus_dmamap_load_phys(bus_dma_tag_t dmat,
bus_dmamap_t map,
vm_paddr_t buf, bus_size_t buflen,
int flags,
bus_dma_segment_t *segs,
int *segp)
{
bus_addr_t curaddr;
bus_size_t sgsize;
int error;
if (segs == NULL)
segs = dmat->segments;
if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
if (map->pagesneeded != 0) {
error = _bus_dmamap_reserve_pages(dmat, map, flags);
if (error)
return (error);
}
}
while (buflen > 0) {
curaddr = buf;
sgsize = MIN(buflen, dmat->maxsegsz);
if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
sgsize = MIN(sgsize, PAGE_SIZE);
curaddr = add_bounce_page(dmat, map, 0, curaddr,
sgsize);
}
sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
segp);
if (sgsize == 0)
break;
buf += sgsize;
buflen -= sgsize;
}
/*
* Did we fit?
*/
if (buflen != 0) {
_bus_dmamap_unload(dmat, map);
return (EFBIG); /* XXX better return value here? */
}
return (0);
}
/*
* Utility function to load a linear buffer. segp contains
* the starting segment on entrace, and the ending segment on exit.
*/
int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
bus_dmamap_t map,
void *buf, bus_size_t buflen,
pmap_t pmap,
int flags,
bus_addr_t *lastaddrp,
bus_dma_segment_t *segs,
int *segp,
int first)
int *segp)
{
bus_size_t sgsize;
bus_addr_t curaddr, lastaddr, baddr, bmask;
bus_addr_t curaddr;
vm_offset_t vaddr;
struct sync_list *sl;
int seg, error;
int error;
if (segs == NULL)
segs = dmat->segments;
if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
error = _bus_dmamap_count_pages(dmat, map, buf, buflen, flags);
if (error)
return (error);
_bus_dmamap_count_pages(dmat, map, buf, buflen, flags);
if (map->pagesneeded != 0) {
error = _bus_dmamap_reserve_pages(dmat, map, flags);
if (error)
return (error);
}
}
sl = NULL;
vaddr = (vm_offset_t)buf;
lastaddr = *lastaddrp;
bmask = ~(dmat->boundary - 1);
map->pmap = pmap;
for (seg = *segp; buflen > 0 ; ) {
while (buflen > 0) {
/*
* Get the physical address for this segment.
*/
if (__predict_true(map->pmap == pmap_kernel()))
if (__predict_true(map->pmap == kernel_pmap))
curaddr = pmap_kextract(vaddr);
else
curaddr = pmap_extract(map->pmap, vaddr);
@ -846,259 +999,63 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dmat,
if (buflen < sgsize)
sgsize = buflen;
/*
* Make sure we don't cross any boundaries.
*/
if (dmat->boundary > 0) {
baddr = (curaddr + dmat->boundary) & bmask;
if (sgsize > (baddr - curaddr))
sgsize = (baddr - curaddr);
}
if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
curaddr = add_bounce_page(dmat, map, vaddr, sgsize);
curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
sgsize);
} else {
/* add_sync_list(dmat, map, vaddr, sgsize, cflag); */
sl = (struct sync_list *)malloc(sizeof(struct sync_list),
M_DEVBUF, M_NOWAIT | M_ZERO);
if (sl == NULL)
goto cleanup;
STAILQ_INSERT_TAIL(&(map->slist), sl, slinks);
sl->vaddr = vaddr;
sl->datacount = sgsize;
sl->busaddr = curaddr;
sl = &map->slist[map->sync_count - 1];
if (map->sync_count == 0 ||
vaddr != sl->vaddr + sl->datacount) {
if (++map->sync_count > dmat->nsegments)
goto cleanup;
sl++;
sl->vaddr = vaddr;
sl->datacount = sgsize;
sl->busaddr = curaddr;
} else
sl->datacount += sgsize;
}
if (dmat->ranges) {
struct arm32_dma_range *dr;
dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
curaddr);
if (dr == NULL) {
_bus_dmamap_unload(dmat, map);
return (EINVAL);
}
/*
* In a valid DMA range. Translate the physical
* memory address to an address in the DMA window.
*/
curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;
}
/*
* Insert chunk into a segment, coalescing with
* previous segment if possible.
*/
if (first) {
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
first = 0;
} else {
if (curaddr == lastaddr &&
(segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
(dmat->boundary == 0 ||
(segs[seg].ds_addr & bmask) == (curaddr & bmask)))
segs[seg].ds_len += sgsize;
else {
if (++seg >= dmat->nsegments)
break;
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
}
}
lastaddr = curaddr + sgsize;
sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
segp);
if (sgsize == 0)
break;
vaddr += sgsize;
buflen -= sgsize;
}
*segp = seg;
*lastaddrp = lastaddr;
cleanup:
/*
* Did we fit?
*/
if (buflen != 0) {
_bus_dmamap_unload(dmat, map);
return(EFBIG); /* XXX better return value here? */
return (EFBIG); /* XXX better return value here? */
}
return (0);
}
/*
* Map the buffer buf into bus space using the dmamap map.
*/
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
bus_size_t buflen, bus_dmamap_callback_t *callback,
void *callback_arg, int flags)
{
bus_addr_t lastaddr = 0;
int error, nsegs = 0;
flags |= BUS_DMA_WAITOK;
void
__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
struct memdesc *mem, bus_dmamap_callback_t *callback,
void *callback_arg)
{
map->mem = *mem;
map->dmat = dmat;
map->callback = callback;
map->callback_arg = callback_arg;
map->pmap = kernel_pmap;
error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, flags,
&lastaddr, dmat->segments, &nsegs, 1);
CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
__func__, dmat, dmat->flags, error, nsegs + 1);
if (error == EINPROGRESS) {
return (error);
}
if (error)
(*callback)(callback_arg, dmat->segments, 0, error);
else
(*callback)(callback_arg, dmat->segments, nsegs + 1, 0);
/*
* Return ENOMEM to the caller so that it can pass it up the stack.
* This error only happens when NOWAIT is set, so deferal is disabled.
*/
if (error == ENOMEM)
return (error);
return (0);
}
/*
* Like _bus_dmamap_load(), but for mbufs.
*/
static __inline int
_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
int flags)
bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
bus_dma_segment_t *segs, int nsegs, int error)
{
int error;
M_ASSERTPKTHDR(m0);
map->pmap = kernel_pmap;
flags |= BUS_DMA_NOWAIT;
*nsegs = 0;
error = 0;
if (m0->m_pkthdr.len <= dmat->maxsize) {
int first = 1;
bus_addr_t lastaddr = 0;
struct mbuf *m;
for (m = m0; m != NULL && error == 0; m = m->m_next) {
if (m->m_len > 0) {
error = _bus_dmamap_load_buffer(dmat, map,
m->m_data, m->m_len,
flags, &lastaddr,
segs, nsegs, first);
first = 0;
}
}
} else {
error = EINVAL;
}
/* XXX FIXME: Having to increment nsegs is really annoying */
++*nsegs;
CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
__func__, dmat, dmat->flags, error, *nsegs);
return (error);
}
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
struct mbuf *m0,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags)
{
int nsegs, error;
error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, dmat->segments, &nsegs,
flags);
if (error) {
/* force "no valid mappings" in callback */
(*callback)(callback_arg, dmat->segments, 0, 0, error);
} else {
(*callback)(callback_arg, dmat->segments,
nsegs, m0->m_pkthdr.len, error);
}
CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
__func__, dmat, dmat->flags, error, nsegs);
return (error);
}
int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
int flags)
{
return (_bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags));
}
/*
* Like _bus_dmamap_load(), but for uios.
*/
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
struct uio *uio,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags)
{
bus_addr_t lastaddr;
int nsegs, error, first, i;
bus_size_t resid;
struct iovec *iov;
flags |= BUS_DMA_NOWAIT;
resid = uio->uio_resid;
iov = uio->uio_iov;
if (uio->uio_segflg == UIO_USERSPACE) {
KASSERT(uio->uio_td != NULL,
("bus_dmamap_load_uio: USERSPACE but no proc"));
map->pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
} else
map->pmap = kernel_pmap;
nsegs = 0;
error = 0;
first = 1;
lastaddr = (bus_addr_t) 0;
for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
/*
* Now at the first iovec to load. Load each iovec
* until we have exhausted the residual count.
*/
bus_size_t minlen =
resid < iov[i].iov_len ? resid : iov[i].iov_len;
caddr_t addr = (caddr_t) iov[i].iov_base;
if (minlen > 0) {
error = _bus_dmamap_load_buffer(dmat, map,
addr, minlen, flags, &lastaddr,
dmat->segments, &nsegs, first);
first = 0;
resid -= minlen;
}
}
if (error) {
/* force "no valid mappings" in callback */
(*callback)(callback_arg, dmat->segments, 0, 0, error);
} else {
(*callback)(callback_arg, dmat->segments,
nsegs+1, uio->uio_resid, error);
}
CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
__func__, dmat, dmat->flags, error, nsegs + 1);
return (error);
if (segs == NULL)
segs = dmat->segments;
return (segs);
}
/*
@ -1109,12 +1066,6 @@ _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
struct bounce_page *bpage;
struct bounce_zone *bz;
struct sync_list *sl;
while ((sl = STAILQ_FIRST(&map->slist)) != NULL) {
STAILQ_REMOVE_HEAD(&map->slist, slinks);
free(sl, M_DEVBUF);
}
if ((bz = dmat->bounce_zone) != NULL) {
while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
@ -1128,6 +1079,7 @@ _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
map->pagesreserved = 0;
map->pagesneeded = 0;
}
map->sync_count = 0;
}
#ifdef notyetbounceuser
@ -1187,15 +1139,13 @@ void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
struct bounce_page *bpage;
struct sync_list *sl;
struct sync_list *sl, *end;
bus_size_t len, unalign;
vm_offset_t buf, ebuf;
#ifdef FIX_DMAP_BUS_DMASYNC_POSTREAD
vm_offset_t bbuf;
char _tmp_cl[arm_dcache_align], _tmp_clend[arm_dcache_align];
#endif
int listcount = 0;
/* if buffer was from user space, it it possible that this
* is not the same vm map. The fix is to map each page in
* the buffer into the current address space (KVM) and then
@ -1215,9 +1165,14 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
if (op & BUS_DMASYNC_PREWRITE) {
while (bpage != NULL) {
bcopy((void *)bpage->datavaddr,
(void *)bpage->vaddr,
bpage->datacount);
if (bpage->datavaddr != 0)
bcopy((void *)bpage->datavaddr,
(void *)bpage->vaddr,
bpage->datacount);
else
physcopyout(bpage->dataaddr,
(void *)bpage->vaddr,
bpage->datacount);
cpu_dcache_wb_range((vm_offset_t)bpage->vaddr,
bpage->datacount);
l2cache_wb_range((vm_offset_t)bpage->vaddr,
@ -1254,9 +1209,14 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
arm_dcache_align;
cpu_dcache_inv_range(startv, len);
l2cache_inv_range(startv, startp, len);
bcopy((void *)bpage->vaddr,
(void *)bpage->datavaddr,
bpage->datacount);
if (bpage->datavaddr != 0)
bcopy((void *)bpage->vaddr,
(void *)bpage->datavaddr,
bpage->datacount);
else
physcopyin((void *)bpage->vaddr,
bpage->dataaddr,
bpage->datacount);
bpage = STAILQ_NEXT(bpage, links);
}
dmat->bounce_zone->total_bounced++;
@ -1265,29 +1225,26 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
if (map->flags & DMAMAP_COHERENT)
return;
sl = STAILQ_FIRST(&map->slist);
while (sl) {
listcount++;
sl = STAILQ_NEXT(sl, slinks);
}
if ((sl = STAILQ_FIRST(&map->slist)) != NULL) {
if (map->sync_count != 0) {
/* ARM caches are not self-snooping for dma */
sl = &map->slist[0];
end = &map->slist[map->sync_count];
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
"performing sync", __func__, dmat, dmat->flags, op);
switch (op) {
case BUS_DMASYNC_PREWRITE:
while (sl != NULL) {
while (sl != end) {
cpu_dcache_wb_range(sl->vaddr, sl->datacount);
l2cache_wb_range(sl->vaddr, sl->busaddr,
sl->datacount);
sl = STAILQ_NEXT(sl, slinks);
sl++;
}
break;
case BUS_DMASYNC_PREREAD:
while (sl != NULL) {
while (sl != end) {
/* write back the unaligned portions */
vm_paddr_t physaddr = sl->busaddr, ephysaddr;
buf = sl->vaddr;
@ -1327,16 +1284,16 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
cpu_dcache_inv_range(buf, len);
l2cache_inv_range(buf, physaddr, len);
}
sl = STAILQ_NEXT(sl, slinks);
sl++;
}
break;
case BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD:
while (sl != NULL) {
while (sl != end) {
cpu_dcache_wbinv_range(sl->vaddr, sl->datacount);
l2cache_wbinv_range(sl->vaddr,
sl->busaddr, sl->datacount);
sl = STAILQ_NEXT(sl, slinks);
sl++;
}
break;
@ -1344,7 +1301,7 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
case BUS_DMASYNC_POSTREAD:
if (!pmap_dmap_iscurrent(map->pmap))
panic("_bus_dmamap_sync: wrong user map. apply fix");
while (sl != NULL) {
while (sl != end) {
/* write back the unaligned portions */
vm_paddr_t physaddr;
register_t s = 0;
@ -1391,7 +1348,7 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
intr_restore(s);
}
sl = STAILQ_NEXT(sl, slinks);
sl++;
}
break;
#endif /* FIX_DMAP_BUS_DMASYNC_POSTREAD */
@ -1559,7 +1516,7 @@ reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
bus_size_t size)
bus_addr_t addr, bus_size_t size)
{
struct bounce_zone *bz;
struct bounce_page *bpage;
@ -1593,6 +1550,7 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
bpage->busaddr |= vaddr & PAGE_MASK;
}
bpage->datavaddr = vaddr;
bpage->dataaddr = addr;
bpage->datacount = size;
STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
return (bpage->busaddr);
@ -1646,8 +1604,8 @@ busdma_swi(void)
mtx_unlock(&bounce_lock);
dmat = map->dmat;
(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
map->callback, map->callback_arg, /*flags*/0);
bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
map->callback_arg, BUS_DMA_WAITOK);
(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
mtx_lock(&bounce_lock);
}


@ -61,12 +61,12 @@ __FBSDID("$FreeBSD$");
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <vm/uma.h>
#include <vm/vm.h>
@ -125,10 +125,17 @@ struct bounce_page {
vm_offset_t vaddr_nocache; /* kva of bounce buffer uncached */
bus_addr_t busaddr; /* Physical address */
vm_offset_t datavaddr; /* kva of client data */
bus_addr_t dataaddr; /* client physical address */
bus_size_t datacount; /* client data count */
STAILQ_ENTRY(bounce_page) links;
};
struct sync_list {
vm_offset_t vaddr; /* kva of bounce buffer */
bus_addr_t busaddr; /* Physical address */
bus_size_t datacount; /* client data count */
};
int busdma_swi_pending;
struct bounce_zone {
@ -158,24 +165,21 @@ static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
"Total bounce pages");
#define DMAMAP_LINEAR 0x1
#define DMAMAP_MBUF 0x2
#define DMAMAP_UIO 0x4
#define DMAMAP_CACHE_ALIGNED 0x10
#define DMAMAP_TYPE_MASK (DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)
#define DMAMAP_COHERENT 0x8
#define DMAMAP_CACHE_ALIGNED 0x10
struct bus_dmamap {
struct bp_list bpages;
int pagesneeded;
int pagesreserved;
bus_dma_tag_t dmat;
struct memdesc mem;
int flags;
void *buffer;
int len;
STAILQ_ENTRY(bus_dmamap) links;
bus_dmamap_callback_t *callback;
void *callback_arg;
int sync_count;
struct sync_list *slist;
};
static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
@ -191,7 +195,8 @@ static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
vm_offset_t vaddr, bus_size_t size);
vm_offset_t vaddr, bus_addr_t addr,
bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
/* Default tag, as most drivers provide no parent tag. */
@ -564,13 +569,20 @@ bus_dma_tag_destroy(bus_dma_tag_t dmat)
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
struct sync_list *slist;
bus_dmamap_t map;
int error = 0;
slist = malloc(sizeof(*slist) * dmat->nsegments, M_DEVBUF, M_NOWAIT);
if (slist == NULL)
return (ENOMEM);
map = uma_zalloc_arg(dmamap_zone, dmat, M_NOWAIT);
*mapp = map;
if (map == NULL)
if (map == NULL) {
free(slist, M_DEVBUF);
return (ENOMEM);
}
/*
* If the tag's segments haven't been allocated yet we need to do it
@ -580,6 +592,7 @@ bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
dmat->segments = malloc(dmat->nsegments *
sizeof(*dmat->segments), M_DEVBUF, M_NOWAIT);
if (dmat->segments == NULL) {
free(slist, M_DEVBUF);
uma_zfree(dmamap_zone, map);
*mapp = NULL;
return (ENOMEM);
@ -599,6 +612,7 @@ bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
if (dmat->bounce_zone == NULL) {
if ((error = alloc_bounce_zone(dmat)) != 0) {
free(slist, M_DEVBUF);
uma_zfree(dmamap_zone, map);
*mapp = NULL;
return (error);
@ -633,6 +647,8 @@ bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
}
bz->map_count++;
}
map->sync_count = 0;
map->slist = slist;
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
__func__, dmat, dmat->flags, error);
@ -647,11 +663,12 @@ int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
if (STAILQ_FIRST(&map->bpages) != NULL) {
if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
CTR3(KTR_BUSDMA, "%s: tag %p error %d",
__func__, dmat, EBUSY);
return (EBUSY);
}
free(map->slist, M_DEVBUF);
uma_zfree(dmamap_zone, map);
if (dmat->bounce_zone)
dmat->bounce_zone->map_count--;
@ -668,6 +685,7 @@ int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddrp, int flags,
bus_dmamap_t *mapp)
{
struct sync_list *slist;
void * vaddr;
struct busdma_bufzone *bufzone;
busdma_bufalloc_t ba;
@ -679,7 +697,6 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddrp, int flags,
mflags = M_NOWAIT;
else
mflags = M_WAITOK;
/*
* If the tag's segments haven't been allocated yet we need to do it
* now, because we can't sleep for resources at map load time.
@ -688,10 +705,14 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddrp, int flags,
dmat->segments = malloc(dmat->nsegments *
sizeof(*dmat->segments), M_DEVBUF, mflags);
map = uma_zalloc_arg(dmamap_zone, dmat, mflags);
if (map == NULL)
slist = malloc(sizeof(*slist) * dmat->nsegments, M_DEVBUF, M_NOWAIT);
if (slist == NULL)
return (ENOMEM);
map = uma_zalloc_arg(dmamap_zone, dmat, mflags);
if (map == NULL) {
free(slist, M_DEVBUF);
return (ENOMEM);
}
if (flags & BUS_DMA_COHERENT) {
memattr = VM_MEMATTR_UNCACHEABLE;
ba = coherent_allocator;
@ -738,12 +759,14 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddrp, int flags,
mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary,
memattr);
}
if (vaddr == NULL) {
free(slist, M_DEVBUF);
uma_zfree(dmamap_zone, map);
map = NULL;
} else {
map->slist = slist;
map->sync_count = 0;
}
*vaddrp = vaddr;
*mapp = map;
@ -762,10 +785,11 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
if (map->flags & DMAMAP_COHERENT)
ba = coherent_allocator;
else
else
ba = standard_allocator;
uma_zfree(dmamap_zone, map);
uma_zfree(dmamap_zone, map);
free(map->slist, M_DEVBUF);
/* Be careful not to access map from here on. */
bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);
@ -777,7 +801,37 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
kmem_free(kernel_map, (vm_offset_t)vaddr, dmat->maxsize);
}
static int
static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
bus_size_t buflen, int flags)
{
bus_addr_t curaddr;
bus_size_t sgsize;
if ((map->pagesneeded == 0)) {
CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
dmat->lowaddr, dmat->boundary, dmat->alignment);
CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
map, map->pagesneeded);
/*
* Count the number of bounce pages
* needed in order to complete this transfer
*/
curaddr = buf;
while (buflen != 0) {
sgsize = MIN(buflen, dmat->maxsegsz);
if (run_filter(dmat, curaddr) != 0) {
sgsize = MIN(sgsize, PAGE_SIZE);
map->pagesneeded++;
}
curaddr += sgsize;
buflen -= sgsize;
}
CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
}
}
static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
void *buf, bus_size_t buflen, int flags)
{
@ -798,75 +852,183 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
vendaddr = (vm_offset_t)buf + buflen;
while (vaddr < vendaddr) {
if (__predict_true(pmap == pmap_kernel()))
if (__predict_true(pmap == kernel_pmap))
paddr = pmap_kextract(vaddr);
else
paddr = pmap_extract(pmap, vaddr);
if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
run_filter(dmat, paddr) != 0)
if (run_filter(dmat, paddr) != 0)
map->pagesneeded++;
vaddr += PAGE_SIZE;
}
CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
}
}
static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{
/* Reserve Necessary Bounce Pages */
if (map->pagesneeded != 0) {
mtx_lock(&bounce_lock);
if (flags & BUS_DMA_NOWAIT) {
if (reserve_bounce_pages(dmat, map, 0) != 0) {
mtx_unlock(&bounce_lock);
return (ENOMEM);
}
} else {
if (reserve_bounce_pages(dmat, map, 1) != 0) {
/* Queue us for resources */
STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
map, links);
mtx_unlock(&bounce_lock);
return (EINPROGRESS);
}
mtx_lock(&bounce_lock);
if (flags & BUS_DMA_NOWAIT) {
if (reserve_bounce_pages(dmat, map, 0) != 0) {
mtx_unlock(&bounce_lock);
return (ENOMEM);
}
} else {
if (reserve_bounce_pages(dmat, map, 1) != 0) {
/* Queue us for resources */
STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
mtx_unlock(&bounce_lock);
return (EINPROGRESS);
}
mtx_unlock(&bounce_lock);
}
mtx_unlock(&bounce_lock);
return (0);
}
/*
* Utility function to load a linear buffer. lastaddrp holds state
* between invocations (for multiple-buffer loads). segp contains
* the starting segment on entrance, and the ending segment on exit.
* first indicates if this is the first invocation of this function.
* Add a single contiguous physical range to the segment list.
*/
static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
int flags, vm_offset_t *lastaddrp, int *segp)
static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
bus_addr_t baddr, bmask;
int seg;
/*
* Make sure we don't cross any boundaries.
*/
bmask = ~(dmat->boundary - 1);
if (dmat->boundary > 0) {
baddr = (curaddr + dmat->boundary) & bmask;
if (sgsize > (baddr - curaddr))
sgsize = (baddr - curaddr);
}
if (dmat->ranges) {
struct arm32_dma_range *dr;
dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
curaddr);
if (dr == NULL)
return (EINVAL);
/*
* In a valid DMA range. Translate the physical
* memory address to an address in the DMA window.
*/
curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;
}
seg = *segp;
/*
* Insert chunk into a segment, coalescing with
* the previous segment if possible.
*/
if (seg >= 0 &&
curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
(segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
(dmat->boundary == 0 ||
(segs[seg].ds_addr & bmask) == (curaddr & bmask))) {
segs[seg].ds_len += sgsize;
} else {
if (++seg >= dmat->nsegments)
return (EFBIG);
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
}
*segp = seg;
return (0);
}
/*
* Utility function to load a physical buffer. segp contains
* the starting segment on entrace, and the ending segment on exit.
*/
int
_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp)
{
bus_size_t sgsize;
bus_addr_t curaddr, lastaddr, baddr, bmask;
vm_offset_t vaddr = (vm_offset_t)buf;
int seg;
int error = 0;
bus_addr_t curaddr;
int error;
lastaddr = *lastaddrp;
bmask = ~(dmat->boundary - 1);
if (segs == NULL)
segs = dmat->segments;
if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
error = _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen,
flags);
if (error)
return (error);
_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
if (map->pagesneeded != 0) {
error = _bus_dmamap_reserve_pages(dmat, map, flags);
if (error)
return (error);
}
}
while (buflen > 0) {
curaddr = buf;
sgsize = MIN(buflen, dmat->maxsegsz);
if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
sgsize = MIN(sgsize, PAGE_SIZE);
curaddr = add_bounce_page(dmat, map, 0, curaddr,
sgsize);
}
sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
segp);
if (sgsize == 0)
break;
buf += sgsize;
buflen -= sgsize;
}
/*
* Did we fit?
*/
if (buflen != 0) {
_bus_dmamap_unload(dmat, map);
return (EFBIG); /* XXX better return value here? */
}
return (0);
}
/*
* Utility function to load a linear buffer. segp contains
* the starting segment on entrance, and the ending segment on exit.
*/
int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
bus_size_t buflen, struct pmap *pmap, int flags, bus_dma_segment_t *segs,
int *segp)
{
bus_size_t sgsize;
bus_addr_t curaddr;
struct sync_list *sl;
vm_offset_t vaddr = (vm_offset_t)buf;
int error = 0;
if (segs == NULL)
segs = dmat->segments;
if ((flags & BUS_DMA_LOAD_MBUF) != 0)
map->flags |= DMAMAP_CACHE_ALIGNED;
if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
if (map->pagesneeded != 0) {
error = _bus_dmamap_reserve_pages(dmat, map, flags);
if (error)
return (error);
}
}
CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
"alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);
for (seg = *segp; buflen > 0 ; ) {
while (buflen > 0) {
/*
* Get the physical address for this segment.
*/
if (__predict_true(pmap == pmap_kernel())) {
if (__predict_true(pmap == kernel_pmap)) {
curaddr = pmap_kextract(vaddr);
} else {
curaddr = pmap_extract(pmap, vaddr);
@ -882,260 +1044,63 @@ bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
if (buflen < sgsize)
sgsize = buflen;
/*
* Make sure we don't cross any boundaries.
*/
if (dmat->boundary > 0) {
baddr = (curaddr + dmat->boundary) & bmask;
if (sgsize > (baddr - curaddr))
sgsize = (baddr - curaddr);
}
if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
map->pagesneeded != 0 && run_filter(dmat, curaddr))
curaddr = add_bounce_page(dmat, map, vaddr, sgsize);
if (dmat->ranges) {
struct arm32_dma_range *dr;
dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
curaddr);
if (dr == NULL)
return (EINVAL);
/*
* In a valid DMA range. Translate the physical
* memory address to an address in the DMA window.
*/
curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;
}
/*
* Insert chunk into a segment, coalescing with
* the previous segment if possible.
*/
if (seg >= 0 && curaddr == lastaddr &&
(segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
(dmat->boundary == 0 ||
(segs[seg].ds_addr & bmask) ==
(curaddr & bmask))) {
segs[seg].ds_len += sgsize;
goto segdone;
map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
sgsize);
} else {
if (++seg >= dmat->nsegments)
break;
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
sl = &map->slist[map->sync_count - 1];
if (map->sync_count == 0 ||
vaddr != sl->vaddr + sl->datacount) {
if (++map->sync_count > dmat->nsegments)
goto cleanup;
sl++;
sl->vaddr = vaddr;
sl->datacount = sgsize;
sl->busaddr = curaddr;
} else
sl->datacount += sgsize;
}
if (error)
sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
segp);
if (sgsize == 0)
break;
segdone:
lastaddr = curaddr + sgsize;
vaddr += sgsize;
buflen -= sgsize;
}
*segp = seg;
*lastaddrp = lastaddr;
cleanup:
/*
* Did we fit?
*/
if (buflen != 0)
error = EFBIG; /* XXX better return value here? */
return (error);
if (buflen != 0) {
_bus_dmamap_unload(dmat, map);
return (EFBIG); /* XXX better return value here? */
}
return (0);
}
/*
* Map the buffer buf into bus space using the dmamap map.
*/
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
bus_size_t buflen, bus_dmamap_callback_t *callback,
void *callback_arg, int flags)
void
__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
struct memdesc *mem, bus_dmamap_callback_t *callback,
void *callback_arg)
{
vm_offset_t lastaddr = 0;
int error, nsegs = -1;
KASSERT(dmat != NULL, ("dmatag is NULL"));
KASSERT(map != NULL, ("dmamap is NULL"));
map->mem = *mem;
map->callback = callback;
map->callback_arg = callback_arg;
map->flags &= ~DMAMAP_TYPE_MASK;
map->flags |= DMAMAP_LINEAR;
map->buffer = buf;
map->len = buflen;
error = bus_dmamap_load_buffer(dmat,
dmat->segments, map, buf, buflen, kernel_pmap,
flags, &lastaddr, &nsegs);
if (error == EINPROGRESS)
return (error);
if (error)
(*callback)(callback_arg, NULL, 0, error);
else
(*callback)(callback_arg, dmat->segments, nsegs + 1, error);
CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
__func__, dmat, dmat->flags, nsegs + 1, error);
return (error);
}
/*
* Like bus_dmamap_load(), but for mbufs.
*
* Note that the manpage states that BUS_DMA_NOWAIT is implied for mbufs.
*
* We know that the way the system allocates and uses mbufs implies that we can
* treat them as DMAMAP_CACHE_ALIGNED in terms of handling partial cache line
* flushes. Even though the flush may reference the data area within the mbuf
* that isn't aligned to a cache line, we know the overall mbuf itself is
* properly aligned, and we know that the CPU will not touch the header fields
* before the data area while the DMA is in progress.
*/
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags)
bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
bus_dma_segment_t *segs, int nsegs, int error)
{
int nsegs = -1, error = 0;
M_ASSERTPKTHDR(m0);
flags |= BUS_DMA_NOWAIT;
map->flags &= ~DMAMAP_TYPE_MASK;
map->flags |= DMAMAP_MBUF | DMAMAP_CACHE_ALIGNED;
map->buffer = m0;
map->len = 0;
if (m0->m_pkthdr.len <= dmat->maxsize) {
vm_offset_t lastaddr = 0;
struct mbuf *m;
for (m = m0; m != NULL && error == 0; m = m->m_next) {
if (m->m_len > 0) {
error = bus_dmamap_load_buffer(dmat,
dmat->segments, map, m->m_data, m->m_len,
pmap_kernel(), flags, &lastaddr, &nsegs);
map->len += m->m_len;
}
}
} else {
error = EINVAL;
}
if (error) {
/*
* force "no valid mappings" on error in callback.
*/
(*callback)(callback_arg, NULL, 0, 0, error);
} else {
(*callback)(callback_arg, dmat->segments, nsegs + 1,
m0->m_pkthdr.len, error);
}
CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
__func__, dmat, dmat->flags, error, nsegs + 1);
return (error);
}
int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
int flags)
{
int error = 0;
M_ASSERTPKTHDR(m0);
flags |= BUS_DMA_NOWAIT;
*nsegs = -1;
map->flags &= ~DMAMAP_TYPE_MASK;
map->flags |= DMAMAP_MBUF | DMAMAP_CACHE_ALIGNED;
map->buffer = m0;
map->len = 0;
if (m0->m_pkthdr.len <= dmat->maxsize) {
vm_offset_t lastaddr = 0;
struct mbuf *m;
for (m = m0; m != NULL && error == 0; m = m->m_next) {
if (m->m_len > 0) {
error = bus_dmamap_load_buffer(dmat, segs, map,
m->m_data, m->m_len,
pmap_kernel(), flags, &lastaddr,
nsegs);
map->len += m->m_len;
}
}
} else {
error = EINVAL;
}
/* XXX FIXME: Having to increment nsegs is really annoying */
++*nsegs;
CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
__func__, dmat, dmat->flags, error, *nsegs);
return (error);
}
/*
* Like bus_dmamap_load(), but for uios.
*/
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags)
{
vm_offset_t lastaddr = 0;
int nsegs, i, error;
bus_size_t resid;
struct iovec *iov;
struct pmap *pmap;
resid = uio->uio_resid;
iov = uio->uio_iov;
map->flags &= ~DMAMAP_TYPE_MASK;
map->flags |= DMAMAP_UIO;
map->buffer = uio;
map->len = 0;
if (uio->uio_segflg == UIO_USERSPACE) {
KASSERT(uio->uio_td != NULL,
("bus_dmamap_load_uio: USERSPACE but no proc"));
pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
} else
pmap = kernel_pmap;
error = 0;
nsegs = -1;
for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
/*
* Now at the first iovec to load. Load each iovec
* until we have exhausted the residual count.
*/
bus_size_t minlen =
resid < iov[i].iov_len ? resid : iov[i].iov_len;
caddr_t addr = (caddr_t) iov[i].iov_base;
if (minlen > 0) {
error = bus_dmamap_load_buffer(dmat, dmat->segments,
map, addr, minlen, pmap, flags, &lastaddr, &nsegs);
map->len += minlen;
resid -= minlen;
}
}
if (error) {
/*
* force "no valid mappings" on error in callback.
*/
(*callback)(callback_arg, NULL, 0, 0, error);
} else {
(*callback)(callback_arg, dmat->segments, nsegs+1,
uio->uio_resid, error);
}
CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
__func__, dmat, dmat->flags, error, nsegs + 1);
return (error);
if (segs == NULL)
segs = dmat->segments;
return (segs);
}
/*
@ -1146,24 +1111,25 @@ _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
struct bounce_page *bpage;
map->flags &= ~DMAMAP_TYPE_MASK;
while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
STAILQ_REMOVE_HEAD(&map->bpages, links);
free_bounce_page(dmat, bpage);
}
map->sync_count = 0;
return;
}
static void
bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op, int bufaligned)
bus_dmamap_sync_buf(vm_offset_t buf, int len, bus_dmasync_op_t op,
int bufaligned)
{
char _tmp_cl[arm_dcache_align], _tmp_clend[arm_dcache_align];
register_t s;
int partial;
if ((op & BUS_DMASYNC_PREWRITE) && !(op & BUS_DMASYNC_PREREAD)) {
cpu_dcache_wb_range((vm_offset_t)buf, len);
cpu_l2cache_wb_range((vm_offset_t)buf, len);
cpu_dcache_wb_range(buf, len);
cpu_l2cache_wb_range(buf, len);
}
/*
@ -1186,38 +1152,37 @@ bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op, int bufaligned)
if (op & BUS_DMASYNC_PREREAD) {
if (!(op & BUS_DMASYNC_PREWRITE) && !partial) {
cpu_dcache_inv_range((vm_offset_t)buf, len);
cpu_l2cache_inv_range((vm_offset_t)buf, len);
cpu_dcache_inv_range(buf, len);
cpu_l2cache_inv_range(buf, len);
} else {
cpu_dcache_wbinv_range((vm_offset_t)buf, len);
cpu_l2cache_wbinv_range((vm_offset_t)buf, len);
cpu_dcache_wbinv_range(buf, len);
cpu_l2cache_wbinv_range(buf, len);
}
}
if (op & BUS_DMASYNC_POSTREAD) {
if (partial && !bufaligned) {
s = intr_disable();
if ((vm_offset_t)buf & arm_dcache_align_mask)
memcpy(_tmp_cl, (void *)((vm_offset_t)buf &
if (buf & arm_dcache_align_mask)
memcpy(_tmp_cl, (void *)(buf &
~arm_dcache_align_mask),
(vm_offset_t)buf & arm_dcache_align_mask);
if (((vm_offset_t)buf + len) & arm_dcache_align_mask)
buf & arm_dcache_align_mask);
if ((buf + len) & arm_dcache_align_mask)
memcpy(_tmp_clend,
(void *)((vm_offset_t)buf + len),
arm_dcache_align - (((vm_offset_t)(buf) +
len) & arm_dcache_align_mask));
(void *)(buf + len),
arm_dcache_align -
((buf + len) & arm_dcache_align_mask));
}
cpu_dcache_inv_range((vm_offset_t)buf, len);
cpu_l2cache_inv_range((vm_offset_t)buf, len);
cpu_dcache_inv_range(buf, len);
cpu_l2cache_inv_range(buf, len);
if (partial && !bufaligned) {
if ((vm_offset_t)buf & arm_dcache_align_mask)
memcpy((void *)((vm_offset_t)buf &
if (buf & arm_dcache_align_mask)
memcpy((void *)(buf &
~arm_dcache_align_mask), _tmp_cl,
(vm_offset_t)buf & arm_dcache_align_mask);
if (((vm_offset_t)buf + len) & arm_dcache_align_mask)
memcpy((void *)((vm_offset_t)buf + len),
buf & arm_dcache_align_mask);
if ((buf + len) & arm_dcache_align_mask)
memcpy((void *)(buf + len),
_tmp_clend, arm_dcache_align -
(((vm_offset_t)(buf) + len) &
arm_dcache_align_mask));
((buf + len) & arm_dcache_align_mask));
intr_restore(s);
}
}
@ -1230,10 +1195,18 @@ _bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
STAILQ_FOREACH(bpage, &map->bpages, links) {
if (op & BUS_DMASYNC_PREWRITE) {
bcopy((void *)bpage->datavaddr,
(void *)(bpage->vaddr_nocache != 0 ?
bpage->vaddr_nocache : bpage->vaddr),
bpage->datacount);
if (bpage->datavaddr != 0)
bcopy((void *)bpage->datavaddr,
(void *)(bpage->vaddr_nocache != 0 ?
bpage->vaddr_nocache :
bpage->vaddr),
bpage->datacount);
else
physcopyout(bpage->dataaddr,
(void *)(bpage->vaddr_nocache != 0 ?
bpage->vaddr_nocache :
bpage->vaddr),
bpage->datacount);
if (bpage->vaddr_nocache == 0) {
cpu_dcache_wb_range(bpage->vaddr,
bpage->datacount);
@ -1249,36 +1222,23 @@ _bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
cpu_l2cache_inv_range(bpage->vaddr,
bpage->datacount);
}
bcopy((void *)(bpage->vaddr_nocache != 0 ?
bpage->vaddr_nocache : bpage->vaddr),
(void *)bpage->datavaddr, bpage->datacount);
if (bpage->datavaddr != 0)
bcopy((void *)(bpage->vaddr_nocache != 0 ?
bpage->vaddr_nocache : bpage->vaddr),
(void *)bpage->datavaddr, bpage->datacount);
else
physcopyin((void *)(bpage->vaddr_nocache != 0 ?
bpage->vaddr_nocache : bpage->vaddr),
bpage->dataaddr, bpage->datacount);
dmat->bounce_zone->total_bounced++;
}
}
}
static __inline int
_bus_dma_buf_is_in_bp(bus_dmamap_t map, void *buf, int len)
{
struct bounce_page *bpage;
STAILQ_FOREACH(bpage, &map->bpages, links) {
if ((vm_offset_t)buf >= bpage->datavaddr &&
(vm_offset_t)buf + len <= bpage->datavaddr +
bpage->datacount)
return (1);
}
return (0);
}
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
struct mbuf *m;
struct uio *uio;
int resid;
struct iovec *iov;
struct sync_list *sl, *end;
int bufaligned;
if (op == BUS_DMASYNC_POSTWRITE)
@ -1289,40 +1249,11 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
_bus_dmamap_sync_bp(dmat, map, op);
CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
bufaligned = (map->flags & DMAMAP_CACHE_ALIGNED);
switch(map->flags & DMAMAP_TYPE_MASK) {
case DMAMAP_LINEAR:
if (!(_bus_dma_buf_is_in_bp(map, map->buffer, map->len)))
bus_dmamap_sync_buf(map->buffer, map->len, op,
if (map->sync_count) {
end = &map->slist[map->sync_count];
for (sl = &map->slist[0]; sl != end; sl++)
bus_dmamap_sync_buf(sl->vaddr, sl->datacount, op,
bufaligned);
break;
case DMAMAP_MBUF:
m = map->buffer;
while (m) {
if (m->m_len > 0 &&
!(_bus_dma_buf_is_in_bp(map, m->m_data, m->m_len)))
bus_dmamap_sync_buf(m->m_data, m->m_len, op,
bufaligned);
m = m->m_next;
}
break;
case DMAMAP_UIO:
uio = map->buffer;
iov = uio->uio_iov;
resid = uio->uio_resid;
for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
bus_size_t minlen = resid < iov[i].iov_len ? resid :
iov[i].iov_len;
if (minlen > 0) {
if (!_bus_dma_buf_is_in_bp(map, iov[i].iov_base,
minlen))
bus_dmamap_sync_buf(iov[i].iov_base,
minlen, op, bufaligned);
resid -= minlen;
}
}
break;
default:
break;
}
drain:
@ -1489,7 +1420,7 @@ reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
bus_size_t size)
bus_addr_t addr, bus_size_t size)
{
struct bounce_zone *bz;
struct bounce_page *bpage;
@ -1522,6 +1453,7 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
bpage->busaddr |= vaddr & PAGE_MASK;
}
bpage->datavaddr = vaddr;
bpage->dataaddr = addr;
bpage->datacount = size;
STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
return (bpage->busaddr);
@ -1575,8 +1507,8 @@ busdma_swi(void)
mtx_unlock(&bounce_lock);
dmat = map->dmat;
(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
bus_dmamap_load(map->dmat, map, map->buffer, map->len,
map->callback, map->callback_arg, /*flags*/0);
bus_dmamap_load_mem(map->dmat, map, &map->mem,
map->callback, map->callback_arg, BUS_DMA_WAITOK);
(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
mtx_lock(&bounce_lock);
}


@ -64,13 +64,19 @@ typedef enum {
* Perform transport negotiation
* with this command.
*/
CAM_SCATTER_VALID = 0x00000010,/* Scatter/gather list is valid */
CAM_DATA_ISPHYS = 0x00000010,/* Data type with physical addrs */
CAM_DIS_AUTOSENSE = 0x00000020,/* Disable autosense feature */
CAM_DIR_BOTH = 0x00000000,/* Data direction (00:IN/OUT) */
CAM_DIR_IN = 0x00000040,/* Data direction (01:DATA IN) */
CAM_DIR_OUT = 0x00000080,/* Data direction (10:DATA OUT) */
CAM_DIR_NONE = 0x000000C0,/* Data direction (11:no data) */
CAM_DIR_MASK = 0x000000C0,/* Data direction Mask */
CAM_DATA_VADDR = 0x00000000,/* Data type (000:Virtual) */
CAM_DATA_PADDR = 0x00000010,/* Data type (001:Physical) */
CAM_DATA_SG = 0x00040000,/* Data type (010:sglist) */
CAM_DATA_SG_PADDR = 0x00040010,/* Data type (011:sglist phys) */
CAM_DATA_BIO = 0x00200000,/* Data type (100:bio) */
CAM_DATA_MASK = 0x00240010,/* Data type mask */
CAM_SOFT_RST_OP = 0x00000100,/* Use Soft reset alternative */
CAM_ENG_SYNC = 0x00000200,/* Flush resid bytes on complete */
CAM_DEV_QFRZDIS = 0x00000400,/* Disable DEV Q freezing */
@ -81,10 +87,8 @@ typedef enum {
CAM_TAG_ACTION_VALID = 0x00008000,/* Use the tag action in this ccb*/
CAM_PASS_ERR_RECOVER = 0x00010000,/* Pass driver does err. recovery*/
CAM_DIS_DISCONNECT = 0x00020000,/* Disable disconnect */
CAM_SG_LIST_PHYS = 0x00040000,/* SG list has physical addrs. */
CAM_MSG_BUF_PHYS = 0x00080000,/* Message buffer ptr is physical*/
CAM_SNS_BUF_PHYS = 0x00100000,/* Autosense data ptr is physical*/
CAM_DATA_PHYS = 0x00200000,/* SG/Buffer data ptrs are phys. */
CAM_CDB_PHYS = 0x00400000,/* CDB poiner is physical */
CAM_ENG_SGLIST = 0x00800000,/* SG list is for the HBA engine */


@ -547,7 +547,8 @@ xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td
* We can't deal with physical addresses for this
* type of transaction.
*/
if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
if ((inccb->ccb_h.flags & CAM_DATA_MASK) !=
CAM_DATA_VADDR) {
error = EINVAL;
break;
}


@ -93,8 +93,8 @@ struct cfcs_softc {
* handle physical addresses yet. That would require mapping things in
* order to do the copy.
*/
#define CFCS_BAD_CCB_FLAGS (CAM_DATA_PHYS | CAM_SG_LIST_PHYS | \
CAM_MSG_BUF_PHYS | CAM_SNS_BUF_PHYS | CAM_CDB_PHYS | CAM_SENSE_PTR |\
#define CFCS_BAD_CCB_FLAGS (CAM_DATA_ISPHYS | CAM_MSG_BUF_PHYS | \
CAM_SNS_BUF_PHYS | CAM_CDB_PHYS | CAM_SENSE_PTR | \
CAM_SENSE_PHYS)
int cfcs_init(void);
@ -379,36 +379,35 @@ cfcs_datamove(union ctl_io *io)
* Simplify things on both sides by putting single buffers into a
* single entry S/G list.
*/
if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) {
/* We should filter this out on entry */
panic("%s: physical S/G list, should not get here",
__func__);
} else {
int len_seen;
switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
case CAM_DATA_SG: {
int len_seen;
cam_sglist = (bus_dma_segment_t *)ccb->csio.data_ptr;
cam_sg_count = ccb->csio.sglist_cnt;
cam_sglist = (bus_dma_segment_t *)ccb->csio.data_ptr;
cam_sg_count = ccb->csio.sglist_cnt;
for (i = 0, len_seen = 0; i < cam_sg_count; i++) {
if ((len_seen + cam_sglist[i].ds_len) >=
io->scsiio.kern_rel_offset) {
cam_sg_start = i;
cam_sg_offset =
io->scsiio.kern_rel_offset -
len_seen;
break;
}
len_seen += cam_sglist[i].ds_len;
for (i = 0, len_seen = 0; i < cam_sg_count; i++) {
if ((len_seen + cam_sglist[i].ds_len) >=
io->scsiio.kern_rel_offset) {
cam_sg_start = i;
cam_sg_offset = io->scsiio.kern_rel_offset -
len_seen;
break;
}
len_seen += cam_sglist[i].ds_len;
}
} else {
break;
}
case CAM_DATA_VADDR:
cam_sglist = &cam_sg_entry;
cam_sglist[0].ds_len = ccb->csio.dxfer_len;
cam_sglist[0].ds_addr = (bus_addr_t)ccb->csio.data_ptr;
cam_sg_count = 1;
cam_sg_start = 0;
cam_sg_offset = io->scsiio.kern_rel_offset;
break;
default:
panic("Invalid CAM flags %#x", ccb->ccb_h.flags);
}
if (io->scsiio.kern_sg_entries > 0) {


@ -876,6 +876,7 @@ ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
csio->cdb_len = atio->cdb_len;
flags &= ~CAM_DATA_MASK;
if (io->scsiio.kern_sg_entries == 0) {
/* No S/G list */
data_ptr = io->scsiio.kern_data_ptr;
@ -883,7 +884,9 @@ ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
csio->sglist_cnt = 0;
if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
flags |= CAM_DATA_PHYS;
flags |= CAM_DATA_PADDR;
else
flags |= CAM_DATA_VADDR;
} else if (io->scsiio.kern_sg_entries <=
(sizeof(cmd_info->cam_sglist)/
sizeof(cmd_info->cam_sglist[0]))) {
@ -907,11 +910,10 @@ ctlfestart(struct cam_periph *periph, union ccb *start_ccb)
ctl_sglist[i].len;
}
csio->sglist_cnt = io->scsiio.kern_sg_entries;
flags |= CAM_SCATTER_VALID;
if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR)
flags |= CAM_SG_LIST_PHYS;
flags |= CAM_DATA_SG_PADDR;
else
flags &= ~CAM_SG_LIST_PHYS;
flags &= ~CAM_DATA_SG;
data_ptr = (uint8_t *)cam_sglist;
dxfer_len = io->scsiio.kern_data_len;
} else {


@ -696,8 +696,11 @@ passsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb)
* do the right thing, even if there isn't data to map, but since CCBs
* without data are a reasonably common occurance (e.g. test unit
* ready), it will save a few cycles if we check for it here.
*
* XXX What happens if a sg list is supplied? We don't filter that
* out.
*/
if (((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0)
if (((ccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
&& (((ccb->ccb_h.func_code == XPT_SCSI_IO ||
ccb->ccb_h.func_code == XPT_ATA_IO)
&& ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE))


@ -737,7 +737,7 @@ targsendccb(struct targ_softc *softc, union ccb *ccb,
* without data are a reasonably common occurance (e.g. test unit
* ready), it will save a few cycles if we check for it here.
*/
if (((ccb_h->flags & CAM_DATA_PHYS) == 0)
if (((ccb_h->flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
&& (((ccb_h->func_code == XPT_CONT_TARGET_IO)
&& ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE))
|| (ccb_h->func_code == XPT_DEV_MATCH))) {


@ -2681,6 +2681,7 @@ kern/subr_acl_posix1e.c optional ufs_acl
kern/subr_autoconf.c standard
kern/subr_blist.c standard
kern/subr_bus.c standard
kern/subr_bus_dma.c standard
kern/subr_bufring.c standard
kern/subr_clock.c standard
kern/subr_devstat.c standard


@ -448,26 +448,28 @@ aac_cam_action(struct cam_sim *sim, union ccb *ccb)
/* Map the s/g list. XXX 32bit addresses only! */
if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
case CAM_DATA_VADDR:
srb->data_len = csio->dxfer_len;
if (ccb->ccb_h.flags & CAM_DATA_PHYS) {
/* Send a 32bit command */
fib->Header.Command = ScsiPortCommand;
srb->sg_map.SgCount = 1;
srb->sg_map.SgEntry[0].SgAddress =
(uint32_t)(uintptr_t)csio->data_ptr;
srb->sg_map.SgEntry[0].SgByteCount =
csio->dxfer_len;
} else {
/*
* Arrange things so that the S/G
* map will get set up automagically
*/
cm->cm_data = (void *)csio->data_ptr;
cm->cm_datalen = csio->dxfer_len;
cm->cm_sgtable = &srb->sg_map;
}
} else {
/*
* Arrange things so that the S/G
* map will get set up automagically
*/
cm->cm_data = (void *)csio->data_ptr;
cm->cm_datalen = csio->dxfer_len;
cm->cm_sgtable = &srb->sg_map;
break;
case CAM_DATA_PADDR:
/* Send a 32bit command */
fib->Header.Command = ScsiPortCommand;
srb->sg_map.SgCount = 1;
srb->sg_map.SgEntry[0].SgAddress =
(uint32_t)(uintptr_t)csio->data_ptr;
srb->sg_map.SgEntry[0].SgByteCount =
csio->dxfer_len;
srb->data_len = csio->dxfer_len;
break;
default:
/* XXX Need to handle multiple s/g elements */
panic("aac_cam: multiple s/g elements");
}


@ -207,6 +207,7 @@ adv_action(struct cam_sim *sim, union ccb *ccb)
struct ccb_hdr *ccb_h;
struct ccb_scsiio *csio;
struct adv_ccb_info *cinfo;
int error;
ccb_h = &ccb->ccb_h;
csio = &ccb->csio;
@ -217,58 +218,17 @@ adv_action(struct cam_sim *sim, union ccb *ccb)
ccb_h->ccb_cinfo_ptr = cinfo;
cinfo->ccb = ccb;
/* Only use S/G if there is a transfer */
if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
/*
* We've been given a pointer
* to a single buffer
*/
if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
int error;
error =
bus_dmamap_load(adv->buffer_dmat,
cinfo->dmamap,
csio->data_ptr,
csio->dxfer_len,
adv_execute_ccb,
csio, /*flags*/0);
if (error == EINPROGRESS) {
/*
* So as to maintain ordering,
* freeze the controller queue
* until our mapping is
* returned.
*/
adv_set_state(adv,
ADV_BUSDMA_BLOCK);
}
} else {
struct bus_dma_segment seg;
/* Pointer to physical buffer */
seg.ds_addr =
(bus_addr_t)csio->data_ptr;
seg.ds_len = csio->dxfer_len;
adv_execute_ccb(csio, &seg, 1, 0);
}
} else {
struct bus_dma_segment *segs;
if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
panic("adv_setup_data - Physical "
"segment pointers unsupported");
if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
panic("adv_setup_data - Virtual "
"segment addresses unsupported");
/* Just use the segments provided */
segs = (struct bus_dma_segment *)csio->data_ptr;
adv_execute_ccb(ccb, segs, csio->sglist_cnt, 0);
}
} else {
adv_execute_ccb(ccb, NULL, 0, 0);
error = bus_dmamap_load_ccb(adv->buffer_dmat,
cinfo->dmamap,
ccb,
adv_execute_ccb,
csio, /*flags*/0);
if (error == EINPROGRESS) {
/*
* So as to maintain ordering, freeze the controller
* queue until our mapping is returned.
*/
adv_set_state(adv, ADV_BUSDMA_BLOCK);
}
break;
}
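This adv(4) hunk is the template repeated across most initiator drivers in this commit: the hand-rolled virtual-buffer, physical-buffer and S/G branches collapse into a single bus_dmamap_load_ccb() call, leaving only the EINPROGRESS case for the driver. A stripped-down sketch of the converted shape (the softc layout and callback name are placeholders, not any one driver's; assumes <machine/bus.h>, <cam/cam_ccb.h> and <cam/cam_xpt_sim.h>):

struct mysoftc {
        bus_dma_tag_t   buffer_dmat;
};

static void my_dma_cb(void *arg, bus_dma_segment_t *segs, int nsegs,
    int error);

static void
my_scsi_io(struct mysoftc *sc, struct cam_sim *sim, bus_dmamap_t map,
    union ccb *ccb)
{
        int error;

        error = bus_dmamap_load_ccb(sc->buffer_dmat, map, ccb,
            my_dma_cb, ccb, /*flags*/0);
        if (error == EINPROGRESS) {
                /*
                 * Deferred mapping: freeze the SIM queue so command
                 * ordering is preserved until busdma calls my_dma_cb.
                 */
                xpt_freeze_simq(sim, 1);
                ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
        }
}

Note that the CAM_DIR_NONE and pre-built S/G cases no longer need driver code at all; busdma invokes the callback for them directly.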


@ -353,6 +353,7 @@ adw_action(struct cam_sim *sim, union ccb *ccb)
struct ccb_scsiio *csio;
struct ccb_hdr *ccbh;
struct acb *acb;
int error;
csio = &ccb->csio;
ccbh = &ccb->ccb_h;
@ -427,66 +428,18 @@ adw_action(struct cam_sim *sim, union ccb *ccb)
acb->queue.cdb, csio->cdb_len);
}
/*
* If we have any data to send with this command,
* map it into bus space.
*/
if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
/*
* We've been given a pointer
* to a single buffer.
*/
if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
int error;
error =
bus_dmamap_load(adw->buffer_dmat,
acb->dmamap,
csio->data_ptr,
csio->dxfer_len,
adwexecuteacb,
acb, /*flags*/0);
if (error == EINPROGRESS) {
/*
* So as to maintain ordering,
* freeze the controller queue
* until our mapping is
* returned.
*/
xpt_freeze_simq(sim, 1);
acb->state |= CAM_RELEASE_SIMQ;
}
} else {
struct bus_dma_segment seg;
/* Pointer to physical buffer */
seg.ds_addr =
(bus_addr_t)csio->data_ptr;
seg.ds_len = csio->dxfer_len;
adwexecuteacb(acb, &seg, 1, 0);
}
} else {
struct bus_dma_segment *segs;
if ((ccbh->flags & CAM_DATA_PHYS) != 0)
panic("adw_action - Physical "
"segment pointers "
"unsupported");
if ((ccbh->flags&CAM_SG_LIST_PHYS)==0)
panic("adw_action - Virtual "
"segment addresses "
"unsupported");
/* Just use the segments provided */
segs = (struct bus_dma_segment *)csio->data_ptr;
adwexecuteacb(acb, segs, csio->sglist_cnt,
(csio->sglist_cnt < ADW_SGSIZE)
? 0 : EFBIG);
}
} else {
adwexecuteacb(acb, NULL, 0, 0);
error = bus_dmamap_load_ccb(adw->buffer_dmat,
acb->dmamap,
ccb,
adwexecuteacb,
acb, /*flags*/0);
if (error == EINPROGRESS) {
/*
* So as to maintain ordering, freeze the controller
* queue until our mapping is returned.
*/
xpt_freeze_simq(sim, 1);
acb->state |= CAM_RELEASE_SIMQ;
}
break;
}


@ -778,6 +778,7 @@ ahaaction(struct cam_sim *sim, union ccb *ccb)
if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
struct ccb_scsiio *csio;
struct ccb_hdr *ccbh;
int error;
csio = &ccb->csio;
ccbh = &csio->ccb_h;
@ -811,67 +812,22 @@ ahaaction(struct cam_sim *sim, union ccb *ccb)
* If we have any data to send with this command,
* map it into bus space.
*/
/* Only use S/G if there is a transfer */
if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
/*
* We've been given a pointer
* to a single buffer.
*/
if ((ccbh->flags & CAM_DATA_PHYS)==0) {
int error;
error = bus_dmamap_load(
aha->buffer_dmat,
accb->dmamap,
csio->data_ptr,
csio->dxfer_len,
ahaexecuteccb,
accb,
/*flags*/0);
if (error == EINPROGRESS) {
/*
* So as to maintain
* ordering, freeze the
* controller queue
* until our mapping is
* returned.
*/
xpt_freeze_simq(aha->sim,
1);
csio->ccb_h.status |=
CAM_RELEASE_SIMQ;
}
} else {
struct bus_dma_segment seg;
/* Pointer to physical buffer */
seg.ds_addr =
(bus_addr_t)csio->data_ptr;
seg.ds_len = csio->dxfer_len;
ahaexecuteccb(accb, &seg, 1, 0);
}
} else {
struct bus_dma_segment *segs;
if ((ccbh->flags & CAM_DATA_PHYS) != 0)
panic("ahaaction - Physical "
"segment pointers "
"unsupported");
if ((ccbh->flags&CAM_SG_LIST_PHYS)==0)
panic("ahaaction - Virtual "
"segment addresses "
"unsupported");
/* Just use the segments provided */
segs = (struct bus_dma_segment *)
csio->data_ptr;
ahaexecuteccb(accb, segs,
csio->sglist_cnt, 0);
}
} else {
ahaexecuteccb(accb, NULL, 0, 0);
error = bus_dmamap_load_ccb(
aha->buffer_dmat,
accb->dmamap,
ccb,
ahaexecuteccb,
accb,
/*flags*/0);
if (error == EINPROGRESS) {
/*
* So as to maintain ordering, freeze the
* controller queue until our mapping is
* returned.
*/
xpt_freeze_simq(aha->sim, 1);
csio->ccb_h.status |= CAM_RELEASE_SIMQ;
}
} else {
hccb->opcode = INITIATOR_BUS_DEV_RESET;


@ -1006,6 +1006,7 @@ ahbaction(struct cam_sim *sim, union ccb *ccb)
{
struct ecb *ecb;
struct hardware_ecb *hecb;
int error;
/*
* get an ecb to use.
@ -1056,65 +1057,19 @@ ahbaction(struct cam_sim *sim, union ccb *ccb)
hecb->cdb, hecb->cdb_len);
}
/*
* If we have any data to send with this command,
* map it into bus space.
*/
if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
/*
* We've been given a pointer
* to a single buffer.
*/
if ((ccb->ccb_h.flags & CAM_DATA_PHYS)==0) {
int error;
error = bus_dmamap_load(
ahb->buffer_dmat,
ecb->dmamap,
ccb->csio.data_ptr,
ccb->csio.dxfer_len,
ahbexecuteecb,
ecb, /*flags*/0);
if (error == EINPROGRESS) {
/*
* So as to maintain ordering,
* freeze the controller queue
* until our mapping is
* returned.
*/
xpt_freeze_simq(ahb->sim, 1);
ccb->ccb_h.status |=
CAM_RELEASE_SIMQ;
}
} else {
struct bus_dma_segment seg;
/* Pointer to physical buffer */
seg.ds_addr =
(bus_addr_t)ccb->csio.data_ptr;
seg.ds_len = ccb->csio.dxfer_len;
ahbexecuteecb(ecb, &seg, 1, 0);
}
} else {
struct bus_dma_segment *segs;
if ((ccb->ccb_h.flags & CAM_DATA_PHYS) != 0)
panic("ahbaction - Physical segment "
"pointers unsupported");
if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0)
panic("btaction - Virtual segment "
"addresses unsupported");
/* Just use the segments provided */
segs = (struct bus_dma_segment *)
ccb->csio.data_ptr;
ahbexecuteecb(ecb, segs, ccb->csio.sglist_cnt,
0);
}
} else {
ahbexecuteecb(ecb, NULL, 0, 0);
error = bus_dmamap_load_ccb(
ahb->buffer_dmat,
ecb->dmamap,
ccb,
ahbexecuteecb,
ecb, /*flags*/0);
if (error == EINPROGRESS) {
/*
* So as to maintain ordering, freeze the controller
* queue until our mapping is returned.
*/
xpt_freeze_simq(ahb->sim, 1);
ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
}
break;
}


@ -1683,21 +1683,10 @@ ahci_begin_transaction(device_t dev, union ccb *ccb)
(ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT)))
ch->aslots |= (1 << slot->slot);
slot->dma.nsegs = 0;
/* If request moves data, setup and load SG list */
if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
void *buf;
bus_size_t size;
slot->state = AHCI_SLOT_LOADING;
if (ccb->ccb_h.func_code == XPT_ATA_IO) {
buf = ccb->ataio.data_ptr;
size = ccb->ataio.dxfer_len;
} else {
buf = ccb->csio.data_ptr;
size = ccb->csio.dxfer_len;
}
bus_dmamap_load(ch->dma.data_tag, slot->dma.data_map,
buf, size, ahci_dmasetprd, slot, 0);
bus_dmamap_load_ccb(ch->dma.data_tag, slot->dma.data_map, ccb,
ahci_dmasetprd, slot, 0);
} else
ahci_execute_transaction(slot);
}


@ -146,8 +146,8 @@ aic_action(struct cam_sim *sim, union ccb *ccb)
scb->cmd_ptr = ccb->csio.cdb_io.cdb_bytes;
}
if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) ||
(ccb->ccb_h.flags & CAM_DATA_PHYS)) {
if ((ccb->ccb_h.flags & CAM_DATA_MASK) !=
CAM_DATA_VADDR) {
ccb->ccb_h.status = CAM_REQ_INVALID;
aic_free_scb(aic, scb);
xpt_done(ccb);
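aic(4) cannot use physical addresses or caller-built S/G lists, so its two old flag tests become one comparison against the new enumeration. The same guard as a standalone sketch, suitable early in any action routine with that limitation:

/* Reject anything but a plain virtual buffer before queuing the CCB. */
if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
    (ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR) {
        ccb->ccb_h.status = CAM_REQ_INVALID;
        xpt_done(ccb);
        return;
}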


@ -1071,6 +1071,7 @@ ahd_setup_data(struct ahd_softc *ahd, struct cam_sim *sim,
{
struct hardware_scb *hscb;
struct ccb_hdr *ccb_h;
int error;
hscb = scb->hscb;
ccb_h = &csio->ccb_h;
@ -1120,64 +1121,18 @@ ahd_setup_data(struct ahd_softc *ahd, struct cam_sim *sim,
}
}
/* Only use S/G if there is a transfer */
if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
/* We've been given a pointer to a single buffer */
if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
int s;
int error;
s = splsoftvm();
error = bus_dmamap_load(ahd->buffer_dmat,
scb->dmamap,
csio->data_ptr,
csio->dxfer_len,
ahd_execute_scb,
scb, /*flags*/0);
if (error == EINPROGRESS) {
/*
* So as to maintain ordering,
* freeze the controller queue
* until our mapping is
* returned.
*/
xpt_freeze_simq(sim,
/*count*/1);
scb->io_ctx->ccb_h.status |=
CAM_RELEASE_SIMQ;
}
splx(s);
} else {
struct bus_dma_segment seg;
/* Pointer to physical buffer */
if (csio->dxfer_len > AHD_MAXTRANSFER_SIZE)
panic("ahd_setup_data - Transfer size "
"larger than can device max");
seg.ds_addr =
(bus_addr_t)(vm_offset_t)csio->data_ptr;
seg.ds_len = csio->dxfer_len;
ahd_execute_scb(scb, &seg, 1, 0);
}
} else {
struct bus_dma_segment *segs;
if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
panic("ahd_setup_data - Physical segment "
"pointers unsupported");
if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
panic("ahd_setup_data - Virtual segment "
"addresses unsupported");
/* Just use the segments provided */
segs = (struct bus_dma_segment *)csio->data_ptr;
ahd_execute_scb(scb, segs, csio->sglist_cnt, 0);
}
} else {
ahd_execute_scb(scb, NULL, 0, 0);
error = bus_dmamap_load_ccb(ahd->buffer_dmat,
scb->dmamap,
(union ccb *)csio,
ahd_execute_scb,
scb, /*flags*/0);
if (error == EINPROGRESS) {
/*
* So as to maintain ordering, freeze the controller queue
* until our mapping is returned.
*/
xpt_freeze_simq(sim, /*count*/1);
scb->io_ctx->ccb_h.status |= CAM_RELEASE_SIMQ;
}
}


@ -1138,6 +1138,7 @@ ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim,
{
struct hardware_scb *hscb;
struct ccb_hdr *ccb_h;
int error;
hscb = scb->hscb;
ccb_h = &csio->ccb_h;
@ -1179,64 +1180,21 @@ ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim,
}
}
/* Only use S/G if there is a transfer */
if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
/* We've been given a pointer to a single buffer */
if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
int s;
int error;
s = splsoftvm();
error = bus_dmamap_load(ahc->buffer_dmat,
scb->dmamap,
csio->data_ptr,
csio->dxfer_len,
ahc_execute_scb,
scb, /*flags*/0);
if (error == EINPROGRESS) {
/*
* So as to maintain ordering,
* freeze the controller queue
* until our mapping is
* returned.
*/
xpt_freeze_simq(sim,
/*count*/1);
scb->io_ctx->ccb_h.status |=
CAM_RELEASE_SIMQ;
}
splx(s);
} else {
struct bus_dma_segment seg;
/* Pointer to physical buffer */
if (csio->dxfer_len > AHC_MAXTRANSFER_SIZE)
panic("ahc_setup_data - Transfer size "
"larger than can device max");
seg.ds_addr =
(bus_addr_t)(vm_offset_t)csio->data_ptr;
seg.ds_len = csio->dxfer_len;
ahc_execute_scb(scb, &seg, 1, 0);
}
} else {
struct bus_dma_segment *segs;
if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
panic("ahc_setup_data - Physical segment "
"pointers unsupported");
if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
panic("ahc_setup_data - Virtual segment "
"addresses unsupported");
/* Just use the segments provided */
segs = (struct bus_dma_segment *)csio->data_ptr;
ahc_execute_scb(scb, segs, csio->sglist_cnt, 0);
}
} else {
ahc_execute_scb(scb, NULL, 0, 0);
error = bus_dmamap_load_ccb(ahc->buffer_dmat,
scb->dmamap,
(union ccb *)csio,
ahc_execute_scb,
scb,
0);
if (error == EINPROGRESS) {
/*
* So as to maintain ordering,
* freeze the controller queue
* until our mapping is
* returned.
*/
xpt_freeze_simq(sim, /*count*/1);
scb->io_ctx->ccb_h.status |= CAM_RELEASE_SIMQ;
}
}


@ -274,12 +274,9 @@ amr_cam_action(struct cam_sim *sim, union ccb *ccb)
* address
*/
if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
if (ccbh->flags & CAM_DATA_PHYS)
if ((ccbh->flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
/* we can't map it */
ccbh->status = CAM_REQ_INVALID;
if (ccbh->flags & CAM_SCATTER_VALID)
/* we want to do the s/g setup */
ccbh->status = CAM_REQ_INVALID;
}
/*


@ -2341,7 +2341,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb *p
(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[7] << 8 |
(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[8];
/* 4 bytes: Areca io control code */
if((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
if ((pccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
buffer = pccb->csio.data_ptr;
transfer_len = pccb->csio.dxfer_len;
} else {
@ -2731,6 +2731,7 @@ static void arcmsr_action(struct cam_sim *psim, union ccb *pccb)
case XPT_SCSI_IO: {
struct CommandControlBlock *srb;
int target = pccb->ccb_h.target_id;
int error;
if(target == 16) {
/* virtual device for iop message transfer */
@ -2745,52 +2746,13 @@ static void arcmsr_action(struct cam_sim *psim, union ccb *pccb)
pccb->ccb_h.arcmsr_ccbsrb_ptr = srb;
pccb->ccb_h.arcmsr_ccbacb_ptr = acb;
srb->pccb = pccb;
if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
if(!(pccb->ccb_h.flags & CAM_SCATTER_VALID)) {
/* Single buffer */
if(!(pccb->ccb_h.flags & CAM_DATA_PHYS)) {
/* Buffer is virtual */
u_int32_t error, s;
s = splsoftvm();
error = bus_dmamap_load(acb->dm_segs_dmat
, srb->dm_segs_dmamap
, pccb->csio.data_ptr
, pccb->csio.dxfer_len
, arcmsr_execute_srb, srb, /*flags*/0);
if(error == EINPROGRESS) {
xpt_freeze_simq(acb->psim, 1);
pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
}
splx(s);
}
else { /* Buffer is physical */
#ifdef PAE
panic("arcmsr: CAM_DATA_PHYS not supported");
#else
struct bus_dma_segment seg;
seg.ds_addr = (bus_addr_t)pccb->csio.data_ptr;
seg.ds_len = pccb->csio.dxfer_len;
arcmsr_execute_srb(srb, &seg, 1, 0);
#endif
}
} else {
/* Scatter/gather list */
struct bus_dma_segment *segs;
if((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0
|| (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
pccb->ccb_h.status |= CAM_PROVIDE_FAIL;
xpt_done(pccb);
free(srb, M_DEVBUF);
return;
}
segs = (struct bus_dma_segment *)pccb->csio.data_ptr;
arcmsr_execute_srb(srb, segs, pccb->csio.sglist_cnt, 0);
}
} else {
arcmsr_execute_srb(srb, NULL, 0, 0);
error = bus_dmamap_load_ccb(acb->dm_segs_dmat
, srb->dm_segs_dmamap
, pccb
, arcmsr_execute_srb, srb, /*flags*/0);
if(error == EINPROGRESS) {
xpt_freeze_simq(acb->psim, 1);
pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
}
break;
}


@ -304,10 +304,17 @@ ata_dmaload(struct ata_request *request, void *addr, int *entries)
else
dspa.dmatab = request->dma->sg;
if ((error = bus_dmamap_load(request->dma->data_tag, request->dma->data_map,
request->data, request->bytecount,
ch->dma.setprd, &dspa, BUS_DMA_NOWAIT)) ||
(error = dspa.error)) {
#ifdef ATA_CAM
if (request->ccb)
error = bus_dmamap_load_ccb(request->dma->data_tag,
request->dma->data_map, request->ccb,
ch->dma.setprd, &dspa, BUS_DMA_NOWAIT);
else
#endif
error = bus_dmamap_load(request->dma->data_tag, request->dma->data_map,
request->data, request->bytecount,
ch->dma.setprd, &dspa, BUS_DMA_NOWAIT);
if (error || (error = dspa.error)) {
device_printf(request->parent, "FAILURE - load data\n");
goto error;
}
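ata(4) requests may or may not have a CCB attached (ATA_CAM), and the same split shows up below in ciss, mfi, mly and mps, which tag the request when its data pointer is really a CCB. A hedged sketch of that dispatch; the request layout is invented for illustration:

/* Illustrative only: struct xreq and its fields are placeholders. */
struct xreq {
        union ccb       *ccb;           /* set when the data came from CAM */
        void            *data;          /* otherwise a wired kernel buffer */
        bus_size_t       datalen;
        bus_dmamap_t     map;
};

static int
xreq_load(bus_dma_tag_t tag, struct xreq *r, bus_dmamap_callback_t *cb)
{
        if (r->ccb != NULL)
                return (bus_dmamap_load_ccb(tag, r->map, r->ccb, cb, r,
                    BUS_DMA_NOWAIT));
        return (bus_dmamap_load(tag, r->map, r->data, r->datalen, cb, r,
            BUS_DMA_NOWAIT));
}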


@ -514,12 +514,6 @@ atapi_action(struct cam_sim *sim, union ccb *ccb)
("CAM CCB too long for ATAPI"));
goto action_invalid;
}
if ((ccb_h->flags & CAM_SCATTER_VALID)) {
/* scatter-gather not supported */
xpt_print_path(ccb_h->path);
printf("ATAPI/CAM does not support scatter-gather yet!\n");
goto action_invalid;
}
switch (ccb_h->flags & CAM_DIR_MASK) {
case CAM_DIR_IN:


@ -1158,6 +1158,7 @@ btaction(struct cam_sim *sim, union ccb *ccb)
if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
struct ccb_scsiio *csio;
struct ccb_hdr *ccbh;
int error;
csio = &ccb->csio;
ccbh = &csio->ccb_h;
@ -1205,67 +1206,21 @@ btaction(struct cam_sim *sim, union ccb *ccb)
* If we have any data to send with this command,
* map it into bus space.
*/
/* Only use S/G if there is a transfer */
if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
/*
* We've been given a pointer
* to a single buffer.
*/
if ((ccbh->flags & CAM_DATA_PHYS)==0) {
int error;
error = bus_dmamap_load(
bt->buffer_dmat,
bccb->dmamap,
csio->data_ptr,
csio->dxfer_len,
btexecuteccb,
bccb,
/*flags*/0);
if (error == EINPROGRESS) {
/*
* So as to maintain
* ordering, freeze the
* controller queue
* until our mapping is
* returned.
*/
xpt_freeze_simq(bt->sim,
1);
csio->ccb_h.status |=
CAM_RELEASE_SIMQ;
}
} else {
struct bus_dma_segment seg;
/* Pointer to physical buffer */
seg.ds_addr =
(bus_addr_t)csio->data_ptr;
seg.ds_len = csio->dxfer_len;
btexecuteccb(bccb, &seg, 1, 0);
}
} else {
struct bus_dma_segment *segs;
if ((ccbh->flags & CAM_DATA_PHYS) != 0)
panic("btaction - Physical "
"segment pointers "
"unsupported");
if ((ccbh->flags&CAM_SG_LIST_PHYS)==0)
panic("btaction - Virtual "
"segment addresses "
"unsupported");
/* Just use the segments provided */
segs = (struct bus_dma_segment *)
csio->data_ptr;
btexecuteccb(bccb, segs,
csio->sglist_cnt, 0);
}
} else {
btexecuteccb(bccb, NULL, 0, 0);
error = bus_dmamap_load_ccb(
bt->buffer_dmat,
bccb->dmamap,
ccb,
btexecuteccb,
bccb,
/*flags*/0);
if (error == EINPROGRESS) {
/*
* So as to maintain ordering, freeze the
* controller queue until our mapping is
* returned.
*/
xpt_freeze_simq(bt->sim, 1);
csio->ccb_h.status |= CAM_RELEASE_SIMQ;
}
} else {
hccb->opcode = INITIATOR_BUS_DEV_RESET;


@ -2688,9 +2688,14 @@ ciss_map_request(struct ciss_request *cr)
BUS_DMASYNC_PREWRITE);
if (cr->cr_data != NULL) {
error = bus_dmamap_load(sc->ciss_buffer_dmat, cr->cr_datamap,
cr->cr_data, cr->cr_length,
ciss_request_map_helper, cr, 0);
if (cr->cr_flags & CISS_REQ_CCB)
error = bus_dmamap_load_ccb(sc->ciss_buffer_dmat,
cr->cr_datamap, cr->cr_data,
ciss_request_map_helper, cr, 0);
else
error = bus_dmamap_load(sc->ciss_buffer_dmat, cr->cr_datamap,
cr->cr_data, cr->cr_length,
ciss_request_map_helper, cr, 0);
if (error != 0)
return (error);
} else {
@ -3056,18 +3061,6 @@ ciss_cam_action_io(struct cam_sim *sim, struct ccb_scsiio *csio)
csio->ccb_h.status = CAM_REQ_CMP_ERR;
}
/* if there is data transfer, it must be to/from a virtual address */
if ((csio->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
if (csio->ccb_h.flags & CAM_DATA_PHYS) { /* we can't map it */
debug(3, " data pointer is to physical address");
csio->ccb_h.status = CAM_REQ_CMP_ERR;
}
if (csio->ccb_h.flags & CAM_SCATTER_VALID) { /* we want to do the s/g setup */
debug(3, " data has premature s/g setup");
csio->ccb_h.status = CAM_REQ_CMP_ERR;
}
}
/* abandon aborted ccbs or those that have failed validation */
if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
debug(3, "abandoning CCB due to abort/validation failure");
@ -3094,7 +3087,7 @@ ciss_cam_action_io(struct cam_sim *sim, struct ccb_scsiio *csio)
* Build the command.
*/
cc = cr->cr_cc;
cr->cr_data = csio->data_ptr;
cr->cr_data = csio;
cr->cr_length = csio->dxfer_len;
cr->cr_complete = ciss_cam_complete;
cr->cr_private = csio;
@ -3112,12 +3105,13 @@ ciss_cam_action_io(struct cam_sim *sim, struct ccb_scsiio *csio)
cc->cdb.type = CISS_CDB_TYPE_COMMAND;
cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE; /* XXX ordered tags? */
if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
cr->cr_flags = CISS_REQ_DATAOUT;
cr->cr_flags = CISS_REQ_DATAOUT | CISS_REQ_CCB;
cc->cdb.direction = CISS_CDB_DIRECTION_WRITE;
} else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
cr->cr_flags = CISS_REQ_DATAIN;
cr->cr_flags = CISS_REQ_DATAIN | CISS_REQ_CCB;
cc->cdb.direction = CISS_CDB_DIRECTION_READ;
} else {
cr->cr_data = NULL;
cr->cr_flags = 0;
cc->cdb.direction = CISS_CDB_DIRECTION_NONE;
}


@ -116,6 +116,7 @@ struct ciss_request
#define CISS_REQ_DATAOUT (1<<3) /* data host->adapter */
#define CISS_REQ_DATAIN (1<<4) /* data adapter->host */
#define CISS_REQ_BUSY (1<<5) /* controller has req */
#define CISS_REQ_CCB (1<<6) /* data is ccb */
void (* cr_complete)(struct ciss_request *);
void *cr_private;


@ -910,56 +910,22 @@ dpt_action(struct cam_sim *sim, union ccb *ccb)
*/
/* Only use S/G if there is a transfer */
if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
int error;
error = bus_dmamap_load_ccb(dpt->buffer_dmat,
dccb->dmamap,
ccb,
dptexecuteccb,
dccb, /*flags*/0);
if (error == EINPROGRESS) {
/*
* We've been given a pointer
* to a single buffer.
* So as to maintain ordering,
* freeze the controller queue
* until our mapping is
* returned.
*/
if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
int error;
error =
bus_dmamap_load(dpt->buffer_dmat,
dccb->dmamap,
csio->data_ptr,
csio->dxfer_len,
dptexecuteccb,
dccb, /*flags*/0);
if (error == EINPROGRESS) {
/*
* So as to maintain ordering,
* freeze the controller queue
* until our mapping is
* returned.
*/
xpt_freeze_simq(sim, 1);
dccb->state |= CAM_RELEASE_SIMQ;
}
} else {
struct bus_dma_segment seg;
/* Pointer to physical buffer */
seg.ds_addr =
(bus_addr_t)csio->data_ptr;
seg.ds_len = csio->dxfer_len;
dptexecuteccb(dccb, &seg, 1, 0);
}
} else {
struct bus_dma_segment *segs;
if ((ccbh->flags & CAM_DATA_PHYS) != 0)
panic("dpt_action - Physical "
"segment pointers "
"unsupported");
if ((ccbh->flags&CAM_SG_LIST_PHYS)==0)
panic("dpt_action - Virtual "
"segment addresses "
"unsupported");
/* Just use the segments provided */
segs = (struct bus_dma_segment *)csio->data_ptr;
dptexecuteccb(dccb, segs, csio->sglist_cnt, 0);
xpt_freeze_simq(sim, 1);
dccb->state |= CAM_RELEASE_SIMQ;
}
} else {
/*


@ -2478,11 +2478,6 @@ END_DEBUG
ocb->orb[4] |= htonl(ORB_CMD_IN);
}
if (csio->ccb_h.flags & CAM_SCATTER_VALID)
printf("sbp: CAM_SCATTER_VALID\n");
if (csio->ccb_h.flags & CAM_DATA_PHYS)
printf("sbp: CAM_DATA_PHYS\n");
if (csio->ccb_h.flags & CAM_CDB_POINTER)
cdb = (void *)csio->cdb_io.cdb_ptr;
else
@ -2496,10 +2491,9 @@ printf("ORB %08x %08x %08x %08x\n", ntohl(ocb->orb[4]), ntohl(ocb->orb[5]), ntoh
int s, error;
s = splsoftvm();
error = bus_dmamap_load(/*dma tag*/sbp->dmat,
error = bus_dmamap_load_ccb(/*dma tag*/sbp->dmat,
/*dma map*/ocb->dmamap,
ccb->csio.data_ptr,
ccb->csio.dxfer_len,
ccb,
sbp_execute_ocb,
ocb,
/*flags*/0);


@ -474,33 +474,6 @@ static void os_cmddone(PCOMMAND pCmd)
static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical)
{
POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
union ccb *ccb = ext->ccb;
bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr;
int idx;
if(logical) {
if (ccb->ccb_h.flags & CAM_DATA_PHYS)
panic("physical address unsupported");
if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS)
panic("physical address unsupported");
for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) {
os_set_sgptr(&pSg[idx], (HPT_U8 *)(HPT_UPTR)sgList[idx].ds_addr);
pSg[idx].size = sgList[idx].ds_len;
pSg[idx].eot = (idx==ccb->csio.sglist_cnt-1)? 1 : 0;
}
}
else {
os_set_sgptr(pSg, (HPT_U8 *)ccb->csio.data_ptr);
pSg->size = ccb->csio.dxfer_len;
pSg->eot = 1;
}
return TRUE;
}
/* since we have provided physical sg, nobody will ask us to build physical sg */
HPT_ASSERT(0);
return FALSE;
@ -515,23 +488,27 @@ static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs
HPT_ASSERT(pCmd->flags.physical_sg);
if (error || nsegs == 0)
if (error)
panic("busdma error");
HPT_ASSERT(nsegs<=os_max_sg_descriptors);
for (idx = 0; idx < nsegs; idx++, psg++) {
psg->addr.bus = segs[idx].ds_addr;
psg->size = segs[idx].ds_len;
psg->eot = 0;
}
psg[-1].eot = 1;
if (nsegs != 0) {
for (idx = 0; idx < nsegs; idx++, psg++) {
psg->addr.bus = segs[idx].ds_addr;
psg->size = segs[idx].ds_len;
psg->eot = 0;
}
psg[-1].eot = 1;
if (pCmd->flags.data_in) {
bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREREAD);
}
else if (pCmd->flags.data_out) {
bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREWRITE);
if (pCmd->flags.data_in) {
bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map,
BUS_DMASYNC_PREREAD);
}
else if (pCmd->flags.data_out) {
bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map,
BUS_DMASYNC_PREWRITE);
}
}
ext->ccb->ccb_h.timeout_ch = timeout(hpt_timeout, pCmd, HPT_OSM_TIMEOUT);
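Because bus_dmamap_load_ccb() also invokes the callback for CAM_DIR_NONE CCBs, with a NULL segment array and nsegs == 0, callbacks that previously assumed at least one segment need the kind of guard added above (the siis hunk at the end of this commit makes the same change). Condensed sketch of the shape; the request type and helpers are placeholders:

static void
my_dma_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        struct myreq *r = arg;                  /* placeholder request */
        int i;

        if (error != 0) {
                my_fail_request(r, error);      /* placeholder error path */
                return;
        }
        if (nsegs != 0) {
                for (i = 0; i < nsegs; i++)
                        my_add_sg(r, segs[i].ds_addr, segs[i].ds_len);
                /* Only sync when there is data to move. */
                bus_dmamap_sync(r->tag, r->map, r->write ?
                    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
        }
        my_submit(r);                           /* submit either way */
}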
@ -661,6 +638,7 @@ static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb)
case 0x2f:
case 0x8f: /* VERIFY_16 */
{
int error;
pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request);
if(!pCmd){
KdPrint(("Failed to allocate command!"));
@ -717,42 +695,20 @@ static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb)
pCmd->target = vd;
pCmd->done = os_cmddone;
pCmd->buildsgl = os_buildsgl;
pCmd->psg = ext->psg;
if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
int idx;
bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr;
if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS)
pCmd->flags.physical_sg = 1;
for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) {
pCmd->psg[idx].addr.bus = sgList[idx].ds_addr;
pCmd->psg[idx].size = sgList[idx].ds_len;
pCmd->psg[idx].eot = (idx==ccb->csio.sglist_cnt-1)? 1 : 0;
}
ccb->ccb_h.timeout_ch = timeout(hpt_timeout, pCmd, HPT_OSM_TIMEOUT);
ldm_queue_cmd(pCmd);
}
else {
int error;
pCmd->flags.physical_sg = 1;
error = bus_dmamap_load(vbus_ext->io_dmat,
ext->dma_map,
ccb->csio.data_ptr, ccb->csio.dxfer_len,
hpt_io_dmamap_callback, pCmd,
pCmd->flags.physical_sg = 1;
error = bus_dmamap_load_ccb(vbus_ext->io_dmat,
ext->dma_map, ccb,
hpt_io_dmamap_callback, pCmd,
BUS_DMA_WAITOK
);
KdPrint(("bus_dmamap_load return %d", error));
if (error && error!=EINPROGRESS) {
os_printk("bus_dmamap_load error %d", error);
cmdext_put(ext);
ldm_free_cmds(pCmd);
ccb->ccb_h.status = CAM_REQ_CMP_ERR;
xpt_done(ccb);
}
KdPrint(("bus_dmamap_load return %d", error));
if (error && error!=EINPROGRESS) {
os_printk("bus_dmamap_load error %d", error);
cmdext_put(ext);
ldm_free_cmds(pCmd);
ccb->ccb_h.status = CAM_REQ_CMP_ERR;
xpt_done(ccb);
}
return;
}


@ -2358,6 +2358,7 @@ static void hptiop_action(struct cam_sim *sim, union ccb *ccb)
{
struct hpt_iop_hba * hba = (struct hpt_iop_hba *)cam_sim_softc(sim);
struct hpt_iop_srb * srb;
int error;
switch (ccb->ccb_h.func_code) {
@ -2380,52 +2381,22 @@ static void hptiop_action(struct cam_sim *sim, union ccb *ccb)
}
srb->ccb = ccb;
error = bus_dmamap_load_ccb(hba->io_dmat,
srb->dma_map,
ccb,
hptiop_post_scsi_command,
srb,
0);
if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
hptiop_post_scsi_command(srb, NULL, 0, 0);
else if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
int error;
error = bus_dmamap_load(hba->io_dmat,
srb->dma_map,
ccb->csio.data_ptr,
ccb->csio.dxfer_len,
hptiop_post_scsi_command,
srb, 0);
if (error && error != EINPROGRESS) {
device_printf(hba->pcidev,
"%d bus_dmamap_load error %d",
hba->pciunit, error);
xpt_freeze_simq(hba->sim, 1);
ccb->ccb_h.status = CAM_REQ_CMP_ERR;
invalid:
hptiop_free_srb(hba, srb);
xpt_done(ccb);
goto scsi_done;
}
}
else {
device_printf(hba->pcidev,
"CAM_DATA_PHYS not supported");
ccb->ccb_h.status = CAM_REQ_CMP_ERR;
goto invalid;
}
}
else {
struct bus_dma_segment *segs;
if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0 ||
(ccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
device_printf(hba->pcidev, "SCSI cmd failed");
ccb->ccb_h.status=CAM_PROVIDE_FAIL;
goto invalid;
}
segs = (struct bus_dma_segment *)ccb->csio.data_ptr;
hptiop_post_scsi_command(srb, segs,
ccb->csio.sglist_cnt, 0);
if (error && error != EINPROGRESS) {
device_printf(hba->pcidev,
"%d bus_dmamap_load error %d",
hba->pciunit, error);
xpt_freeze_simq(hba->sim, 1);
ccb->ccb_h.status = CAM_REQ_CMP_ERR;
hptiop_free_srb(hba, srb);
xpt_done(ccb);
goto scsi_done;
}
scsi_done:
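hptiop keeps the distinction busdma reports from the load: EINPROGRESS means the callback will run later and is not a failure, while any other nonzero return fails the CCB. In generic form (my_callback, req and the cleanup helper are placeholders):

error = bus_dmamap_load_ccb(dmat, map, ccb, my_callback, req, 0);
if (error != 0 && error != EINPROGRESS) {
        ccb->ccb_h.status = CAM_REQ_CMP_ERR;    /* hard failure */
        my_free_request(req);                   /* placeholder cleanup */
        xpt_done(ccb);
}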


@ -2620,32 +2620,7 @@ launch_worker_thread(void)
int HPTLIBAPI fOsBuildSgl(_VBUS_ARG PCommand pCmd, FPSCAT_GATH pSg, int logical)
{
union ccb *ccb = (union ccb *)pCmd->pOrgCommand;
bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr;
int idx;
if(logical) {
if (ccb->ccb_h.flags & CAM_DATA_PHYS)
panic("physical address unsupported");
if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS)
panic("physical address unsupported");
for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) {
pSg[idx].dSgAddress = (ULONG_PTR)(UCHAR *)sgList[idx].ds_addr;
pSg[idx].wSgSize = sgList[idx].ds_len;
pSg[idx].wSgFlag = (idx==ccb->csio.sglist_cnt-1)? SG_FLAG_EOT : 0;
}
}
else {
pSg->dSgAddress = (ULONG_PTR)(UCHAR *)ccb->csio.data_ptr;
pSg->wSgSize = ccb->csio.dxfer_len;
pSg->wSgFlag = SG_FLAG_EOT;
}
return TRUE;
}
/* since we have provided physical sg, nobody will ask us to build physical sg */
HPT_ASSERT(0);
return FALSE;
@ -2757,24 +2732,28 @@ hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
HPT_ASSERT(pCmd->cf_physical_sg);
if (error || nsegs == 0)
if (error)
panic("busdma error");
HPT_ASSERT(nsegs<= MAX_SG_DESCRIPTORS);
for (idx = 0; idx < nsegs; idx++, psg++) {
psg->dSgAddress = (ULONG_PTR)(UCHAR *)segs[idx].ds_addr;
psg->wSgSize = segs[idx].ds_len;
psg->wSgFlag = (idx == nsegs-1)? SG_FLAG_EOT: 0;
/* KdPrint(("psg[%d]:add=%p,size=%x,flag=%x\n", idx, psg->dSgAddress,psg->wSgSize,psg->wSgFlag)); */
}
/* psg[-1].wSgFlag = SG_FLAG_EOT; */
if (pCmd->cf_data_in) {
bus_dmamap_sync(pAdapter->io_dma_parent, pmap->dma_map, BUS_DMASYNC_PREREAD);
}
else if (pCmd->cf_data_out) {
bus_dmamap_sync(pAdapter->io_dma_parent, pmap->dma_map, BUS_DMASYNC_PREWRITE);
if (nsegs != 0) {
for (idx = 0; idx < nsegs; idx++, psg++) {
psg->dSgAddress = (ULONG_PTR)(UCHAR *)segs[idx].ds_addr;
psg->wSgSize = segs[idx].ds_len;
psg->wSgFlag = (idx == nsegs-1)? SG_FLAG_EOT: 0;
/* KdPrint(("psg[%d]:add=%p,size=%x,flag=%x\n", idx, psg->dSgAddress,psg->wSgSize,psg->wSgFlag)); */
}
/* psg[-1].wSgFlag = SG_FLAG_EOT; */
if (pCmd->cf_data_in) {
bus_dmamap_sync(pAdapter->io_dma_parent, pmap->dma_map,
BUS_DMASYNC_PREREAD);
}
else if (pCmd->cf_data_out) {
bus_dmamap_sync(pAdapter->io_dma_parent, pmap->dma_map,
BUS_DMASYNC_PREWRITE);
}
}
ccb->ccb_h.timeout_ch = timeout(hpt_timeout, (caddr_t)ccb, 20*hz);
@ -2883,6 +2862,7 @@ OsSendCommand(_VBUS_ARG union ccb *ccb)
UCHAR CdbLength;
_VBUS_INST(pVDev->pVBus)
PCommand pCmd = AllocateCommand(_VBUS_P0);
int error;
HPT_ASSERT(pCmd);
CdbLength = csio->cdb_len;
@ -2960,40 +2940,21 @@ OsSendCommand(_VBUS_ARG union ccb *ccb)
break;
}
/*///////////////////////// */
if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
int idx;
bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr;
if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS)
pCmd->cf_physical_sg = 1;
for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) {
pCmd->pSgTable[idx].dSgAddress = (ULONG_PTR)(UCHAR *)sgList[idx].ds_addr;
pCmd->pSgTable[idx].wSgSize = sgList[idx].ds_len;
pCmd->pSgTable[idx].wSgFlag= (idx==ccb->csio.sglist_cnt-1)?SG_FLAG_EOT: 0;
}
ccb->ccb_h.timeout_ch = timeout(hpt_timeout, (caddr_t)ccb, 20*hz);
pVDev->pfnSendCommand(_VBUS_P pCmd);
}
else {
int error;
pCmd->cf_physical_sg = 1;
error = bus_dmamap_load(pAdapter->io_dma_parent,
pmap->dma_map,
ccb->csio.data_ptr, ccb->csio.dxfer_len,
hpt_io_dmamap_callback, pCmd,
BUS_DMA_WAITOK
);
KdPrint(("bus_dmamap_load return %d\n", error));
if (error && error!=EINPROGRESS) {
hpt_printk(("bus_dmamap_load error %d\n", error));
FreeCommand(_VBUS_P pCmd);
ccb->ccb_h.status = CAM_REQ_CMP_ERR;
dmamap_put(pmap);
pAdapter->outstandingCommands--;
xpt_done(ccb);
}
pCmd->cf_physical_sg = 1;
error = bus_dmamap_load_ccb(pAdapter->io_dma_parent,
pmap->dma_map,
ccb,
hpt_io_dmamap_callback,
pCmd, BUS_DMA_WAITOK
);
KdPrint(("bus_dmamap_load return %d\n", error));
if (error && error!=EINPROGRESS) {
hpt_printk(("bus_dmamap_load error %d\n", error));
FreeCommand(_VBUS_P pCmd);
ccb->ccb_h.status = CAM_REQ_CMP_ERR;
dmamap_put(pmap);
pAdapter->outstandingCommands--;
xpt_done(ccb);
}
goto Command_Complished;
}


@ -481,32 +481,6 @@ static void os_cmddone(PCOMMAND pCmd)
static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical)
{
POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
union ccb *ccb = ext->ccb;
bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr;
int idx;
if(logical) {
if (ccb->ccb_h.flags & CAM_DATA_PHYS)
panic("physical address unsupported");
if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS)
panic("physical address unsupported");
for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) {
os_set_sgptr(&pSg[idx], (HPT_U8 *)(HPT_UPTR)sgList[idx].ds_addr);
pSg[idx].size = sgList[idx].ds_len;
pSg[idx].eot = (idx==ccb->csio.sglist_cnt-1)? 1 : 0;
}
}
else {
os_set_sgptr(pSg, (HPT_U8 *)ccb->csio.data_ptr);
pSg->size = ccb->csio.dxfer_len;
pSg->eot = 1;
}
return TRUE;
}
/* since we have provided physical sg, nobody will ask us to build physical sg */
HPT_ASSERT(0);
@ -522,25 +496,28 @@ static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs
HPT_ASSERT(pCmd->flags.physical_sg);
if (error || nsegs == 0)
if (error)
panic("busdma error");
HPT_ASSERT(nsegs<=os_max_sg_descriptors);
for (idx = 0; idx < nsegs; idx++, psg++) {
psg->addr.bus = segs[idx].ds_addr;
psg->size = segs[idx].ds_len;
psg->eot = 0;
if (nsegs != 0) {
for (idx = 0; idx < nsegs; idx++, psg++) {
psg->addr.bus = segs[idx].ds_addr;
psg->size = segs[idx].ds_len;
psg->eot = 0;
}
psg[-1].eot = 1;
if (pCmd->flags.data_in) {
bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map,
BUS_DMASYNC_PREREAD);
}
else if (pCmd->flags.data_out) {
bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map,
BUS_DMASYNC_PREWRITE);
}
}
psg[-1].eot = 1;
if (pCmd->flags.data_in) {
bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREREAD);
}
else if (pCmd->flags.data_out) {
bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_PREWRITE);
}
ext->ccb->ccb_h.timeout_ch = timeout(hpt_timeout, pCmd, HPT_OSM_TIMEOUT);
ldm_queue_cmd(pCmd);
}
@ -667,6 +644,8 @@ static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb)
case 0x13:
case 0x2f:
{
int error;
pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request);
if(!pCmd){
KdPrint(("Failed to allocate command!"));
@ -722,42 +701,21 @@ static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb)
pCmd->target = vd;
pCmd->done = os_cmddone;
pCmd->buildsgl = os_buildsgl;
pCmd->psg = ext->psg;
if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
int idx;
bus_dma_segment_t *sgList = (bus_dma_segment_t *)ccb->csio.data_ptr;
if (ccb->ccb_h.flags & CAM_SG_LIST_PHYS)
pCmd->flags.physical_sg = 1;
for (idx = 0; idx < ccb->csio.sglist_cnt; idx++) {
pCmd->psg[idx].addr.bus = sgList[idx].ds_addr;
pCmd->psg[idx].size = sgList[idx].ds_len;
pCmd->psg[idx].eot = (idx==ccb->csio.sglist_cnt-1)? 1 : 0;
}
ccb->ccb_h.timeout_ch = timeout(hpt_timeout, pCmd, HPT_OSM_TIMEOUT);
ldm_queue_cmd(pCmd);
}
else {
int error;
pCmd->flags.physical_sg = 1;
error = bus_dmamap_load(vbus_ext->io_dmat,
ext->dma_map,
ccb->csio.data_ptr, ccb->csio.dxfer_len,
hpt_io_dmamap_callback, pCmd,
BUS_DMA_WAITOK
pCmd->flags.physical_sg = 1;
error = bus_dmamap_load_ccb(vbus_ext->io_dmat,
ext->dma_map,
ccb,
hpt_io_dmamap_callback, pCmd,
BUS_DMA_WAITOK
);
KdPrint(("bus_dmamap_load return %d", error));
if (error && error!=EINPROGRESS) {
os_printk("bus_dmamap_load error %d", error);
cmdext_put(ext);
ldm_free_cmds(pCmd);
ccb->ccb_h.status = CAM_REQ_CMP_ERR;
xpt_done(ccb);
}
KdPrint(("bus_dmamap_load return %d", error));
if (error && error!=EINPROGRESS) {
os_printk("bus_dmamap_load error %d", error);
cmdext_put(ext);
ldm_free_cmds(pCmd);
ccb->ccb_h.status = CAM_REQ_CMP_ERR;
xpt_done(ccb);
}
return;
}


@ -794,6 +794,7 @@ gdt_raw_cmd(struct gdt_softc *gdt, union ccb *ccb, int *lock)
{
struct gdt_ccb *gccb;
struct cam_sim *sim;
int error;
GDT_DPRINTF(GDT_D_CMD, ("gdt_raw_cmd(%p, %p)\n", gdt, ccb));
@ -844,51 +845,14 @@ gdt_raw_cmd(struct gdt_softc *gdt, union ccb *ccb, int *lock)
gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_DATA,
gccb->gc_scratch_busbase);
/*
* If we have any data to send with this command,
* map it into bus space.
*/
/* Only use S/G if there is a transfer */
if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
int s;
int error;
/* vorher unlock von splcam() ??? */
s = splsoftvm();
error =
bus_dmamap_load(gdt->sc_buffer_dmat,
gccb->gc_dmamap,
ccb->csio.data_ptr,
ccb->csio.dxfer_len,
gdtexecuteccb,
gccb, /*flags*/0);
if (error == EINPROGRESS) {
xpt_freeze_simq(sim, 1);
gccb->gc_ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
}
splx(s);
} else {
panic("iir: CAM_DATA_PHYS not supported");
}
} else {
struct bus_dma_segment *segs;
if ((ccb->ccb_h.flags & CAM_DATA_PHYS) != 0)
panic("iir%d: iir_action - Physical "
"segment pointers unsupported", gdt->sc_hanum);
if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS)==0)
panic("iir%d: iir_action - Virtual "
"segment addresses unsupported", gdt->sc_hanum);
/* Just use the segments provided */
segs = (struct bus_dma_segment *)ccb->csio.data_ptr;
gdtexecuteccb(gccb, segs, ccb->csio.sglist_cnt, 0);
}
} else {
gdtexecuteccb(gccb, NULL, 0, 0);
error = bus_dmamap_load_ccb(gdt->sc_buffer_dmat,
gccb->gc_dmamap,
ccb,
gdtexecuteccb,
gccb, /*flags*/0);
if (error == EINPROGRESS) {
xpt_freeze_simq(sim, 1);
gccb->gc_ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
}
*lock = splcam();
@ -903,6 +867,7 @@ gdt_cache_cmd(struct gdt_softc *gdt, union ccb *ccb, int *lock)
u_int8_t *cmdp;
u_int16_t opcode;
u_int32_t blockno, blockcnt;
int error;
GDT_DPRINTF(GDT_D_CMD, ("gdt_cache_cmd(%p, %p)\n", gdt, ccb));
@ -953,49 +918,15 @@ gdt_cache_cmd(struct gdt_softc *gdt, union ccb *ccb, int *lock)
gdt_enc32(gccb->gc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT,
blockcnt);
/*
* If we have any data to send with this command,
* map it into bus space.
*/
/* Only use S/G if there is a transfer */
if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
int s;
int error;
/* vorher unlock von splcam() ??? */
s = splsoftvm();
error =
bus_dmamap_load(gdt->sc_buffer_dmat,
error = bus_dmamap_load_ccb(gdt->sc_buffer_dmat,
gccb->gc_dmamap,
ccb->csio.data_ptr,
ccb->csio.dxfer_len,
ccb,
gdtexecuteccb,
gccb, /*flags*/0);
if (error == EINPROGRESS) {
xpt_freeze_simq(sim, 1);
gccb->gc_ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
}
splx(s);
} else {
panic("iir: CAM_DATA_PHYS not supported");
}
} else {
struct bus_dma_segment *segs;
if ((ccb->ccb_h.flags & CAM_DATA_PHYS) != 0)
panic("iir%d: iir_action - Physical "
"segment pointers unsupported", gdt->sc_hanum);
if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS)==0)
panic("iir%d: iir_action - Virtual "
"segment addresses unsupported", gdt->sc_hanum);
/* Just use the segments provided */
segs = (struct bus_dma_segment *)ccb->csio.data_ptr;
gdtexecuteccb(gccb, segs, ccb->csio.sglist_cnt, 0);
if (error == EINPROGRESS) {
xpt_freeze_simq(sim, 1);
gccb->gc_ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
}
*lock = splcam();
return (gccb);
}


@ -713,7 +713,6 @@ void
isci_io_request_execute_scsi_io(union ccb *ccb,
struct ISCI_CONTROLLER *controller)
{
struct ccb_scsiio *csio = &ccb->csio;
target_id_t target_id = ccb->ccb_h.target_id;
struct ISCI_REQUEST *request;
struct ISCI_IO_REQUEST *io_request;
@ -748,29 +747,21 @@ isci_io_request_execute_scsi_io(union ccb *ccb,
io_request->current_sge_index = 0;
io_request->parent.remote_device_handle = device->sci_object;
if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) != 0)
panic("Unexpected CAM_SCATTER_VALID flag! flags = 0x%x\n",
if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
panic("Unexpected cam data format! flags = 0x%x\n",
ccb->ccb_h.flags);
if ((ccb->ccb_h.flags & CAM_DATA_PHYS) != 0)
panic("Unexpected CAM_DATA_PHYS flag! flags = 0x%x\n",
ccb->ccb_h.flags);
if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
error = bus_dmamap_load(io_request->parent.dma_tag,
io_request->parent.dma_map, csio->data_ptr, csio->dxfer_len,
isci_io_request_construct, io_request, 0x0);
/* A resource shortage from BUSDMA will be automatically
* continued at a later point, pushing the CCB processing
* forward, which will in turn unfreeze the simq.
*/
if (error == EINPROGRESS) {
xpt_freeze_simq(controller->sim, 1);
ccb->ccb_h.flags |= CAM_RELEASE_SIMQ;
}
} else
isci_io_request_construct(io_request, NULL, 0, 0);
error = bus_dmamap_load_ccb(io_request->parent.dma_tag,
io_request->parent.dma_map, ccb,
isci_io_request_construct, io_request, 0x0);
/* A resource shortage from BUSDMA will be automatically
* continued at a later point, pushing the CCB processing
* forward, which will in turn unfreeze the simq.
*/
if (error == EINPROGRESS) {
xpt_freeze_simq(controller->sim, 1);
ccb->ccb_h.flags |= CAM_RELEASE_SIMQ;
}
}
void


@ -1922,6 +1922,7 @@ isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *ff)
mush_t mush, *mp;
void (*eptr)(void *, bus_dma_segment_t *, int, int);
void (*eptr2)(void *, bus_dma_segment_t *, int, bus_size_t, int);
int error;
mp = &mush;
mp->isp = isp;
@ -1942,70 +1943,17 @@ isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *ff)
}
if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || (csio->dxfer_len == 0)) {
(*eptr)(mp, NULL, 0, 0);
} else if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
int error;
error = bus_dmamap_load(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
#if 0
xpt_print(csio->ccb_h.path, "%s: bus_dmamap_load " "ptr %p len %d returned %d\n", __func__, csio->data_ptr, csio->dxfer_len, error);
#endif
if (error == EINPROGRESS) {
bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap);
mp->error = EINVAL;
isp_prt(isp, ISP_LOGERR, "deferred dma allocation not supported");
} else if (error && mp->error == 0) {
error = bus_dmamap_load_ccb(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap,
(union ccb *)csio, eptr, mp, 0);
if (error == EINPROGRESS) {
bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap);
mp->error = EINVAL;
isp_prt(isp, ISP_LOGERR, "deferred dma allocation not supported");
} else if (error && mp->error == 0) {
#ifdef DIAGNOSTIC
isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error);
isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error);
#endif
mp->error = error;
}
} else {
/* Pointer to physical buffer */
struct bus_dma_segment seg;
seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr;
seg.ds_len = csio->dxfer_len;
(*eptr)(mp, &seg, 1, 0);
}
} else {
struct bus_dma_segment *segs;
if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
isp_prt(isp, ISP_LOGERR, "Physical segment pointers unsupported");
mp->error = EINVAL;
} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
struct uio sguio;
int error;
/*
* We're taking advantage of the fact that
* the pointer/length sizes and layout of the iovec
* structure are the same as the bus_dma_segment
* structure. This might be a little dangerous,
* but only if they change the structures, which
* seems unlikely.
*/
KASSERT((sizeof (sguio.uio_iov) == sizeof (csio->data_ptr) &&
sizeof (sguio.uio_iovcnt) >= sizeof (csio->sglist_cnt) &&
sizeof (sguio.uio_resid) >= sizeof (csio->dxfer_len)), ("Ken's assumption failed"));
sguio.uio_iov = (struct iovec *)csio->data_ptr;
sguio.uio_iovcnt = csio->sglist_cnt;
sguio.uio_resid = csio->dxfer_len;
sguio.uio_segflg = UIO_SYSSPACE;
error = bus_dmamap_load_uio(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, &sguio, eptr2, mp, 0);
if (error != 0 && mp->error == 0) {
isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error);
mp->error = error;
}
} else {
/* Just use the segments provided */
segs = (struct bus_dma_segment *) csio->data_ptr;
(*eptr)(mp, segs, csio->sglist_cnt, 0);
}
mp->error = error;
}
if (mp->error) {
int retval = CMD_COMPLETE;


@ -635,6 +635,7 @@ isp_sbus_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *ff)
{
mush_t mush, *mp;
void (*eptr)(void *, bus_dma_segment_t *, int, int);
int error;
mp = &mush;
mp->isp = isp;
@ -645,47 +646,18 @@ isp_sbus_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *ff)
eptr = dma2;
if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || (csio->dxfer_len == 0)) {
(*eptr)(mp, NULL, 0, 0);
} else if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
int error;
error = bus_dmamap_load(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
#if 0
xpt_print(csio->ccb_h.path, "%s: bus_dmamap_load " "ptr %p len %d returned %d\n", __func__, csio->data_ptr, csio->dxfer_len, error);
#endif
if (error == EINPROGRESS) {
bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap);
mp->error = EINVAL;
isp_prt(isp, ISP_LOGERR, "deferred dma allocation not supported");
} else if (error && mp->error == 0) {
error = bus_dmamap_load_ccb(isp->isp_osinfo.dmat,
PISP_PCMD(csio)->dmap, (union ccb *)csio, eptr, mp, 0);
if (error == EINPROGRESS) {
bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap);
mp->error = EINVAL;
isp_prt(isp, ISP_LOGERR,
"deferred dma allocation not supported");
} else if (error && mp->error == 0) {
#ifdef DIAGNOSTIC
isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error);
isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error);
#endif
mp->error = error;
}
} else {
/* Pointer to physical buffer */
struct bus_dma_segment seg;
seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr;
seg.ds_len = csio->dxfer_len;
(*eptr)(mp, &seg, 1, 0);
}
} else {
struct bus_dma_segment *segs;
if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
isp_prt(isp, ISP_LOGERR, "Physical segment pointers unsupported");
mp->error = EINVAL;
} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
isp_prt(isp, ISP_LOGERR, "Physical SG/LIST Phys segment pointers unsupported");
mp->error = EINVAL;
} else {
/* Just use the segments provided */
segs = (struct bus_dma_segment *) csio->data_ptr;
(*eptr)(mp, segs, csio->sglist_cnt, 0);
}
mp->error = error;
}
if (mp->error) {
int retval = CMD_COMPLETE;


@ -2267,8 +2267,14 @@ mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP )) {
polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
if (cm->cm_flags & MFI_CMD_CCB)
error = bus_dmamap_load_ccb(sc->mfi_buffer_dmat,
cm->cm_dmamap, cm->cm_data, mfi_data_cb, cm,
polled);
else
error = bus_dmamap_load(sc->mfi_buffer_dmat,
cm->cm_dmamap, cm->cm_data, cm->cm_len,
mfi_data_cb, cm, polled);
if (error == EINPROGRESS) {
sc->mfi_flags |= MFI_FLAGS_QFRZN;
return (0);


@ -265,17 +265,6 @@ mfip_cam_action(struct cam_sim *sim, union ccb *ccb)
ccbh->status = CAM_REQ_INVALID;
break;
}
if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
if (ccbh->flags & CAM_DATA_PHYS) {
ccbh->status = CAM_REQ_INVALID;
break;
}
if (ccbh->flags & CAM_SCATTER_VALID) {
ccbh->status = CAM_REQ_INVALID;
break;
}
}
ccbh->ccb_mfip_ptr = sc;
TAILQ_INSERT_TAIL(&mfisc->mfi_cam_ccbq, ccbh, sim_links.tqe);
mfi_startio(mfisc);
@ -380,14 +369,14 @@ mfip_start(void *data)
cm->cm_private = ccb;
cm->cm_sg = &pt->sgl;
cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
cm->cm_data = csio->data_ptr;
cm->cm_data = ccb;
cm->cm_len = csio->dxfer_len;
switch (ccbh->flags & CAM_DIR_MASK) {
case CAM_DIR_IN:
cm->cm_flags = MFI_CMD_DATAIN;
cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_CCB;
break;
case CAM_DIR_OUT:
cm->cm_flags = MFI_CMD_DATAOUT;
cm->cm_flags = MFI_CMD_DATAOUT | MFI_CMD_CCB;
break;
case CAM_DIR_NONE:
default:


@ -107,6 +107,7 @@ struct mfi_command {
#define MFI_ON_MFIQ_BUSY (1<<7)
#define MFI_ON_MFIQ_MASK ((1<<5)|(1<<6)|(1<<7))
#define MFI_CMD_SCSI (1<<8)
#define MFI_CMD_CCB (1<<9)
uint8_t retry_for_fw_reset;
void (* cm_complete)(struct mfi_command *cm);
void *cm_private;


@ -1864,9 +1864,13 @@ mly_map_command(struct mly_command *mc)
/* does the command have a data buffer? */
if (mc->mc_data != NULL) {
bus_dmamap_load(sc->mly_buffer_dmat, mc->mc_datamap, mc->mc_data, mc->mc_length,
mly_map_command_sg, mc, 0);
if (mc->mc_flags & MLY_CMD_CCB)
bus_dmamap_load_ccb(sc->mly_buffer_dmat, mc->mc_datamap,
mc->mc_data, mly_map_command_sg, mc, 0);
else
bus_dmamap_load(sc->mly_buffer_dmat, mc->mc_datamap,
mc->mc_data, mc->mc_length,
mly_map_command_sg, mc, 0);
if (mc->mc_flags & MLY_CMD_DATAIN)
bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_PREREAD);
if (mc->mc_flags & MLY_CMD_DATAOUT)
@ -2220,18 +2224,6 @@ mly_cam_action_io(struct cam_sim *sim, struct ccb_scsiio *csio)
csio->ccb_h.status = CAM_REQ_CMP_ERR;
}
/* if there is data transfer, it must be to/from a virtual address */
if ((csio->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
if (csio->ccb_h.flags & CAM_DATA_PHYS) { /* we can't map it */
debug(0, " data pointer is to physical address");
csio->ccb_h.status = CAM_REQ_CMP_ERR;
}
if (csio->ccb_h.flags & CAM_SCATTER_VALID) { /* we want to do the s/g setup */
debug(0, " data has premature s/g setup");
csio->ccb_h.status = CAM_REQ_CMP_ERR;
}
}
/* abandon aborted ccbs or those that have failed validation */
if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
debug(2, "abandoning CCB due to abort/validation failure");
@ -2251,10 +2243,12 @@ mly_cam_action_io(struct cam_sim *sim, struct ccb_scsiio *csio)
}
/* build the command */
mc->mc_data = csio->data_ptr;
mc->mc_data = csio;
mc->mc_length = csio->dxfer_len;
mc->mc_complete = mly_cam_complete;
mc->mc_private = csio;
mc->mc_flags |= MLY_CMD_CCB;
/* XXX This code doesn't set the data direction in mc_flags. */
/* save the bus number in the ccb for later recovery XXX should be a better way */
csio->ccb_h.sim_priv.entries[0].field = bus;
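The new XXX comment above records that this CCB path never turns the CAM direction into MLY_CMD_DATAIN/MLY_CMD_DATAOUT. For reference, the translation is a direct mapping from CAM_DIR_MASK; a hedged sketch using the mly flag names defined below, not code from this commit:

switch (csio->ccb_h.flags & CAM_DIR_MASK) {
case CAM_DIR_IN:
        mc->mc_flags |= MLY_CMD_DATAIN;         /* controller -> host */
        break;
case CAM_DIR_OUT:
        mc->mc_flags |= MLY_CMD_DATAOUT;        /* host -> controller */
        break;
default:
        break;                                  /* no data phase */
}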


@ -126,6 +126,7 @@ struct mly_command {
#define MLY_CMD_MAPPED (1<<3) /* command has had its data mapped */
#define MLY_CMD_DATAIN (1<<4) /* data moves controller->system */
#define MLY_CMD_DATAOUT (1<<5) /* data moves system->controller */
#define MLY_CMD_CCB (1<<6) /* data is ccb. */
u_int16_t mc_status; /* command completion status */
u_int8_t mc_sense; /* sense data length */
int32_t mc_resid; /* I/O residual count */


@ -2278,6 +2278,9 @@ mps_map_command(struct mps_softc *sc, struct mps_command *cm)
if (cm->cm_flags & MPS_CM_FLAGS_USE_UIO) {
error = bus_dmamap_load_uio(sc->buffer_dmat, cm->cm_dmamap,
&cm->cm_uio, mps_data_cb2, cm, 0);
} else if (cm->cm_flags & MPS_CM_FLAGS_USE_CCB) {
error = bus_dmamap_load_ccb(sc->buffer_dmat, cm->cm_dmamap,
cm->cm_data, mps_data_cb, cm, 0);
} else if ((cm->cm_data != NULL) && (cm->cm_length != 0)) {
error = bus_dmamap_load(sc->buffer_dmat, cm->cm_dmamap,
cm->cm_data, cm->cm_length, mps_data_cb, cm, 0);

View File

@ -1755,8 +1755,13 @@ mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
}
}
cm->cm_data = csio->data_ptr;
cm->cm_length = csio->dxfer_len;
if (cm->cm_length != 0) {
cm->cm_data = ccb;
cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
} else {
cm->cm_data = NULL;
}
cm->cm_sge = &req->SGL;
cm->cm_sglsize = (32 - 24) * 4;
cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
@ -2691,19 +2696,15 @@ mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
/*
* XXX We don't yet support physical addresses here.
*/
if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
case CAM_DATA_PADDR:
case CAM_DATA_SG_PADDR:
mps_printf(sc, "%s: physical addresses not supported\n",
__func__);
ccb->ccb_h.status = CAM_REQ_INVALID;
xpt_done(ccb);
return;
}
/*
* If the user wants to send an S/G list, check to make sure they
* have single buffers.
*/
if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
case CAM_DATA_SG:
/*
* The chip does not support more than one buffer for the
* request or response.
@ -2741,9 +2742,15 @@ mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
} else
response = ccb->smpio.smp_response;
} else {
break;
case CAM_DATA_VADDR:
request = ccb->smpio.smp_request;
response = ccb->smpio.smp_response;
break;
default:
ccb->ccb_h.status = CAM_REQ_INVALID;
xpt_done(ccb);
return;
}
cm = mps_alloc_command(sc);


@ -231,6 +231,7 @@ struct mps_command {
#define MPS_CM_FLAGS_SMP_PASS (1 << 8)
#define MPS_CM_FLAGS_CHAIN_FAILED (1 << 9)
#define MPS_CM_FLAGS_ERROR_MASK MPS_CM_FLAGS_CHAIN_FAILED
#define MPS_CM_FLAGS_USE_CCB (1 << 10)
u_int cm_state;
#define MPS_CM_STATE_FREE 0
#define MPS_CM_STATE_BUSY 1


@ -1382,7 +1382,7 @@ mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
}
}
if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
bus_dmasync_op_t op;
if (istgt == 0) {
if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
@ -1623,7 +1623,7 @@ mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
mpt_prt(mpt,
"mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
ccb->ccb_h.status & CAM_STATUS_MASK);
if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
if (nseg) {
bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
}
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
@ -1785,7 +1785,7 @@ mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
}
}
if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
bus_dmasync_op_t op;
if (istgt) {
if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
@ -2010,7 +2010,7 @@ mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
mpt_prt(mpt,
"mpt_execute_req: I/O cancelled (status 0x%x)\n",
ccb->ccb_h.status & CAM_STATUS_MASK);
if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
if (nseg) {
bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
}
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
@ -2062,6 +2062,7 @@ mpt_start(struct cam_sim *sim, union ccb *ccb)
bus_dmamap_callback_t *cb;
target_id_t tgt;
int raid_passthru;
int error;
/* Get the pointer for the physical addapter */
mpt = ccb->ccb_h.ccb_mpt_ptr;
@ -2206,64 +2207,15 @@ mpt_start(struct cam_sim *sim, union ccb *ccb)
ccb->ccb_h.target_lun, req, req->serno);
}
/*
* If we have any data to send with this command map it into bus space.
*/
if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
/*
* We've been given a pointer to a single buffer.
*/
if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
/*
* Virtual address that needs to translated into
* one or more physical address ranges.
*/
int error;
int s = splsoftvm();
error = bus_dmamap_load(mpt->buffer_dmat,
req->dmap, csio->data_ptr, csio->dxfer_len,
cb, req, 0);
splx(s);
if (error == EINPROGRESS) {
/*
* So as to maintain ordering,
* freeze the controller queue
* until our mapping is
* returned.
*/
xpt_freeze_simq(mpt->sim, 1);
ccbh->status |= CAM_RELEASE_SIMQ;
}
} else {
/*
* We have been given a pointer to single
* physical buffer.
*/
struct bus_dma_segment seg;
seg.ds_addr =
(bus_addr_t)(vm_offset_t)csio->data_ptr;
seg.ds_len = csio->dxfer_len;
(*cb)(req, &seg, 1, 0);
}
} else {
/*
* We have been given a list of addresses.
* This case could be easily supported but they are not
* currently generated by the CAM subsystem so there
* is no point in wasting the time right now.
*/
struct bus_dma_segment *segs;
if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
(*cb)(req, NULL, 0, EFAULT);
} else {
/* Just use the segments provided */
segs = (struct bus_dma_segment *)csio->data_ptr;
(*cb)(req, segs, csio->sglist_cnt, 0);
}
}
} else {
(*cb)(req, NULL, 0, 0);
error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb, cb,
req, 0);
if (error == EINPROGRESS) {
/*
* So as to maintain ordering, freeze the controller queue
* until our mapping is returned.
*/
xpt_freeze_simq(mpt->sim, 1);
ccbh->status |= CAM_RELEASE_SIMQ;
}
}
@ -4458,6 +4410,7 @@ mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
bus_dmamap_callback_t *cb;
PTR_MSG_TARGET_ASSIST_REQUEST ta;
request_t *req;
int error;
KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
("dxfer_len %u but direction is NONE", csio->dxfer_len));
@ -4544,44 +4497,11 @@ mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
"nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);
if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
int error;
int s = splsoftvm();
error = bus_dmamap_load(mpt->buffer_dmat,
req->dmap, csio->data_ptr, csio->dxfer_len,
cb, req, 0);
splx(s);
if (error == EINPROGRESS) {
xpt_freeze_simq(mpt->sim, 1);
ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
}
} else {
/*
* We have been given a pointer to single
* physical buffer.
*/
struct bus_dma_segment seg;
seg.ds_addr = (bus_addr_t)
(vm_offset_t)csio->data_ptr;
seg.ds_len = csio->dxfer_len;
(*cb)(req, &seg, 1, 0);
}
} else {
/*
* We have been given a list of addresses.
* This case could be easily supported but they are not
* currently generated by the CAM subsystem so there
* is no point in wasting the time right now.
*/
struct bus_dma_segment *sgs;
if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
(*cb)(req, NULL, 0, EFAULT);
} else {
/* Just use the segments provided */
sgs = (struct bus_dma_segment *)csio->data_ptr;
(*cb)(req, sgs, csio->sglist_cnt, 0);
}
error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb,
cb, req, 0);
if (error == EINPROGRESS) {
xpt_freeze_simq(mpt->sim, 1);
ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
}
} else {
uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];

View File

@ -1260,19 +1260,9 @@ mvs_begin_transaction(device_t dev, union ccb *ccb)
mvs_set_edma_mode(dev, MVS_EDMA_OFF);
}
if (ch->numpslots == 0 || ch->basic_dma) {
void *buf;
bus_size_t size;
slot->state = MVS_SLOT_LOADING;
if (ccb->ccb_h.func_code == XPT_ATA_IO) {
buf = ccb->ataio.data_ptr;
size = ccb->ataio.dxfer_len;
} else {
buf = ccb->csio.data_ptr;
size = ccb->csio.dxfer_len;
}
bus_dmamap_load(ch->dma.data_tag, slot->dma.data_map,
buf, size, mvs_dmasetprd, slot, 0);
bus_dmamap_load_ccb(ch->dma.data_tag, slot->dma.data_map,
ccb, mvs_dmasetprd, slot, 0);
} else
mvs_legacy_execute_transaction(slot);
}

View File

@ -996,19 +996,9 @@ siis_begin_transaction(device_t dev, union ccb *ccb)
slot->dma.nsegs = 0;
/* If request moves data, setup and load SG list */
if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
void *buf;
bus_size_t size;
slot->state = SIIS_SLOT_LOADING;
if (ccb->ccb_h.func_code == XPT_ATA_IO) {
buf = ccb->ataio.data_ptr;
size = ccb->ataio.dxfer_len;
} else {
buf = ccb->csio.data_ptr;
size = ccb->csio.dxfer_len;
}
bus_dmamap_load(ch->dma.data_tag, slot->dma.data_map,
buf, size, siis_dmasetprd, slot, 0);
bus_dmamap_load_ccb(ch->dma.data_tag, slot->dma.data_map,
ccb, siis_dmasetprd, slot, 0);
} else
siis_execute_transaction(slot);
}
@ -1032,24 +1022,26 @@ siis_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
return;
}
KASSERT(nsegs <= SIIS_SG_ENTRIES, ("too many DMA segment entries\n"));
/* Get a piece of the workspace for this request */
ctp = (struct siis_cmd *)
(ch->dma.work + SIIS_CT_OFFSET + (SIIS_CT_SIZE * slot->slot));
/* Fill S/G table */
if (slot->ccb->ccb_h.func_code == XPT_ATA_IO)
prd = &ctp->u.ata.prd[0];
else
prd = &ctp->u.atapi.prd[0];
for (i = 0; i < nsegs; i++) {
prd[i].dba = htole64(segs[i].ds_addr);
prd[i].dbc = htole32(segs[i].ds_len);
prd[i].control = 0;
}
prd[nsegs - 1].control = htole32(SIIS_PRD_TRM);
slot->dma.nsegs = nsegs;
bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map,
((slot->ccb->ccb_h.flags & CAM_DIR_IN) ?
BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
if (nsegs != 0) {
/* Get a piece of the workspace for this request */
ctp = (struct siis_cmd *)(ch->dma.work + SIIS_CT_OFFSET +
(SIIS_CT_SIZE * slot->slot));
/* Fill S/G table */
if (slot->ccb->ccb_h.func_code == XPT_ATA_IO)
prd = &ctp->u.ata.prd[0];
else
prd = &ctp->u.atapi.prd[0];
for (i = 0; i < nsegs; i++) {
prd[i].dba = htole64(segs[i].ds_addr);
prd[i].dbc = htole32(segs[i].ds_len);
prd[i].control = 0;
}
prd[nsegs - 1].control = htole32(SIIS_PRD_TRM);
bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map,
((slot->ccb->ccb_h.flags & CAM_DIR_IN) ?
BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
}
siis_execute_transaction(slot);
}
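Note that bus_dmamap_load_ccb() always finishes through the driver callback: it reports zero segments for a CCB that moves no data and passes any error straight through, so PRD-fill callbacks of the siis/mvs style now guard the nsegs == 0 case as shown above. A stripped-down sketch of that callback contract (all foo_* names and the PRD helper are assumptions, not code from this commit):

/* Sketch of a dmasetprd-style callback; foo_* identifiers are made up. */
static void
foo_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct foo_slot *slot = arg;

	if (error != 0) {
		foo_end_transaction(slot, error);	/* fail the request */
		return;
	}
	slot->nsegs = nsegs;
	if (nsegs != 0) {
		foo_fill_prd(slot, segs, nsegs);	/* copy segs into the HW table */
		bus_dmamap_sync(slot->data_tag, slot->data_map,
		    (slot->ccb->ccb_h.flags & CAM_DIR_IN) ?
		    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
	}
	foo_execute_transaction(slot);
}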

View File

@ -7877,51 +7877,15 @@ sym_setup_data_and_start(hcb_p np, struct ccb_scsiio *csio, ccb_p cp)
return;
}
if (!(ccb_h->flags & CAM_SCATTER_VALID)) {
/* Single buffer */
if (!(ccb_h->flags & CAM_DATA_PHYS)) {
/* Buffer is virtual */
cp->dmamapped = (dir == CAM_DIR_IN) ?
SYM_DMA_READ : SYM_DMA_WRITE;
retv = bus_dmamap_load(np->data_dmat, cp->dmamap,
csio->data_ptr, csio->dxfer_len,
sym_execute_ccb, cp, 0);
if (retv == EINPROGRESS) {
cp->host_status = HS_WAIT;
xpt_freeze_simq(np->sim, 1);
csio->ccb_h.status |= CAM_RELEASE_SIMQ;
}
} else {
/* Buffer is physical */
struct bus_dma_segment seg;
seg.ds_addr = (bus_addr_t) csio->data_ptr;
sym_execute_ccb(cp, &seg, 1, 0);
}
} else {
/* Scatter/gather list */
struct bus_dma_segment *segs;
if ((ccb_h->flags & CAM_SG_LIST_PHYS) != 0) {
/* The SG list pointer is physical */
sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID);
goto out_abort;
}
if (!(ccb_h->flags & CAM_DATA_PHYS)) {
/* SG buffer pointers are virtual */
sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID);
goto out_abort;
}
/* SG buffer pointers are physical */
segs = (struct bus_dma_segment *)csio->data_ptr;
sym_execute_ccb(cp, segs, csio->sglist_cnt, 0);
cp->dmamapped = (dir == CAM_DIR_IN) ? SYM_DMA_READ : SYM_DMA_WRITE;
retv = bus_dmamap_load_ccb(np->data_dmat, cp->dmamap,
(union ccb *)csio, sym_execute_ccb, cp, 0);
if (retv == EINPROGRESS) {
cp->host_status = HS_WAIT;
xpt_freeze_simq(np->sim, 1);
csio->ccb_h.status |= CAM_RELEASE_SIMQ;
}
return;
out_abort:
sym_xpt_done(np, (union ccb *) csio, cp);
sym_free_ccb(np, cp);
}
/*

View File

@ -559,6 +559,7 @@ trm_action(struct cam_sim *psim, union ccb *pccb)
PDCB pDCB = NULL;
PSRB pSRB;
struct ccb_scsiio *pcsio;
int error;
pcsio = &pccb->csio;
TRM_DPRINTF(" XPT_SCSI_IO \n");
@ -614,71 +615,18 @@ trm_action(struct cam_sim *psim, union ccb *pccb)
} else
bcopy(pcsio->cdb_io.cdb_bytes,
pSRB->CmdBlock, pcsio->cdb_len);
if ((pccb->ccb_h.flags & CAM_DIR_MASK)
!= CAM_DIR_NONE) {
if ((pccb->ccb_h.flags &
CAM_SCATTER_VALID) == 0) {
if ((pccb->ccb_h.flags
& CAM_DATA_PHYS) == 0) {
int vmflags;
int error;
vmflags = splsoftvm();
error = bus_dmamap_load(
pACB->buffer_dmat,
error = bus_dmamap_load_ccb(pACB->buffer_dmat,
pSRB->dmamap,
pcsio->data_ptr,
pcsio->dxfer_len,
pccb,
trm_ExecuteSRB,
pSRB,
0);
if (error == EINPROGRESS) {
xpt_freeze_simq(
pACB->psim,
1);
pccb->ccb_h.status |=
CAM_RELEASE_SIMQ;
}
splx(vmflags);
} else {
struct bus_dma_segment seg;
/* Pointer to physical buffer */
seg.ds_addr =
(bus_addr_t)pcsio->data_ptr;
seg.ds_len = pcsio->dxfer_len;
trm_ExecuteSRB(pSRB, &seg, 1,
0);
}
} else {
/* CAM_SCATTER_VALID */
struct bus_dma_segment *segs;
if ((pccb->ccb_h.flags &
CAM_SG_LIST_PHYS) == 0 ||
(pccb->ccb_h.flags
& CAM_DATA_PHYS) != 0) {
pSRB->pNextSRB = pACB->pFreeSRB;
pACB->pFreeSRB = pSRB;
pccb->ccb_h.status =
CAM_PROVIDE_FAIL;
xpt_done(pccb);
splx(actionflags);
return;
}
/* cam SG list is physical,
* cam data is virtual
*/
segs = (struct bus_dma_segment *)
pcsio->data_ptr;
trm_ExecuteSRB(pSRB, segs,
pcsio->sglist_cnt, 1);
} /* CAM_SCATTER_VALID */
} else
trm_ExecuteSRB(pSRB, NULL, 0, 0);
}
if (error == EINPROGRESS) {
xpt_freeze_simq(pACB->psim, 1);
pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
}
break;
}
case XPT_GDEV_TYPE:
TRM_DPRINTF(" XPT_GDEV_TYPE \n");
pccb->ccb_h.status = CAM_REQ_INVALID;

View File

@ -72,6 +72,7 @@
#define TW_OSLI_REQ_FLAGS_PASSTHRU (1<<5) /* pass through request */
#define TW_OSLI_REQ_FLAGS_SLEEPING (1<<6) /* owner sleeping on this cmd */
#define TW_OSLI_REQ_FLAGS_FAILED (1<<7) /* bus_dmamap_load() failed */
#define TW_OSLI_REQ_FLAGS_CCB (1<<8) /* req is ccb. */
#ifdef TW_OSL_DEBUG

View File

@ -261,55 +261,23 @@ tw_osli_execute_scsi(struct tw_osli_req_context *req, union ccb *ccb)
scsi_req->cdb = csio->cdb_io.cdb_bytes;
scsi_req->cdb_len = csio->cdb_len;
if (!(ccb_h->flags & CAM_DATA_PHYS)) {
/* Virtual data addresses. Need to convert them... */
tw_osli_dbg_dprintf(3, sc,
"XPT_SCSI_IO: Single virtual address!");
if (!(ccb_h->flags & CAM_SCATTER_VALID)) {
if (csio->dxfer_len > TW_CL_MAX_IO_SIZE) {
tw_osli_printf(sc, "size = %d",
TW_CL_SEVERITY_ERROR_STRING,
TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
0x2106,
"I/O size too big",
csio->dxfer_len);
ccb_h->status = CAM_REQ_TOO_BIG;
ccb_h->status &= ~CAM_SIM_QUEUED;
xpt_done(ccb);
return(1);
}
if ((req->length = csio->dxfer_len)) {
req->data = csio->data_ptr;
scsi_req->sgl_entries = 1;
}
} else {
tw_osli_printf(sc, "",
TW_CL_SEVERITY_ERROR_STRING,
TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
0x2107,
"XPT_SCSI_IO: Got SGList");
ccb_h->status = CAM_REQ_INVALID;
ccb_h->status &= ~CAM_SIM_QUEUED;
xpt_done(ccb);
return(1);
}
} else {
/* Data addresses are physical. */
tw_osli_printf(sc, "",
if (csio->dxfer_len > TW_CL_MAX_IO_SIZE) {
tw_osli_printf(sc, "size = %d",
TW_CL_SEVERITY_ERROR_STRING,
TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
0x2108,
"XPT_SCSI_IO: Physical data addresses");
ccb_h->status = CAM_REQ_INVALID;
0x2106,
"I/O size too big",
csio->dxfer_len);
ccb_h->status = CAM_REQ_TOO_BIG;
ccb_h->status &= ~CAM_SIM_QUEUED;
xpt_done(ccb);
return(1);
}
req->data = ccb;
req->length = csio->dxfer_len;
req->flags |= TW_OSLI_REQ_FLAGS_CCB;
req->deadline = tw_osl_get_local_time() + (ccb_h->timeout / 1000);
/*
* twa_map_load_data_callback will fill in the SGL,
* and submit the I/O.

View File

@ -1473,6 +1473,10 @@ tw_osli_map_request(struct tw_osli_req_context *req)
twa_map_load_data_callback, req,
BUS_DMA_WAITOK);
mtx_unlock_spin(sc->io_lock);
} else if (req->flags & TW_OSLI_REQ_FLAGS_CCB) {
error = bus_dmamap_load_ccb(sc->dma_tag, req->dma_map,
req->data, twa_map_load_data_callback, req,
BUS_DMA_WAITOK);
} else {
/*
* There's only one CAM I/O thread running at a time.

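twa (and tws below) convert differently from mpt: the submit path only stashes the CCB pointer in the request and sets a flag, and the map routine later decides between bus_dmamap_load() and bus_dmamap_load_ccb(), as the hunk above shows. A condensed sketch of that dispatch, with hypothetical request fields (FOO_REQ_CCB, struct foo_req and foo_load_cb are illustrative, not from this commit):

/* Sketch of the flag-dispatch idiom used by twa/tws. */
static int
foo_map_request(struct foo_softc *sc, struct foo_req *req)
{
	int error;

	if (req->flags & FOO_REQ_CCB)
		error = bus_dmamap_load_ccb(sc->dma_tag, req->dma_map,
		    req->data /* stored as void *, really a union ccb * */,
		    foo_load_cb, req, BUS_DMA_WAITOK);
	else
		error = bus_dmamap_load(sc->dma_tag, req->dma_map,
		    req->data, req->length, foo_load_cb, req, BUS_DMA_WAITOK);
	/* EINPROGRESS means the load was queued and will finish via the callback. */
	return (error == EINPROGRESS ? 0 : error);
}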
View File

@ -137,6 +137,7 @@ enum tws_req_flags {
TWS_DIR_IN = 0x2,
TWS_DIR_OUT = 0x4,
TWS_DIR_NONE = 0x8,
TWS_DATA_CCB = 0x16,
};
enum tws_intrs {

View File

@ -739,39 +739,8 @@ tws_execute_scsi(struct tws_softc *sc, union ccb *ccb)
else
bcopy(csio->cdb_io.cdb_bytes, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len);
if (!(ccb_h->flags & CAM_DATA_PHYS)) {
/* Virtual data addresses. Need to convert them... */
if (!(ccb_h->flags & CAM_SCATTER_VALID)) {
if (csio->dxfer_len > TWS_MAX_IO_SIZE) {
TWS_TRACE(sc, "I/O is big", csio->dxfer_len, 0);
tws_release_request(req);
ccb_h->status = CAM_REQ_TOO_BIG;
xpt_done(ccb);
return(0);
}
req->length = csio->dxfer_len;
if (req->length) {
req->data = csio->data_ptr;
/* there is 1 sgl_entrie */
/* cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries |= 1; */
}
} else {
TWS_TRACE_DEBUG(sc, "got sglist", ccb_h->target_id, ccb_h->target_lun);
tws_release_request(req);
ccb_h->status = CAM_REQ_INVALID;
xpt_done(ccb);
return(0);
}
} else {
/* Data addresses are physical. */
TWS_TRACE_DEBUG(sc, "Phy data addr", ccb_h->target_id, ccb_h->target_lun);
tws_release_request(req);
ccb_h->status = CAM_REQ_INVALID;
ccb_h->status &= ~CAM_SIM_QUEUED;
xpt_done(ccb);
return(0);
}
req->data = ccb;
req->flags |= TWS_DATA_CCB;
/* save ccb ptr */
req->ccb_ptr = ccb;
/*
@ -961,10 +930,16 @@ tws_map_request(struct tws_softc *sc, struct tws_request *req)
* Map the data buffer into bus space and build the SG list.
*/
mtx_lock(&sc->io_lock);
error = bus_dmamap_load(sc->data_tag, req->dma_map,
req->data, req->length,
tws_dmamap_data_load_cbfn, req,
my_flags);
if (req->flags & TWS_DATA_CCB)
error = bus_dmamap_load_ccb(sc->data_tag, req->dma_map,
req->data,
tws_dmamap_data_load_cbfn, req,
my_flags);
else
error = bus_dmamap_load(sc->data_tag, req->dma_map,
req->data, req->length,
tws_dmamap_data_load_cbfn, req,
my_flags);
mtx_unlock(&sc->io_lock);
if (error == EINPROGRESS) {

View File

@ -961,28 +961,31 @@ vtscsi_sg_append_scsi_buf(struct vtscsi_softc *sc, struct sglist *sg,
ccbh = &csio->ccb_h;
error = 0;
if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
if ((ccbh->flags & CAM_DATA_PHYS) == 0)
error = sglist_append(sg,
csio->data_ptr, csio->dxfer_len);
else
error = sglist_append_phys(sg,
(vm_paddr_t)(vm_offset_t) csio->data_ptr,
csio->dxfer_len);
} else {
switch ((ccbh->flags & CAM_DATA_MASK)) {
case CAM_DATA_VADDR:
error = sglist_append(sg, csio->data_ptr, csio->dxfer_len);
break;
case CAM_DATA_PADDR:
error = sglist_append_phys(sg,
(vm_paddr_t)(vm_offset_t) csio->data_ptr, csio->dxfer_len);
break;
case CAM_DATA_SG:
for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];
if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0)
error = sglist_append(sg,
(void *)(vm_offset_t) dseg->ds_addr,
dseg->ds_len);
else
error = sglist_append_phys(sg,
(vm_paddr_t) dseg->ds_addr, dseg->ds_len);
error = sglist_append(sg,
(void *)(vm_offset_t) dseg->ds_addr, dseg->ds_len);
}
break;
case CAM_DATA_SG_PADDR:
for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];
error = sglist_append_phys(sg,
(vm_paddr_t) dseg->ds_addr, dseg->ds_len);
}
break;
default:
error = EINVAL;
break;
}
return (error);

View File

@ -1066,7 +1066,7 @@ wds_scsi_io(struct cam_sim * sim, struct ccb_scsiio * csio)
xpt_done((union ccb *) csio);
return;
}
if (ccb_h->flags & (CAM_CDB_PHYS | CAM_SCATTER_VALID | CAM_DATA_PHYS)) {
if ((ccb_h->flags & CAM_DATA_MASK) != CAM_DATA_VADDR) {
/* don't support these */
ccb_h->status = CAM_REQ_INVALID;
xpt_done((union ccb *) csio);
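The vtscsi and wds hunks show the consumer side of the CAM flag rework: drivers that inspect the data type themselves now switch on CAM_DATA_MASK rather than testing CAM_DATA_PHYS / CAM_SG_LIST_PHYS combinations. A small sketch of the decoding, assuming only the flag values that appear in the diffs above (the helper itself is illustrative, not part of the commit):

/* Sketch: classify a CCB's data addressing under the CAM_DATA_MASK scheme. */
static const char *
foo_data_kind(struct ccb_hdr *ccb_h)
{
	switch (ccb_h->flags & CAM_DATA_MASK) {
	case CAM_DATA_VADDR:	return ("single virtual buffer");
	case CAM_DATA_PADDR:	return ("single physical buffer");
	case CAM_DATA_SG:	return ("S/G list of virtual addresses");
	case CAM_DATA_SG_PADDR:	return ("S/G list of physical addresses");
	case CAM_DATA_BIO:	return ("struct bio attached to the CCB");
	default:		return ("unsupported");
	}
}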

View File

@ -31,14 +31,14 @@ __FBSDID("$FreeBSD$");
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/memdesc.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
@ -73,6 +73,7 @@ struct bounce_page {
vm_offset_t vaddr; /* kva of bounce buffer */
bus_addr_t busaddr; /* Physical address */
vm_offset_t datavaddr; /* kva of client data */
bus_addr_t dataaddr; /* client physical address */
bus_size_t datacount; /* client data count */
STAILQ_ENTRY(bounce_page) links;
};
@ -107,8 +108,7 @@ struct bus_dmamap {
int pagesneeded;
int pagesreserved;
bus_dma_tag_t dmat;
void *buf; /* unmapped buffer pointer */
bus_size_t buflen; /* unmapped buffer length */
struct memdesc mem;
bus_dmamap_callback_t *callback;
void *callback_arg;
STAILQ_ENTRY(bus_dmamap) links;
@ -123,7 +123,7 @@ static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
vm_offset_t vaddr, bus_size_t size);
vm_offset_t vaddr, bus_addr_t addr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr,
bus_size_t len);
@ -480,37 +480,44 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
}
}
/*
* Utility function to load a linear buffer. lastaddrp holds state
* between invocations (for multiple-buffer loads). segp contains
* the starting segment on entrace, and the ending segment on exit.
* first indicates if this is the first invocation of this function.
*/
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
bus_size_t buflen, struct thread *td, int flags, bus_addr_t *lastaddrp,
bus_dma_segment_t *segs, int *segp, int first)
static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
bus_size_t buflen, int flags)
{
bus_addr_t curaddr;
bus_size_t sgsize;
bus_addr_t curaddr, lastaddr, baddr, bmask;
vm_offset_t vaddr;
bus_addr_t paddr;
int seg;
pmap_t pmap;
if (map == NULL)
map = &nobounce_dmamap;
if (td != NULL)
pmap = vmspace_pmap(td->td_proc->p_vmspace);
else
pmap = NULL;
if ((dmat->lowaddr < paddr_max || dmat->boundary > 0 ||
dmat->alignment > 1) && map != &nobounce_dmamap &&
map->pagesneeded == 0) {
vm_offset_t vendaddr;
/*
* Count the number of bounce pages
* needed in order to complete this transfer
*/
curaddr = buf;
while (buflen != 0) {
sgsize = MIN(buflen, dmat->maxsegsz);
if (run_filter(dmat, curaddr, 0) != 0) {
sgsize = MIN(sgsize, PAGE_SIZE);
map->pagesneeded++;
}
curaddr += sgsize;
buflen -= sgsize;
}
}
}
static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
void *buf, bus_size_t buflen, int flags)
{
vm_offset_t vaddr;
vm_offset_t vendaddr;
bus_addr_t paddr;
if ((dmat->lowaddr < paddr_max || dmat->boundary > 0 ||
dmat->alignment > 1) && map != &nobounce_dmamap &&
map->pagesneeded == 0) {
/*
* Count the number of bounce pages
* needed in order to complete this transfer
@ -519,52 +526,177 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
vendaddr = (vm_offset_t)buf + buflen;
while (vaddr < vendaddr) {
if (pmap != NULL)
paddr = pmap_extract(pmap, vaddr);
else
if (pmap == kernel_pmap)
paddr = pmap_kextract(vaddr);
else
paddr = pmap_extract(pmap, vaddr);
if (run_filter(dmat, paddr, 0) != 0)
map->pagesneeded++;
vaddr += PAGE_SIZE;
}
}
}
static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{
/* Reserve Necessary Bounce Pages */
mtx_lock(&bounce_lock);
if (flags & BUS_DMA_NOWAIT) {
if (reserve_bounce_pages(dmat, map, 0) != 0) {
mtx_unlock(&bounce_lock);
return (ENOMEM);
}
} else {
if (reserve_bounce_pages(dmat, map, 1) != 0) {
/* Queue us for resources */
STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
map, links);
mtx_unlock(&bounce_lock);
return (EINPROGRESS);
}
}
mtx_unlock(&bounce_lock);
return (0);
}
/*
* Add a single contiguous physical range to the segment list.
*/
static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
bus_addr_t baddr, bmask;
int seg;
/*
* Make sure we don't cross any boundaries.
*/
bmask = ~(dmat->boundary - 1);
if (dmat->boundary > 0) {
baddr = (curaddr + dmat->boundary) & bmask;
if (sgsize > (baddr - curaddr))
sgsize = (baddr - curaddr);
}
/*
* Insert chunk into a segment, coalescing with
* previous segment if possible.
*/
seg = *segp;
if (seg == -1) {
seg = 0;
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
} else {
if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
(segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
(dmat->boundary == 0 ||
(segs[seg].ds_addr & bmask) == (curaddr & bmask)))
segs[seg].ds_len += sgsize;
else {
if (++seg >= dmat->nsegments)
return (0);
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
}
}
*segp = seg;
return (sgsize);
}
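_bus_dmamap_addseg() now concentrates the boundary clamp and segment coalescing that each loader used to open-code. As a concrete illustration of the clamp (the numbers are invented for the example, not taken from any real tag): with boundary = 0x10000, a chunk at curaddr = 0x1f000 of size 0x4000 is cut at the 0x20000 line. A standalone, user-space rendering of that arithmetic:

/* Worked example of the boundary clamp; compiles and runs on its own. */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t boundary = 0x10000, curaddr = 0x1f000, sgsize = 0x4000;
	uint64_t bmask = ~(boundary - 1);
	uint64_t baddr = (curaddr + boundary) & bmask;	/* next boundary: 0x20000 */

	if (sgsize > baddr - curaddr)
		sgsize = baddr - curaddr;		/* clamped to 0x1000 */
	printf("sgsize = 0x%jx\n", (uintmax_t)sgsize);
	return (0);
}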
/*
* Utility function to load a physical buffer. segp contains
* the starting segment on entrance, and the ending segment on exit.
*/
int
_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
int *segp)
{
bus_addr_t curaddr;
bus_size_t sgsize;
int error;
if (map == NULL)
map = &nobounce_dmamap;
if (segs == NULL)
segs = dmat->segments;
if (map != &nobounce_dmamap) {
_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
if (map->pagesneeded != 0) {
error = _bus_dmamap_reserve_pages(dmat, map, flags);
if (error)
return (error);
}
}
while (buflen > 0) {
curaddr = buf;
sgsize = MIN(buflen, dmat->maxsegsz);
if (map->pagesneeded != 0 &&
run_filter(dmat, curaddr, sgsize)) {
sgsize = MIN(sgsize, PAGE_SIZE);
curaddr = add_bounce_page(dmat, map, 0, curaddr,
sgsize);
}
sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
segp);
if (sgsize == 0)
break;
buf += sgsize;
buflen -= sgsize;
}
/*
* Did we fit?
*/
return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
/*
* Utility function to load a linear buffer. segp contains
* the starting segment on entrance, and the ending segment on exit.
*/
int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
bus_size_t buflen, pmap_t pmap, int flags,
bus_dma_segment_t *segs, int *segp)
{
bus_size_t sgsize;
bus_addr_t curaddr;
vm_offset_t vaddr;
int error;
if (map == NULL)
map = &nobounce_dmamap;
if (segs == NULL)
segs = dmat->segments;
if (map != &nobounce_dmamap) {
_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
if (map->pagesneeded != 0) {
error = _bus_dmamap_reserve_pages(dmat, map, flags);
if (error)
return (error);
}
}
vaddr = (vm_offset_t)buf;
/* Reserve Necessary Bounce Pages */
if (map->pagesneeded != 0) {
mtx_lock(&bounce_lock);
if (flags & BUS_DMA_NOWAIT) {
if (reserve_bounce_pages(dmat, map, 0) != 0) {
mtx_unlock(&bounce_lock);
return (ENOMEM);
}
} else {
if (reserve_bounce_pages(dmat, map, 1) != 0) {
/* Queue us for resources */
map->dmat = dmat;
map->buf = buf;
map->buflen = buflen;
STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
map, links);
mtx_unlock(&bounce_lock);
return (EINPROGRESS);
}
}
mtx_unlock(&bounce_lock);
}
lastaddr = *lastaddrp;
bmask = ~(dmat->boundary - 1);
for (seg = *segp; buflen > 0 ; ) {
while (buflen > 0) {
/*
* Get the physical address for this segment.
*/
if (pmap)
curaddr = pmap_extract(pmap, vaddr);
else
if (pmap == kernel_pmap)
curaddr = pmap_kextract(vaddr);
else
curaddr = pmap_extract(pmap, vaddr);
/*
* Compute the segment size, and adjust counts.
@ -575,211 +707,46 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
if (buflen < sgsize)
sgsize = buflen;
/*
* Make sure we don't cross any boundaries.
*/
if (dmat->boundary > 0) {
baddr = (curaddr + dmat->boundary) & bmask;
if (sgsize > (baddr - curaddr))
sgsize = (baddr - curaddr);
}
if (map->pagesneeded != 0 && run_filter(dmat, curaddr, sgsize))
curaddr = add_bounce_page(dmat, map, vaddr, sgsize);
curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
sgsize);
/*
* Insert chunk into a segment, coalescing with
* previous segment if possible.
*/
if (first) {
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
first = 0;
} else {
if (curaddr == lastaddr &&
(segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
(dmat->boundary == 0 ||
(segs[seg].ds_addr & bmask) == (curaddr & bmask)))
segs[seg].ds_len += sgsize;
else {
if (++seg >= dmat->nsegments)
break;
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
}
}
sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
segp);
if (sgsize == 0)
break;
lastaddr = curaddr + sgsize;
vaddr += sgsize;
buflen -= sgsize;
}
*segp = seg;
*lastaddrp = lastaddr;
/*
* Did we fit?
*/
return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
/*
* Map the buffer buf into bus space using the dmamap map.
*/
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
bus_size_t buflen, bus_dmamap_callback_t *callback, void *callback_arg,
int flags)
{
bus_addr_t lastaddr = 0;
int error, nsegs = 0;
void
__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
{
if (map != NULL) {
flags |= BUS_DMA_WAITOK;
map->dmat = dmat;
map->mem = *mem;
map->callback = callback;
map->callback_arg = callback_arg;
}
error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags,
&lastaddr, dmat->segments, &nsegs, 1);
if (error == EINPROGRESS)
return (error);
if (error)
(*callback)(callback_arg, dmat->segments, 0, error);
else
(*callback)(callback_arg, dmat->segments, nsegs + 1, 0);
return (0);
}
/*
* Like _bus_dmamap_load(), but for mbufs.
*/
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
bus_dma_segment_t *segs, int nsegs, int error)
{
int nsegs, error;
M_ASSERTPKTHDR(m0);
flags |= BUS_DMA_NOWAIT;
nsegs = 0;
error = 0;
if (m0->m_pkthdr.len <= dmat->maxsize) {
int first = 1;
bus_addr_t lastaddr = 0;
struct mbuf *m;
for (m = m0; m != NULL && error == 0; m = m->m_next) {
if (m->m_len > 0) {
error = _bus_dmamap_load_buffer(dmat, map,
m->m_data, m->m_len, NULL, flags,
&lastaddr, dmat->segments, &nsegs, first);
first = 0;
}
}
} else {
error = EINVAL;
}
if (error) {
/* force "no valid mappings" in callback */
(*callback)(callback_arg, dmat->segments, 0, 0, error);
} else {
(*callback)(callback_arg, dmat->segments, nsegs + 1,
m0->m_pkthdr.len, error);
}
return (error);
}
int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
bus_dma_segment_t *segs, int *nsegs, int flags)
{
int error;
M_ASSERTPKTHDR(m0);
flags |= BUS_DMA_NOWAIT;
*nsegs = 0;
error = 0;
if (m0->m_pkthdr.len <= dmat->maxsize) {
int first = 1;
bus_addr_t lastaddr = 0;
struct mbuf *m;
for (m = m0; m != NULL && error == 0; m = m->m_next) {
if (m->m_len > 0) {
error = _bus_dmamap_load_buffer(dmat, map,
m->m_data, m->m_len, NULL, flags,
&lastaddr, segs, nsegs, first);
first = 0;
}
}
++*nsegs;
} else {
error = EINVAL;
}
return (error);
}
/*
* Like _bus_dmamap_load(), but for uios.
*/
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
bus_addr_t lastaddr;
int nsegs, error, first, i;
bus_size_t resid;
struct iovec *iov;
struct thread *td = NULL;
flags |= BUS_DMA_NOWAIT;
resid = uio->uio_resid;
iov = uio->uio_iov;
if (uio->uio_segflg == UIO_USERSPACE) {
td = uio->uio_td;
KASSERT(td != NULL,
("bus_dmamap_load_uio: USERSPACE but no proc"));
}
nsegs = 0;
error = 0;
first = 1;
for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
/*
* Now at the first iovec to load. Load each iovec
* until we have exhausted the residual count.
*/
bus_size_t minlen =
resid < iov[i].iov_len ? resid : iov[i].iov_len;
caddr_t addr = (caddr_t) iov[i].iov_base;
if (minlen > 0) {
error = _bus_dmamap_load_buffer(dmat, map, addr,
minlen, td, flags, &lastaddr, dmat->segments,
&nsegs, first);
first = 0;
resid -= minlen;
}
}
if (error) {
/* force "no valid mappings" in callback */
(*callback)(callback_arg, dmat->segments, 0, 0, error);
} else {
(*callback)(callback_arg, dmat->segments, nsegs + 1,
uio->uio_resid, error);
}
return (error);
if (segs == NULL)
segs = dmat->segments;
return (segs);
}
/*
@ -810,8 +777,14 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
if (op & BUS_DMASYNC_PREWRITE) {
while (bpage != NULL) {
bcopy((void *)bpage->datavaddr,
(void *)bpage->vaddr, bpage->datacount);
if (bpage->datavaddr != 0)
bcopy((void *)bpage->datavaddr,
(void *)bpage->vaddr,
bpage->datacount);
else
physcopyout(bpage->dataaddr,
(void *)bpage->vaddr,
bpage->datacount);
bpage = STAILQ_NEXT(bpage, links);
}
total_bounced++;
@ -819,8 +792,14 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
if (op & BUS_DMASYNC_POSTREAD) {
while (bpage != NULL) {
bcopy((void *)bpage->vaddr,
(void *)bpage->datavaddr, bpage->datacount);
if (bpage->datavaddr != 0)
bcopy((void *)bpage->vaddr,
(void *)bpage->datavaddr,
bpage->datacount);
else
physcopyin((void *)bpage->vaddr,
bpage->dataaddr,
bpage->datacount);
bpage = STAILQ_NEXT(bpage, links);
}
total_bounced++;
@ -893,7 +872,7 @@ reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
bus_size_t size)
bus_addr_t addr, bus_size_t size)
{
struct bounce_page *bpage;
@ -924,6 +903,7 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
bpage->busaddr |= vaddr & PAGE_MASK;
}
bpage->datavaddr = vaddr;
bpage->dataaddr = addr;
bpage->datacount = size;
STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
return (bpage->busaddr);
@ -975,8 +955,8 @@ busdma_swi(void)
mtx_unlock(&bounce_lock);
dmat = map->dmat;
(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
map->callback, map->callback_arg, /*flags*/0);
bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
map->callback_arg, BUS_DMA_WAITOK);
(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
mtx_lock(&bounce_lock);
}
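With buf/buflen replaced by a struct memdesc, the deferred-load path no longer needs to know what kind of buffer was queued: __bus_dmamap_waitok() records the descriptor plus callback, and busdma_swi() replays the whole load through bus_dmamap_load_mem(), as the hunk above shows. A sketch of the replay step in isolation (it simply mirrors the swi handler; only the MD busdma file can reach dmat->lockfunc like this):

/* Sketch: replay one deferred mapping from its recorded memdesc. */
static void
foo_replay_deferred(bus_dmamap_t map)
{
	bus_dma_tag_t dmat = map->dmat;

	(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
	/* Works for vaddr, paddr, mbuf, uio, bio or ccb alike. */
	bus_dmamap_load_mem(dmat, map, &map->mem, map->callback,
	    map->callback_arg, BUS_DMA_WAITOK);
	(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
}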

460
sys/kern/subr_bus_dma.c Normal file
View File

@ -0,0 +1,460 @@
/*-
* Copyright (c) 2012 EMC Corp.
* All rights reserved.
*
* Copyright (c) 1997, 1998 Justin T. Gibbs.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_bus.h"
#include <sys/param.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/memdesc.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <machine/bus.h>
/*
* Load a list of virtual addresses.
*/
static int
_bus_dmamap_load_vlist(bus_dma_tag_t dmat, bus_dmamap_t map,
bus_dma_segment_t *list, int sglist_cnt, struct pmap *pmap, int *nsegs,
int flags)
{
int error;
error = 0;
for (; sglist_cnt > 0; sglist_cnt--, list++) {
error = _bus_dmamap_load_buffer(dmat, map,
(void *)list->ds_addr, list->ds_len, pmap, flags, NULL,
nsegs);
if (error)
break;
}
return (error);
}
/*
* Load a list of physical addresses.
*/
static int
_bus_dmamap_load_plist(bus_dma_tag_t dmat, bus_dmamap_t map,
bus_dma_segment_t *list, int sglist_cnt, int *nsegs, int flags)
{
int error;
error = 0;
for (; sglist_cnt > 0; sglist_cnt--, list++) {
error = _bus_dmamap_load_phys(dmat, map,
(vm_paddr_t)list->ds_addr, list->ds_len, flags, NULL,
nsegs);
if (error)
break;
}
return (error);
}
/*
* Load an mbuf chain.
*/
static int
_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs, int flags)
{
struct mbuf *m;
int error;
M_ASSERTPKTHDR(m0);
error = 0;
for (m = m0; m != NULL && error == 0; m = m->m_next) {
if (m->m_len > 0) {
error = _bus_dmamap_load_buffer(dmat, map, m->m_data,
m->m_len, kernel_pmap, flags | BUS_DMA_LOAD_MBUF,
segs, nsegs);
}
}
CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
__func__, dmat, flags, error, *nsegs);
return (error);
}
/*
* Load from block io.
*/
static int
_bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio,
int *nsegs, int flags)
{
int error;
error = _bus_dmamap_load_buffer(dmat, map, bio->bio_data,
bio->bio_bcount, kernel_pmap, flags, NULL, nsegs);
return (error);
}
/*
* Load a cam control block.
*/
static int
_bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb,
int *nsegs, int flags)
{
struct ccb_ataio *ataio;
struct ccb_scsiio *csio;
struct ccb_hdr *ccb_h;
void *data_ptr;
int error;
uint32_t dxfer_len;
uint16_t sglist_cnt;
error = 0;
ccb_h = &ccb->ccb_h;
switch (ccb_h->func_code) {
case XPT_SCSI_IO:
csio = &ccb->csio;
data_ptr = csio->data_ptr;
dxfer_len = csio->dxfer_len;
sglist_cnt = csio->sglist_cnt;
break;
case XPT_ATA_IO:
ataio = &ccb->ataio;
data_ptr = ataio->data_ptr;
dxfer_len = ataio->dxfer_len;
sglist_cnt = 0;
break;
default:
panic("_bus_dmamap_load_ccb: Unsupported func code %d",
ccb_h->func_code);
}
switch ((ccb_h->flags & CAM_DATA_MASK)) {
case CAM_DATA_VADDR:
error = _bus_dmamap_load_buffer(dmat, map, data_ptr, dxfer_len,
kernel_pmap, flags, NULL, nsegs);
break;
case CAM_DATA_PADDR:
error = _bus_dmamap_load_phys(dmat, map,
(vm_paddr_t)(uintptr_t)data_ptr, dxfer_len, flags, NULL,
nsegs);
break;
case CAM_DATA_SG:
error = _bus_dmamap_load_vlist(dmat, map,
(bus_dma_segment_t *)data_ptr, sglist_cnt, kernel_pmap,
nsegs, flags);
break;
case CAM_DATA_SG_PADDR:
error = _bus_dmamap_load_plist(dmat, map,
(bus_dma_segment_t *)data_ptr, sglist_cnt, nsegs, flags);
break;
case CAM_DATA_BIO:
error = _bus_dmamap_load_bio(dmat, map, (struct bio *)data_ptr,
nsegs, flags);
break;
default:
panic("_bus_dmamap_load_ccb: flags 0x%X unimplemented",
ccb_h->flags);
}
return (error);
}
/*
* Load a uio.
*/
static int
_bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
int *nsegs, int flags)
{
bus_size_t resid;
bus_size_t minlen;
struct iovec *iov;
pmap_t pmap;
caddr_t addr;
int error, i;
if (uio->uio_segflg == UIO_USERSPACE) {
KASSERT(uio->uio_td != NULL,
("bus_dmamap_load_uio: USERSPACE but no proc"));
pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
} else
pmap = kernel_pmap;
resid = uio->uio_resid;
iov = uio->uio_iov;
error = 0;
for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
/*
* Now at the first iovec to load. Load each iovec
* until we have exhausted the residual count.
*/
addr = (caddr_t) iov[i].iov_base;
minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
if (minlen > 0) {
error = _bus_dmamap_load_buffer(dmat, map, addr,
minlen, pmap, flags, NULL, nsegs);
resid -= minlen;
}
}
return (error);
}
/*
* Map the buffer buf into bus space using the dmamap map.
*/
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
bus_size_t buflen, bus_dmamap_callback_t *callback,
void *callback_arg, int flags)
{
bus_dma_segment_t *segs;
struct memdesc mem;
int error;
int nsegs;
if ((flags & BUS_DMA_NOWAIT) == 0) {
mem = memdesc_vaddr(buf, buflen);
_bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);
}
nsegs = -1;
error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, kernel_pmap,
flags, NULL, &nsegs);
nsegs++;
CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
__func__, dmat, flags, error, nsegs + 1);
if (error == EINPROGRESS)
return (error);
segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
if (error)
(*callback)(callback_arg, segs, 0, error);
else
(*callback)(callback_arg, segs, nsegs, 0);
/*
* Return ENOMEM to the caller so that it can pass it up the stack.
* This error only happens when NOWAIT is set, so deferral is disabled.
*/
if (error == ENOMEM)
return (error);
return (0);
}
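bus_dmamap_load() above also spells out the convention the MD hooks follow: *segp starts at -1, _bus_dmamap_addseg() advances it as segments are emitted, and the wrapper's nsegs++ turns the final index back into a count before _bus_dmamap_complete() and the callback run. From a consumer's point of view nothing changes; a minimal sketch of a caller that just captures the produced segments (all foo_* names and FOO_MAXSEGS are assumptions):

/* Sketch: record the segment list handed to the load callback. */
#define	FOO_MAXSEGS	32		/* assumed limit for the example */

struct foo_capture {
	bus_dma_segment_t segs[FOO_MAXSEGS];
	int nsegs;
	int error;
};

static void
foo_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct foo_capture *cap = arg;

	cap->error = error;
	cap->nsegs = (error != 0) ? 0 : nsegs;
	if (error == 0)
		memcpy(cap->segs, segs, nsegs * sizeof(*segs));
}

/* Usage: bus_dmamap_load(tag, map, buf, len, foo_load_cb, &cap, BUS_DMA_NOWAIT); */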
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
bus_dma_segment_t *segs;
int nsegs, error;
flags |= BUS_DMA_NOWAIT;
nsegs = -1;
error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, NULL, &nsegs, flags);
++nsegs;
segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
if (error)
(*callback)(callback_arg, segs, 0, 0, error);
else
(*callback)(callback_arg, segs, nsegs, m0->m_pkthdr.len, error);
CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
__func__, dmat, flags, error, nsegs);
return (error);
}
int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
bus_dma_segment_t *segs, int *nsegs, int flags)
{
int error;
flags |= BUS_DMA_NOWAIT;
*nsegs = -1;
error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags);
++*nsegs;
_bus_dmamap_complete(dmat, map, segs, *nsegs, error);
return (error);
}
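bus_dmamap_load_mbuf_sg() keeps its synchronous, caller-supplied-array form, now layered on the shared _bus_dmamap_load_buffer() walk. The usual network transmit path looks roughly like the sketch below (the ring fields, FOO_TX_MAXSEGS and the descriptor-writing step are hypothetical):

/* Sketch of a TX encap path using the segment-array variant. */
#define	FOO_TX_MAXSEGS	32		/* assumed hardware limit */

static int
foo_encap(struct foo_txring *txr, struct mbuf *m)
{
	bus_dma_segment_t segs[FOO_TX_MAXSEGS];
	struct mbuf *n;
	int error, nsegs;

	error = bus_dmamap_load_mbuf_sg(txr->tx_tag, txr->tx_map, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Too many fragments: compact the chain and retry once. */
		n = m_collapse(m, M_NOWAIT, FOO_TX_MAXSEGS);
		if (n == NULL)
			return (ENOBUFS);	/* caller frees the mbuf */
		m = n;
		error = bus_dmamap_load_mbuf_sg(txr->tx_tag, txr->tx_map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
	}
	if (error != 0)
		return (error);
	/* ... write nsegs descriptors from segs[] into the ring ... */
	bus_dmamap_sync(txr->tx_tag, txr->tx_map, BUS_DMASYNC_PREWRITE);
	return (0);
}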
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
bus_dma_segment_t *segs;
int nsegs, error;
flags |= BUS_DMA_NOWAIT;
nsegs = -1;
error = _bus_dmamap_load_uio(dmat, map, uio, &nsegs, flags);
nsegs++;
segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
if (error)
(*callback)(callback_arg, segs, 0, 0, error);
else
(*callback)(callback_arg, segs, nsegs, uio->uio_resid, error);
CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
__func__, dmat, flags, error, nsegs + 1);
return (error);
}
int
bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb,
bus_dmamap_callback_t *callback, void *callback_arg,
int flags)
{
bus_dma_segment_t *segs;
struct ccb_hdr *ccb_h;
struct memdesc mem;
int error;
int nsegs;
ccb_h = &ccb->ccb_h;
if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_NONE) {
callback(callback_arg, NULL, 0, 0);
return (0);
}
if ((flags & BUS_DMA_NOWAIT) == 0) {
mem = memdesc_ccb(ccb);
_bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);
}
nsegs = -1;
error = _bus_dmamap_load_ccb(dmat, map, ccb, &nsegs, flags);
nsegs++;
if (error == EINPROGRESS)
return (error);
segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
if (error)
(*callback)(callback_arg, segs, 0, error);
else
(*callback)(callback_arg, segs, nsegs, error);
/*
* Return ENOMEM to the caller so that it can pass it up the stack.
* This error only happens when NOWAIT is set, so deferral is disabled.
*/
if (error == ENOMEM)
return (error);
return (0);
}
int
bus_dmamap_load_mem(bus_dma_tag_t dmat, bus_dmamap_t map,
struct memdesc *mem, bus_dmamap_callback_t *callback,
void *callback_arg, int flags)
{
bus_dma_segment_t *segs;
int error;
int nsegs;
if ((flags & BUS_DMA_NOWAIT) == 0)
_bus_dmamap_waitok(dmat, map, mem, callback, callback_arg);
nsegs = -1;
error = 0;
switch (mem->md_type) {
case MEMDESC_VADDR:
error = _bus_dmamap_load_buffer(dmat, map, mem->u.md_vaddr,
mem->md_opaque, kernel_pmap, flags, NULL, &nsegs);
break;
case MEMDESC_PADDR:
error = _bus_dmamap_load_phys(dmat, map, mem->u.md_paddr,
mem->md_opaque, flags, NULL, &nsegs);
break;
case MEMDESC_VLIST:
error = _bus_dmamap_load_vlist(dmat, map, mem->u.md_list,
mem->md_opaque, kernel_pmap, &nsegs, flags);
break;
case MEMDESC_PLIST:
error = _bus_dmamap_load_plist(dmat, map, mem->u.md_list,
mem->md_opaque, &nsegs, flags);
break;
case MEMDESC_BIO:
error = _bus_dmamap_load_bio(dmat, map, mem->u.md_bio,
&nsegs, flags);
break;
case MEMDESC_UIO:
error = _bus_dmamap_load_uio(dmat, map, mem->u.md_uio,
&nsegs, flags);
break;
case MEMDESC_MBUF:
error = _bus_dmamap_load_mbuf_sg(dmat, map, mem->u.md_mbuf,
NULL, &nsegs, flags);
break;
case MEMDESC_CCB:
error = _bus_dmamap_load_ccb(dmat, map, mem->u.md_ccb, &nsegs,
flags);
break;
}
nsegs++;
CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
__func__, dmat, flags, error, nsegs + 1);
if (error == EINPROGRESS)
return (error);
segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
if (error)
(*callback)(callback_arg, segs, 0, error);
else
(*callback)(callback_arg, segs, nsegs, 0);
/*
* Return ENOMEM to the caller so that it can pass it up the stack.
* This error only happens when NOWAIT is set, so deferral is disabled.
*/
if (error == ENOMEM)
return (error);
return (0);
}

View File

@ -152,6 +152,52 @@ copyout_nofault(const void *kaddr, void *udaddr, size_t len)
return (error);
}
#define PHYS_PAGE_COUNT(len) (howmany(len, PAGE_SIZE) + 1)
int
physcopyin(void *src, vm_paddr_t dst, size_t len)
{
vm_page_t m[PHYS_PAGE_COUNT(len)];
struct iovec iov[1];
struct uio uio;
int i;
iov[0].iov_base = src;
iov[0].iov_len = len;
uio.uio_iov = iov;
uio.uio_iovcnt = 1;
uio.uio_offset = 0;
uio.uio_resid = len;
uio.uio_segflg = UIO_SYSSPACE;
uio.uio_rw = UIO_WRITE;
for (i = 0; i < PHYS_PAGE_COUNT(len); i++, dst += PAGE_SIZE)
m[i] = PHYS_TO_VM_PAGE(dst);
return (uiomove_fromphys(m, dst & PAGE_MASK, len, &uio));
}
int
physcopyout(vm_paddr_t src, void *dst, size_t len)
{
vm_page_t m[PHYS_PAGE_COUNT(len)];
struct iovec iov[1];
struct uio uio;
int i;
iov[0].iov_base = dst;
iov[0].iov_len = len;
uio.uio_iov = iov;
uio.uio_iovcnt = 1;
uio.uio_offset = 0;
uio.uio_resid = len;
uio.uio_segflg = UIO_SYSSPACE;
uio.uio_rw = UIO_READ;
for (i = 0; i < PHYS_PAGE_COUNT(len); i++, src += PAGE_SIZE)
m[i] = PHYS_TO_VM_PAGE(src);
return (uiomove_fromphys(m, src & PAGE_MASK, len, &uio));
}
#undef PHYS_PAGE_COUNT
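physcopyin() and physcopyout() are what let the bounce-page sync paths above work when only a client physical address was recorded (bpage->datavaddr == 0). A trivial usage sketch, round-tripping a kernel buffer through a physical address (the caller is assumed to hand in a valid, mappable physical page; foo_phys_roundtrip is illustrative only):

/* Sketch: copy a buffer out to a physical address and read it back. */
static int
foo_phys_roundtrip(vm_paddr_t pa, void *scratch, size_t len)
{
	int error;

	error = physcopyin(scratch, pa, len);	/* KVA -> physical */
	if (error != 0)
		return (error);
	return (physcopyout(pa, scratch, len));	/* physical -> KVA */
}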
int
uiomove(void *cp, int n, struct uio *uio)
{

View File

@ -40,12 +40,12 @@ __FBSDID("$FreeBSD$");
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
@ -89,10 +89,17 @@ struct bounce_page {
vm_offset_t vaddr_nocache; /* kva of bounce buffer uncached */
bus_addr_t busaddr; /* Physical address */
vm_offset_t datavaddr; /* kva of client data */
bus_addr_t dataaddr; /* client physical address */
bus_size_t datacount; /* client data count */
STAILQ_ENTRY(bounce_page) links;
};
struct sync_list {
vm_offset_t vaddr; /* kva of bounce buffer */
bus_addr_t busaddr; /* Physical address */
bus_size_t datacount; /* client data count */
};
int busdma_swi_pending;
struct bounce_zone {
@ -122,10 +129,6 @@ static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
"Total bounce pages");
#define DMAMAP_LINEAR 0x1
#define DMAMAP_MBUF 0x2
#define DMAMAP_UIO 0x4
#define DMAMAP_TYPE_MASK (DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)
#define DMAMAP_UNCACHEABLE 0x8
#define DMAMAP_ALLOCATED 0x10
#define DMAMAP_MALLOCUSED 0x20
@ -135,16 +138,16 @@ struct bus_dmamap {
int pagesneeded;
int pagesreserved;
bus_dma_tag_t dmat;
struct memdesc mem;
int flags;
void *buffer;
void *origbuffer;
void *allocbuffer;
TAILQ_ENTRY(bus_dmamap) freelist;
int len;
STAILQ_ENTRY(bus_dmamap) links;
bus_dmamap_callback_t *callback;
void *callback_arg;
int sync_count;
struct sync_list *slist;
};
static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
@ -166,7 +169,8 @@ static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
vm_offset_t vaddr, bus_size_t size);
vm_offset_t vaddr, bus_addr_t addr,
bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
/* Default tag, as most drivers provide no parent tag. */
@ -214,11 +218,6 @@ SYSINIT(busdma, SI_SUB_VM, SI_ORDER_ANY, mips_dmamap_freelist_init, NULL);
* Check to see if the specified page is in an allowed DMA range.
*/
static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
int flags, vm_offset_t *lastaddrp, int *segp);
static __inline int
_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
{
@ -273,10 +272,14 @@ dflt_lock(void *arg, bus_dma_lock_op_t op)
}
static __inline bus_dmamap_t
_busdma_alloc_dmamap(void)
_busdma_alloc_dmamap(bus_dma_tag_t dmat)
{
struct sync_list *slist;
bus_dmamap_t map;
slist = malloc(sizeof(*slist) * dmat->nsegments, M_DEVBUF, M_NOWAIT);
if (slist == NULL)
return (NULL);
mtx_lock(&busdma_mtx);
map = TAILQ_FIRST(&dmamap_freelist);
if (map)
@ -288,13 +291,18 @@ _busdma_alloc_dmamap(void)
map->flags = DMAMAP_ALLOCATED;
} else
map->flags = 0;
STAILQ_INIT(&map->bpages);
if (map != NULL) {
STAILQ_INIT(&map->bpages);
map->slist = slist;
} else
free(slist, M_DEVBUF);
return (map);
}
static __inline void
_busdma_free_dmamap(bus_dmamap_t map)
{
free(map->slist, M_DEVBUF);
if (map->flags & DMAMAP_ALLOCATED)
free(map, M_DEVBUF);
else {
@ -477,7 +485,7 @@ bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
}
}
newmap = _busdma_alloc_dmamap();
newmap = _busdma_alloc_dmamap(dmat);
if (newmap == NULL) {
CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
return (ENOMEM);
@ -485,6 +493,7 @@ bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
*mapp = newmap;
newmap->dmat = dmat;
newmap->allocbuffer = NULL;
newmap->sync_count = 0;
dmat->map_count++;
/*
@ -549,7 +558,7 @@ int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
if (STAILQ_FIRST(&map->bpages) != NULL) {
if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
CTR3(KTR_BUSDMA, "%s: tag %p error %d",
__func__, dmat, EBUSY);
return (EBUSY);
@ -592,7 +601,7 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
if (flags & BUS_DMA_ZERO)
mflags |= M_ZERO;
newmap = _busdma_alloc_dmamap();
newmap = _busdma_alloc_dmamap(dmat);
if (newmap == NULL) {
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
__func__, dmat, dmat->flags, ENOMEM);
@ -601,6 +610,7 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
dmat->map_count++;
*mapp = newmap;
newmap->dmat = dmat;
newmap->sync_count = 0;
/*
* If all the memory is coherent with DMA then we don't need to
@ -684,7 +694,37 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}
static int
static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
bus_size_t buflen, int flags)
{
bus_addr_t curaddr;
bus_size_t sgsize;
if ((map->pagesneeded == 0)) {
CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
dmat->lowaddr, dmat->boundary, dmat->alignment);
CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
map, map->pagesneeded);
/*
* Count the number of bounce pages
* needed in order to complete this transfer
*/
curaddr = buf;
while (buflen != 0) {
sgsize = MIN(buflen, dmat->maxsegsz);
if (run_filter(dmat, curaddr) != 0) {
sgsize = MIN(sgsize, PAGE_SIZE);
map->pagesneeded++;
}
curaddr += sgsize;
buflen -= sgsize;
}
CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
}
}
static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
void *buf, bus_size_t buflen, int flags)
{
@ -719,60 +759,157 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
}
CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
}
}
static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,int flags)
{
/* Reserve Necessary Bounce Pages */
if (map->pagesneeded != 0) {
mtx_lock(&bounce_lock);
if (flags & BUS_DMA_NOWAIT) {
if (reserve_bounce_pages(dmat, map, 0) != 0) {
mtx_unlock(&bounce_lock);
return (ENOMEM);
}
} else {
if (reserve_bounce_pages(dmat, map, 1) != 0) {
/* Queue us for resources */
STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
map, links);
mtx_unlock(&bounce_lock);
return (EINPROGRESS);
}
mtx_lock(&bounce_lock);
if (flags & BUS_DMA_NOWAIT) {
if (reserve_bounce_pages(dmat, map, 0) != 0) {
mtx_unlock(&bounce_lock);
return (ENOMEM);
}
} else {
if (reserve_bounce_pages(dmat, map, 1) != 0) {
/* Queue us for resources */
STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
map, links);
mtx_unlock(&bounce_lock);
return (EINPROGRESS);
}
mtx_unlock(&bounce_lock);
}
mtx_unlock(&bounce_lock);
return (0);
}
/*
* Utility function to load a linear buffer. lastaddrp holds state
* between invocations (for multiple-buffer loads). segp contains
* Add a single contiguous physical range to the segment list.
*/
static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
bus_addr_t baddr, bmask;
int seg;
/*
* Make sure we don't cross any boundaries.
*/
bmask = ~(dmat->boundary - 1);
if (dmat->boundary > 0) {
baddr = (curaddr + dmat->boundary) & bmask;
if (sgsize > (baddr - curaddr))
sgsize = (baddr - curaddr);
}
/*
* Insert chunk into a segment, coalescing with
* the previous segment if possible.
*/
seg = *segp;
if (seg >= 0 &&
curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
(segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
(dmat->boundary == 0 ||
(segs[seg].ds_addr & bmask) == (curaddr & bmask))) {
segs[seg].ds_len += sgsize;
} else {
if (++seg >= dmat->nsegments)
return (0);
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
}
*segp = seg;
return (sgsize);
}
/*
* Utility function to load a physical buffer. segp contains
* the starting segment on entrance, and the ending segment on exit.
*/
int
_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
int *segp)
{
bus_addr_t curaddr;
bus_size_t sgsize;
int error;
if (segs == NULL)
segs = dmat->segments;
if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
if (map->pagesneeded != 0) {
error = _bus_dmamap_reserve_pages(dmat, map, flags);
if (error)
return (error);
}
}
while (buflen > 0) {
curaddr = buf;
sgsize = MIN(buflen, dmat->maxsegsz);
if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
sgsize = MIN(sgsize, PAGE_SIZE);
curaddr = add_bounce_page(dmat, map, 0, curaddr,
sgsize);
}
sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
segp);
if (sgsize == 0)
break;
buf += sgsize;
buflen -= sgsize;
}
/*
* Did we fit?
*/
if (buflen != 0) {
_bus_dmamap_unload(dmat, map);
return (EFBIG); /* XXX better return value here? */
}
return (0);
}
/*
* Utility function to load a linear buffer. segp contains
* the starting segment on entrance, and the ending segment on exit.
* first indicates if this is the first invocation of this function.
*/
static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
int flags, vm_offset_t *lastaddrp, int *segp)
int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
bus_size_t buflen, struct pmap *pmap, int flags, bus_dma_segment_t *segs,
int *segp)
{
bus_size_t sgsize;
bus_addr_t curaddr, lastaddr, baddr, bmask;
bus_addr_t curaddr;
struct sync_list *sl;
vm_offset_t vaddr = (vm_offset_t)buf;
int seg;
int error = 0;
lastaddr = *lastaddrp;
bmask = ~(dmat->boundary - 1);
if (segs == NULL)
segs = dmat->segments;
if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
error = _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen,
flags);
if (error)
return (error);
_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
if (map->pagesneeded != 0) {
error = _bus_dmamap_reserve_pages(dmat, map, flags);
if (error)
return (error);
}
}
CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
"alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);
for (seg = *segp; buflen > 0 ; ) {
while (buflen > 0) {
/*
* Get the physical address for this segment.
*
@ -791,237 +928,62 @@ bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
if (buflen < sgsize)
sgsize = buflen;
/*
* Make sure we don't cross any boundaries.
*/
if (dmat->boundary > 0) {
baddr = (curaddr + dmat->boundary) & bmask;
if (sgsize > (baddr - curaddr))
sgsize = (baddr - curaddr);
}
if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
curaddr = add_bounce_page(dmat, map, vaddr, sgsize);
}
/*
* Insert chunk into a segment, coalescing with
* the previous segment if possible.
*/
if (seg >= 0 && curaddr == lastaddr &&
(segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
(dmat->boundary == 0 ||
(segs[seg].ds_addr & bmask) ==
(curaddr & bmask))) {
segs[seg].ds_len += sgsize;
goto segdone;
curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
sgsize);
} else {
if (++seg >= dmat->nsegments)
break;
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
sl = &map->slist[map->sync_count - 1];
if (map->sync_count == 0 ||
vaddr != sl->vaddr + sl->datacount) {
if (++map->sync_count > dmat->nsegments)
goto cleanup;
sl++;
sl->vaddr = vaddr;
sl->datacount = sgsize;
sl->busaddr = curaddr;
} else
sl->datacount += sgsize;
}
if (error)
sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
segp);
if (sgsize == 0)
break;
segdone:
lastaddr = curaddr + sgsize;
vaddr += sgsize;
buflen -= sgsize;
}
*segp = seg;
*lastaddrp = lastaddr;
cleanup:
/*
* Did we fit?
*/
if (buflen != 0)
if (buflen != 0) {
_bus_dmamap_unload(dmat, map);
error = EFBIG; /* XXX better return value here? */
}
return (error);
}
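The sync_list entries accumulated above, one per virtually contiguous chunk that was not bounced, are what make a type-independent sync possible on this cache architecture: the sync path no longer re-derives the buffer layout from a linear/mbuf/uio pointer, it just walks map->slist[0..sync_count) and lets bus_dmamap_sync_buf() (whose new vm_offset_t form appears further down) do the per-chunk cache maintenance. A sketch of that walk, using only the fields shown here:

/* Sketch of the generic sync walk over the recorded virtual chunks. */
static void
foo_sync_slist(bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct sync_list *sl;
	int i;

	for (i = 0; i < map->sync_count; i++) {
		sl = &map->slist[i];
		bus_dmamap_sync_buf(sl->vaddr, sl->datacount, op);
	}
}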
/*
* Map the buffer buf into bus space using the dmamap map.
*/
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
bus_size_t buflen, bus_dmamap_callback_t *callback,
void *callback_arg, int flags)
void
__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
{
vm_offset_t lastaddr = 0;
int error, nsegs = -1;
KASSERT(dmat != NULL, ("dmatag is NULL"));
KASSERT(map != NULL, ("dmamap is NULL"));
map->mem = *mem;
map->callback = callback;
map->callback_arg = callback_arg;
map->flags &= ~DMAMAP_TYPE_MASK;
map->flags |= DMAMAP_LINEAR;
map->buffer = buf;
map->len = buflen;
error = bus_dmamap_load_buffer(dmat,
dmat->segments, map, buf, buflen, kernel_pmap,
flags, &lastaddr, &nsegs);
if (error == EINPROGRESS)
return (error);
if (error)
(*callback)(callback_arg, NULL, 0, error);
else
(*callback)(callback_arg, dmat->segments, nsegs + 1, error);
CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
__func__, dmat, dmat->flags, nsegs + 1, error);
return (error);
}
/*
* Like bus_dmamap_load(), but for mbufs.
*/
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags)
bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
bus_dma_segment_t *segs, int nsegs, int error)
{
int nsegs = -1, error = 0;
M_ASSERTPKTHDR(m0);
map->flags &= ~DMAMAP_TYPE_MASK;
map->flags |= DMAMAP_MBUF;
map->buffer = m0;
map->len = 0;
if (m0->m_pkthdr.len <= dmat->maxsize) {
vm_offset_t lastaddr = 0;
struct mbuf *m;
for (m = m0; m != NULL && error == 0; m = m->m_next) {
if (m->m_len > 0) {
error = bus_dmamap_load_buffer(dmat,
dmat->segments, map, m->m_data, m->m_len,
kernel_pmap, flags, &lastaddr, &nsegs);
map->len += m->m_len;
}
}
} else {
error = EINVAL;
}
if (error) {
/*
* force "no valid mappings" on error in callback.
*/
(*callback)(callback_arg, dmat->segments, 0, 0, error);
} else {
(*callback)(callback_arg, dmat->segments, nsegs + 1,
m0->m_pkthdr.len, error);
}
CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
__func__, dmat, dmat->flags, error, nsegs + 1);
return (error);
}
int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
int flags)
{
int error = 0;
M_ASSERTPKTHDR(m0);
flags |= BUS_DMA_NOWAIT;
*nsegs = -1;
map->flags &= ~DMAMAP_TYPE_MASK;
map->flags |= DMAMAP_MBUF;
map->buffer = m0;
map->len = 0;
if (m0->m_pkthdr.len <= dmat->maxsize) {
vm_offset_t lastaddr = 0;
struct mbuf *m;
for (m = m0; m != NULL && error == 0; m = m->m_next) {
if (m->m_len > 0) {
error = bus_dmamap_load_buffer(dmat, segs, map,
m->m_data, m->m_len,
kernel_pmap, flags, &lastaddr,
nsegs);
map->len += m->m_len;
}
}
} else {
error = EINVAL;
}
/* XXX FIXME: Having to increment nsegs is really annoying */
++*nsegs;
CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
__func__, dmat, dmat->flags, error, *nsegs);
return (error);
}
/*
* Like bus_dmamap_load(), but for uios.
*/
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags)
{
vm_offset_t lastaddr = 0;
int nsegs, i, error;
bus_size_t resid;
struct iovec *iov;
struct pmap *pmap;
resid = uio->uio_resid;
iov = uio->uio_iov;
map->flags &= ~DMAMAP_TYPE_MASK;
map->flags |= DMAMAP_UIO;
map->buffer = uio;
map->len = 0;
if (uio->uio_segflg == UIO_USERSPACE) {
KASSERT(uio->uio_td != NULL,
("bus_dmamap_load_uio: USERSPACE but no proc"));
/* XXX: pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace); */
panic("can't do it yet");
} else
pmap = kernel_pmap;
error = 0;
nsegs = -1;
for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
/*
* Now at the first iovec to load. Load each iovec
* until we have exhausted the residual count.
*/
bus_size_t minlen =
resid < iov[i].iov_len ? resid : iov[i].iov_len;
caddr_t addr = (caddr_t) iov[i].iov_base;
if (minlen > 0) {
error = bus_dmamap_load_buffer(dmat, dmat->segments,
map, addr, minlen, pmap, flags, &lastaddr, &nsegs);
map->len += minlen;
resid -= minlen;
}
}
if (error) {
/*
* force "no valid mappings" on error in callback.
*/
(*callback)(callback_arg, dmat->segments, 0, 0, error);
} else {
(*callback)(callback_arg, dmat->segments, nsegs+1,
uio->uio_resid, error);
}
CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
__func__, dmat, dmat->flags, error, nsegs + 1);
return (error);
if (segs == NULL)
segs = dmat->segments;
return (segs);
}
/*
@ -1032,16 +994,16 @@ _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
struct bounce_page *bpage;
map->flags &= ~DMAMAP_TYPE_MASK;
while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
STAILQ_REMOVE_HEAD(&map->bpages, links);
free_bounce_page(dmat, bpage);
}
map->sync_count = 0;
return;
}
static void
bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
bus_dmamap_sync_buf(vm_offset_t buf, int len, bus_dmasync_op_t op)
{
char tmp_cl[mips_pdcache_linesize], tmp_clend[mips_pdcache_linesize];
vm_offset_t buf_cl, buf_clend;
@ -1055,9 +1017,9 @@ bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
 * prevent data loss, we save these chunks in a temporary buffer
 * before invalidation and restore them after it
*/
buf_cl = (vm_offset_t)buf & ~cache_linesize_mask;
size_cl = (vm_offset_t)buf & cache_linesize_mask;
buf_clend = (vm_offset_t)buf + len;
buf_cl = buf & ~cache_linesize_mask;
size_cl = buf & cache_linesize_mask;
buf_clend = buf + len;
size_clend = (mips_pdcache_linesize -
(buf_clend & cache_linesize_mask)) & cache_linesize_mask;
@ -1072,7 +1034,7 @@ bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
memcpy (tmp_cl, (void*)buf_cl, size_cl);
if (size_clend)
memcpy (tmp_clend, (void*)buf_clend, size_clend);
mips_dcache_inv_range((vm_offset_t)buf, len);
mips_dcache_inv_range(buf, len);
/*
* Restore them
*/
@ -1087,15 +1049,14 @@ bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
* necessary.
*/
if (size_cl)
mips_dcache_wbinv_range((vm_offset_t)buf_cl, size_cl);
mips_dcache_wbinv_range(buf_cl, size_cl);
if (size_clend && (size_cl == 0 ||
buf_clend - buf_cl > mips_pdcache_linesize))
mips_dcache_wbinv_range((vm_offset_t)buf_clend,
size_clend);
mips_dcache_wbinv_range(buf_clend, size_clend);
break;
case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
mips_dcache_wbinv_range((vm_offset_t)buf_cl, len);
mips_dcache_wbinv_range(buf_cl, len);
break;
case BUS_DMASYNC_PREREAD:
@ -1106,7 +1067,7 @@ bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
memcpy (tmp_cl, (void *)buf_cl, size_cl);
if (size_clend)
memcpy (tmp_clend, (void *)buf_clend, size_clend);
mips_dcache_inv_range((vm_offset_t)buf, len);
mips_dcache_inv_range(buf, len);
/*
* Restore them
*/
@ -1121,15 +1082,14 @@ bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
* necessary.
*/
if (size_cl)
mips_dcache_wbinv_range((vm_offset_t)buf_cl, size_cl);
mips_dcache_wbinv_range(buf_cl, size_cl);
if (size_clend && (size_cl == 0 ||
buf_clend - buf_cl > mips_pdcache_linesize))
mips_dcache_wbinv_range((vm_offset_t)buf_clend,
size_clend);
mips_dcache_wbinv_range(buf_clend, size_clend);
break;
case BUS_DMASYNC_PREWRITE:
mips_dcache_wb_range((vm_offset_t)buf, len);
mips_dcache_wb_range(buf, len);
break;
}
}
@ -1141,10 +1101,18 @@ _bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
STAILQ_FOREACH(bpage, &map->bpages, links) {
if (op & BUS_DMASYNC_PREWRITE) {
bcopy((void *)bpage->datavaddr,
(void *)(bpage->vaddr_nocache != 0 ?
bpage->vaddr_nocache : bpage->vaddr),
bpage->datacount);
if (bpage->datavaddr != 0)
bcopy((void *)bpage->datavaddr,
(void *)(bpage->vaddr_nocache != 0 ?
bpage->vaddr_nocache :
bpage->vaddr),
bpage->datacount);
else
physcopyout(bpage->dataaddr,
(void *)(bpage->vaddr_nocache != 0 ?
bpage->vaddr_nocache :
bpage->vaddr),
bpage->datacount);
if (bpage->vaddr_nocache == 0) {
mips_dcache_wb_range(bpage->vaddr,
bpage->datacount);
@ -1156,36 +1124,23 @@ _bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
mips_dcache_inv_range(bpage->vaddr,
bpage->datacount);
}
bcopy((void *)(bpage->vaddr_nocache != 0 ?
bpage->vaddr_nocache : bpage->vaddr),
(void *)bpage->datavaddr, bpage->datacount);
if (bpage->datavaddr != 0)
bcopy((void *)(bpage->vaddr_nocache != 0 ?
bpage->vaddr_nocache : bpage->vaddr),
(void *)bpage->datavaddr, bpage->datacount);
else
physcopyin((void *)(bpage->vaddr_nocache != 0 ?
bpage->vaddr_nocache : bpage->vaddr),
bpage->dataaddr, bpage->datacount);
dmat->bounce_zone->total_bounced++;
}
}
}
static __inline int
_bus_dma_buf_is_in_bp(bus_dmamap_t map, void *buf, int len)
{
struct bounce_page *bpage;
STAILQ_FOREACH(bpage, &map->bpages, links) {
if ((vm_offset_t)buf >= bpage->datavaddr &&
(vm_offset_t)buf + len <= bpage->datavaddr +
bpage->datacount)
return (1);
}
return (0);
}
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
struct mbuf *m;
struct uio *uio;
int resid;
struct iovec *iov;
struct sync_list *sl, *end;
if (op == BUS_DMASYNC_POSTWRITE)
return;
@ -1199,38 +1154,10 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
return;
CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
switch(map->flags & DMAMAP_TYPE_MASK) {
case DMAMAP_LINEAR:
if (!(_bus_dma_buf_is_in_bp(map, map->buffer, map->len)))
bus_dmamap_sync_buf(map->buffer, map->len, op);
break;
case DMAMAP_MBUF:
m = map->buffer;
while (m) {
if (m->m_len > 0 &&
!(_bus_dma_buf_is_in_bp(map, m->m_data, m->m_len)))
bus_dmamap_sync_buf(m->m_data, m->m_len, op);
m = m->m_next;
}
break;
case DMAMAP_UIO:
uio = map->buffer;
iov = uio->uio_iov;
resid = uio->uio_resid;
for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
bus_size_t minlen = resid < iov[i].iov_len ? resid :
iov[i].iov_len;
if (minlen > 0) {
if (!_bus_dma_buf_is_in_bp(map, iov[i].iov_base,
minlen))
bus_dmamap_sync_buf(iov[i].iov_base,
minlen, op);
resid -= minlen;
}
}
break;
default:
break;
if (map->sync_count) {
end = &map->slist[map->sync_count];
for (sl = &map->slist[0]; sl != end; sl++)
bus_dmamap_sync_buf(sl->vaddr, sl->datacount, op);
}
}
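The loop above is the generic replacement for the old per-type (linear/mbuf/uio) sync walks: whatever loaded the map is expected to have recorded each virtual chunk in map->slist[] and bumped map->sync_count. That recording happens in the load path, which is not part of this hunk; the fragment below is only a hedged sketch of the bookkeeping (the helper name and exact fields touched are assumptions, not code from this commit):

/*
 * Illustrative only: append one virtually-addressed chunk so that
 * _bus_dmamap_sync() can later flush map->slist[0 .. sync_count).
 */
static void
record_sync_entry(bus_dmamap_t map, vm_offset_t vaddr, bus_size_t sgsize)
{
	struct sync_list *sl;

	sl = &map->slist[map->sync_count++];
	sl->vaddr = vaddr;
	sl->datacount = sgsize;
}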
@ -1393,7 +1320,7 @@ reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
bus_size_t size)
bus_addr_t addr, bus_size_t size)
{
struct bounce_zone *bz;
struct bounce_page *bpage;
@ -1426,6 +1353,7 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
bpage->busaddr |= vaddr & PAGE_MASK;
}
bpage->datavaddr = vaddr;
bpage->dataaddr = addr;
bpage->datacount = size;
STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
return (bpage->busaddr);
@ -1479,8 +1407,8 @@ busdma_swi(void)
mtx_unlock(&bounce_lock);
dmat = map->dmat;
(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
bus_dmamap_load(map->dmat, map, map->buffer, map->len,
map->callback, map->callback_arg, /*flags*/0);
bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
map->callback_arg, BUS_DMA_WAITOK);
(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
mtx_lock(&bounce_lock);
}

View File

@ -40,10 +40,10 @@ __FBSDID("$FreeBSD$");
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
@ -87,6 +87,7 @@ struct bounce_page {
vm_offset_t vaddr; /* kva of bounce buffer */
bus_addr_t busaddr; /* Physical address */
vm_offset_t datavaddr; /* kva of client data */
bus_addr_t dataaddr; /* client physical address */
bus_size_t datacount; /* client data count */
STAILQ_ENTRY(bounce_page) links;
};
@ -125,8 +126,7 @@ struct bus_dmamap {
int pagesneeded;
int pagesreserved;
bus_dma_tag_t dmat;
void *buf; /* unmapped buffer pointer */
bus_size_t buflen; /* unmapped buffer length */
struct memdesc mem;
bus_dma_segment_t *segments;
int nsegs;
bus_dmamap_callback_t *callback;
@ -144,7 +144,8 @@ static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
vm_offset_t vaddr, bus_size_t size);
vm_offset_t vaddr, bus_addr_t addr,
bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
@ -564,32 +565,45 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}
/*
* Utility function to load a linear buffer. lastaddrp holds state
* between invocations (for multiple-buffer loads). segp contains
* the starting segment on entrance, and the ending segment on exit.
* first indicates if this is the first invocation of this function.
*/
static __inline int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
bus_dmamap_t map,
void *buf, bus_size_t buflen,
pmap_t pmap,
int flags,
bus_addr_t *lastaddrp,
bus_dma_segment_t *segs,
int *segp,
int first)
static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
bus_size_t buflen, int flags)
{
bus_addr_t curaddr;
bus_size_t sgsize;
bus_addr_t curaddr, lastaddr, baddr, bmask;
vm_offset_t vaddr;
bus_addr_t paddr;
int seg;
if (map->pagesneeded == 0 && ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0)) {
vm_offset_t vendaddr;
if (map->pagesneeded == 0) {
CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
"alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
dmat->boundary, dmat->alignment);
CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map, map->pagesneeded);
/*
* Count the number of bounce pages
* needed in order to complete this transfer
*/
curaddr = buf;
while (buflen != 0) {
sgsize = MIN(buflen, dmat->maxsegsz);
if (run_filter(dmat, curaddr) != 0) {
sgsize = MIN(sgsize, PAGE_SIZE);
map->pagesneeded++;
}
curaddr += sgsize;
buflen -= sgsize;
}
CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
}
}
static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
void *buf, bus_size_t buflen, int flags)
{
vm_offset_t vaddr;
vm_offset_t vendaddr;
bus_addr_t paddr;
if (map->pagesneeded == 0) {
CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
"alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
dmat->boundary, dmat->alignment);
@ -605,10 +619,10 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dmat,
bus_size_t sg_len;
sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
if (pmap)
paddr = pmap_extract(pmap, vaddr);
else
if (pmap == kernel_pmap)
paddr = pmap_kextract(vaddr);
else
paddr = pmap_extract(pmap, vaddr);
if (run_filter(dmat, paddr) != 0) {
sg_len = roundup2(sg_len, dmat->alignment);
map->pagesneeded++;
@ -617,44 +631,171 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dmat,
}
CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
}
}
static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{
/* Reserve Necessary Bounce Pages */
if (map->pagesneeded != 0) {
mtx_lock(&bounce_lock);
if (flags & BUS_DMA_NOWAIT) {
if (reserve_bounce_pages(dmat, map, 0) != 0) {
mtx_unlock(&bounce_lock);
return (ENOMEM);
}
} else {
if (reserve_bounce_pages(dmat, map, 1) != 0) {
/* Queue us for resources */
map->dmat = dmat;
map->buf = buf;
map->buflen = buflen;
STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
map, links);
mtx_unlock(&bounce_lock);
return (EINPROGRESS);
}
mtx_lock(&bounce_lock);
if (flags & BUS_DMA_NOWAIT) {
if (reserve_bounce_pages(dmat, map, 0) != 0) {
mtx_unlock(&bounce_lock);
return (ENOMEM);
}
} else {
if (reserve_bounce_pages(dmat, map, 1) != 0) {
/* Queue us for resources */
STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
map, links);
mtx_unlock(&bounce_lock);
return (EINPROGRESS);
}
}
mtx_unlock(&bounce_lock);
return (0);
}
/*
* Add a single contiguous physical range to the segment list.
*/
static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
bus_addr_t baddr, bmask;
int seg;
/*
* Make sure we don't cross any boundaries.
*/
bmask = ~(dmat->boundary - 1);
if (dmat->boundary > 0) {
baddr = (curaddr + dmat->boundary) & bmask;
if (sgsize > (baddr - curaddr))
sgsize = (baddr - curaddr);
}
/*
* Insert chunk into a segment, coalescing with
* previous segment if possible.
*/
seg = *segp;
if (seg == -1) {
seg = 0;
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
} else {
if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
(segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
(dmat->boundary == 0 ||
(segs[seg].ds_addr & bmask) == (curaddr & bmask)))
segs[seg].ds_len += sgsize;
else {
if (++seg >= dmat->nsegments)
return (0);
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
}
}
*segp = seg;
return (sgsize);
}
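To make the boundary and coalescing logic above concrete, here is a worked example with made-up numbers (illustration only, not taken from the commit):

/*
 * Example: dmat->boundary = 0x1000, curaddr = 0x12f40, sgsize = 0x200.
 *   bmask = ~(0x1000 - 1) = ~0xfff
 *   baddr = (0x12f40 + 0x1000) & ~0xfff = 0x13000
 *   baddr - curaddr = 0xc0 < 0x200, so sgsize is clamped to 0xc0 and the
 *   chunk ends exactly on the 4 KiB boundary; the remaining 0x140 bytes
 *   are emitted as (or coalesced into) the next segment on a later pass.
 */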
/*
* Utility function to load a physical buffer. segp contains
 * the starting segment on entrance, and the ending segment on exit.
*/
int
_bus_dmamap_load_phys(bus_dma_tag_t dmat,
bus_dmamap_t map,
vm_paddr_t buf, bus_size_t buflen,
int flags,
bus_dma_segment_t *segs,
int *segp)
{
bus_addr_t curaddr;
bus_size_t sgsize;
int error;
if (segs == NULL)
segs = map->segments;
if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
if (map->pagesneeded != 0) {
error = _bus_dmamap_reserve_pages(dmat, map, flags);
if (error)
return (error);
}
}
while (buflen > 0) {
curaddr = buf;
sgsize = MIN(buflen, dmat->maxsegsz);
if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
sgsize = MIN(sgsize, PAGE_SIZE);
curaddr = add_bounce_page(dmat, map, 0, curaddr,
sgsize);
}
sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
segp);
if (sgsize == 0)
break;
buf += sgsize;
buflen -= sgsize;
}
/*
* Did we fit?
*/
return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
/*
* Utility function to load a linear buffer. segp contains
* the starting segment on entrance, and the ending segment on exit.
*/
int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
bus_dmamap_t map,
void *buf, bus_size_t buflen,
pmap_t pmap,
int flags,
bus_dma_segment_t *segs,
int *segp)
{
bus_size_t sgsize;
bus_addr_t curaddr;
vm_offset_t vaddr;
int error;
if (segs == NULL)
segs = map->segments;
if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
if (map->pagesneeded != 0) {
error = _bus_dmamap_reserve_pages(dmat, map, flags);
if (error)
return (error);
}
mtx_unlock(&bounce_lock);
}
vaddr = (vm_offset_t)buf;
lastaddr = *lastaddrp;
bmask = ~(dmat->boundary - 1);
for (seg = *segp; buflen > 0 ; ) {
while (buflen > 0) {
bus_size_t max_sgsize;
/*
* Get the physical address for this segment.
*/
if (pmap)
curaddr = pmap_extract(pmap, vaddr);
else
if (pmap == kernel_pmap)
curaddr = pmap_kextract(vaddr);
else
curaddr = pmap_extract(pmap, vaddr);
/*
* Compute the segment size, and adjust counts.
@ -664,268 +805,56 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dmat,
if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
sgsize = roundup2(sgsize, dmat->alignment);
sgsize = MIN(sgsize, max_sgsize);
curaddr = add_bounce_page(dmat, map, vaddr, sgsize);
curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
sgsize);
} else {
sgsize = MIN(sgsize, max_sgsize);
}
/*
* Make sure we don't cross any boundaries.
*/
if (dmat->boundary > 0) {
baddr = (curaddr + dmat->boundary) & bmask;
if (sgsize > (baddr - curaddr))
sgsize = (baddr - curaddr);
}
/*
* Insert chunk into a segment, coalescing with
* previous segment if possible.
*/
if (first) {
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
first = 0;
} else {
if (curaddr == lastaddr &&
(segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
(dmat->boundary == 0 ||
(segs[seg].ds_addr & bmask) == (curaddr & bmask)))
segs[seg].ds_len += sgsize;
else {
if (++seg >= dmat->nsegments)
break;
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
}
}
lastaddr = curaddr + sgsize;
sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
segp);
if (sgsize == 0)
break;
vaddr += sgsize;
buflen -= sgsize;
}
*segp = seg;
*lastaddrp = lastaddr;
/*
* Did we fit?
*/
return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
/*
* Map the buffer buf into bus space using the dmamap map.
*/
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
bus_size_t buflen, bus_dmamap_callback_t *callback,
void *callback_arg, int flags)
void
__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
struct memdesc *mem, bus_dmamap_callback_t *callback,
void *callback_arg)
{
bus_addr_t lastaddr = 0;
int error;
if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
flags |= BUS_DMA_WAITOK;
map->dmat = dmat;
map->mem = *mem;
map->callback = callback;
map->callback_arg = callback_arg;
}
}
map->nsegs = 0;
error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags,
&lastaddr, map->segments, &map->nsegs, 1);
map->nsegs++;
bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
bus_dma_segment_t *segs, int nsegs, int error)
{
CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
__func__, dmat, dmat->flags, error, map->nsegs);
if (error == EINPROGRESS) {
return (error);
}
if (dmat->iommu != NULL)
IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs, dmat->lowaddr,
dmat->highaddr, dmat->alignment, dmat->boundary,
dmat->iommu_cookie);
if (error)
(*callback)(callback_arg, map->segments, 0, error);
if (segs != NULL)
memcpy(map->segments, segs, map->nsegs*sizeof(segs[0]));
else
(*callback)(callback_arg, map->segments, map->nsegs, 0);
/*
* Return ENOMEM to the caller so that it can pass it up the stack.
 * This error only happens when NOWAIT is set, so deferral is disabled.
*/
if (error == ENOMEM)
return (error);
return (0);
}
/*
* Like _bus_dmamap_load(), but for mbufs.
*/
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
struct mbuf *m0,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags)
{
int error;
M_ASSERTPKTHDR(m0);
flags |= BUS_DMA_NOWAIT;
map->nsegs = 0;
error = 0;
if (m0->m_pkthdr.len <= dmat->maxsize) {
int first = 1;
bus_addr_t lastaddr = 0;
struct mbuf *m;
for (m = m0; m != NULL && error == 0; m = m->m_next) {
if (m->m_len > 0) {
error = _bus_dmamap_load_buffer(dmat, map,
m->m_data, m->m_len,
NULL, flags, &lastaddr,
map->segments, &map->nsegs, first);
first = 0;
}
}
} else {
error = EINVAL;
}
map->nsegs++;
segs = map->segments;
map->nsegs = nsegs;
if (dmat->iommu != NULL)
IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs, dmat->lowaddr,
dmat->highaddr, dmat->alignment, dmat->boundary,
dmat->iommu_cookie);
IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs,
dmat->lowaddr, dmat->highaddr, dmat->alignment,
dmat->boundary, dmat->iommu_cookie);
if (error) {
/* force "no valid mappings" in callback */
(*callback)(callback_arg, map->segments, 0, 0, error);
} else {
(*callback)(callback_arg, map->segments,
map->nsegs, m0->m_pkthdr.len, error);
}
CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
__func__, dmat, dmat->flags, error, map->nsegs);
return (error);
}
int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
int flags)
{
int error;
M_ASSERTPKTHDR(m0);
flags |= BUS_DMA_NOWAIT;
*nsegs = 0;
error = 0;
if (m0->m_pkthdr.len <= dmat->maxsize) {
int first = 1;
bus_addr_t lastaddr = 0;
struct mbuf *m;
for (m = m0; m != NULL && error == 0; m = m->m_next) {
if (m->m_len > 0) {
error = _bus_dmamap_load_buffer(dmat, map,
m->m_data, m->m_len,
NULL, flags, &lastaddr,
segs, nsegs, first);
first = 0;
}
}
} else {
error = EINVAL;
}
/* XXX FIXME: Having to increment nsegs is really annoying */
++*nsegs;
if (dmat->iommu != NULL)
IOMMU_MAP(dmat->iommu, segs, nsegs, dmat->lowaddr,
dmat->highaddr, dmat->alignment, dmat->boundary,
dmat->iommu_cookie);
map->nsegs = *nsegs;
memcpy(map->segments, segs, map->nsegs*sizeof(segs[0]));
CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
__func__, dmat, dmat->flags, error, *nsegs);
return (error);
}
/*
* Like _bus_dmamap_load(), but for uios.
*/
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
struct uio *uio,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags)
{
bus_addr_t lastaddr = 0;
int error, first, i;
bus_size_t resid;
struct iovec *iov;
pmap_t pmap;
flags |= BUS_DMA_NOWAIT;
resid = uio->uio_resid;
iov = uio->uio_iov;
if (uio->uio_segflg == UIO_USERSPACE) {
KASSERT(uio->uio_td != NULL,
("bus_dmamap_load_uio: USERSPACE but no proc"));
pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
} else
pmap = NULL;
map->nsegs = 0;
error = 0;
first = 1;
for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
/*
* Now at the first iovec to load. Load each iovec
* until we have exhausted the residual count.
*/
bus_size_t minlen =
resid < iov[i].iov_len ? resid : iov[i].iov_len;
caddr_t addr = (caddr_t) iov[i].iov_base;
if (minlen > 0) {
error = _bus_dmamap_load_buffer(dmat, map,
addr, minlen, pmap, flags, &lastaddr,
map->segments, &map->nsegs, first);
first = 0;
resid -= minlen;
}
}
map->nsegs++;
if (dmat->iommu != NULL)
IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs, dmat->lowaddr,
dmat->highaddr, dmat->alignment, dmat->boundary,
dmat->iommu_cookie);
if (error) {
/* force "no valid mappings" in callback */
(*callback)(callback_arg, map->segments, 0, 0, error);
} else {
(*callback)(callback_arg, map->segments,
map->nsegs, uio->uio_resid, error);
}
CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
__func__, dmat, dmat->flags, error, map->nsegs);
return (error);
return (segs);
}
/*
@ -963,9 +892,14 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
if (op & BUS_DMASYNC_PREWRITE) {
while (bpage != NULL) {
bcopy((void *)bpage->datavaddr,
(void *)bpage->vaddr,
bpage->datacount);
if (bpage->datavaddr != 0)
bcopy((void *)bpage->datavaddr,
(void *)bpage->vaddr,
bpage->datacount);
else
physcopyout(bpage->dataaddr,
(void *)bpage->vaddr,
bpage->datacount);
bpage = STAILQ_NEXT(bpage, links);
}
dmat->bounce_zone->total_bounced++;
@ -973,9 +907,13 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
if (op & BUS_DMASYNC_POSTREAD) {
while (bpage != NULL) {
bcopy((void *)bpage->vaddr,
(void *)bpage->datavaddr,
bpage->datacount);
if (bpage->datavaddr != 0)
bcopy((void *)bpage->vaddr,
(void *)bpage->datavaddr,
bpage->datacount);
else
physcopyin((void *)bpage->vaddr,
bpage->dataaddr, bpage->datacount);
bpage = STAILQ_NEXT(bpage, links);
}
dmat->bounce_zone->total_bounced++;
@ -1142,7 +1080,7 @@ reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
bus_size_t size)
bus_addr_t addr, bus_size_t size)
{
struct bounce_zone *bz;
struct bounce_page *bpage;
@ -1174,6 +1112,7 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
bpage->busaddr |= vaddr & PAGE_MASK;
}
bpage->datavaddr = vaddr;
bpage->dataaddr = addr;
bpage->datacount = size;
STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
return (bpage->busaddr);
@ -1227,8 +1166,9 @@ busdma_swi(void)
mtx_unlock(&bounce_lock);
dmat = map->dmat;
(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
map->callback, map->callback_arg, /*flags*/0);
bus_dmamap_load_mem(map->dmat, map, &map->mem,
map->callback, map->callback_arg,
BUS_DMA_WAITOK);
(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
mtx_lock(&bounce_lock);
}

View File

@ -369,9 +369,8 @@ ps3cdrom_action(struct cam_sim *sim, union ccb *ccb)
TAILQ_REMOVE(&sc->sc_free_xferq, xp, x_queue);
err = bus_dmamap_load(sc->sc_dmatag, xp->x_dmamap,
ccb->csio.data_ptr, ccb->csio.dxfer_len, ps3cdrom_transfer,
xp, 0);
err = bus_dmamap_load_ccb(sc->sc_dmatag, xp->x_dmamap,
ccb, ps3cdrom_transfer, xp, 0);
if (err && err != EINPROGRESS) {
device_printf(dev, "Could not load DMA map (%d)\n",
err);

View File

@ -78,14 +78,17 @@
struct bus_dma_methods {
int (*dm_dmamap_create)(bus_dma_tag_t, int, bus_dmamap_t *);
int (*dm_dmamap_destroy)(bus_dma_tag_t, bus_dmamap_t);
int (*dm_dmamap_load)(bus_dma_tag_t, bus_dmamap_t, void *,
bus_size_t, bus_dmamap_callback_t *, void *, int);
int (*dm_dmamap_load_mbuf)(bus_dma_tag_t, bus_dmamap_t,
struct mbuf *, bus_dmamap_callback2_t *, void *, int);
int (*dm_dmamap_load_mbuf_sg)(bus_dma_tag_t, bus_dmamap_t,
struct mbuf *, bus_dma_segment_t *segs, int *nsegs, int);
int (*dm_dmamap_load_uio)(bus_dma_tag_t, bus_dmamap_t, struct uio *,
bus_dmamap_callback2_t *, void *, int);
int (*dm_dmamap_load_phys)(bus_dma_tag_t dmat, bus_dmamap_t map,
vm_paddr_t buf, bus_size_t buflen, int flags,
bus_dma_segment_t *segs, int *segp);
int (*dm_dmamap_load_buffer)(bus_dma_tag_t dmat, bus_dmamap_t map,
void *buf, bus_size_t buflen, struct pmap *pmap, int flags,
bus_dma_segment_t *segs, int *segp);
void (*dm_dmamap_waitok)(bus_dma_tag_t dmat, bus_dmamap_t map,
struct memdesc *mem, bus_dmamap_callback_t *callback,
void *callback_arg);
bus_dma_segment_t *(*dm_dmamap_complete)(bus_dma_tag_t dmat,
bus_dmamap_t map, bus_dma_segment_t *segs, int nsegs, int error);
void (*dm_dmamap_unload)(bus_dma_tag_t, bus_dmamap_t);
void (*dm_dmamap_sync)(bus_dma_tag_t, bus_dmamap_t,
bus_dmasync_op_t);
@ -125,14 +128,16 @@ struct bus_dma_tag {
((t)->dt_mt->dm_dmamap_create((t), (f), (p)))
#define bus_dmamap_destroy(t, p) \
((t)->dt_mt->dm_dmamap_destroy((t), (p)))
#define bus_dmamap_load(t, m, p, s, cb, cba, f) \
((t)->dt_mt->dm_dmamap_load((t), (m), (p), (s), (cb), (cba), (f)))
#define bus_dmamap_load_mbuf(t, m, mb, cb, cba, f) \
((t)->dt_mt->dm_dmamap_load_mbuf((t), (m), (mb), (cb), (cba), (f)))
#define bus_dmamap_load_mbuf_sg(t, m, mb, segs, nsegs, f) \
((t)->dt_mt->dm_dmamap_load_mbuf_sg((t), (m), (mb), (segs), (nsegs), (f)))
#define bus_dmamap_load_uio(t, m, ui, cb, cba, f) \
((t)->dt_mt->dm_dmamap_load_uio((t), (m), (ui), (cb), (cba), (f)))
#define _bus_dmamap_load_phys(t, m, b, l, f, s, sp) \
((t)->dt_mt->dm_dmamap_load_phys((t), (m), (b), (l), \
(f), (s), (sp)))
#define _bus_dmamap_load_buffer(t, m, b, l, p, f, s, sp) \
((t)->dt_mt->dm_dmamap_load_buffer((t), (m), (b), (l), (p), \
(f), (s), (sp)))
#define _bus_dmamap_waitok(t, m, mem, c, ca) \
((t)->dt_mt->dm_dmamap_waitok((t), (m), (mem), (c), (ca)))
#define _bus_dmamap_complete(t, m, s, n, e) \
((t)->dt_mt->dm_dmamap_complete((t), (m), (s), (n), (e)))
#define bus_dmamap_unload(t, p) \
((t)->dt_mt->dm_dmamap_unload((t), (p)))
#define bus_dmamap_sync(t, m, op) \

View File

@ -98,13 +98,11 @@ __FBSDID("$FreeBSD$");
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/smp.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
@ -326,38 +324,106 @@ nexus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
}
/*
* Utility function to load a linear buffer. lastaddrp holds state
* between invocations (for multiple-buffer loads). segp contains
 * the starting segment on entrance, and the ending segment on exit.
* first indicates if this is the first invocation of this function.
* Add a single contiguous physical range to the segment list.
*/
static int
_nexus_dmamap_load_buffer(bus_dma_tag_t dmat, void *buf, bus_size_t buflen,
struct thread *td, int flags, bus_addr_t *lastaddrp,
bus_dma_segment_t *segs, int *segp, int first)
nexus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
bus_addr_t baddr, bmask;
int seg;
/*
* Make sure we don't cross any boundaries.
*/
bmask = ~(dmat->dt_boundary - 1);
if (dmat->dt_boundary > 0) {
baddr = (curaddr + dmat->dt_boundary) & bmask;
if (sgsize > (baddr - curaddr))
sgsize = (baddr - curaddr);
}
/*
* Insert chunk into a segment, coalescing with
* previous segment if possible.
*/
seg = *segp;
if (seg == -1) {
seg = 0;
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
} else {
if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
(segs[seg].ds_len + sgsize) <= dmat->dt_maxsegsz &&
(dmat->dt_boundary == 0 ||
(segs[seg].ds_addr & bmask) == (curaddr & bmask)))
segs[seg].ds_len += sgsize;
else {
if (++seg >= dmat->dt_nsegments)
return (0);
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
}
}
*segp = seg;
return (sgsize);
}
/*
* Utility function to load a physical buffer. segp contains
 * the starting segment on entrance, and the ending segment on exit.
*/
static int
nexus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp)
{
bus_addr_t curaddr;
bus_size_t sgsize;
if (segs == NULL)
segs = dmat->dt_segments;
curaddr = buf;
while (buflen > 0) {
sgsize = MIN(buflen, dmat->dt_maxsegsz);
sgsize = nexus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
segp);
if (sgsize == 0)
break;
curaddr += sgsize;
buflen -= sgsize;
}
/*
* Did we fit?
*/
return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
/*
* Utility function to load a linear buffer. segp contains
 * the starting segment on entrance, and the ending segment on exit.
*/
static int
nexus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
int *segp)
{
bus_size_t sgsize;
bus_addr_t curaddr, lastaddr, baddr, bmask;
bus_addr_t curaddr;
vm_offset_t vaddr = (vm_offset_t)buf;
int seg;
pmap_t pmap;
if (td != NULL)
pmap = vmspace_pmap(td->td_proc->p_vmspace);
else
pmap = NULL;
if (segs == NULL)
segs = dmat->dt_segments;
lastaddr = *lastaddrp;
bmask = ~(dmat->dt_boundary - 1);
for (seg = *segp; buflen > 0 ; ) {
while (buflen > 0) {
/*
* Get the physical address for this segment.
*/
if (pmap)
curaddr = pmap_extract(pmap, vaddr);
else
if (pmap == kernel_pmap)
curaddr = pmap_kextract(vaddr);
else
curaddr = pmap_extract(pmap, vaddr);
/*
* Compute the segment size, and adjust counts.
@ -368,205 +434,36 @@ _nexus_dmamap_load_buffer(bus_dma_tag_t dmat, void *buf, bus_size_t buflen,
if (buflen < sgsize)
sgsize = buflen;
/*
* Make sure we don't cross any boundaries.
*/
if (dmat->dt_boundary > 0) {
baddr = (curaddr + dmat->dt_boundary) & bmask;
if (sgsize > (baddr - curaddr))
sgsize = (baddr - curaddr);
}
sgsize = nexus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
segp);
if (sgsize == 0)
break;
/*
* Insert chunk into a segment, coalescing with
* previous segment if possible.
*/
if (first) {
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
first = 0;
} else {
if (curaddr == lastaddr &&
(segs[seg].ds_len + sgsize) <= dmat->dt_maxsegsz &&
(dmat->dt_boundary == 0 ||
(segs[seg].ds_addr & bmask) == (curaddr & bmask)))
segs[seg].ds_len += sgsize;
else {
if (++seg >= dmat->dt_nsegments)
break;
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
}
}
lastaddr = curaddr + sgsize;
vaddr += sgsize;
buflen -= sgsize;
}
*segp = seg;
*lastaddrp = lastaddr;
/*
* Did we fit?
*/
return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
/*
* Common function for loading a DMA map with a linear buffer. May
* be called by bus-specific DMA map load functions.
*
* Most SPARCs have IOMMUs in the bus controllers. In those cases
* they only need one segment and will use virtual addresses for DVMA.
* Those bus controllers should intercept these vectors and should
* *NEVER* call nexus_dmamap_load() which is used only by devices that
* bypass DVMA.
*/
static int
nexus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
bus_size_t buflen, bus_dmamap_callback_t *callback, void *callback_arg,
int flags)
static void
nexus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
{
bus_addr_t lastaddr;
int error, nsegs;
error = _nexus_dmamap_load_buffer(dmat, buf, buflen, NULL, flags,
&lastaddr, dmat->dt_segments, &nsegs, 1);
if (error == 0) {
(*callback)(callback_arg, dmat->dt_segments, nsegs + 1, 0);
map->dm_flags |= DMF_LOADED;
} else
(*callback)(callback_arg, NULL, 0, error);
return (0);
}
/*
* Like nexus_dmamap_load(), but for mbufs.
*/
static int
nexus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
static bus_dma_segment_t *
nexus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
bus_dma_segment_t *segs, int nsegs, int error)
{
int nsegs, error;
M_ASSERTPKTHDR(m0);
nsegs = 0;
error = 0;
if (m0->m_pkthdr.len <= dmat->dt_maxsize) {
int first = 1;
bus_addr_t lastaddr = 0;
struct mbuf *m;
for (m = m0; m != NULL && error == 0; m = m->m_next) {
if (m->m_len > 0) {
error = _nexus_dmamap_load_buffer(dmat,
m->m_data, m->m_len,NULL, flags, &lastaddr,
dmat->dt_segments, &nsegs, first);
first = 0;
}
}
} else {
error = EINVAL;
}
if (error) {
/* force "no valid mappings" in callback */
(*callback)(callback_arg, dmat->dt_segments, 0, 0, error);
} else {
map->dm_flags |= DMF_LOADED;
(*callback)(callback_arg, dmat->dt_segments, nsegs + 1,
m0->m_pkthdr.len, error);
}
return (error);
}
static int
nexus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
bus_dma_segment_t *segs, int *nsegs, int flags)
{
int error;
M_ASSERTPKTHDR(m0);
*nsegs = 0;
error = 0;
if (m0->m_pkthdr.len <= dmat->dt_maxsize) {
int first = 1;
bus_addr_t lastaddr = 0;
struct mbuf *m;
for (m = m0; m != NULL && error == 0; m = m->m_next) {
if (m->m_len > 0) {
error = _nexus_dmamap_load_buffer(dmat,
m->m_data, m->m_len,NULL, flags, &lastaddr,
segs, nsegs, first);
first = 0;
}
}
} else {
error = EINVAL;
}
++*nsegs;
return (error);
}
/*
* Like nexus_dmamap_load(), but for uios.
*/
static int
nexus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
bus_addr_t lastaddr;
int nsegs, error, first, i;
bus_size_t resid;
struct iovec *iov;
struct thread *td = NULL;
resid = uio->uio_resid;
iov = uio->uio_iov;
if (uio->uio_segflg == UIO_USERSPACE) {
td = uio->uio_td;
KASSERT(td != NULL, ("%s: USERSPACE but no proc", __func__));
}
nsegs = 0;
error = 0;
first = 1;
for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
/*
* Now at the first iovec to load. Load each iovec
* until we have exhausted the residual count.
*/
bus_size_t minlen =
resid < iov[i].iov_len ? resid : iov[i].iov_len;
caddr_t addr = (caddr_t) iov[i].iov_base;
if (minlen > 0) {
error = _nexus_dmamap_load_buffer(dmat, addr, minlen,
td, flags, &lastaddr, dmat->dt_segments, &nsegs,
first);
first = 0;
resid -= minlen;
}
}
if (error) {
/* force "no valid mappings" in callback */
(*callback)(callback_arg, dmat->dt_segments, 0, 0, error);
} else {
map->dm_flags |= DMF_LOADED;
(*callback)(callback_arg, dmat->dt_segments, nsegs + 1,
uio->uio_resid, error);
}
return (error);
if (segs == NULL)
segs = dmat->dt_segments;
return (segs);
}
/*
@ -669,10 +566,10 @@ nexus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
static struct bus_dma_methods nexus_dma_methods = {
nexus_dmamap_create,
nexus_dmamap_destroy,
nexus_dmamap_load,
nexus_dmamap_load_mbuf,
nexus_dmamap_load_mbuf_sg,
nexus_dmamap_load_uio,
nexus_dmamap_load_phys,
nexus_dmamap_load_buffer,
nexus_dmamap_waitok,
nexus_dmamap_complete,
nexus_dmamap_unload,
nexus_dmamap_sync,
nexus_dmamem_alloc,

View File

@ -847,31 +847,167 @@ iommu_dvmamap_destroy(bus_dma_tag_t dt, bus_dmamap_t map)
}
/*
* IOMMU DVMA operations, common to PCI and SBus
* Utility function to load a physical buffer. segp contains
 * the starting segment on entrance, and the ending segment on exit.
*/
static int
iommu_dvmamap_load_buffer(bus_dma_tag_t dt, struct iommu_state *is,
bus_dmamap_t map, void *buf, bus_size_t buflen, struct thread *td,
int flags, bus_dma_segment_t *segs, int *segp, int align)
iommu_dvmamap_load_phys(bus_dma_tag_t dt, bus_dmamap_t map, vm_paddr_t buf,
bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp)
{
bus_addr_t amask, dvmaddr, dvmoffs;
bus_size_t sgsize, esize;
vm_offset_t vaddr, voffs;
struct iommu_state *is;
vm_offset_t voffs;
vm_paddr_t curaddr;
pmap_t pmap = NULL;
int error, firstpg, sgcnt;
u_int slot;
is = dt->dt_cookie;
if (*segp == -1) {
if ((map->dm_flags & DMF_LOADED) != 0) {
#ifdef DIAGNOSTIC
printf("%s: map still in use\n", __func__);
#endif
bus_dmamap_unload(dt, map);
}
/*
* Make sure that the map is not on a queue so that the
* resource list may be safely accessed and modified without
* needing the lock to cover the whole operation.
*/
IS_LOCK(is);
iommu_map_remq(is, map);
IS_UNLOCK(is);
amask = dt->dt_alignment - 1;
} else
amask = 0;
KASSERT(buflen != 0, ("%s: buflen == 0!", __func__));
if (buflen > dt->dt_maxsize)
return (EINVAL);
if (td != NULL)
pmap = vmspace_pmap(td->td_proc->p_vmspace);
if (segs == NULL)
segs = dt->dt_segments;
voffs = buf & IO_PAGE_MASK;
/* Try to find a slab that is large enough. */
error = iommu_dvma_vallocseg(dt, is, map, voffs, buflen, amask,
&dvmaddr);
if (error != 0)
return (error);
sgcnt = *segp;
firstpg = 1;
map->dm_flags &= ~DMF_STREAMED;
map->dm_flags |= iommu_use_streaming(is, map, buflen) != 0 ?
DMF_STREAMED : 0;
for (; buflen > 0; ) {
curaddr = buf;
/*
* Compute the segment size, and adjust counts.
*/
sgsize = IO_PAGE_SIZE - ((u_long)buf & IO_PAGE_MASK);
if (buflen < sgsize)
sgsize = buflen;
buflen -= sgsize;
buf += sgsize;
dvmoffs = trunc_io_page(dvmaddr);
iommu_enter(is, dvmoffs, trunc_io_page(curaddr),
(map->dm_flags & DMF_STREAMED) != 0, flags);
if ((is->is_flags & IOMMU_FLUSH_CACHE) != 0) {
slot = IOTSBSLOT(dvmoffs);
if (buflen <= 0 || slot % 8 == 7)
IOMMU_WRITE8(is, is_iommu, IMR_CACHE_FLUSH,
is->is_ptsb + slot * 8);
}
/*
* Chop the chunk up into segments of at most maxsegsz, but try
* to fill each segment as well as possible.
*/
if (!firstpg) {
esize = ulmin(sgsize,
dt->dt_maxsegsz - segs[sgcnt].ds_len);
segs[sgcnt].ds_len += esize;
sgsize -= esize;
dvmaddr += esize;
}
while (sgsize > 0) {
sgcnt++;
if (sgcnt >= dt->dt_nsegments)
return (EFBIG);
/*
* No extra alignment here - the common practice in
* the busdma code seems to be that only the first
* segment needs to satisfy the alignment constraints
* (and that only for bus_dmamem_alloc()ed maps).
* It is assumed that such tags have maxsegsize >=
* maxsize.
*/
esize = ulmin(sgsize, dt->dt_maxsegsz);
segs[sgcnt].ds_addr = dvmaddr;
segs[sgcnt].ds_len = esize;
sgsize -= esize;
dvmaddr += esize;
}
firstpg = 0;
}
*segp = sgcnt;
return (0);
}
/*
* IOMMU DVMA operations, common to PCI and SBus
*/
static int
iommu_dvmamap_load_buffer(bus_dma_tag_t dt, bus_dmamap_t map, void *buf,
bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
int *segp)
{
bus_addr_t amask, dvmaddr, dvmoffs;
bus_size_t sgsize, esize;
struct iommu_state *is;
vm_offset_t vaddr, voffs;
vm_paddr_t curaddr;
int error, firstpg, sgcnt;
u_int slot;
is = dt->dt_cookie;
if (*segp == -1) {
if ((map->dm_flags & DMF_LOADED) != 0) {
#ifdef DIAGNOSTIC
printf("%s: map still in use\n", __func__);
#endif
bus_dmamap_unload(dt, map);
}
/*
* Make sure that the map is not on a queue so that the
* resource list may be safely accessed and modified without
* needing the lock to cover the whole operation.
*/
IS_LOCK(is);
iommu_map_remq(is, map);
IS_UNLOCK(is);
amask = dt->dt_alignment - 1;
} else
amask = 0;
KASSERT(buflen != 0, ("%s: buflen == 0!", __func__));
if (buflen > dt->dt_maxsize)
return (EINVAL);
if (segs == NULL)
segs = dt->dt_segments;
vaddr = (vm_offset_t)buf;
voffs = vaddr & IO_PAGE_MASK;
amask = align ? dt->dt_alignment - 1 : 0;
/* Try to find a slab that is large enough. */
error = iommu_dvma_vallocseg(dt, is, map, voffs, buflen, amask,
@ -888,10 +1024,10 @@ iommu_dvmamap_load_buffer(bus_dma_tag_t dt, struct iommu_state *is,
/*
* Get the physical address for this page.
*/
if (pmap != NULL)
curaddr = pmap_extract(pmap, vaddr);
else
if (pmap == kernel_pmap)
curaddr = pmap_kextract(vaddr);
else
curaddr = pmap_extract(pmap, vaddr);
/*
* Compute the segment size, and adjust counts.
@ -949,129 +1085,17 @@ iommu_dvmamap_load_buffer(bus_dma_tag_t dt, struct iommu_state *is,
return (0);
}
static int
iommu_dvmamap_load(bus_dma_tag_t dt, bus_dmamap_t map, void *buf,
bus_size_t buflen, bus_dmamap_callback_t *cb, void *cba,
int flags)
static void
iommu_dvmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
{
struct iommu_state *is = dt->dt_cookie;
int error, seg = -1;
if ((map->dm_flags & DMF_LOADED) != 0) {
#ifdef DIAGNOSTIC
printf("%s: map still in use\n", __func__);
#endif
bus_dmamap_unload(dt, map);
}
/*
* Make sure that the map is not on a queue so that the resource list
* may be safely accessed and modified without needing the lock to
* cover the whole operation.
*/
IS_LOCK(is);
iommu_map_remq(is, map);
IS_UNLOCK(is);
error = iommu_dvmamap_load_buffer(dt, is, map, buf, buflen, NULL,
flags, dt->dt_segments, &seg, 1);
IS_LOCK(is);
iommu_map_insq(is, map);
if (error != 0) {
iommu_dvmamap_vunload(is, map);
IS_UNLOCK(is);
(*cb)(cba, dt->dt_segments, 0, error);
} else {
IS_UNLOCK(is);
map->dm_flags |= DMF_LOADED;
(*cb)(cba, dt->dt_segments, seg + 1, 0);
}
return (error);
}
static int
iommu_dvmamap_load_mbuf(bus_dma_tag_t dt, bus_dmamap_t map, struct mbuf *m0,
bus_dmamap_callback2_t *cb, void *cba, int flags)
static bus_dma_segment_t *
iommu_dvmamap_complete(bus_dma_tag_t dt, bus_dmamap_t map,
bus_dma_segment_t *segs, int nsegs, int error)
{
struct iommu_state *is = dt->dt_cookie;
struct mbuf *m;
int error = 0, first = 1, nsegs = -1;
M_ASSERTPKTHDR(m0);
if ((map->dm_flags & DMF_LOADED) != 0) {
#ifdef DIAGNOSTIC
printf("%s: map still in use\n", __func__);
#endif
bus_dmamap_unload(dt, map);
}
IS_LOCK(is);
iommu_map_remq(is, map);
IS_UNLOCK(is);
if (m0->m_pkthdr.len <= dt->dt_maxsize) {
for (m = m0; m != NULL && error == 0; m = m->m_next) {
if (m->m_len == 0)
continue;
error = iommu_dvmamap_load_buffer(dt, is, map,
m->m_data, m->m_len, NULL, flags, dt->dt_segments,
&nsegs, first);
first = 0;
}
} else
error = EINVAL;
IS_LOCK(is);
iommu_map_insq(is, map);
if (error != 0) {
iommu_dvmamap_vunload(is, map);
IS_UNLOCK(is);
/* force "no valid mappings" in callback */
(*cb)(cba, dt->dt_segments, 0, 0, error);
} else {
IS_UNLOCK(is);
map->dm_flags |= DMF_LOADED;
(*cb)(cba, dt->dt_segments, nsegs + 1, m0->m_pkthdr.len, 0);
}
return (error);
}
static int
iommu_dvmamap_load_mbuf_sg(bus_dma_tag_t dt, bus_dmamap_t map, struct mbuf *m0,
bus_dma_segment_t *segs, int *nsegs, int flags)
{
struct iommu_state *is = dt->dt_cookie;
struct mbuf *m;
int error = 0, first = 1;
M_ASSERTPKTHDR(m0);
*nsegs = -1;
if ((map->dm_flags & DMF_LOADED) != 0) {
#ifdef DIAGNOSTIC
printf("%s: map still in use\n", __func__);
#endif
bus_dmamap_unload(dt, map);
}
IS_LOCK(is);
iommu_map_remq(is, map);
IS_UNLOCK(is);
if (m0->m_pkthdr.len <= dt->dt_maxsize) {
for (m = m0; m != NULL && error == 0; m = m->m_next) {
if (m->m_len == 0)
continue;
error = iommu_dvmamap_load_buffer(dt, is, map,
m->m_data, m->m_len, NULL, flags, segs,
nsegs, first);
first = 0;
}
} else
error = EINVAL;
IS_LOCK(is);
iommu_map_insq(is, map);
@ -1081,71 +1105,10 @@ iommu_dvmamap_load_mbuf_sg(bus_dma_tag_t dt, bus_dmamap_t map, struct mbuf *m0,
} else {
IS_UNLOCK(is);
map->dm_flags |= DMF_LOADED;
++*nsegs;
}
return (error);
}
static int
iommu_dvmamap_load_uio(bus_dma_tag_t dt, bus_dmamap_t map, struct uio *uio,
bus_dmamap_callback2_t *cb, void *cba, int flags)
{
struct iommu_state *is = dt->dt_cookie;
struct iovec *iov;
struct thread *td = NULL;
bus_size_t minlen, resid;
int nsegs = -1, error = 0, first = 1, i;
if ((map->dm_flags & DMF_LOADED) != 0) {
#ifdef DIAGNOSTIC
printf("%s: map still in use\n", __func__);
#endif
bus_dmamap_unload(dt, map);
}
IS_LOCK(is);
iommu_map_remq(is, map);
IS_UNLOCK(is);
resid = uio->uio_resid;
iov = uio->uio_iov;
if (uio->uio_segflg == UIO_USERSPACE) {
td = uio->uio_td;
KASSERT(td != NULL,
("%s: USERSPACE but no proc", __func__));
}
for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
/*
* Now at the first iovec to load. Load each iovec
* until we have exhausted the residual count.
*/
minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
if (minlen == 0)
continue;
error = iommu_dvmamap_load_buffer(dt, is, map,
iov[i].iov_base, minlen, td, flags, dt->dt_segments,
&nsegs, first);
first = 0;
resid -= minlen;
}
IS_LOCK(is);
iommu_map_insq(is, map);
if (error) {
iommu_dvmamap_vunload(is, map);
IS_UNLOCK(is);
/* force "no valid mappings" in callback */
(*cb)(cba, dt->dt_segments, 0, 0, error);
} else {
IS_UNLOCK(is);
map->dm_flags |= DMF_LOADED;
(*cb)(cba, dt->dt_segments, nsegs + 1, uio->uio_resid, 0);
}
return (error);
if (segs == NULL)
segs = dt->dt_segments;
return (segs);
}
static void
@ -1241,10 +1204,10 @@ iommu_diag(struct iommu_state *is, vm_offset_t va)
struct bus_dma_methods iommu_dma_methods = {
iommu_dvmamap_create,
iommu_dvmamap_destroy,
iommu_dvmamap_load,
iommu_dvmamap_load_mbuf,
iommu_dvmamap_load_mbuf_sg,
iommu_dvmamap_load_uio,
iommu_dvmamap_load_phys,
iommu_dvmamap_load_buffer,
iommu_dvmamap_waitok,
iommu_dvmamap_complete,
iommu_dvmamap_unload,
iommu_dvmamap_sync,
iommu_dvmamem_alloc,

View File

@ -109,8 +109,14 @@
*/
#define BUS_DMA_KEEP_PG_OFFSET 0x400
#define BUS_DMA_LOAD_MBUF 0x800
/* Forwards needed by prototypes below. */
union ccb;
struct bio;
struct mbuf;
struct memdesc;
struct pmap;
struct uio;
/*
@ -190,6 +196,49 @@ typedef void bus_dmamap_callback_t(void *, bus_dma_segment_t *, int, int);
*/
typedef void bus_dmamap_callback2_t(void *, bus_dma_segment_t *, int, bus_size_t, int);
/*
* Map the buffer buf into bus space using the dmamap map.
*/
int bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
bus_size_t buflen, bus_dmamap_callback_t *callback,
void *callback_arg, int flags);
/*
* Like bus_dmamap_load but for mbufs. Note the use of the
* bus_dmamap_callback2_t interface.
*/
int bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
struct mbuf *mbuf,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags);
int bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
struct mbuf *mbuf, bus_dma_segment_t *segs,
int *nsegs, int flags);
/*
* Like bus_dmamap_load but for uios. Note the use of the
* bus_dmamap_callback2_t interface.
*/
int bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
struct uio *ui,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags);
/*
* Like bus_dmamap_load but for cam control blocks.
*/
int bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb,
bus_dmamap_callback_t *callback, void *callback_arg,
int flags);
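A hedged usage sketch for the new CCB loader, patterned on the ps3cdrom conversion earlier in this diff (the softc fields, the transfer callback and the error handling are hypothetical placeholders):

	/* Let busdma pick the right loader for whatever the CCB carries. */
	error = bus_dmamap_load_ccb(sc->sc_dmatag, xp->x_dmamap, ccb,
	    driver_transfer_cb, xp, /*flags*/0);
	if (error != 0 && error != EINPROGRESS)
		device_printf(sc->sc_dev,
		    "bus_dmamap_load_ccb failed (%d)\n", error);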
/*
* Loads any memory descriptor.
*/
int bus_dmamap_load_mem(bus_dma_tag_t dmat, bus_dmamap_t map,
struct memdesc *mem, bus_dmamap_callback_t *callback,
void *callback_arg, int flags);
/*
* XXX sparc64 uses the same interface, but a much different implementation.
* <machine/bus_dma.h> for the sparc64 arch contains the equivalent
@ -223,35 +272,6 @@ int bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
*/
void bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map);
/*
* Map the buffer buf into bus space using the dmamap map.
*/
int bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
bus_size_t buflen, bus_dmamap_callback_t *callback,
void *callback_arg, int flags);
/*
* Like bus_dmamap_load but for mbufs. Note the use of the
* bus_dmamap_callback2_t interface.
*/
int bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
struct mbuf *mbuf,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags);
int bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
struct mbuf *mbuf, bus_dma_segment_t *segs,
int *nsegs, int flags);
/*
* Like bus_dmamap_load but for uios. Note the use of the
* bus_dmamap_callback2_t interface.
*/
int bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
struct uio *ui,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags);
/*
* Perform a synchronization operation on the given map.
*/
@ -272,6 +292,36 @@ void _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map);
_bus_dmamap_unload(dmat, dmamap); \
} while (0)
/*
* The following functions define the interface between the MD and MI
* busdma layers. These are not intended for consumption by driver
* software.
*/
void __bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
struct memdesc *mem,
bus_dmamap_callback_t *callback,
void *callback_arg);
#define _bus_dmamap_waitok(dmat, map, mem, callback, callback_arg) \
do { \
if ((map) != NULL) \
__bus_dmamap_waitok(dmat, map, mem, callback, \
callback_arg); \
} while (0);
int _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map,
void *buf, bus_size_t buflen, struct pmap *pmap,
int flags, bus_dma_segment_t *segs, int *segp);
int _bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
vm_paddr_t paddr, bus_size_t buflen,
int flags, bus_dma_segment_t *segs, int *segp);
bus_dma_segment_t *_bus_dmamap_complete(bus_dma_tag_t dmat,
bus_dmamap_t map,
bus_dma_segment_t *segs,
int nsegs, int error);
#endif /* __sparc64__ */
#endif /* _BUS_DMA_H_ */
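To show how the MD hooks declared above are meant to be driven, here is a condensed, illustrative MI-side load sequence; it only demonstrates the calling convention implied by these prototypes and by the per-arch implementations elsewhere in this diff, and is not the actual MI code added by the commit:

/*
 * Illustrative sketch (not the real MI implementation): load a plain
 * kernel virtual buffer through the MD hooks and deliver the result to
 * the driver callback.
 */
static int
mi_load_vaddr_sketch(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback, void *callback_arg,
    int flags)
{
	bus_dma_segment_t *segs;
	struct memdesc mem;
	int error, nsegs;

	/* Remember the request so a deferred load can be retried later. */
	mem = memdesc_vaddr(buf, buflen);
	_bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);

	/* MD code builds the segment list; -1 means "no segment yet". */
	nsegs = -1;
	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, kernel_pmap,
	    flags, NULL, &nsegs);
	nsegs++;
	if (error == EINPROGRESS)
		return (error);	/* callback fires later, via busdma_swi() */

	/* MD code post-processes (IOMMU, bounce setup) and returns segs. */
	segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
	if (error != 0)
		(*callback)(callback_arg, segs, 0, error);
	else
		(*callback)(callback_arg, segs, nsegs, 0);
	return (error);
}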

sys/sys/memdesc.h (new file, 156 lines)
View File

@ -0,0 +1,156 @@
/*-
* Copyright (c) 2012 EMC Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _SYS_MEMDESC_H_
#define _SYS_MEMDESC_H_
struct bio;
struct bus_dma_segment;
struct uio;
struct mbuf;
union ccb;
/*
* struct memdesc encapsulates various memory descriptors and provides
* abstract access to them.
*/
struct memdesc {
union {
void *md_vaddr;
vm_paddr_t md_paddr;
struct bus_dma_segment *md_list;
struct bio *md_bio;
struct uio *md_uio;
struct mbuf *md_mbuf;
union ccb *md_ccb;
} u;
size_t md_opaque; /* type specific data. */
uint32_t md_type; /* Type of memory. */
};
#define MEMDESC_VADDR 1 /* Contiguous virtual address. */
#define MEMDESC_PADDR 2 /* Contiguous physical address. */
#define MEMDESC_VLIST 3 /* scatter/gather list of kva addresses. */
#define MEMDESC_PLIST 4 /* scatter/gather list of physical addresses. */
#define MEMDESC_BIO 5 /* Pointer to a bio (block io). */
#define MEMDESC_UIO 6 /* Pointer to a uio (any io). */
#define MEMDESC_MBUF 7 /* Pointer to a mbuf (network io). */
#define MEMDESC_CCB 8 /* Cam control block. (scsi/ata io). */
static inline struct memdesc
memdesc_vaddr(void *vaddr, size_t len)
{
struct memdesc mem;
mem.u.md_vaddr = vaddr;
mem.md_opaque = len;
mem.md_type = MEMDESC_VADDR;
return (mem);
}
static inline struct memdesc
memdesc_paddr(vm_paddr_t paddr, size_t len)
{
struct memdesc mem;
mem.u.md_paddr = paddr;
mem.md_opaque = len;
mem.md_type = MEMDESC_PADDR;
return (mem);
}
static inline struct memdesc
memdesc_vlist(struct bus_dma_segment *vlist, int sglist_cnt)
{
struct memdesc mem;
mem.u.md_list = vlist;
mem.md_opaque = sglist_cnt;
mem.md_type = MEMDESC_VLIST;
return (mem);
}
static inline struct memdesc
memdesc_plist(struct bus_dma_segment *plist, int sglist_cnt)
{
struct memdesc mem;
mem.u.md_list = plist;
mem.md_opaque = sglist_cnt;
mem.md_type = MEMDESC_PLIST;
return (mem);
}
static inline struct memdesc
memdesc_bio(struct bio *bio)
{
struct memdesc mem;
mem.u.md_bio = bio;
mem.md_type = MEMDESC_BIO;
return (mem);
}
static inline struct memdesc
memdesc_uio(struct uio *uio)
{
struct memdesc mem;
mem.u.md_uio = uio;
mem.md_type = MEMDESC_UIO;
return (mem);
}
static inline struct memdesc
memdesc_mbuf(struct mbuf *mbuf)
{
struct memdesc mem;
mem.u.md_mbuf = mbuf;
mem.md_type = MEMDESC_MBUF;
return (mem);
}
static inline struct memdesc
memdesc_ccb(union ccb *ccb)
{
struct memdesc mem;
mem.u.md_ccb = ccb;
mem.md_type = MEMDESC_CCB;
return (mem);
}
#endif /* _SYS_MEMDESC_H_ */
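For a consumer, the constructors above reduce DMA-loading arbitrary memory to building a descriptor and handing it to bus_dmamap_load_mem(); a hedged sketch (the softc fields, the bio pointer and the completion callback are hypothetical placeholders):

	struct memdesc mem;
	int error;

	mem = memdesc_bio(bp);	/* bp: a struct bio * owned by the caller */
	error = bus_dmamap_load_mem(sc->dmat, sc->map, &mem,
	    driver_dma_done, sc, BUS_DMA_NOWAIT);
	if (error != 0 && error != EINPROGRESS)
		printf("bus_dmamap_load_mem failed: %d\n", error);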

View File

@ -96,6 +96,8 @@ int copyinstrfrom(const void * __restrict src, void * __restrict dst,
int copyinuio(const struct iovec *iovp, u_int iovcnt, struct uio **uiop);
int copyout_map(struct thread *td, vm_offset_t *addr, size_t sz);
int copyout_unmap(struct thread *td, vm_offset_t addr, size_t sz);
int physcopyin(void *src, vm_paddr_t dst, size_t len);
int physcopyout(vm_paddr_t src, void *dst, size_t len);
int uiomove(void *cp, int n, struct uio *uio);
int uiomove_frombuf(void *buf, int buflen, struct uio *uio);
int uiomove_fromphys(struct vm_page *ma[], vm_offset_t offset, int n,

View File

@ -36,10 +36,10 @@ __FBSDID("$FreeBSD$");
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
@ -86,6 +86,7 @@ struct bounce_page {
vm_offset_t vaddr; /* kva of bounce buffer */
bus_addr_t busaddr; /* Physical address */
vm_offset_t datavaddr; /* kva of client data */
bus_addr_t dataaddr; /* client physical address */
bus_size_t datacount; /* client data count */
STAILQ_ENTRY(bounce_page) links;
};
@ -124,8 +125,7 @@ struct bus_dmamap {
int pagesneeded;
int pagesreserved;
bus_dma_tag_t dmat;
void *buf; /* unmapped buffer pointer */
bus_size_t buflen; /* unmapped buffer length */
struct memdesc mem;
bus_dmamap_callback_t *callback;
void *callback_arg;
STAILQ_ENTRY(bus_dmamap) links;
@ -141,11 +141,18 @@ static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
vm_offset_t vaddr, bus_size_t size);
vm_offset_t vaddr, bus_addr_t addr,
bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
int _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
void *buf, bus_size_t buflen, int flags);
static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
pmap_t pmap, void *buf, bus_size_t buflen,
int flags);
static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
vm_paddr_t buf, bus_size_t buflen,
int flags);
static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
int flags);
#ifdef XEN
#undef pmap_kextract
@ -579,7 +586,33 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}
int
static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
bus_size_t buflen, int flags)
{
bus_addr_t curaddr;
bus_size_t sgsize;
if ((map != &nobounce_dmamap && map->pagesneeded == 0)) {
/*
* Count the number of bounce pages
* needed in order to complete this transfer
*/
curaddr = buf;
while (buflen != 0) {
sgsize = MIN(buflen, dmat->maxsegsz);
if (run_filter(dmat, curaddr)) {
sgsize = MIN(sgsize, PAGE_SIZE);
map->pagesneeded++;
}
curaddr += sgsize;
buflen -= sgsize;
}
CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
}
}
static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
void *buf, bus_size_t buflen, int flags)
{
@ -604,12 +637,11 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
bus_size_t sg_len;
sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
if (pmap)
paddr = pmap_extract(pmap, vaddr);
else
if (pmap == kernel_pmap)
paddr = pmap_kextract(vaddr);
if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
run_filter(dmat, paddr) != 0) {
else
paddr = pmap_extract(pmap, vaddr);
if (run_filter(dmat, paddr) != 0) {
sg_len = roundup2(sg_len, dmat->alignment);
map->pagesneeded++;
}
@ -617,78 +649,177 @@ _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
}
CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
}
}
static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{
/* Reserve Necessary Bounce Pages */
if (map->pagesneeded != 0) {
mtx_lock(&bounce_lock);
if (flags & BUS_DMA_NOWAIT) {
if (reserve_bounce_pages(dmat, map, 0) != 0) {
mtx_unlock(&bounce_lock);
return (ENOMEM);
}
} else {
if (reserve_bounce_pages(dmat, map, 1) != 0) {
/* Queue us for resources */
map->dmat = dmat;
map->buf = buf;
map->buflen = buflen;
STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
map, links);
mtx_unlock(&bounce_lock);
return (EINPROGRESS);
}
mtx_lock(&bounce_lock);
if (flags & BUS_DMA_NOWAIT) {
if (reserve_bounce_pages(dmat, map, 0) != 0) {
mtx_unlock(&bounce_lock);
return (ENOMEM);
}
} else {
if (reserve_bounce_pages(dmat, map, 1) != 0) {
/* Queue us for resources */
STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
mtx_unlock(&bounce_lock);
return (EINPROGRESS);
}
mtx_unlock(&bounce_lock);
}
mtx_unlock(&bounce_lock);
return (0);
}
/*
* Utility function to load a linear buffer. lastaddrp holds state
* between invocations (for multiple-buffer loads). segp contains
 * the starting segment on entrance, and the ending segment on exit.
* first indicates if this is the first invocation of this function.
* Add a single contiguous physical range to the segment list.
*/
static __inline int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
bus_dmamap_t map,
void *buf, bus_size_t buflen,
pmap_t pmap,
int flags,
bus_addr_t *lastaddrp,
bus_dma_segment_t *segs,
int *segp,
int first)
static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
bus_addr_t baddr, bmask;
int seg;
/*
* Make sure we don't cross any boundaries.
*/
bmask = ~(dmat->boundary - 1);
if (dmat->boundary > 0) {
baddr = (curaddr + dmat->boundary) & bmask;
if (sgsize > (baddr - curaddr))
sgsize = (baddr - curaddr);
}
/*
* Insert chunk into a segment, coalescing with
* previous segment if possible.
*/
seg = *segp;
if (seg == -1) {
seg = 0;
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
} else {
if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
(segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
(dmat->boundary == 0 ||
(segs[seg].ds_addr & bmask) == (curaddr & bmask)))
segs[seg].ds_len += sgsize;
else {
if (++seg >= dmat->nsegments)
return (0);
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
}
}
*segp = seg;
return (sgsize);
}
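_bus_dmamap_addseg() concentrates the two segment-building rules: clip a chunk so it never crosses the tag's boundary, and coalesce it with the previous segment when the addresses are contiguous and the limits still hold. A self-contained sketch of those rules, using simplified tag and segment types (the struct names are illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>

struct seg { uint64_t addr; uint64_t len; };
struct tag { uint64_t boundary; uint64_t maxsegsz; int nsegments; };

/*
 * Append [curaddr, curaddr + sgsize) to segs[], clipping at the tag's
 * boundary and merging with the previous segment when contiguous.
 * Returns the number of bytes consumed, or 0 if segs[] is full.
 */
static uint64_t
addseg(const struct tag *t, uint64_t curaddr, uint64_t sgsize,
    struct seg *segs, int *segp)
{
	uint64_t baddr, bmask;
	int seg;

	bmask = ~(t->boundary - 1);
	if (t->boundary > 0) {
		/* Never let one segment cross a boundary line. */
		baddr = (curaddr + t->boundary) & bmask;
		if (sgsize > baddr - curaddr)
			sgsize = baddr - curaddr;
	}

	seg = *segp;
	if (seg == -1) {
		seg = 0;
		segs[seg].addr = curaddr;
		segs[seg].len = sgsize;
	} else if (curaddr == segs[seg].addr + segs[seg].len &&
	    segs[seg].len + sgsize <= t->maxsegsz &&
	    (t->boundary == 0 ||
	    (segs[seg].addr & bmask) == (curaddr & bmask))) {
		/* Physically contiguous and still within limits: coalesce. */
		segs[seg].len += sgsize;
	} else {
		if (++seg >= t->nsegments)
			return (0);
		segs[seg].addr = curaddr;
		segs[seg].len = sgsize;
	}
	*segp = seg;
	return (sgsize);
}

int
main(void)
{
	struct tag t = { 65536, 65536, 8 };
	struct seg segs[8];
	int segp = -1;

	/* Two adjacent 4 KiB pages coalesce into one 8 KiB segment. */
	addseg(&t, 0x10000, 4096, segs, &segp);
	addseg(&t, 0x11000, 4096, segs, &segp);
	printf("segments=%d len=%llu\n", segp + 1,
	    (unsigned long long)segs[0].len);
	return (0);
}

Run standalone it prints segments=1 len=8192, showing the two pages merging into a single segment.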
/*
* Utility function to load a physical buffer. segp contains
 * the starting segment on entrance, and the ending segment on exit.
*/
int
_bus_dmamap_load_phys(bus_dma_tag_t dmat,
bus_dmamap_t map,
vm_paddr_t buf, bus_size_t buflen,
int flags,
bus_dma_segment_t *segs,
int *segp)
{
bus_size_t sgsize;
bus_addr_t curaddr, lastaddr, baddr, bmask;
vm_offset_t vaddr;
int seg, error;
bus_addr_t curaddr;
int error;
if (map == NULL || map == &contig_dmamap)
map = &nobounce_dmamap;
if (segs == NULL)
segs = dmat->segments;
if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
error = _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
if (error)
return (error);
_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
if (map->pagesneeded != 0) {
error = _bus_dmamap_reserve_pages(dmat, map, flags);
if (error)
return (error);
}
}
while (buflen > 0) {
curaddr = buf;
sgsize = MIN(buflen, dmat->maxsegsz);
if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
sgsize = MIN(sgsize, PAGE_SIZE);
curaddr = add_bounce_page(dmat, map, 0, curaddr,
sgsize);
}
sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
segp);
if (sgsize == 0)
break;
buf += sgsize;
buflen -= sgsize;
}
/*
* Did we fit?
*/
return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
/*
* Utility function to load a linear buffer. segp contains
 * the starting segment on entrance, and the ending segment on exit.
*/
int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
bus_dmamap_t map,
void *buf, bus_size_t buflen,
pmap_t pmap,
int flags,
bus_dma_segment_t *segs,
int *segp)
{
bus_size_t sgsize;
bus_addr_t curaddr;
vm_offset_t vaddr;
int error;
if (map == NULL || map == &contig_dmamap)
map = &nobounce_dmamap;
if (segs == NULL)
segs = dmat->segments;
if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
if (map->pagesneeded != 0) {
error = _bus_dmamap_reserve_pages(dmat, map, flags);
if (error)
return (error);
}
}
vaddr = (vm_offset_t)buf;
lastaddr = *lastaddrp;
bmask = ~(dmat->boundary - 1);
for (seg = *segp; buflen > 0 ; ) {
while (buflen > 0) {
bus_size_t max_sgsize;
/*
* Get the physical address for this segment.
*/
if (pmap)
curaddr = pmap_extract(pmap, vaddr);
else
if (pmap == kernel_pmap)
curaddr = pmap_kextract(vaddr);
else
curaddr = pmap_extract(pmap, vaddr);
/*
* Compute the segment size, and adjust counts.
@ -699,228 +830,46 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dmat,
map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
sgsize = roundup2(sgsize, dmat->alignment);
sgsize = MIN(sgsize, max_sgsize);
curaddr = add_bounce_page(dmat, map, vaddr, sgsize);
curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
sgsize);
} else {
sgsize = MIN(sgsize, max_sgsize);
}
/*
* Make sure we don't cross any boundaries.
*/
if (dmat->boundary > 0) {
baddr = (curaddr + dmat->boundary) & bmask;
if (sgsize > (baddr - curaddr))
sgsize = (baddr - curaddr);
}
/*
* Insert chunk into a segment, coalescing with
* previous segment if possible.
*/
if (first) {
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
first = 0;
} else {
if (curaddr == lastaddr &&
(segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
(dmat->boundary == 0 ||
(segs[seg].ds_addr & bmask) == (curaddr & bmask)))
segs[seg].ds_len += sgsize;
else {
if (++seg >= dmat->nsegments)
break;
segs[seg].ds_addr = curaddr;
segs[seg].ds_len = sgsize;
}
}
lastaddr = curaddr + sgsize;
sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
segp);
if (sgsize == 0)
break;
vaddr += sgsize;
buflen -= sgsize;
}
*segp = seg;
*lastaddrp = lastaddr;
/*
* Did we fit?
*/
return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
/*
* Map the buffer buf into bus space using the dmamap map.
*/
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
bus_size_t buflen, bus_dmamap_callback_t *callback,
void *callback_arg, int flags)
void
__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
struct memdesc *mem, bus_dmamap_callback_t *callback,
void *callback_arg)
{
bus_addr_t lastaddr = 0;
int error, nsegs = 0;
if (map != NULL) {
flags |= BUS_DMA_WAITOK;
map->mem = *mem;
map->dmat = dmat;
map->callback = callback;
map->callback_arg = callback_arg;
}
error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags,
&lastaddr, dmat->segments, &nsegs, 1);
CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
__func__, dmat, dmat->flags, error, nsegs + 1);
if (error == EINPROGRESS) {
return (error);
}
if (error)
(*callback)(callback_arg, dmat->segments, 0, error);
else
(*callback)(callback_arg, dmat->segments, nsegs + 1, 0);
/*
* Return ENOMEM to the caller so that it can pass it up the stack.
 * This error only happens when NOWAIT is set, so deferral is disabled.
*/
if (error == ENOMEM)
return (error);
return (0);
}
/*
* Like _bus_dmamap_load(), but for mbufs.
*/
static __inline int
_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
int flags)
bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
bus_dma_segment_t *segs, int nsegs, int error)
{
int error;
M_ASSERTPKTHDR(m0);
flags |= BUS_DMA_NOWAIT;
*nsegs = 0;
error = 0;
if (m0->m_pkthdr.len <= dmat->maxsize) {
int first = 1;
bus_addr_t lastaddr = 0;
struct mbuf *m;
for (m = m0; m != NULL && error == 0; m = m->m_next) {
if (m->m_len > 0) {
error = _bus_dmamap_load_buffer(dmat, map,
m->m_data, m->m_len,
NULL, flags, &lastaddr,
segs, nsegs, first);
first = 0;
}
}
} else {
error = EINVAL;
}
/* XXX FIXME: Having to increment nsegs is really annoying */
++*nsegs;
CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
__func__, dmat, dmat->flags, error, *nsegs);
return (error);
}
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
struct mbuf *m0,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags)
{
int nsegs, error;
error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, dmat->segments, &nsegs,
flags);
if (error) {
/* force "no valid mappings" in callback */
(*callback)(callback_arg, dmat->segments, 0, 0, error);
} else {
(*callback)(callback_arg, dmat->segments,
nsegs, m0->m_pkthdr.len, error);
}
CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
__func__, dmat, dmat->flags, error, nsegs);
return (error);
}
int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
int flags)
{
return (_bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags));
}
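bus_dmamap_load_mbuf_sg() is the form network drivers usually call on the transmit path, since it fills a caller-supplied segment array synchronously instead of deferring to a callback. A hedged sketch of that pattern, not taken from any driver in the tree; MYDRV_MAXSCATTER and mydrv_write_desc() are placeholders and the header list is approximate:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/bus.h>
#include <machine/bus.h>

#define MYDRV_MAXSCATTER	32	/* driver-chosen segment limit */

static void
mydrv_write_desc(bus_addr_t addr, bus_size_t len)
{
	/* Placeholder: a real driver programs a hardware descriptor here. */
	(void)addr;
	(void)len;
}

static int
mydrv_encap(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mp)
{
	bus_dma_segment_t segs[MYDRV_MAXSCATTER];
	struct mbuf *m;
	int error, i, nsegs;

	error = bus_dmamap_load_mbuf_sg(tag, map, *mp, segs, &nsegs,
	    BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Too many segments: defragment the chain and retry once. */
		m = m_defrag(*mp, M_NOWAIT);
		if (m == NULL)
			return (ENOBUFS);
		*mp = m;
		error = bus_dmamap_load_mbuf_sg(tag, map, *mp, segs, &nsegs,
		    BUS_DMA_NOWAIT);
	}
	if (error != 0)
		return (error);

	for (i = 0; i < nsegs; i++)
		mydrv_write_desc(segs[i].ds_addr, segs[i].ds_len);
	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);
	return (0);
}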
/*
* Like _bus_dmamap_load(), but for uios.
*/
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
struct uio *uio,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags)
{
bus_addr_t lastaddr = 0;
int nsegs, error, first, i;
bus_size_t resid;
struct iovec *iov;
pmap_t pmap;
flags |= BUS_DMA_NOWAIT;
resid = uio->uio_resid;
iov = uio->uio_iov;
if (uio->uio_segflg == UIO_USERSPACE) {
KASSERT(uio->uio_td != NULL,
("bus_dmamap_load_uio: USERSPACE but no proc"));
pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
} else
pmap = NULL;
nsegs = 0;
error = 0;
first = 1;
for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
/*
* Now at the first iovec to load. Load each iovec
* until we have exhausted the residual count.
*/
bus_size_t minlen =
resid < iov[i].iov_len ? resid : iov[i].iov_len;
caddr_t addr = (caddr_t) iov[i].iov_base;
if (minlen > 0) {
error = _bus_dmamap_load_buffer(dmat, map,
addr, minlen, pmap, flags, &lastaddr,
dmat->segments, &nsegs, first);
first = 0;
resid -= minlen;
}
}
if (error) {
/* force "no valid mappings" in callback */
(*callback)(callback_arg, dmat->segments, 0, 0, error);
} else {
(*callback)(callback_arg, dmat->segments,
nsegs+1, uio->uio_resid, error);
}
CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
__func__, dmat, dmat->flags, error, nsegs + 1);
return (error);
if (segs == NULL)
segs = dmat->segments;
return (segs);
}
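__bus_dmamap_waitok() stashes the memory descriptor and callback on the map so that busdma_swi() can re-issue a deferred load, while _bus_dmamap_complete() simply hands the segment array back to the MI code. From a driver's point of view the contract is unchanged: with BUS_DMA_WAITOK the callback may run later from the software interrupt, so bus_dmamap_load() returning EINPROGRESS is not an error. A minimal sketch of a driver tolerating that; the mydrv_* names are hypothetical, the tag is assumed to allow a single segment, and the header list is approximate:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <machine/bus.h>

struct mydrv_xfer {
	bus_addr_t	paddr;
	bus_size_t	len;
	int		ready;
};

static void
mydrv_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct mydrv_xfer *xp = arg;

	if (error != 0 || nseg != 1) {
		xp->ready = 0;
		return;
	}
	xp->paddr = segs[0].ds_addr;
	xp->len = segs[0].ds_len;
	xp->ready = 1;		/* may happen now or later from busdma_swi() */
}

static int
mydrv_start(bus_dma_tag_t tag, bus_dmamap_t map, void *buf, bus_size_t len,
    struct mydrv_xfer *xp)
{
	int error;

	error = bus_dmamap_load(tag, map, buf, len, mydrv_load_cb, xp,
	    BUS_DMA_WAITOK);
	if (error == EINPROGRESS)
		return (0);	/* deferred: callback runs from busdma_swi() */
	return (error);
}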
/*
@ -953,9 +902,14 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
if (op & BUS_DMASYNC_PREWRITE) {
while (bpage != NULL) {
bcopy((void *)bpage->datavaddr,
(void *)bpage->vaddr,
bpage->datacount);
if (bpage->datavaddr != 0)
bcopy((void *)bpage->datavaddr,
(void *)bpage->vaddr,
bpage->datacount);
else
physcopyout(bpage->dataaddr,
(void *)bpage->vaddr,
bpage->datacount);
bpage = STAILQ_NEXT(bpage, links);
}
dmat->bounce_zone->total_bounced++;
@ -963,9 +917,14 @@ _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
if (op & BUS_DMASYNC_POSTREAD) {
while (bpage != NULL) {
bcopy((void *)bpage->vaddr,
(void *)bpage->datavaddr,
bpage->datacount);
if (bpage->datavaddr != 0)
bcopy((void *)bpage->vaddr,
(void *)bpage->datavaddr,
bpage->datacount);
else
physcopyin((void *)bpage->vaddr,
bpage->dataaddr,
bpage->datacount);
bpage = STAILQ_NEXT(bpage, links);
}
dmat->bounce_zone->total_bounced++;
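Both sync hunks above follow one rule: BUS_DMASYNC_PREWRITE stages client data into the bounce page before the device reads it, and BUS_DMASYNC_POSTREAD copies device-written data back out, using bcopy() when the client has a KVA and physcopyout()/physcopyin() when only a physical address was recorded. A small user-space model of that direction rule, with memcpy() standing in for all three copy primitives:

#include <stddef.h>
#include <string.h>

#define MODEL_PREWRITE	0x1	/* device is about to read the buffer */
#define MODEL_POSTREAD	0x2	/* device has finished writing the buffer */

struct model_bpage {
	void	*bounce;	/* bounce buffer, always mapped */
	void	*datavaddr;	/* client KVA, or NULL for physical-only */
	void	*dataphys;	/* stand-in for the client physical address */
	size_t	 datacount;
};

/* memcpy() models bcopy() for KVA clients and physcopyout/in otherwise. */
static void
model_bounce_sync(struct model_bpage *bp, int op)
{
	if (op & MODEL_PREWRITE) {
		if (bp->datavaddr != NULL)
			memcpy(bp->bounce, bp->datavaddr, bp->datacount);
		else
			memcpy(bp->bounce, bp->dataphys, bp->datacount);
	}
	if (op & MODEL_POSTREAD) {
		if (bp->datavaddr != NULL)
			memcpy(bp->datavaddr, bp->bounce, bp->datacount);
		else
			memcpy(bp->dataphys, bp->bounce, bp->datacount);
	}
}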
@ -1137,7 +1096,7 @@ reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
bus_size_t size)
bus_addr_t addr, bus_size_t size)
{
struct bounce_zone *bz;
struct bounce_page *bpage;
@ -1171,6 +1130,7 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
bpage->busaddr |= vaddr & PAGE_MASK;
}
bpage->datavaddr = vaddr;
bpage->dataaddr = addr;
bpage->datacount = size;
STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
return (bpage->busaddr);
@ -1224,8 +1184,9 @@ busdma_swi(void)
mtx_unlock(&bounce_lock);
dmat = map->dmat;
(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
map->callback, map->callback_arg, /*flags*/0);
bus_dmamap_load_mem(map->dmat, map, &map->mem,
map->callback, map->callback_arg,
BUS_DMA_WAITOK);
(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
mtx_lock(&bounce_lock);
}