When bouncing pages, allow a new option to preserve the intra-page
offset.  This is needed for the ehci hardware buffer rings that assume
this behavior.

This is an interim solution, and a more general one is being worked
on.  This solution doesn't break anything that doesn't ask for it
directly.  The mbuf and uio variants with this flag likely don't work
and haven't been tested.

Universe builds with these changes.  I don't have a huge-memory
machine to test these changes with, but will be happy to work with
folks that do, and with hps, if this change turns out not to be
sufficient.

Submitted by:	alfred@ from Hans Peter Selasky's original
This commit is contained in:
Warner Losh 2009-02-08 22:54:58 +00:00
parent 7b54b1a9f5
commit 047e5fdabc
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=188350
6 changed files with 36 additions and 1 deletions

View File

@ -1128,6 +1128,13 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
bz->active_bpages++;
mtx_unlock(&bounce_lock);
if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
/* page offset needs to be preserved */
bpage->vaddr &= ~PAGE_MASK;
bpage->busaddr &= ~PAGE_MASK;
bpage->vaddr |= vaddr & PAGE_MASK;
bpage->busaddr |= vaddr & PAGE_MASK;
}
bpage->datavaddr = vaddr;
bpage->datacount = size;
STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);

View File

@ -1417,6 +1417,13 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
bz->active_bpages++;
mtx_unlock(&bounce_lock);
if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
/* page offset needs to be preserved */
bpage->vaddr &= ~PAGE_MASK;
bpage->busaddr &= ~PAGE_MASK;
bpage->vaddr |= vaddr & PAGE_MASK;
bpage->busaddr |= vaddr & PAGE_MASK;
}
bpage->datavaddr = vaddr;
bpage->datacount = size;
STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);

View File

@ -351,7 +351,7 @@ usb2_dma_tag_create(struct usb2_dma_tag *udt,
(2 + (size / USB_PAGE_SIZE)) : 1,
/* maxsegsz */ (align == 1) ?
USB_PAGE_SIZE : size,
/* flags */ 0,
/* flags */ BUS_DMA_KEEP_PG_OFFSET,
/* lockfn */ &usb2_dma_lock_cb,
/* lockarg */ NULL,
&tag)) {

View File

@ -1146,6 +1146,13 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
bz->active_bpages++;
mtx_unlock(&bounce_lock);
if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
/* page offset needs to be preserved */
bpage->vaddr &= ~PAGE_MASK;
bpage->busaddr &= ~PAGE_MASK;
bpage->vaddr |= vaddr & PAGE_MASK;
bpage->busaddr |= vaddr & PAGE_MASK;
}
bpage->datavaddr = vaddr;
bpage->datacount = size;
STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);

View File

@ -936,6 +936,13 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
active_bpages++;
mtx_unlock(&bounce_lock);
if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
/* page offset needs to be preserved */
bpage->vaddr &= ~PAGE_MASK;
bpage->busaddr &= ~PAGE_MASK;
bpage->vaddr |= vaddr & PAGE_MASK;
bpage->busaddr |= vaddr & PAGE_MASK;
}
bpage->datavaddr = vaddr;
bpage->datacount = size;
STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);

View File

@ -102,6 +102,13 @@
#define BUS_DMA_NOWRITE 0x100
#define BUS_DMA_NOCACHE 0x200
/*
* The following flag is a DMA tag hint that the page offset of the
* loaded kernel virtual address must be preserved in the first
* physical segment address, when the KVA is loaded into DMA.
*/
#define BUS_DMA_KEEP_PG_OFFSET 0x400
/* Forwards needed by prototypes below. */
struct mbuf;
struct uio;