Avoid use of contiguous memory allocations in busdma when possible.
This patch improves the boundary checks in busdma to allow more cases to use the regular page-based kernel memory allocator, especially in the case of a non-zero boundary in the parent DMA tag. For example, AMD64-based platforms set the PCI DMA tag boundary to PCI_DMA_BOUNDARY (4GB), which before this patch caused contiguous memory allocations to be preferred when allocating more than PAGE_SIZE bytes, even if the required alignment was less than PAGE_SIZE bytes. This patch also fixes the nsegments check for using kmem_alloc_attr() when the maximum segment size is less than PAGE_SIZE bytes. Updated some comments describing the code in question. Differential Revision: https://reviews.freebsd.org/D10645 Reviewed by: kib, jhb, gallatin, scottl MFC after: 1 week Sponsored by: Mellanox Technologies
This commit is contained in:
parent
7a36bd9ffd
commit
65b017b420
@@ -736,8 +736,10 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
|
|||||||
if (bufzone != NULL && dmat->alignment <= bufzone->size &&
|
if (bufzone != NULL && dmat->alignment <= bufzone->size &&
|
||||||
!_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) {
|
!_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) {
|
||||||
*vaddr = uma_zalloc(bufzone->umazone, mflags);
|
*vaddr = uma_zalloc(bufzone->umazone, mflags);
|
||||||
} else if (dmat->nsegments >= btoc(dmat->maxsize) &&
|
} else if (dmat->nsegments >=
|
||||||
dmat->alignment <= PAGE_SIZE && dmat->boundary == 0) {
|
howmany(dmat->maxsize, MIN(dmat->maxsegsz, PAGE_SIZE)) &&
|
||||||
|
dmat->alignment <= PAGE_SIZE &&
|
||||||
|
(dmat->boundary % PAGE_SIZE) == 0) {
|
||||||
*vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
|
*vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
|
||||||
mflags, 0, dmat->lowaddr, memattr);
|
mflags, 0, dmat->lowaddr, memattr);
|
||||||
} else {
|
} else {
|
||||||
|
@@ -779,7 +779,9 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
|
|||||||
* (the allocator aligns buffers to their size boundaries).
|
* (the allocator aligns buffers to their size boundaries).
|
||||||
* - There's no need to handle lowaddr/highaddr exclusion zones.
|
* - There's no need to handle lowaddr/highaddr exclusion zones.
|
||||||
* else allocate non-contiguous pages if...
|
* else allocate non-contiguous pages if...
|
||||||
* - The page count that could get allocated doesn't exceed nsegments.
|
* - The page count that could get allocated doesn't exceed
|
||||||
|
* nsegments also when the maximum segment size is less
|
||||||
|
* than PAGE_SIZE.
|
||||||
* - The alignment constraint isn't larger than a page boundary.
|
* - The alignment constraint isn't larger than a page boundary.
|
||||||
* - There are no boundary-crossing constraints.
|
* - There are no boundary-crossing constraints.
|
||||||
* else allocate a block of contiguous pages because one or more of the
|
* else allocate a block of contiguous pages because one or more of the
|
||||||
@@ -788,8 +790,10 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
|
|||||||
if (bufzone != NULL && dmat->alignment <= bufzone->size &&
|
if (bufzone != NULL && dmat->alignment <= bufzone->size &&
|
||||||
!exclusion_bounce(dmat)) {
|
!exclusion_bounce(dmat)) {
|
||||||
*vaddr = uma_zalloc(bufzone->umazone, mflags);
|
*vaddr = uma_zalloc(bufzone->umazone, mflags);
|
||||||
} else if (dmat->nsegments >= btoc(dmat->maxsize) &&
|
} else if (dmat->nsegments >=
|
||||||
dmat->alignment <= PAGE_SIZE && dmat->boundary == 0) {
|
howmany(dmat->maxsize, MIN(dmat->maxsegsz, PAGE_SIZE)) &&
|
||||||
|
dmat->alignment <= PAGE_SIZE &&
|
||||||
|
(dmat->boundary % PAGE_SIZE) == 0) {
|
||||||
*vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
|
*vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
|
||||||
mflags, 0, dmat->lowaddr, memattr);
|
mflags, 0, dmat->lowaddr, memattr);
|
||||||
} else {
|
} else {
|
||||||
|
@@ -461,22 +461,35 @@ bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
|
|||||||
(*mapp)->flags = DMAMAP_FROM_DMAMEM;
|
(*mapp)->flags = DMAMAP_FROM_DMAMEM;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* XXX:
|
* Allocate the buffer from the malloc(9) allocator if...
|
||||||
* (dmat->alignment <= dmat->maxsize) is just a quick hack; the exact
|
* - It's small enough to fit into a single power of two sized bucket.
|
||||||
* alignment guarantees of malloc need to be nailed down, and the
|
* - The alignment is less than or equal to the maximum size
|
||||||
* code below should be rewritten to take that into account.
|
* - The low address requirement is fulfilled.
|
||||||
|
* else allocate non-contiguous pages if...
|
||||||
|
* - The page count that could get allocated doesn't exceed
|
||||||
|
* nsegments also when the maximum segment size is less
|
||||||
|
* than PAGE_SIZE.
|
||||||
|
* - The alignment constraint isn't larger than a page boundary.
|
||||||
|
* - There are no boundary-crossing constraints.
|
||||||
|
* else allocate a block of contiguous pages because one or more of the
|
||||||
|
* constraints is something that only the contig allocator can fulfill.
|
||||||
*
|
*
|
||||||
* In the meantime, we'll warn the user if malloc gets it wrong.
|
* NOTE: The (dmat->common.alignment <= dmat->maxsize) check
|
||||||
|
* below is just a quick hack. The exact alignment guarantees
|
||||||
|
* of malloc(9) need to be nailed down, and the code below
|
||||||
|
* should be rewritten to take that into account.
|
||||||
|
*
|
||||||
|
* In the meantime warn the user if malloc gets it wrong.
|
||||||
*/
|
*/
|
||||||
if ((dmat->common.maxsize <= PAGE_SIZE) &&
|
if ((dmat->common.maxsize <= PAGE_SIZE) &&
|
||||||
(dmat->common.alignment <= dmat->common.maxsize) &&
|
(dmat->common.alignment <= dmat->common.maxsize) &&
|
||||||
dmat->common.lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
|
dmat->common.lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
|
||||||
attr == VM_MEMATTR_DEFAULT) {
|
attr == VM_MEMATTR_DEFAULT) {
|
||||||
*vaddr = malloc(dmat->common.maxsize, M_DEVBUF, mflags);
|
*vaddr = malloc(dmat->common.maxsize, M_DEVBUF, mflags);
|
||||||
} else if (dmat->common.nsegments >= btoc(dmat->common.maxsize) &&
|
} else if (dmat->common.nsegments >=
|
||||||
|
howmany(dmat->common.maxsize, MIN(dmat->common.maxsegsz, PAGE_SIZE)) &&
|
||||||
dmat->common.alignment <= PAGE_SIZE &&
|
dmat->common.alignment <= PAGE_SIZE &&
|
||||||
(dmat->common.boundary == 0 ||
|
(dmat->common.boundary % PAGE_SIZE) == 0) {
|
||||||
dmat->common.boundary >= dmat->common.lowaddr)) {
|
|
||||||
/* Page-based multi-segment allocations allowed */
|
/* Page-based multi-segment allocations allowed */
|
||||||
*vaddr = (void *)kmem_alloc_attr(kernel_arena,
|
*vaddr = (void *)kmem_alloc_attr(kernel_arena,
|
||||||
dmat->common.maxsize, mflags, 0ul, dmat->common.lowaddr,
|
dmat->common.maxsize, mflags, 0ul, dmat->common.lowaddr,
|
||||||
|
@@ -690,7 +690,9 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddrp, int flags,
|
|||||||
* (the allocator aligns buffers to their size boundaries).
|
* (the allocator aligns buffers to their size boundaries).
|
||||||
* - There's no need to handle lowaddr/highaddr exclusion zones.
|
* - There's no need to handle lowaddr/highaddr exclusion zones.
|
||||||
* else allocate non-contiguous pages if...
|
* else allocate non-contiguous pages if...
|
||||||
* - The page count that could get allocated doesn't exceed nsegments.
|
* - The page count that could get allocated doesn't exceed
|
||||||
|
* nsegments also when the maximum segment size is less
|
||||||
|
* than PAGE_SIZE.
|
||||||
* - The alignment constraint isn't larger than a page boundary.
|
* - The alignment constraint isn't larger than a page boundary.
|
||||||
* - There are no boundary-crossing constraints.
|
* - There are no boundary-crossing constraints.
|
||||||
* else allocate a block of contiguous pages because one or more of the
|
* else allocate a block of contiguous pages because one or more of the
|
||||||
@@ -699,8 +701,10 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddrp, int flags,
|
|||||||
if (bufzone != NULL && dmat->alignment <= bufzone->size &&
|
if (bufzone != NULL && dmat->alignment <= bufzone->size &&
|
||||||
!_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) {
|
!_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) {
|
||||||
vaddr = uma_zalloc(bufzone->umazone, mflags);
|
vaddr = uma_zalloc(bufzone->umazone, mflags);
|
||||||
} else if (dmat->nsegments >= btoc(dmat->maxsize) &&
|
} else if (dmat->nsegments >=
|
||||||
dmat->alignment <= PAGE_SIZE && dmat->boundary == 0) {
|
howmany(dmat->maxsize, MIN(dmat->maxsegsz, PAGE_SIZE)) &&
|
||||||
|
dmat->alignment <= PAGE_SIZE &&
|
||||||
|
(dmat->boundary % PAGE_SIZE) == 0) {
|
||||||
vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
|
vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
|
||||||
mflags, 0, dmat->lowaddr, memattr);
|
mflags, 0, dmat->lowaddr, memattr);
|
||||||
} else {
|
} else {
|
||||||
|
@@ -397,22 +397,35 @@ bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
|
|||||||
attr = VM_MEMATTR_DEFAULT;
|
attr = VM_MEMATTR_DEFAULT;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* XXX:
|
* Allocate the buffer from the malloc(9) allocator if...
|
||||||
* (dmat->alignment <= dmat->maxsize) is just a quick hack; the exact
|
* - It's small enough to fit into a single power of two sized bucket.
|
||||||
* alignment guarantees of malloc need to be nailed down, and the
|
* - The alignment is less than or equal to the maximum size
|
||||||
* code below should be rewritten to take that into account.
|
* - The low address requirement is fulfilled.
|
||||||
|
* else allocate non-contiguous pages if...
|
||||||
|
* - The page count that could get allocated doesn't exceed
|
||||||
|
* nsegments also when the maximum segment size is less
|
||||||
|
* than PAGE_SIZE.
|
||||||
|
* - The alignment constraint isn't larger than a page boundary.
|
||||||
|
* - There are no boundary-crossing constraints.
|
||||||
|
* else allocate a block of contiguous pages because one or more of the
|
||||||
|
* constraints is something that only the contig allocator can fulfill.
|
||||||
*
|
*
|
||||||
* In the meantime, we'll warn the user if malloc gets it wrong.
|
* NOTE: The (dmat->common.alignment <= dmat->maxsize) check
|
||||||
|
* below is just a quick hack. The exact alignment guarantees
|
||||||
|
* of malloc(9) need to be nailed down, and the code below
|
||||||
|
* should be rewritten to take that into account.
|
||||||
|
*
|
||||||
|
* In the meantime warn the user if malloc gets it wrong.
|
||||||
*/
|
*/
|
||||||
if ((dmat->common.maxsize <= PAGE_SIZE) &&
|
if ((dmat->common.maxsize <= PAGE_SIZE) &&
|
||||||
(dmat->common.alignment <= dmat->common.maxsize) &&
|
(dmat->common.alignment <= dmat->common.maxsize) &&
|
||||||
dmat->common.lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
|
dmat->common.lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
|
||||||
attr == VM_MEMATTR_DEFAULT) {
|
attr == VM_MEMATTR_DEFAULT) {
|
||||||
*vaddr = malloc(dmat->common.maxsize, M_DEVBUF, mflags);
|
*vaddr = malloc(dmat->common.maxsize, M_DEVBUF, mflags);
|
||||||
} else if (dmat->common.nsegments >= btoc(dmat->common.maxsize) &&
|
} else if (dmat->common.nsegments >=
|
||||||
|
howmany(dmat->common.maxsize, MIN(dmat->common.maxsegsz, PAGE_SIZE)) &&
|
||||||
dmat->common.alignment <= PAGE_SIZE &&
|
dmat->common.alignment <= PAGE_SIZE &&
|
||||||
(dmat->common.boundary == 0 ||
|
(dmat->common.boundary % PAGE_SIZE) == 0) {
|
||||||
dmat->common.boundary >= dmat->common.lowaddr)) {
|
|
||||||
/* Page-based multi-segment allocations allowed */
|
/* Page-based multi-segment allocations allowed */
|
||||||
*vaddr = (void *)kmem_alloc_attr(kernel_arena,
|
*vaddr = (void *)kmem_alloc_attr(kernel_arena,
|
||||||
dmat->common.maxsize, mflags, 0ul, dmat->common.lowaddr,
|
dmat->common.maxsize, mflags, 0ul, dmat->common.lowaddr,
|
||||||
|
Loading…
x
Reference in New Issue
Block a user