vm_extern: use standard address checkers everywhere

Define simple functions for alignment and boundary checks and use them
everywhere instead of having slightly different implementations
scattered about. Define them in vm_extern.h and use them, where
possible, in the files that include vm_extern.h.

Reviewed by:	kib, markj
Differential Revision:	https://reviews.freebsd.org/D33685
This commit is contained in:
Doug Moore 2021-12-30 22:09:08 -06:00
parent c09981f142
commit c606ab59e7
16 changed files with 1624 additions and 93 deletions

View File

@ -318,7 +318,7 @@ static __inline int
alignment_bounce(bus_dma_tag_t dmat, bus_addr_t addr)
{
return (addr & (dmat->alignment - 1));
return (!vm_addr_align_ok(addr, dmat->alignment));
}
/*
@ -1007,18 +1007,13 @@ static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
bus_addr_t baddr, bmask;
int seg;
/*
* Make sure we don't cross any boundaries.
*/
bmask = ~(dmat->boundary - 1);
if (dmat->boundary > 0) {
baddr = (curaddr + dmat->boundary) & bmask;
if (sgsize > (baddr - curaddr))
sgsize = (baddr - curaddr);
}
if (!vm_addr_bound_ok(curaddr, sgsize, dmat->boundary))
sgsize = roundup2(curaddr, dmat->boundary) - curaddr;
/*
* Insert chunk into a segment, coalescing with
@ -1032,8 +1027,8 @@ _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
} else {
if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
(segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
(dmat->boundary == 0 ||
(segs[seg].ds_addr & bmask) == (curaddr & bmask)))
vm_addr_bound_ok(segs[seg].ds_addr, segs[seg].ds_len,
dmat->boundary))
segs[seg].ds_len += sgsize;
else {
if (++seg >= dmat->nsegments)

View File

@ -197,7 +197,7 @@ static bool
alignment_bounce(bus_dma_tag_t dmat, bus_addr_t addr)
{
return ((addr & (dmat->common.alignment - 1)) != 0);
return (!vm_addr_align_ok(addr, dmat->common.alignment));
}
static bool
@ -616,7 +616,7 @@ bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
__func__, dmat, dmat->common.flags, ENOMEM);
free(*mapp, M_DEVBUF);
return (ENOMEM);
} else if (vtophys(*vaddr) & (dmat->alloc_alignment - 1)) {
} else if (!vm_addr_align_ok(vtophys(*vaddr), dmat->alloc_alignment)) {
printf("bus_dmamem_alloc failed to align memory properly.\n");
}
dmat->map_count++;
@ -767,18 +767,13 @@ static bus_size_t
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
bus_addr_t baddr, bmask;
int seg;
/*
* Make sure we don't cross any boundaries.
*/
bmask = ~(dmat->common.boundary - 1);
if (dmat->common.boundary > 0) {
baddr = (curaddr + dmat->common.boundary) & bmask;
if (sgsize > (baddr - curaddr))
sgsize = (baddr - curaddr);
}
if (!vm_addr_bound_ok(curaddr, sgsize, dmat->common.boundary))
sgsize = roundup2(curaddr, dmat->common.boundary) - curaddr;
/*
* Insert chunk into a segment, coalescing with
@ -792,8 +787,8 @@ _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
} else {
if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
(segs[seg].ds_len + sgsize) <= dmat->common.maxsegsz &&
(dmat->common.boundary == 0 ||
(segs[seg].ds_addr & bmask) == (curaddr & bmask)))
vm_addr_bound_ok(segs[seg].ds_addr, segs[seg].ds_len,
dmat->common.boundary))
segs[seg].ds_len += sgsize;
else {
if (++seg >= dmat->common.nsegments)

View File

@ -619,8 +619,8 @@ iommu_bus_dmamap_load_something1(struct bus_dma_tag_iommu *tag,
if (buflen1 > tag->common.maxsegsz)
buflen1 = tag->common.maxsegsz;
KASSERT(((entry->start + offset) & (tag->common.alignment - 1))
== 0,
KASSERT(vm_addr_align_ok(entry->start + offset,
tag->common.alignment),
("alignment failed: ctx %p start 0x%jx offset %x "
"align 0x%jx", ctx, (uintmax_t)entry->start, offset,
(uintmax_t)tag->common.alignment));
@ -631,7 +631,7 @@ iommu_bus_dmamap_load_something1(struct bus_dma_tag_iommu *tag,
(uintmax_t)entry->start, (uintmax_t)entry->end,
(uintmax_t)tag->common.lowaddr,
(uintmax_t)tag->common.highaddr));
KASSERT(iommu_test_boundary(entry->start + offset, buflen1,
KASSERT(vm_addr_bound_ok(entry->start + offset, buflen1,
tag->common.boundary),
("boundary failed: ctx %p start 0x%jx end 0x%jx "
"boundary 0x%jx", ctx, (uintmax_t)entry->start,

View File

@ -148,16 +148,6 @@ struct iommu_ctx {
#define IOMMU_DOMAIN_UNLOCK(dom) mtx_unlock(&(dom)->lock)
#define IOMMU_DOMAIN_ASSERT_LOCKED(dom) mtx_assert(&(dom)->lock, MA_OWNED)
/*
 * Return true if the range [start, start + size) does not cross a
 * boundary-aligned address, where boundary is a power of two.
 * A boundary of 0 means "no boundary constraint" and always passes.
 */
static inline bool
iommu_test_boundary(iommu_gaddr_t start, iommu_gaddr_t size,
iommu_gaddr_t boundary)
{
if (boundary == 0)
return (true);
/* The range must end at or before the next boundary multiple above start. */
return (start + size <= ((start + boundary) & ~(boundary - 1)));
}
void iommu_free_ctx(struct iommu_ctx *ctx);
void iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *ctx);
struct iommu_ctx *iommu_get_ctx(struct iommu_unit *, device_t dev,

View File

@ -314,7 +314,7 @@ iommu_gas_match_one(struct iommu_gas_match_args *a, iommu_gaddr_t beg,
return (false);
/* No boundary crossing. */
if (iommu_test_boundary(a->entry->start + a->offset, a->size,
if (vm_addr_bound_ok(a->entry->start + a->offset, a->size,
a->common->boundary))
return (true);

File diff suppressed because it is too large Load Diff

View File

@ -172,7 +172,7 @@ run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
paddr > dmat->lowaddr && paddr <= dmat->highaddr)
retval = 1;
if (dmat->filter == NULL &&
(paddr & (dmat->alignment - 1)) != 0)
vm_addr_align_ok(paddr, dmat->alignment))
retval = 1;
if (dmat->filter != NULL &&
(*dmat->filter)(dmat->filterarg, paddr) != 0)
@ -563,7 +563,7 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
__func__, dmat, dmat->flags, ENOMEM);
return (ENOMEM);
} else if (vtophys(*vaddr) & (dmat->alignment - 1)) {
} else if (!vm_addr_align_ok(vtophys(*vaddr), dmat->alignment)) {
printf("bus_dmamem_alloc failed to align memory properly.\n");
}
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
@ -688,18 +688,13 @@ static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
bus_addr_t baddr, bmask;
int seg;
/*
* Make sure we don't cross any boundaries.
*/
bmask = ~(dmat->boundary - 1);
if (dmat->boundary > 0) {
baddr = (curaddr + dmat->boundary) & bmask;
if (sgsize > (baddr - curaddr))
sgsize = (baddr - curaddr);
}
if (!vm_addr_bound_ok(curaddr, sgsize, dmat->boundary))
sgsize = roundup2(curaddr, dmat->boundary) - curaddr;
/*
* Insert chunk into a segment, coalescing with
@ -713,8 +708,8 @@ _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
} else {
if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
(segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
(dmat->boundary == 0 ||
(segs[seg].ds_addr & bmask) == (curaddr & bmask)))
vm_addr_bound_ok(segs[seg].ds_addr, segs[seg].ds_len,
dmat->boundary))
segs[seg].ds_len += sgsize;
else {
if (++seg >= dmat->nsegments)

View File

@ -504,7 +504,7 @@ bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
__func__, dmat, dmat->common.flags, ENOMEM);
free(*mapp, M_DEVBUF);
return (ENOMEM);
} else if (vtophys(*vaddr) & (dmat->common.alignment - 1)) {
} else if (!vm_addr_align_ok(vtophys(*vaddr), dmat->common.alignment)) {
printf("bus_dmamem_alloc failed to align memory properly.\n");
}
dmat->map_count++;
@ -636,18 +636,13 @@ static bus_size_t
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
bus_addr_t baddr, bmask;
int seg;
/*
* Make sure we don't cross any boundaries.
*/
bmask = ~(dmat->common.boundary - 1);
if (dmat->common.boundary > 0) {
baddr = (curaddr + dmat->common.boundary) & bmask;
if (sgsize > (baddr - curaddr))
sgsize = (baddr - curaddr);
}
if (!vm_addr_bound_ok(curaddr, sgsize, dmat->common.boundary))
sgsize = roundup2(curaddr, dmat->common.boundary) - curaddr;
/*
* Insert chunk into a segment, coalescing with
@ -661,8 +656,8 @@ _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
} else {
if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
(segs[seg].ds_len + sgsize) <= dmat->common.maxsegsz &&
(dmat->common.boundary == 0 ||
(segs[seg].ds_addr & bmask) == (curaddr & bmask)))
vm_addr_bound_ok(segs[seg].ds_addr, segs[seg].ds_len,
dmat->common.boundary))
segs[seg].ds_len += sgsize;
else {
if (++seg >= dmat->common.nsegments)

View File

@ -102,7 +102,7 @@ bus_dma_run_filter(struct bus_dma_tag_common *tc, bus_addr_t paddr)
retval = 0;
do {
if (((paddr > tc->lowaddr && paddr <= tc->highaddr) ||
((paddr & (tc->alignment - 1)) != 0)) &&
!vm_addr_align_ok(paddr, tc->alignment) &&
(tc->filter == NULL ||
(*tc->filter)(tc->filterarg, paddr) != 0))
retval = 1;

View File

@ -133,5 +133,32 @@ u_int vm_active_count(void);
u_int vm_inactive_count(void);
u_int vm_laundry_count(void);
u_int vm_wait_count(void);
/*
 * Check that pa is aligned to "alignment", which must be a power of two.
 */
static inline bool
vm_addr_align_ok(vm_paddr_t pa, u_long alignment)
{
	u_long mask;

	mask = alignment - 1;
	return ((pa & mask) == 0);
}
/*
 * Do the first and last addresses of a range match in all bits except the ones
 * in -boundary (a power-of-two)? For boundary == 0, all addresses match.
 */
static inline bool
vm_addr_bound_ok(vm_paddr_t pa, vm_paddr_t size, vm_paddr_t boundary)
{
/*
 * -boundary masks off the bits below the boundary bit; when boundary == 0
 * the mask is 0, so every range trivially passes.
 * NOTE(review): assumes size >= 1 so that pa + size - 1 is the last address.
 */
return (((pa ^ (pa + size - 1)) & -boundary) == 0);
}
/*
 * Does the range [pa, pa + size) satisfy both the alignment constraint
 * (a power-of-two) and the boundary constraint (a power-of-two, or 0 for
 * no constraint)?
 */
static inline bool
vm_addr_ok(vm_paddr_t pa, vm_paddr_t size, u_long alignment,
    vm_paddr_t boundary)
{
	if (!vm_addr_align_ok(pa, alignment))
		return (false);
	return (vm_addr_bound_ok(pa, size, boundary));
}
#endif /* _KERNEL */
#endif /* !_VM_EXTERN_H_ */

View File

@ -2032,10 +2032,8 @@ vm_map_alignspace(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
*/
if (alignment == 0)
pmap_align_superpage(object, offset, addr, length);
else if ((*addr & (alignment - 1)) != 0) {
*addr &= ~(alignment - 1);
*addr += alignment;
}
else
*addr = roundup2(*addr, alignment);
aligned_addr = *addr;
if (aligned_addr == free_addr) {
/*

View File

@ -2656,12 +2656,11 @@ vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end,
if (m + npages > m_end)
break;
pa = VM_PAGE_TO_PHYS(m);
if ((pa & (alignment - 1)) != 0) {
if (!vm_addr_align_ok(pa, alignment)) {
m_inc = atop(roundup2(pa, alignment) - pa);
continue;
}
if (rounddown2(pa ^ (pa + ptoa(npages) - 1),
boundary) != 0) {
if (!vm_addr_bound_ok(pa, ptoa(npages), boundary)) {
m_inc = atop(roundup2(pa, boundary) - pa);
continue;
}

View File

@ -179,6 +179,7 @@ static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
int order, int tail);
/*
* Red-black tree helpers for vm fictitious range management.
*/
@ -1465,8 +1466,7 @@ vm_phys_alloc_seg_contig(struct vm_phys_seg *seg, u_long npages,
pa = VM_PAGE_TO_PHYS(m_ret);
pa_end = pa + size;
if (pa >= low && pa_end <= high &&
(pa & (alignment - 1)) == 0 &&
rounddown2(pa ^ (pa_end - 1), boundary) == 0)
vm_addr_ok(pa, size, alignment, boundary))
goto done;
}
}

View File

@ -656,10 +656,8 @@ vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, int domain,
* possible size satisfy the alignment and boundary requirements?
*/
pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT;
if ((pa & (alignment - 1)) != 0)
return (NULL);
size = npages << PAGE_SHIFT;
if (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
if (!vm_addr_ok(pa, size, alignment, boundary))
return (NULL);
/*
@ -682,8 +680,7 @@ vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, int domain,
m = &rv->pages[index];
pa = VM_PAGE_TO_PHYS(m);
if (pa < low || pa + size > high ||
(pa & (alignment - 1)) != 0 ||
((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
!vm_addr_ok(pa, size, alignment, boundary))
goto out;
/* Handle vm_page_rename(m, new_object, ...). */
for (i = 0; i < npages; i++)
@ -1333,7 +1330,7 @@ vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low,
* doesn't include a boundary-multiple within it. Otherwise,
* no boundary-constrained allocation is possible.
*/
if (size > boundary && boundary > 0)
if (!vm_addr_bound_ok(0, size, boundary))
return (NULL);
marker = &vm_rvd[domain].marker;
queue = &vm_rvd[domain].partpop;
@ -1360,7 +1357,7 @@ vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low,
/* This entire reservation is too high; go to next. */
continue;
}
if ((pa & (alignment - 1)) != 0) {
if (!vm_addr_align_ok(pa, alignment)) {
/* This entire reservation is unaligned; go to next. */
continue;
}
@ -1397,12 +1394,10 @@ vm_reserv_reclaim_contig(int domain, u_long npages, vm_paddr_t low,
vm_reserv_unlock(rv);
m_ret = &rv->pages[posn];
pa = VM_PAGE_TO_PHYS(m_ret);
KASSERT((pa & (alignment - 1)) == 0,
("%s: adjusted address does not align to %lx",
__func__, alignment));
KASSERT(((pa ^ (pa + size - 1)) & -boundary) == 0,
("%s: adjusted address spans boundary to %jx",
__func__, (uintmax_t)boundary));
KASSERT(vm_addr_ok(pa, size, alignment, boundary),
("%s: adjusted address not aligned/bounded to "
"%lx/%jx",
__func__, alignment, (uintmax_t)boundary));
return (m_ret);
}
vm_reserv_domain_lock(domain);

View File

@ -501,7 +501,7 @@ bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
__func__, dmat, dmat->common.flags, ENOMEM);
return (ENOMEM);
} else if (vtophys(*vaddr) & (dmat->common.alignment - 1)) {
} else if (!vm_addr_align_ok(vtophys(*vaddr), dmat->common.alignment)) {
printf("bus_dmamem_alloc failed to align memory properly.\n");
}
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
@ -644,8 +644,9 @@ _bus_dmamap_count_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma,
sg_len = roundup2(sg_len,
dmat->common.alignment);
sg_len = MIN(sg_len, max_sgsize);
KASSERT((sg_len & (dmat->common.alignment - 1))
== 0, ("Segment size is not aligned"));
KASSERT(vm_addr_align_ok(sg_len,
dmat->common.alignment),
("Segment size is not aligned"));
map->pagesneeded++;
}
if (((ma_offs + sg_len) & ~PAGE_MASK) != 0)
@ -690,7 +691,6 @@ static bus_size_t
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t curaddr,
bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
bus_addr_t baddr, bmask;
int seg;
KASSERT(curaddr <= BUS_SPACE_MAXADDR,
@ -703,12 +703,8 @@ _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t curaddr,
/*
* Make sure we don't cross any boundaries.
*/
bmask = ~(dmat->common.boundary - 1);
if (dmat->common.boundary > 0) {
baddr = (curaddr + dmat->common.boundary) & bmask;
if (sgsize > (baddr - curaddr))
sgsize = (baddr - curaddr);
}
if (!vm_addr_bound_ok(curaddr, sgsize, dmat->common.boundary))
sgsize = roundup2(curaddr, dmat->common.boundary) - curaddr;
/*
* Insert chunk into a segment, coalescing with
@ -722,8 +718,8 @@ _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t curaddr,
} else {
if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
(segs[seg].ds_len + sgsize) <= dmat->common.maxsegsz &&
(dmat->common.boundary == 0 ||
(segs[seg].ds_addr & bmask) == (curaddr & bmask)))
vm_addr_bound_ok(segs[seg].ds_addr, segs[seg].ds_len,
dmat->common.boundary))
segs[seg].ds_len += sgsize;
else {
if (++seg >= dmat->common.nsegments)
@ -907,7 +903,8 @@ bounce_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
bus_dma_run_filter(&dmat->common, paddr)) {
sgsize = roundup2(sgsize, dmat->common.alignment);
sgsize = MIN(sgsize, max_sgsize);
KASSERT((sgsize & (dmat->common.alignment - 1)) == 0,
KASSERT(vm_addr_align_ok(sgsize,
dmat->common.alignment),
("Segment size is not aligned"));
/*
* Check if two pages of the user provided buffer

View File

@ -107,7 +107,7 @@ bus_dma_run_filter(struct bus_dma_tag_common *tc, vm_paddr_t paddr)
do {
if ((paddr >= BUS_SPACE_MAXADDR ||
(paddr > tc->lowaddr && paddr <= tc->highaddr) ||
(paddr & (tc->alignment - 1)) != 0) &&
!vm_addr_align_ok(paddr, tc->alignment) &&
(tc->filter == NULL ||
(*tc->filter)(tc->filterarg, paddr) != 0))
retval = 1;