Add a helper function for validating VA ranges.

Functions that take untrusted user ranges must validate them against the
bounds of the map and also check for wraparound.  Instead of duplicating
the same logic in a number of places, add a helper, vm_map_range_valid(),
to perform the check.

Reviewed by:	dougm, kib
Sponsored by:	The FreeBSD Foundation
MFC after:	1 week
Differential Revision:	https://reviews.freebsd.org/D25328
Mark Johnston	2020-06-19 03:32:04 +00:00
commit 0f1e6ec591
parent 61b006887e

Notes (svn2git, 2020-12-20 02:59:44 +00:00):
	svn path=/head/; revision=362361

5 changed files with 25 additions and 30 deletions
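
For illustration only (not part of the change): on LP64, vm_offset_t arithmetic is unsigned, so a huge user-supplied length can wrap the computed end address back below the start and slip past a bounds-only check.  A hypothetical sketch, assuming a user map pointer map is in scope and the usual vm headers are included:

	/*
	 * Hypothetical values, for illustration only: a valid start address
	 * combined with an enormous length wraps the unsigned end calculation.
	 */
	vm_offset_t addr = PAGE_SIZE;
	vm_size_t len = (vm_size_t)-PAGE_SIZE;	/* close to 2^64 bytes on LP64 */
	vm_offset_t end = addr + len;		/* wraps around to 0 */

	/*
	 * The wrapped end (0) is not greater than vm_map_max(map), so a
	 * bounds-only test could pass; vm_map_range_valid() also rejects
	 * the range because end < start.
	 */
	if (!vm_map_range_valid(map, addr, end))
		return (EINVAL);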

@@ -222,7 +222,7 @@ __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	va = start;
 	map = &curthread->td_proc->p_vmspace->vm_map;
 	end = start + (((size_t)nr_pages) << PAGE_SHIFT);
-	if (start < vm_map_min(map) || end > vm_map_max(map))
+	if (!vm_map_range_valid(map, start, end))
 		return (-EINVAL);
 	prot = write ? (VM_PROT_READ | VM_PROT_WRITE) : VM_PROT_READ;
 	for (count = 0, mp = pages, va = start; va < end;

@@ -1713,10 +1713,7 @@ vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
 	end = round_page(addr + len);
 	addr = trunc_page(addr);
 
-	/*
-	 * Check for illegal addresses.
-	 */
-	if (addr < vm_map_min(map) || addr > end || end > vm_map_max(map))
+	if (!vm_map_range_valid(map, addr, end))
 		return (-1);
 
 	if (atop(end - addr) > max_count)

@@ -1616,8 +1616,7 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
 	/*
 	 * Check that the start and end points are not bogus.
 	 */
-	if (start < vm_map_min(map) || end > vm_map_max(map) ||
-	    start >= end)
+	if (!vm_map_range_valid(map, start, end))
 		return (KERN_INVALID_ADDRESS);
 
 	/*
@@ -2161,9 +2160,7 @@ vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
 			goto done;
 		}
 	} else if ((cow & MAP_REMAP) != 0) {
-		if (*addr < vm_map_min(map) ||
-		    *addr + length > vm_map_max(map) ||
-		    *addr + length <= length) {
+		if (!vm_map_range_valid(map, *addr, *addr + length)) {
 			rv = KERN_INVALID_ADDRESS;
 			goto done;
 		}
@@ -4324,9 +4321,8 @@ vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
 	KASSERT(orient != (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP),
 	    ("bi-dir stack"));
 
-	if (addrbos < vm_map_min(map) ||
-	    addrbos + max_ssize > vm_map_max(map) ||
-	    addrbos + max_ssize <= addrbos)
+	if (max_ssize == 0 ||
+	    !vm_map_range_valid(map, addrbos, addrbos + max_ssize))
 		return (KERN_INVALID_ADDRESS);
 	sgp = ((curproc->p_flag2 & P2_STKGAP_DISABLE) != 0 ||
 	    (curproc->p_fctl0 & NT_FREEBSD_FCTL_STKGAP_DISABLE) != 0) ? 0 :

@@ -255,6 +255,17 @@ vm_map_modflags(vm_map_t map, vm_flags_t set, vm_flags_t clear)
 {
 	map->flags = (map->flags | set) & ~clear;
 }
+
+static inline bool
+vm_map_range_valid(vm_map_t map, vm_offset_t start, vm_offset_t end)
+{
+	if (end < start)
+		return (false);
+	if (start < vm_map_min(map) || end > vm_map_max(map))
+		return (false);
+	return (true);
+}
+
 #endif	/* KLD_MODULE */
 #endif	/* _KERNEL */
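
The helper is available to new callers as well.  A minimal, hypothetical sketch of validating a page-aligned user range with a single call (the function name and EINVAL handling are illustrative, not part of this commit, and the usual vm headers are assumed):

	static int
	example_validate_range(vm_map_t map, vm_offset_t addr, vm_size_t len)
	{
		vm_offset_t start, end;

		/* Round to page boundaries, as the existing callers do. */
		start = trunc_page(addr);
		end = round_page(addr + len);

		/* One call covers both the map bounds and the wraparound check. */
		if (!vm_map_range_valid(map, start, end))
			return (EINVAL);
		return (0);
	}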

@@ -342,10 +342,7 @@ kern_mmap_req(struct thread *td, const struct mmap_req *mrp)
 			return (EINVAL);
 
 		/* Address range must be all in user VM space. */
-		if (addr < vm_map_min(&vms->vm_map) ||
-		    addr + size > vm_map_max(&vms->vm_map))
-			return (EINVAL);
-		if (addr + size < addr)
+		if (!vm_map_range_valid(&vms->vm_map, addr, addr + size))
 			return (EINVAL);
 #ifdef MAP_32BIT
 		if (flags & MAP_32BIT && addr + size > MAP_32BIT_MAX_ADDR)
@@ -577,7 +574,7 @@ kern_munmap(struct thread *td, uintptr_t addr0, size_t size)
 	vm_map_entry_t entry;
 	bool pmc_handled;
 #endif
-	vm_offset_t addr;
+	vm_offset_t addr, end;
 	vm_size_t pageoff;
 	vm_map_t map;
@@ -589,15 +586,11 @@ kern_munmap(struct thread *td, uintptr_t addr0, size_t size)
 	addr -= pageoff;
 	size += pageoff;
 	size = (vm_size_t) round_page(size);
-	if (addr + size < addr)
+	end = addr + size;
+	map = &td->td_proc->p_vmspace->vm_map;
+	if (!vm_map_range_valid(map, addr, end))
 		return (EINVAL);
 
-	/*
-	 * Check for illegal addresses.  Watch out for address wrap...
-	 */
-	map = &td->td_proc->p_vmspace->vm_map;
-	if (addr < vm_map_min(map) || addr + size > vm_map_max(map))
-		return (EINVAL);
 	vm_map_lock(map);
 #ifdef HWPMC_HOOKS
 	pmc_handled = false;
@@ -609,7 +602,7 @@ kern_munmap(struct thread *td, uintptr_t addr0, size_t size)
 		 */
 		pkm.pm_address = (uintptr_t) NULL;
 		if (vm_map_lookup_entry(map, addr, &entry)) {
-			for (; entry->start < addr + size;
+			for (; entry->start < end;
 			    entry = vm_map_entry_succ(entry)) {
 				if (vm_map_check_protection(map, entry->start,
 				    entry->end, VM_PROT_EXECUTE) == TRUE) {
@@ -621,7 +614,7 @@ kern_munmap(struct thread *td, uintptr_t addr0, size_t size)
 		}
 	}
 #endif
-	vm_map_delete(map, addr, addr + size);
+	vm_map_delete(map, addr, end);
 
 #ifdef HWPMC_HOOKS
 	if (__predict_false(pmc_handled)) {
@@ -772,9 +765,7 @@ kern_madvise(struct thread *td, uintptr_t addr0, size_t len, int behav)
 	 */
 	map = &td->td_proc->p_vmspace->vm_map;
 	addr = addr0;
-	if (addr < vm_map_min(map) || addr + len > vm_map_max(map))
-		return (EINVAL);
-	if ((addr + len) < addr)
+	if (!vm_map_range_valid(map, addr, addr + len))
 		return (EINVAL);
 
 	/*