Remove {max/min}_offset() macros, use vm_map_{max/min}() inlines.

Exposing max_offset and min_offset defines in public headers is
causing clashes with variable names, for example when building QEMU.

Based on the submission by:	royger
Reviewed by:	alc, markj (previous version)
Sponsored by:	The FreeBSD Foundation (kib)
MFC after:	1 week
Approved by:	re (marius)
Differential revision:	https://reviews.freebsd.org/D16881
This commit is contained in:
Konstantin Belousov 2018-08-29 12:24:19 +00:00
parent 76f6651cf0
commit f0165b1ca6
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=338370
11 changed files with 68 additions and 66 deletions

View File

@ -3094,8 +3094,8 @@ pmap_growkernel(vm_offset_t addr)
return;
addr = roundup2(addr, NBPDR);
-if (addr - 1 >= kernel_map->max_offset)
-addr = kernel_map->max_offset;
+if (addr - 1 >= vm_map_max(kernel_map))
+addr = vm_map_max(kernel_map);
while (kernel_vm_end < addr) {
pdpe = pmap_pdpe(kernel_pmap, kernel_vm_end);
if ((*pdpe & X86_PG_V) == 0) {
@ -3115,8 +3115,8 @@ pmap_growkernel(vm_offset_t addr)
pde = pmap_pdpe_to_pde(pdpe, kernel_vm_end);
if ((*pde & X86_PG_V) != 0) {
kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
-if (kernel_vm_end - 1 >= kernel_map->max_offset) {
-kernel_vm_end = kernel_map->max_offset;
+if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
+kernel_vm_end = vm_map_max(kernel_map);
break;
}
continue;
@ -3134,8 +3134,8 @@ pmap_growkernel(vm_offset_t addr)
pde_store(pde, newpdir);
kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
-if (kernel_vm_end - 1 >= kernel_map->max_offset) {
-kernel_vm_end = kernel_map->max_offset;
+if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
+kernel_vm_end = vm_map_max(kernel_map);
break;
}
}

View File

@ -2043,21 +2043,21 @@ pmap_growkernel(vm_offset_t addr)
* not called, it could be first unused KVA (which is not
* rounded up to PTE1_SIZE),
*
-* (2) when all KVA space is mapped and kernel_map->max_offset
+* (2) when all KVA space is mapped and vm_map_max(kernel_map)
* address is not rounded up to PTE1_SIZE. (For example,
* it could be 0xFFFFFFFF.)
*/
kernel_vm_end = pte1_roundup(kernel_vm_end);
mtx_assert(&kernel_map->system_mtx, MA_OWNED);
addr = roundup2(addr, PTE1_SIZE);
-if (addr - 1 >= kernel_map->max_offset)
-addr = kernel_map->max_offset;
+if (addr - 1 >= vm_map_max(kernel_map))
+addr = vm_map_max(kernel_map);
while (kernel_vm_end < addr) {
pte1 = pte1_load(kern_pte1(kernel_vm_end));
if (pte1_is_valid(pte1)) {
kernel_vm_end += PTE1_SIZE;
-if (kernel_vm_end - 1 >= kernel_map->max_offset) {
-kernel_vm_end = kernel_map->max_offset;
+if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
+kernel_vm_end = vm_map_max(kernel_map);
break;
}
continue;
@ -2099,8 +2099,8 @@ pmap_growkernel(vm_offset_t addr)
pmap_kenter_pte1(kernel_vm_end, PTE1_LINK(pt2_pa));
kernel_vm_end = kernel_vm_end_new;
-if (kernel_vm_end - 1 >= kernel_map->max_offset) {
-kernel_vm_end = kernel_map->max_offset;
+if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
+kernel_vm_end = vm_map_max(kernel_map);
break;
}
}

View File

@ -1744,8 +1744,8 @@ pmap_growkernel(vm_offset_t addr)
mtx_assert(&kernel_map->system_mtx, MA_OWNED);
addr = roundup2(addr, L2_SIZE);
-if (addr - 1 >= kernel_map->max_offset)
-addr = kernel_map->max_offset;
+if (addr - 1 >= vm_map_max(kernel_map))
+addr = vm_map_max(kernel_map);
while (kernel_vm_end < addr) {
l0 = pmap_l0(kernel_pmap, kernel_vm_end);
KASSERT(pmap_load(l0) != 0,
@ -1768,8 +1768,8 @@ pmap_growkernel(vm_offset_t addr)
l2 = pmap_l1_to_l2(l1, kernel_vm_end);
if ((pmap_load(l2) & ATTR_AF) != 0) {
kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
-if (kernel_vm_end - 1 >= kernel_map->max_offset) {
-kernel_vm_end = kernel_map->max_offset;
+if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
+kernel_vm_end = vm_map_max(kernel_map);
break;
}
continue;
@ -1787,8 +1787,8 @@ pmap_growkernel(vm_offset_t addr)
pmap_invalidate_page(kernel_pmap, kernel_vm_end);
kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
-if (kernel_vm_end - 1 >= kernel_map->max_offset) {
-kernel_vm_end = kernel_map->max_offset;
+if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
+kernel_vm_end = vm_map_max(kernel_map);
break;
}
}

View File

@ -104,13 +104,6 @@ extern "C" {
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
-/* There is clash. vm_map.h defines the two below and vdev_cache.c use them. */
-#ifdef min_offset
-#undef min_offset
-#endif
-#ifdef max_offset
-#undef max_offset
-#endif
#include <vm/vm_extern.h>
#include <vm/vnode_pager.h>

View File

@ -2229,13 +2229,13 @@ pmap_growkernel(vm_offset_t addr)
mtx_assert(&kernel_map->system_mtx, MA_OWNED);
addr = roundup2(addr, NBPDR);
-if (addr - 1 >= kernel_map->max_offset)
-addr = kernel_map->max_offset;
+if (addr - 1 >= vm_map_max(kernel_map))
+addr = vm_map_max(kernel_map);
while (kernel_vm_end < addr) {
if (pdir_pde(PTD, kernel_vm_end)) {
kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
-if (kernel_vm_end - 1 >= kernel_map->max_offset) {
-kernel_vm_end = kernel_map->max_offset;
+if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
+kernel_vm_end = vm_map_max(kernel_map);
break;
}
continue;
@ -2257,8 +2257,8 @@ pmap_growkernel(vm_offset_t addr)
pmap_kenter_pde(kernel_vm_end, newpdir);
kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
-if (kernel_vm_end - 1 >= kernel_map->max_offset) {
-kernel_vm_end = kernel_map->max_offset;
+if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
+kernel_vm_end = vm_map_max(kernel_map);
break;
}
}

View File

@ -1255,8 +1255,8 @@ pmap_growkernel(vm_offset_t addr)
mtx_assert(&kernel_map->system_mtx, MA_OWNED);
req_class = VM_ALLOC_INTERRUPT;
addr = roundup2(addr, NBSEG);
-if (addr - 1 >= kernel_map->max_offset)
-addr = kernel_map->max_offset;
+if (addr - 1 >= vm_map_max(kernel_map))
+addr = vm_map_max(kernel_map);
while (kernel_vm_end < addr) {
pdpe = pmap_segmap(kernel_pmap, kernel_vm_end);
#ifdef __mips_n64
@ -1272,8 +1272,8 @@ pmap_growkernel(vm_offset_t addr)
pde = pmap_pdpe_to_pde(pdpe, kernel_vm_end);
if (*pde != 0) {
kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
-if (kernel_vm_end - 1 >= kernel_map->max_offset) {
-kernel_vm_end = kernel_map->max_offset;
+if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
+kernel_vm_end = vm_map_max(kernel_map);
break;
}
continue;
@ -1305,8 +1305,8 @@ pmap_growkernel(vm_offset_t addr)
pte[i] = PTE_G;
kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
-if (kernel_vm_end - 1 >= kernel_map->max_offset) {
-kernel_vm_end = kernel_map->max_offset;
+if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
+kernel_vm_end = vm_map_max(kernel_map);
break;
}
}

View File

@ -1424,8 +1424,8 @@ pmap_growkernel(vm_offset_t addr)
mtx_assert(&kernel_map->system_mtx, MA_OWNED);
addr = roundup2(addr, L2_SIZE);
-if (addr - 1 >= kernel_map->max_offset)
-addr = kernel_map->max_offset;
+if (addr - 1 >= vm_map_max(kernel_map))
+addr = vm_map_max(kernel_map);
while (kernel_vm_end < addr) {
l1 = pmap_l1(kernel_pmap, kernel_vm_end);
if (pmap_load(l1) == 0) {
@ -1452,8 +1452,8 @@ pmap_growkernel(vm_offset_t addr)
l2 = pmap_l1_to_l2(l1, kernel_vm_end);
if ((pmap_load(l2) & PTE_A) != 0) {
kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
-if (kernel_vm_end - 1 >= kernel_map->max_offset) {
-kernel_vm_end = kernel_map->max_offset;
+if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
+kernel_vm_end = vm_map_max(kernel_map);
break;
}
continue;
@ -1478,8 +1478,8 @@ pmap_growkernel(vm_offset_t addr)
pmap_invalidate_page(kernel_pmap, kernel_vm_end);
kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
-if (kernel_vm_end - 1 >= kernel_map->max_offset) {
-kernel_vm_end = kernel_map->max_offset;
+if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
+kernel_vm_end = vm_map_max(kernel_map);
break;
}
}

View File

@ -122,7 +122,7 @@ kernacc(void *addr, int len, int rw)
KASSERT((rw & ~VM_PROT_ALL) == 0,
("illegal ``rw'' argument to kernacc (%x)\n", rw));
-if ((vm_offset_t)addr + len > kernel_map->max_offset ||
+if ((vm_offset_t)addr + len > vm_map_max(kernel_map) ||
(vm_offset_t)addr + len < (vm_offset_t)addr)
return (FALSE);

View File

@ -259,8 +259,8 @@ vm_ksubmap_init(struct kva_md_info *kmi)
* Discount the physical memory larger than the size of kernel_map
* to avoid eating up all of KVA space.
*/
-physmem_est = lmin(physmem, btoc(kernel_map->max_offset -
-kernel_map->min_offset));
+physmem_est = lmin(physmem, btoc(vm_map_max(kernel_map) -
+vm_map_min(kernel_map)));
v = kern_vfs_bio_buffer_alloc(v, physmem_est);

View File

@ -339,8 +339,8 @@ vmspace_dofree(struct vmspace *vm)
* Delete all of the mappings and pages they hold, then call
* the pmap module to reclaim anything left.
*/
-(void)vm_map_remove(&vm->vm_map, vm->vm_map.min_offset,
-vm->vm_map.max_offset);
+(void)vm_map_remove(&vm->vm_map, vm_map_min(&vm->vm_map),
+vm_map_max(&vm->vm_map));
pmap_release(vmspace_pmap(vm));
vm->vm_map.pmap = NULL;
@ -799,8 +799,8 @@ _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
map->needs_wakeup = FALSE;
map->system_map = 0;
map->pmap = pmap;
-map->min_offset = min;
-map->max_offset = max;
+map->header.end = min;
+map->header.start = max;
map->flags = 0;
map->root = NULL;
map->timestamp = 0;
@ -1198,7 +1198,8 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
/*
* Check that the start and end points are not bogus.
*/
-if (start < map->min_offset || end > map->max_offset || start >= end)
+if (start < vm_map_min(map) || end > vm_map_max(map) ||
+start >= end)
return (KERN_INVALID_ADDRESS);
/*
@ -1401,9 +1402,8 @@ vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
* Request must fit within min/max VM address and must avoid
* address wrap.
*/
-if (start < map->min_offset)
-start = map->min_offset;
-if (start + length > map->max_offset || start + length < start)
+start = MAX(start, vm_map_min(map));
+if (start + length > vm_map_max(map) || start + length < start)
return (1);
/* Empty tree means wide open address space. */
@ -3429,7 +3429,7 @@ vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
old_map = &vm1->vm_map;
/* Copy immutable fields of vm1 to vm2. */
-vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset, NULL);
+vm2 = vmspace_alloc(vm_map_min(old_map), vm_map_max(old_map), NULL);
if (vm2 == NULL)
return (NULL);
vm2->vm_taddr = vm1->vm_taddr;
@ -4329,14 +4329,14 @@ vm_offset_t
vm_map_max_KBI(const struct vm_map *map)
{
-return (map->max_offset);
+return (vm_map_max(map));
}
vm_offset_t
vm_map_min_KBI(const struct vm_map *map)
{
-return (map->min_offset);
+return (vm_map_min(map));
}
pmap_t

View File

@ -173,19 +173,26 @@ vm_map_entry_system_wired_count(vm_map_entry_t entry)
* A map is a set of map entries. These map entries are
* organized both as a binary search tree and as a doubly-linked
* list. Both structures are ordered based upon the start and
-* end addresses contained within each map entry. The list
-* header has max start value and min end value to act as
-* sentinels for sequential search of the doubly-linked list.
+* end addresses contained within each map entry.
+*
+* Counterintuitively, the map's min offset value is stored in
+* map->header.end, and its max offset value is stored in
+* map->header.start.
+*
+* The list header has max start value and min end value to act
+* as sentinels for sequential search of the doubly-linked list.
* Sleator and Tarjan's top-down splay algorithm is employed to
* control height imbalance in the binary search tree.
*
* List of locks
* List of locks
* (c) const until freed
*/
struct vm_map {
struct vm_map_entry header; /* List of entries */
-#define min_offset header.end /* (c) */
-#define max_offset header.start /* (c) */
+/*
+map min_offset header.end (c)
+map max_offset header.start (c)
+*/
struct sx lock; /* Lock for map data */
struct mtx system_mtx;
int nentries; /* Number of entries */
@ -214,13 +221,15 @@ struct vm_map {
static __inline vm_offset_t
vm_map_max(const struct vm_map *map)
{
-return (map->max_offset);
+return (map->header.start);
}
static __inline vm_offset_t
vm_map_min(const struct vm_map *map)
{
-return (map->min_offset);
+return (map->header.end);
}
static __inline pmap_t