/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2014 Ian Lepore <ian@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_acpi.h"
#include "opt_ddb.h"

/*
 * Routines for describing and initializing anything related to physical
 * memory.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/physmem.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_dumpset.h>

#include <machine/md_var.h>

/*
 * These structures are used internally to keep track of regions of physical
 * ram, and regions within the physical ram that need to be excluded.  An
 * exclusion region can be excluded from crash dumps, from the vm pool of pages
 * that can be allocated, or both, depending on the exclusion flags associated
 * with the region.
 */
#ifdef DEV_ACPI
#define	MAX_HWCNT	32	/* ACPI needs more regions */
#define	MAX_EXCNT	32
#else
#define	MAX_HWCNT	16
#define	MAX_EXCNT	16
#endif

#if defined(__arm__)
#define	MAX_PHYS_ADDR	0xFFFFFFFFull
#elif defined(__aarch64__) || defined(__riscv)
#define	MAX_PHYS_ADDR	0xFFFFFFFFFFFFFFFFull
#endif

struct region {
	vm_paddr_t	addr;
	vm_size_t	size;
	uint32_t	flags;
};

static struct region hwregions[MAX_HWCNT];
static struct region exregions[MAX_EXCNT];

static size_t hwcnt;
static size_t excnt;

/*
 * realmem is the total number of hardware pages, excluded or not.
 * Maxmem is one greater than the last physical page number.
 */
long realmem;
long Maxmem;

/*
 * Print the contents of the physical and excluded region tables using the
 * provided printf-like output function (which will be either printf or
 * db_printf).
 */
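/*
 * Sample output (addresses and sizes are illustrative only):
 *
 *	Physical memory chunk(s):
 *	  0x00001000 - 0x3fffffff,  1023 MB ( 262143 pages)
 *	Excluded memory regions:
 *	  0x08000000 - 0x080fffff,     1 MB (    256 pages) NoAlloc
 */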
static void
physmem_dump_tables(int (*prfunc)(const char *, ...))
{
	int flags, i;
	uintmax_t addr, size;
	const unsigned int mbyte = 1024 * 1024;

	prfunc("Physical memory chunk(s):\n");
	for (i = 0; i < hwcnt; ++i) {
		addr = hwregions[i].addr;
		size = hwregions[i].size;
		prfunc("  0x%08jx - 0x%08jx, %5ju MB (%7ju pages)\n", addr,
		    addr + size - 1, size / mbyte, size / PAGE_SIZE);
	}

	prfunc("Excluded memory regions:\n");
	for (i = 0; i < excnt; ++i) {
		addr  = exregions[i].addr;
		size  = exregions[i].size;
		flags = exregions[i].flags;
		prfunc("  0x%08jx - 0x%08jx, %5ju MB (%7ju pages) %s %s\n",
		    addr, addr + size - 1, size / mbyte, size / PAGE_SIZE,
		    (flags & EXFLAG_NOALLOC) ? "NoAlloc" : "",
		    (flags & EXFLAG_NODUMP) ? "NoDump" : "");
	}

#ifdef DEBUG
	prfunc("Avail lists:\n");
	for (i = 0; phys_avail[i] != 0; ++i) {
		prfunc("  phys_avail[%d] 0x%08x\n", i, phys_avail[i]);
	}
	for (i = 0; dump_avail[i] != 0; ++i) {
		prfunc("  dump_avail[%d] 0x%08x\n", i, dump_avail[i]);
	}
#endif
}

/*
 * Print the contents of the static mapping table.  Used for bootverbose.
 */
void
physmem_print_tables(void)
{

	physmem_dump_tables(printf);
}

/*
 * Walk the list of hardware regions, processing it against the list of
 * exclusions that contain the given exflags, and generating an "avail list".
 *
 * If maxphyssz is not zero it sets an upper limit, in bytes, on the total
 * "avail list" size.  The walk stops once the limit is reached, and the last
 * region is cut short if necessary.
 *
 * Updates the value at *pavail with the number of pages of non-excluded
 * memory added to the avail list, and the value at *prealmem with the sum of
 * all pages in all hw regions, excluded or not.
 *
 * Returns the number of entries in the resulting avail list.
 */
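/*
 * The avail list is stored as pairs of boundary addresses: avail[2n] is the
 * start of range n and avail[2n + 1] is its (exclusive) end.  For example, a
 * single hw region with one exclusion punched out of its middle might yield
 * (illustrative addresses only):
 *
 *	avail[] = { 0x00100000, 0x08000000, 0x08100000, 0x40000000 }
 */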
static size_t
regions_to_avail(vm_paddr_t *avail, uint32_t exflags, size_t maxavail,
    uint64_t maxphyssz, long *pavail, long *prealmem)
{
	size_t acnt, exi, hwi;
	uint64_t end, start, xend, xstart;
	long availmem, totalmem;
	const struct region *exp, *hwp;
	uint64_t availsz;

	totalmem = 0;
	availmem = 0;
	availsz = 0;
	acnt = 0;
	for (hwi = 0, hwp = hwregions; hwi < hwcnt; ++hwi, ++hwp) {
		start = hwp->addr;
		end = hwp->size + start;
		totalmem += atop((vm_offset_t)(end - start));
		for (exi = 0, exp = exregions; exi < excnt; ++exi, ++exp) {
			/*
			 * If the excluded region does not match given flags,
			 * continue checking with the next excluded region.
			 */
			if ((exp->flags & exflags) == 0)
				continue;
			xstart = exp->addr;
			xend = exp->size + xstart;
			/*
			 * If the excluded region ends before this hw region,
			 * continue checking with the next excluded region.
			 */
			if (xend <= start)
				continue;
			/*
			 * If the excluded region begins after this hw region
			 * we're done because both lists are sorted.
			 */
			if (xstart >= end)
				break;
			/*
			 * If the excluded region completely covers this hw
			 * region, shrink this hw region to zero size.
			 */
			if ((start >= xstart) && (end <= xend)) {
				start = xend;
				end = xend;
				break;
			}
			/*
			 * If the excluded region falls wholly within this hw
			 * region without abutting or overlapping the beginning
			 * or end, create an available entry from the leading
			 * fragment, then adjust the start of this hw region to
			 * the end of the excluded region, and continue checking
			 * the next excluded region because another exclusion
			 * could affect the remainder of this hw region.
			 */
			if ((xstart > start) && (xend < end)) {

				if ((maxphyssz != 0) &&
				    (availsz + xstart - start > maxphyssz)) {
					xstart = maxphyssz + start - availsz;
				}
				if (xstart <= start)
					continue;
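				/*
				 * Extend the previous avail entry when this
				 * fragment begins exactly where that entry
				 * ended, instead of appending a new pair.
				 */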
				if (acnt > 0 &&
				    avail[acnt - 1] == (vm_paddr_t)start) {
					avail[acnt - 1] = (vm_paddr_t)xstart;
				} else {
					avail[acnt++] = (vm_paddr_t)start;
					avail[acnt++] = (vm_paddr_t)xstart;
				}
				availsz += (xstart - start);
				availmem += atop((vm_offset_t)(xstart - start));
				start = xend;
				continue;
			}
			/*
			 * We know the excluded region overlaps either the start
			 * or end of this hardware region (but not both), trim
			 * the excluded portion off the appropriate end.
			 */
			if (xstart <= start)
				start = xend;
			else
				end = xstart;
		}
		/*
		 * If the trimming actions above left a non-zero size, create an
		 * available entry for it.
		 */
		if (end > start) {
			if ((maxphyssz != 0) &&
			    (availsz + end - start > maxphyssz)) {
				end = maxphyssz + start - availsz;
			}
			if (end <= start)
				break;
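
			/* As above, extend the previous entry if contiguous. */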
			if (acnt > 0 && avail[acnt - 1] == (vm_paddr_t)start) {
				avail[acnt - 1] = (vm_paddr_t)end;
			} else {
				avail[acnt++] = (vm_paddr_t)start;
				avail[acnt++] = (vm_paddr_t)end;
			}
			availsz += end - start;
			availmem += atop((vm_offset_t)(end - start));
		}
		if (acnt >= maxavail)
			panic("Not enough space in the dump/phys_avail arrays");
	}

	if (pavail != NULL)
		*pavail = availmem;
	if (prealmem != NULL)
		*prealmem = totalmem;
	return (acnt);
}

/*
 * Merge the region at idx with any higher (sorted-above) regions that it now
 * overlaps or abuts, and return the new region count.
 */
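/*
 * For example, if the entry at idx has grown to cover [0x1000, 0x5000) and the
 * next entry is [0x3000, 0x6000) with the same flags, the two collapse into a
 * single [0x1000, 0x6000) entry and the tail of the array shifts down by one.
 * (Addresses are illustrative only.)
 */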
static size_t
merge_upper_regions(struct region *regions, size_t rcnt, size_t idx)
{
	struct region *lower, *upper;
	vm_paddr_t lend, uend;
	size_t i, mergecnt, movecnt;

	lower = &regions[idx];
	lend = lower->addr + lower->size;

	/*
	 * Continue merging in upper entries as long as we have entries to
	 * merge; the new block could have spanned more than one, although one
	 * is likely the common case.
	 */
	for (i = idx + 1; i < rcnt; i++) {
		upper = &regions[i];
		if (lend < upper->addr || lower->flags != upper->flags)
			break;

		uend = upper->addr + upper->size;
		if (uend > lend) {
			lower->size += uend - lend;
			lend = lower->addr + lower->size;
		}

		if (uend >= lend) {
			/*
			 * If we didn't move past the end of the upper region,
			 * then we don't need to bother checking for another
			 * merge because it would have been done already.  Just
			 * increment i once more to maintain the invariant that
			 * i is one past the last entry merged.
			 */
			i++;
			break;
		}
	}

	/*
	 * We merged in the entries from [idx + 1, i); physically move the tail
	 * end at [i, rcnt) if we need to.
	 */
	mergecnt = i - (idx + 1);
	if (mergecnt > 0) {
		movecnt = rcnt - i;
		if (movecnt == 0) {
			/* Merged all the way to the end, just decrease rcnt. */
			rcnt = idx + 1;
		} else {
			memmove(&regions[idx + 1], &regions[idx + mergecnt + 1],
			    movecnt * sizeof(*regions));
			rcnt -= mergecnt;
		}
	}
	return (rcnt);
}

/*
 * Insertion-sort a new entry into a regions list; sorted by start address.
 */
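/*
 * A new entry that overlaps or abuts an existing entry with identical flags is
 * merged into it rather than inserted; e.g., inserting [0x2000, 0x3000) into a
 * list already holding [0x1000, 0x2000) simply grows that entry to
 * [0x1000, 0x3000).  (Addresses are illustrative only.)
 */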
static size_t
insert_region(struct region *regions, size_t rcnt, vm_paddr_t addr,
    vm_size_t size, uint32_t flags)
{
	size_t i;
	vm_paddr_t nend, rend;
	struct region *ep, *rp;

	nend = addr + size;
	ep = regions + rcnt;
	for (i = 0, rp = regions; i < rcnt; ++i, ++rp) {
		if (flags == rp->flags) {
			rend = rp->addr + rp->size;
			if (addr <= rp->addr && nend >= rp->addr) {
				/*
				 * New mapping overlaps at the beginning; grow
				 * the region backward to the new start, then
				 * grow it forward if the new mapping extends
				 * past the current end.
				 */
				rp->size += rp->addr - addr;
				rp->addr = addr;
				if (nend > rend) {
					rp->size += nend - rend;
					rcnt = merge_upper_regions(regions,
					    rcnt, i);
				}
				return (rcnt);
			} else if (addr <= rend && nend > rp->addr) {
				/*
				 * New mapping is either entirely contained
				 * within or it's overlapping at the end.
				 */
				if (nend > rend) {
					rp->size += nend - rend;
					rcnt = merge_upper_regions(regions,
					    rcnt, i);
				}
				return (rcnt);
			}
		}
		if (addr < rp->addr) {
			bcopy(rp, rp + 1, (ep - rp) * sizeof(*rp));
			break;
		}
	}
	rp->addr = addr;
	rp->size = size;
	rp->flags = flags;
	rcnt++;

	return (rcnt);
}

/*
 * Add a hardware memory region.
 */
void
physmem_hardware_region(uint64_t pa, uint64_t sz)
{
	vm_offset_t adj;

	/*
	 * Filter out the page at PA 0x00000000.  The VM can't handle it, as
	 * pmap_extract() == 0 means failure.
	 */
	if (pa == 0) {
		if (sz <= PAGE_SIZE)
			return;
		pa = PAGE_SIZE;
		sz -= PAGE_SIZE;
	} else if (pa > MAX_PHYS_ADDR) {
		/* This range is past usable memory, ignore it */
		return;
	}

	/*
	 * Also filter out the page at the end of the physical address space --
	 * if addr is non-zero and addr+size is zero we wrapped to the next byte
	 * beyond what vm_paddr_t can express.  That leads to a NULL pointer
	 * deref early in startup; work around it by leaving the last page out.
	 *
	 * XXX This just in: subtract out a whole megabyte, not just 1 page.
	 * Reducing the size by anything less than 1MB results in the NULL
	 * pointer deref in _vm_map_lock_read().  Better to give up a megabyte
	 * than leave some folks with an unusable system while we investigate.
	 */
	if ((pa + sz) > (MAX_PHYS_ADDR - 1024 * 1024)) {
		sz = MAX_PHYS_ADDR - pa + 1;
		if (sz <= 1024 * 1024)
			return;
		sz -= 1024 * 1024;
	}

	/*
	 * Round the starting address up to a page boundary, and truncate the
	 * ending page down to a page boundary.
	 */
	adj = round_page(pa) - pa;
	pa = round_page(pa);
	sz = trunc_page(sz - adj);
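
	/*
	 * E.g., with 4 KiB pages, pa = 0x10000200 / sz = 0x00100000 becomes
	 * pa = 0x10001000 / sz = 0x000ff000: the partial pages at either end
	 * are dropped.  (Values are illustrative only.)
	 */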

	if (sz > 0 && hwcnt < nitems(hwregions))
		hwcnt = insert_region(hwregions, hwcnt, pa, sz, 0);
}

/*
 * Add an exclusion region.
 */
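/*
 * For example, early platform code might keep a firmware-reserved range out of
 * both the page pool and crash dumps with a call like (hypothetical values):
 *
 *	physmem_exclude_region(0x80000000, 0x00100000,
 *	    EXFLAG_NOALLOC | EXFLAG_NODUMP);
 */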
void
physmem_exclude_region(vm_paddr_t pa, vm_size_t sz, uint32_t exflags)
{
	vm_offset_t adj;

	/*
	 * Truncate the starting address down to a page boundary, and round the
	 * ending page up to a page boundary.
	 */
	adj = pa - trunc_page(pa);
	pa = trunc_page(pa);
	sz = round_page(sz + adj);

	if (excnt >= nitems(exregions))
		panic("failed to exclude region %#jx-%#jx", (uintmax_t)pa,
		    (uintmax_t)(pa + sz));
	excnt = insert_region(exregions, excnt, pa, sz, exflags);
}

size_t
physmem_avail(vm_paddr_t *avail, size_t maxavail)
{

	return (regions_to_avail(avail, EXFLAG_NOALLOC, maxavail, 0, NULL,
	    NULL));
}

/*
 * Process all the regions added earlier into the global avail lists.
 *
 * Updates the kernel global 'physmem' with the number of physical pages
 * available for use (all pages not in any exclusion region).
 *
 * Updates the kernel global 'Maxmem' with the page number one greater than the
 * last page of physical memory in the system.
 */
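/*
 * Note: if the loader tunable hw.physmem is set to a byte count, it is passed
 * to regions_to_avail() as maxphyssz, capping the avail lists at that much
 * memory; the last region is trimmed to fit.
 */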
void
physmem_init_kernel_globals(void)
{
	size_t nextidx;
	u_long hwphyssz;

	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", &hwphyssz);

	regions_to_avail(dump_avail, EXFLAG_NODUMP, PHYS_AVAIL_ENTRIES,
	    hwphyssz, NULL, NULL);
	nextidx = regions_to_avail(phys_avail, EXFLAG_NOALLOC,
	    PHYS_AVAIL_ENTRIES, hwphyssz, &physmem, &realmem);
	if (nextidx == 0)
		panic("No memory entries in phys_avail");
	Maxmem = atop(phys_avail[nextidx - 1]);
}

#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(physmem, db_show_physmem)
{

	physmem_dump_tables(db_printf);
}

#endif /* DDB */