Change region 4 to be part of the kernel. This serves two purposes:

1.  The PBVM is in region 4, so if we want to make use of it, we
    need region 4 freed up.
2.  Addresses in region 4 and above cannot be represented by an
    off_t, because that type is signed. This is problematic for
    truss(1), ktrace(1) and other such programs; see the sketch
    below for the overflow in question.
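For illustration only (not part of the commit): on ia64 the top three
bits of a virtual address select the region, so any address in region 4
or above has bit 63 set and goes negative when stored in a signed
64-bit type such as off_t. A minimal standalone sketch:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* Region n starts at n << 61; region 4 therefore sets bit 63. */
	uint64_t r4_base = 4UL << 61;		/* 0x8000000000000000 */
	int64_t as_off_t = (int64_t)r4_base;	/* what an off_t would hold */

	printf("region 4 base: %#jx\n", (uintmax_t)r4_base);
	printf("as a signed value: %jd\n", (intmax_t)as_off_t);
	return (0);
}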
This commit is contained in:
Marcel Moolenaar 2011-03-21 01:09:50 +00:00
parent ef89d04f13
commit 7c9eed5c4e
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=219808
5 changed files with 40 additions and 46 deletions

View File

@@ -260,7 +260,7 @@ db_backtrace(struct thread *td, struct pcb *pcb, int count)
 		sym = db_search_symbol(ip, DB_STGY_ANY, &offset);
 		db_symbol_values(sym, &name, NULL);
 		db_printf("%s(", name);
-		if (bsp >= IA64_RR_BASE(5)) {
+		if (bsp >= VM_MAXUSER_ADDRESS) {
 			for (i = 0; i < args; i++) {
 				if ((bsp & 0x1ff) == 0x1f8)
 					bsp += 8;
@@ -279,12 +279,12 @@ db_backtrace(struct thread *td, struct pcb *pcb, int count)
 		if (error != ERESTART)
 			continue;
-		if (sp < IA64_RR_BASE(5))
+		if (sp < VM_MAXUSER_ADDRESS)
 			break;
 		tf = (struct trapframe *)(sp + 16);
 		if ((tf->tf_flags & FRAME_SYSCALL) != 0 ||
-		    tf->tf_special.iip < IA64_RR_BASE(5))
+		    tf->tf_special.iip < VM_MAXUSER_ADDRESS)
 			break;
 		/* XXX ask if we should unwind across the trapframe. */

View File

@@ -177,7 +177,7 @@ gdb_cpu_query(void)
 	 * kernel stack address. See also ptrace_machdep().
 	 */
 	bspstore = kdb_frame->tf_special.bspstore;
-	kstack = (bspstore >= IA64_RR_BASE(5)) ? (uint64_t*)bspstore :
+	kstack = (bspstore >= VM_MAXUSER_ADDRESS) ? (uint64_t*)bspstore :
 	    (uint64_t*)(kdb_thread->td_kstack + (bspstore & 0x1ffUL));
 	gdb_tx_begin('\0');
 	gdb_tx_mem((void*)(kstack + slot), 8);

View File

@@ -102,17 +102,11 @@ __FBSDID("$FreeBSD$");
  * We reserve region ID 0 for the kernel and allocate the remaining
  * IDs for user pmaps.
  *
- * Region 0..4
- *	User virtually mapped
- *
- * Region 5
- *	Kernel virtually mapped
- *
- * Region 6
- *	Kernel physically mapped uncacheable
- *
- * Region 7
- *	Kernel physically mapped cacheable
+ * Region 0-3:	User virtually mapped
+ * Region 4:	PBVM and special mappings
+ * Region 5:	Kernel virtual memory
+ * Region 6:	Direct-mapped uncacheable
+ * Region 7:	Direct-mapped cacheable
  */

 /* XXX move to a header. */
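As an aside (not in the commit): the region a VA belongs to is just its
top three bits, which is what makes the table above and the
IA64_RR_BASE() comparisons elsewhere in this diff work. A hypothetical
helper, not from pmap.c:

#include <stdint.h>

/* The top three bits of an ia64 VA select the region. */
static inline unsigned int
va_region(uint64_t va)
{
	return ((unsigned int)(va >> 61));
}
/* va_region(0x8000000000000000UL) == 4: PBVM and special mappings. */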
@@ -346,9 +340,9 @@ pmap_bootstrap()
 	 * Setup RIDs. RIDs 0..7 are reserved for the kernel.
 	 *
 	 * We currently need at least 19 bits in the RID because PID_MAX
-	 * can only be encoded in 17 bits and we need RIDs for 5 regions
+	 * can only be encoded in 17 bits and we need RIDs for 4 regions
 	 * per process. With PID_MAX equalling 99999 this means that we
-	 * need to be able to encode 499995 (=5*PID_MAX).
+	 * need to be able to encode 399996 (=4*PID_MAX).
 	 * The Itanium processor only has 18 bits and the architected
 	 * minimum is exactly that. So, we cannot use a PID based scheme
 	 * in those cases. Enter pmap_ridmap...
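The arithmetic in the updated comment checks out: 2^16 = 65536 <
PID_MAX = 99999 < 2^17 = 131072, so a PID needs 17 bits, and
4 * 99999 = 399996 lies between 2^18 = 262144 and 2^19 = 524288, hence
19 bits. A standalone sketch (the pid-based encoding is inferred from
the comment, not copied from the code):

#include <stdint.h>

/* Width in bits of the largest RID a PID-based scheme with
 * IA64_VM_MINKERN_REGION (4) regions per process must encode. */
static int
bits_needed(uint64_t v)
{
	int n = 0;

	while (v != 0) {
		v >>= 1;
		n++;
	}
	return (n);
}
/* bits_needed(99999) == 17; bits_needed(4 * 99999) == 19. */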
@@ -390,7 +384,7 @@ pmap_bootstrap()
 	 */
 	ia64_kptdir = (void *)pmap_steal_memory(PAGE_SIZE);
 	nkpt = 0;
-	kernel_vm_end = VM_MIN_KERNEL_ADDRESS - VM_GATEWAY_SIZE;
+	kernel_vm_end = VM_MIN_KERNEL_ADDRESS;

 	for (i = 0; phys_avail[i+2]; i+= 2)
 		;
@@ -451,16 +445,13 @@ pmap_bootstrap()
 	 * Initialize the kernel pmap (which is statically allocated).
 	 */
 	PMAP_LOCK_INIT(kernel_pmap);
-	for (i = 0; i < 5; i++)
+	for (i = 0; i < IA64_VM_MINKERN_REGION; i++)
 		kernel_pmap->pm_rid[i] = 0;
 	TAILQ_INIT(&kernel_pmap->pm_pvlist);
 	PCPU_SET(md.current_pmap, kernel_pmap);

-	/*
-	 * Region 5 is mapped via the vhpt.
-	 */
-	ia64_set_rr(IA64_RR_BASE(5),
-	    (5 << 8) | (PAGE_SHIFT << 2) | 1);
+	/* Region 5 is mapped via the VHPT. */
+	ia64_set_rr(IA64_RR_BASE(5), (5 << 8) | (PAGE_SHIFT << 2) | 1);

 	/*
 	 * Region 6 is direct mapped UC and region 7 is direct mapped
@@ -678,7 +669,7 @@ pmap_pinit(struct pmap *pmap)
 	int i;

 	PMAP_LOCK_INIT(pmap);
-	for (i = 0; i < 5; i++)
+	for (i = 0; i < IA64_VM_MINKERN_REGION; i++)
 		pmap->pm_rid[i] = pmap_allocate_rid();
 	TAILQ_INIT(&pmap->pm_pvlist);
 	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
@@ -699,7 +690,7 @@ pmap_release(pmap_t pmap)
 {
 	int i;

-	for (i = 0; i < 5; i++)
+	for (i = 0; i < IA64_VM_MINKERN_REGION; i++)
 		if (pmap->pm_rid[i])
 			pmap_free_rid(pmap->pm_rid[i]);
 	PMAP_LOCK_DESTROY(pmap);
@@ -1221,7 +1212,7 @@ pmap_kextract(vm_offset_t va)
 	struct ia64_lpte *pte;
 	vm_offset_t gwpage;

-	KASSERT(va >= IA64_RR_BASE(5), ("Must be kernel VA"));
+	KASSERT(va >= VM_MAXUSER_ADDRESS, ("Must be kernel VA"));

 	/* Regions 6 and 7 are direct mapped. */
 	if (va >= IA64_RR_BASE(6))
@@ -1229,7 +1220,7 @@ pmap_kextract(vm_offset_t va)

 	/* EPC gateway page? */
 	gwpage = (vm_offset_t)ia64_get_k5();
-	if (va >= gwpage && va < gwpage + VM_GATEWAY_SIZE)
+	if (va >= gwpage && va < gwpage + PAGE_SIZE)
 		return (IA64_RR_MASK((vm_offset_t)ia64_gateway_page));

 	/* Bail out if the virtual address is beyond our limits. */
@@ -2285,12 +2276,12 @@ pmap_switch(pmap_t pm)
 	if (prevpm == pm)
 		goto out;
 	if (pm == NULL) {
-		for (i = 0; i < 5; i++) {
+		for (i = 0; i < IA64_VM_MINKERN_REGION; i++) {
 			ia64_set_rr(IA64_RR_BASE(i),
 			    (i << 8)|(PAGE_SHIFT << 2)|1);
 		}
 	} else {
-		for (i = 0; i < 5; i++) {
+		for (i = 0; i < IA64_VM_MINKERN_REGION; i++) {
 			ia64_set_rr(IA64_RR_BASE(i),
 			    (pm->pm_rid[i] << 8)|(PAGE_SHIFT << 2)|1);
 		}
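The (rid << 8) | (PAGE_SHIFT << 2) | 1 expression follows the Itanium
region register layout: bit 0 is ve (VHPT walker enable), bits 2..7 are
ps (log2 of the preferred page size), and the RID starts at bit 8. A
hypothetical helper making that explicit (not from pmap.c):

#include <stdint.h>

/* Assemble a region register value: ve at bit 0, ps at bits 2..7,
 * rid from bit 8 up. */
static inline uint64_t
rr_value(uint64_t rid, unsigned int log2ps)
{
	return ((rid << 8) | ((uint64_t)log2ps << 2) | 1);
}
/* rr_value(pm->pm_rid[i], PAGE_SHIFT) would match the calls above. */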

View File

@@ -50,6 +50,7 @@
 #include <sys/_mutex.h>

 #include <machine/atomic.h>
 #include <machine/pte.h>
+#include <machine/vmparam.h>

 #ifdef _KERNEL
@@ -75,7 +76,7 @@ struct md_page {
 struct pmap {
 	struct mtx pm_mtx;
 	TAILQ_HEAD(,pv_entry) pm_pvlist;	/* list of mappings in pmap */
-	uint32_t pm_rid[5];			/* base RID for pmap */
+	uint32_t pm_rid[IA64_VM_MINKERN_REGION];
 	struct pmap_statistics pm_stats;	/* pmap statistics */
 };

View File

@@ -41,12 +41,6 @@
 #ifndef _MACHINE_VMPARAM_H_
 #define _MACHINE_VMPARAM_H_

-/*
- * USRSTACK is the top (end) of the user stack. Immediately above the user
- * stack resides the syscall gateway page.
- */
-#define	USRSTACK		VM_MAXUSER_ADDRESS
-
 /*
  * Virtual memory related constants, all in bytes
  */
@@ -122,6 +116,8 @@
 #define	VM_NRESERVLEVEL		0
 #endif

+#define	IA64_VM_MINKERN_REGION	4
+
 /*
  * Manipulating region bits of an address.
  */
@@ -138,7 +134,8 @@
  * to 0x1ffbffffffffffff. We define the top half of a region in terms of
  * this worst-case gap.
  */
-#define	IA64_REGION_TOP_HALF	0x1ffc000000000000
+#define	IA64_REGION_GAP_START	0x0004000000000000
+#define	IA64_REGION_GAP_EXTEND	0x1ffc000000000000

 /*
  * Page size of the identity mappings in region 7.
@@ -151,7 +148,6 @@
 #define	IA64_ID_PAGE_SIZE	(1<<(LOG2_ID_PAGE_SIZE))
 #define	IA64_ID_PAGE_MASK	(IA64_ID_PAGE_SIZE-1)

-#define	IA64_BACKINGSTORE	IA64_RR_BASE(4)

 /*
  * Parameters for Pre-Boot Virtual Memory (PBVM).
@@ -177,9 +173,9 @@
  * and wired into the CPU, but does not assume that the mapping covers the
  * whole of PBVM.
  */
-#define	IA64_PBVM_RR		4
+#define	IA64_PBVM_RR		IA64_VM_MINKERN_REGION
 #define	IA64_PBVM_BASE		\
-	(IA64_RR_BASE(IA64_PBVM_RR) + IA64_REGION_TOP_HALF)
+	(IA64_RR_BASE(IA64_PBVM_RR) + IA64_REGION_GAP_EXTEND)

 #define	IA64_PBVM_PGTBL_MAXSZ	1048576
 #define	IA64_PBVM_PGTBL		\
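The two gap constants above partition a 2^61-byte region: the low
2^50 bytes sit below the worst-case unimplemented-address gap (which
the comment says can extend to 0x1ffbffffffffffff), so region base plus
IA64_REGION_GAP_EXTEND is the lowest always-implemented address in the
region's top slice, which is where IA64_PBVM_BASE lands. A quick
standalone check of that relation:

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint64_t gap_start = 0x0004000000000000UL;	/* IA64_REGION_GAP_START */
	uint64_t gap_extend = 0x1ffc000000000000UL;	/* IA64_REGION_GAP_EXTEND */

	/* The two constants split the 2^61-byte region exactly. */
	assert(gap_start + gap_extend == 1UL << 61);
	/* PBVM sits just above the worst-case gap end. */
	assert(gap_extend > 0x1ffbffffffffffffUL);
	return (0);
}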
@@ -194,15 +190,21 @@
  */

 /* user/kernel map constants */
-#define	VM_MIN_ADDRESS		0
-#define	VM_MAXUSER_ADDRESS	IA64_RR_BASE(5)
-#define	VM_GATEWAY_SIZE		PAGE_SIZE
-#define	VM_MIN_KERNEL_ADDRESS	(VM_MAXUSER_ADDRESS + VM_GATEWAY_SIZE)
-#define	VM_MAX_KERNEL_ADDRESS	(IA64_RR_BASE(6) - 1)
+#define	VM_MIN_ADDRESS		0
+#define	VM_MAXUSER_ADDRESS	IA64_RR_BASE(IA64_VM_MINKERN_REGION)
+#define	VM_MIN_KERNEL_ADDRESS	IA64_RR_BASE(IA64_VM_MINKERN_REGION + 1)
+#define	VM_MAX_KERNEL_ADDRESS	(IA64_RR_BASE(IA64_VM_MINKERN_REGION + 2) - 1)
 #define	VM_MAX_ADDRESS		~0UL

 #define	KERNBASE		VM_MAXUSER_ADDRESS

+/*
+ * USRSTACK is the top (end) of the user stack. Immediately above the user
+ * stack resides the syscall gateway page.
+ */
+#define	USRSTACK		VM_MAXUSER_ADDRESS
+#define	IA64_BACKINGSTORE	(USRSTACK - (2 * MAXSSIZ) - PAGE_SIZE)
+
 /* virtual sizes (bytes) for various kernel submaps */
 #ifndef VM_KMEM_SIZE
 #define	VM_KMEM_SIZE		(12 * 1024 * 1024)
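Plugging in IA64_VM_MINKERN_REGION = 4, the new constants evaluate to
concrete boundaries. A standalone sketch, assuming the usual
IA64_RR_BASE(n) definition of ((uint64_t)(n) << 61), which lies outside
this diff:

#include <stdint.h>
#include <stdio.h>

#define	IA64_RR_BASE(n)		((uint64_t)(n) << 61)	/* assumed */
#define	IA64_VM_MINKERN_REGION	4

int
main(void)
{
	printf("VM_MAXUSER_ADDRESS    = %#jx\n",	/* 0x8000000000000000 */
	    (uintmax_t)IA64_RR_BASE(IA64_VM_MINKERN_REGION));
	printf("VM_MIN_KERNEL_ADDRESS = %#jx\n",	/* 0xa000000000000000 */
	    (uintmax_t)IA64_RR_BASE(IA64_VM_MINKERN_REGION + 1));
	printf("VM_MAX_KERNEL_ADDRESS = %#jx\n",	/* 0xbfffffffffffffff */
	    (uintmax_t)(IA64_RR_BASE(IA64_VM_MINKERN_REGION + 2) - 1));
	return (0);
}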