Insert a layer of indirection into the pmap code, using a kobj for
the interface. This allows run-time selection of MMU code based on
CPU-type detection, or tunable overrides when testing new code.

Prerequisite for G5 support.

conf/files.powerpc
  - remove pmap.c
  - add mmu_if.m, mmu_oea.c, pmap_dispatch.c

powerpc/include/mmuvar.h
  - definitions for MMU implementations

powerpc/include/pmap.h
  - remove pmap_pte_spill declaration
  - add pmap_mmu_install declaration
  - size the phys_avail array
  - pmap_bootstrapped is now global-scope

powerpc/powerpc/machdep.c
  - call kobj_machdep_init early in the boot sequence to allow
    kobj usage prior to SI_SUB_LOCK
  - install the OEA pmap code. This will be moved to CPU-specific
    init code in the future.

powerpc/powerpc/mmu_if.m
  - Kobj MMU interface definitions

powerpc/powerpc/pmap_dispatch.c
  - central dispatch for pmap calls
  - contains the global mmu kobj and the routine to locate the
    mmu implementation and init the kobj
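
To illustrate the intended end state (a sketch only: the PVR-based
detection below, including the mfpvr()/IBM970 usage, is an assumption
and not part of this commit, which still installs the OEA module
unconditionally from machdep.c):

	/* Hypothetical CPU-specific init, replacing the machdep.c stopgap */
	switch (mfpvr() >> 16) {
	case IBM970:
		pmap_mmu_install(MMU_TYPE_G5, 1);	/* 64-bit bridge */
		break;
	default:
		pmap_mmu_install(MMU_TYPE_OEA, 0);	/* 32-bit OEA */
		break;
	}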
grehan  2005-11-08 06:48:08 +00:00
parent bf6d4253ee, commit eff5b98fc4
7 changed files with 1251 additions and 5 deletions

conf/files.powerpc

@@ -45,12 +45,14 @@ powerpc/powerpc/interrupt.c	standard
 powerpc/powerpc/intr_machdep.c	standard
 powerpc/powerpc/locore.S	standard	no-obj
 powerpc/powerpc/machdep.c	standard
+powerpc/powerpc/mmu_if.m	standard
+powerpc/powerpc/mmu_oea.c	standard
 powerpc/powerpc/nexus.c	standard
 powerpc/powerpc/ofwmagic.S	standard
 powerpc/powerpc/ofw_machdep.c	standard
 powerpc/powerpc/openpic.c	standard
 powerpc/powerpc/pic_if.m	standard
-powerpc/powerpc/pmap.c	standard
+powerpc/powerpc/pmap_dispatch.c	standard
 powerpc/powerpc/sc_machdep.c	optional	sc
 powerpc/powerpc/setjmp.S	standard
powerpc/powerpc/sigcode.S standard

powerpc/powerpc/machdep.c

@@ -110,6 +110,7 @@ __FBSDID("$FreeBSD$");
 #include <machine/fpu.h>
 #include <machine/md_var.h>
 #include <machine/metadata.h>
+#include <machine/mmuvar.h>
 #include <machine/pcb.h>
 #include <machine/powerpc.h>
 #include <machine/reg.h>
@@ -317,6 +318,8 @@ powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp)
 	kdb_init();
 
+	kobj_machdep_init();
+
 	/*
 	 * XXX: Initialize the interrupt tables.
 	 *      Disable translation in case the vector area
@@ -358,6 +361,7 @@ powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp)
 	/*
 	 * Initialise virtual memory.
 	 */
+	pmap_mmu_install(MMU_TYPE_OEA, 0);	/* XXX temporary */
 	pmap_bootstrap(startkernel, endkernel);
 
 	/*

powerpc/include/mmuvar.h (new file)

@@ -0,0 +1,96 @@
/*-
* Copyright (c) 2005 Peter Grehan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MACHINE_MMUVAR_H_
#define _MACHINE_MMUVAR_H_
/*
* A PowerPC MMU implementation is declared with a kernel object and
* an associated method table, similar to a device driver.
*
* e.g.
*
*	static mmu_method_t ppc8xx_methods[] = {
*		MMUMETHOD(mmu_change_wiring,	ppc8xx_mmu_change_wiring),
*		MMUMETHOD(mmu_clear_modify,	ppc8xx_mmu_clear_modify),
*		MMUMETHOD(mmu_clear_reference,	ppc8xx_mmu_clear_reference),
*		...
*		MMUMETHOD(mmu_dev_direct_mapped, ppc8xx_mmu_dev_direct_mapped),
*		{ 0, 0 }
*	};
*
*	static mmu_def_t ppc8xx_mmu = {
*		"ppc8xx",
*		ppc8xx_methods,
*		sizeof(ppc8xx_mmu_softc),	// or 0 if no softc
*	};
*
*	MMU_DEF(ppc8xx_mmu);
*/
#include <sys/kobj.h>
struct mmu_kobj {
	/*
	 * An MMU instance is a kernel object
	 */
	KOBJ_FIELDS;

	/*
	 * Utility elements that an instance may use
	 */
	struct mtx	mmu_mtx;	/* available for instance use */
	void		*mmu_iptr;	/* instance data pointer */

	/*
	 * Opaque data that can be overlaid with an instance-private
	 * structure. MMU code can test that this is large enough at
	 * compile time with a sizeof() test against its softc. There
	 * is also a run-time test when the MMU kernel object is
	 * registered.
	 */
#define MMU_OPAQUESZ	64
	u_int		mmu_opaque[MMU_OPAQUESZ];
};
typedef struct mmu_kobj *mmu_t;
typedef struct kobj_class mmu_def_t;
#define mmu_method_t kobj_method_t
#define MMUMETHOD KOBJMETHOD
#define MMU_DEF(name) DATA_SET(mmu_set, name)
/*
* Known MMU names
*/
#define MMU_TYPE_OEA "mmu_oea" /* 32-bit OEA */
#define MMU_TYPE_G5 "mmu_g5" /* 64-bit bridge (ibm 970) */
#define MMU_TYPE_8xx "mmu_8xx" /* 8xx quicc TLB */
#endif /* _MACHINE_MMUVAR_H_ */
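
The compile-time softc size check described in the mmu_opaque comment
could be written as below (a sketch; the ppc8xx softc is a hypothetical
example, not code from this commit):

	#include <sys/param.h>
	#include <sys/lock.h>
	#include <sys/mutex.h>
	#include <sys/systm.h>		/* CTASSERT */
	#include <machine/mmuvar.h>

	struct ppc8xx_mmu_softc {
		int	sc_flags;	/* hypothetical instance data */
	};

	/* Fail the build if the softc outgrows the opaque area. */
	CTASSERT(sizeof(struct ppc8xx_mmu_softc) <=
	    MMU_OPAQUESZ * sizeof(u_int));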

powerpc/include/pmap.h

@@ -94,19 +94,21 @@ void *pmap_mapdev(vm_offset_t, vm_size_t);
 void		pmap_unmapdev(vm_offset_t, vm_size_t);
 void		pmap_deactivate(struct thread *);
 vm_offset_t	pmap_kextract(vm_offset_t);
-int		pmap_pte_spill(vm_offset_t);
 int		pmap_dev_direct_mapped(vm_offset_t, vm_size_t);
+boolean_t	pmap_mmu_install(char *name, int prio);
 #define	vtophys(va)	pmap_kextract(((vm_offset_t)(va)))
-extern	vm_offset_t phys_avail[];
+#define	PHYS_AVAIL_SZ	128
+extern	vm_offset_t phys_avail[PHYS_AVAIL_SZ];
 extern	vm_offset_t virtual_avail;
 extern	vm_offset_t virtual_end;
 extern	vm_offset_t msgbuf_phys;
+extern	int pmap_bootstrapped;
 #endif
 #endif /* !_MACHINE_PMAP_H_ */
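
The now statically-sized phys_avail[] holds (start, end) pairs of
available physical memory, terminated by a zero entry, so PHYS_AVAIL_SZ
bounds the number of ranges the MMU code may record. A consumer walks
it roughly as follows (sketch):

	int i;

	for (i = 0; phys_avail[i + 1] != 0; i += 2)
		printf("avail: %#x - %#x\n",
		    phys_avail[i], phys_avail[i + 1]);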

powerpc/powerpc/mmu_if.m (new file)

@@ -0,0 +1,758 @@
#-
# Copyright (c) 2005 Peter Grehan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $FreeBSD$
#
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <machine/mmuvar.h>
/**
* @defgroup MMU mmu - KObj methods for PowerPC MMU implementations
* @brief A set of methods required by all MMU implementations. These
* are basically direct call-throughs from the pmap machine-dependent
* code.
* Thanks to Bruce M Simpson's pmap man pages for routine descriptions.
*@{
*/
INTERFACE mmu;
#
# Default implementations of some methods
#
CODE {
static void mmu_null_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
{
return;
}
static void mmu_null_growkernel(mmu_t mmu, vm_offset_t addr)
{
return;
}
static void mmu_null_init(mmu_t mmu)
{
return;
}
static void mmu_null_init2(mmu_t mmu)
{
return;
}
static boolean_t mmu_null_is_prefaultable(mmu_t mmu, pmap_t pmap,
vm_offset_t va)
{
return (FALSE);
}
static void mmu_null_object_init_pt(mmu_t mmu, pmap_t pmap,
vm_offset_t addr, vm_object_t object, vm_pindex_t index,
vm_size_t size)
{
return;
}
static void mmu_null_page_init(mmu_t mmu, vm_page_t m)
{
return;
}
static void mmu_null_remove_pages(mmu_t mmu, pmap_t pmap,
vm_offset_t start, vm_offset_t end)
{
return;
}
static int mmu_null_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
{
return (0);
}
static void mmu_null_deactivate(struct thread *td)
{
return;
}
static vm_offset_t mmu_null_addr_hint(mmu_t mmu, vm_object_t object,
vm_offset_t va, vm_size_t size)
{
return (va);
}
};
/**
* @brief Change the wiring attribute for the page in the given physical
* map and virtual address.
*
* @param _pmap physical map of page
* @param _va page virtual address
* @param _wired TRUE to increment wired count, FALSE to decrement
*/
METHOD void change_wiring {
mmu_t _mmu;
pmap_t _pmap;
vm_offset_t _va;
boolean_t _wired;
};
/**
* @brief Clear the 'modified' bit on the given physical page
*
* @param _pg physical page
*/
METHOD void clear_modify {
mmu_t _mmu;
vm_page_t _pg;
};
/**
* @brief Clear the 'referenced' bit on the given physical page
*
* @param _pg physical page
*/
METHOD void clear_reference {
mmu_t _mmu;
vm_page_t _pg;
};
/**
* @brief Copy the address range given by the source physical map, virtual
* address and length to the destination physical map and virtual address.
* This routine is optional; a null default implementation is provided.
*
* @param _dst_pmap destination physical map
* @param _src_pmap source physical map
* @param _dst_addr destination virtual address
* @param _len size of range
* @param _src_addr source virtual address
*/
METHOD void copy {
mmu_t _mmu;
pmap_t _dst_pmap;
pmap_t _src_pmap;
vm_offset_t _dst_addr;
vm_size_t _len;
vm_offset_t _src_addr;
} DEFAULT mmu_null_copy;
/**
* @brief Copy the source physical page to the destination physical page
*
* @param _src source physical page
* @param _dst destination physical page
*/
METHOD void copy_page {
mmu_t _mmu;
vm_page_t _src;
vm_page_t _dst;
};
/**
* @brief Create a mapping between a virtual/physical address pair in the
* passed physical map with the specified protection and wiring
*
* @param _pmap physical map
* @param _va mapping virtual address
* @param _p mapping physical page
* @param _prot mapping page protection
* @param _wired TRUE if page will be wired
*/
METHOD void enter {
mmu_t _mmu;
pmap_t _pmap;
vm_offset_t _va;
vm_page_t _p;
vm_prot_t _prot;
boolean_t _wired;
};
/**
* @brief A faster entry point for page mapping where it is possible
* to short-circuit some of the tests in pmap_enter.
*
* @param _pmap physical map (and also currently active pmap)
* @param _va mapping virtual address
* @param _pg mapping physical page
* @param _prot new page protection - used to see if page is exec.
* @param _mpte ???
*
* @retval NULL (possibly a hint for future calls ?)
*/
METHOD vm_page_t enter_quick {
mmu_t _mmu;
pmap_t _pmap;
vm_offset_t _va;
vm_page_t _pg;
vm_prot_t _prot;
vm_page_t _mpte;
};
/**
* @brief Reverse map the given virtual address, returning the physical
* page associated with the address if a mapping exists.
*
* @param _pmap physical map
* @param _va mapping virtual address
*
* @retval 0 No mapping found
* @retval addr The mapping physical address
*/
METHOD vm_paddr_t extract {
mmu_t _mmu;
pmap_t _pmap;
vm_offset_t _va;
};
/**
* @brief Reverse map the given virtual address, returning the
* physical page if found. The page must be held (by calling
* vm_page_hold) if the page protection matches the given protection
*
* @param _pmap physical map
* @param _va mapping virtual address
* @param _prot protection used to determine if physical page
* should be locked
*
* @retval NULL No mapping found
* @retval page Pointer to physical page. Held if protections match
*/
METHOD vm_page_t extract_and_hold {
mmu_t _mmu;
pmap_t _pmap;
vm_offset_t _va;
vm_prot_t _prot;
};
/**
* @brief Increase kernel virtual address space to the given virtual address.
* Not really required for PowerPC, so this routine is optional; an MMU
* implementation may still make use of it.
*
* @param _va new upper limit for kernel virtual address space
*/
METHOD void growkernel {
mmu_t _mmu;
vm_offset_t _va;
} DEFAULT mmu_null_growkernel;
/**
* @brief Called from vm_mem_init. Zone allocation is available at
* this stage, so this is a convenient time to create zones. This routine is
* for MMU-implementation convenience and is optional.
*/
METHOD void init {
mmu_t _mmu;
} DEFAULT mmu_null_init;
/**
* @brief Called from vm_init2/proc0_init, so can be used as a last-chance
* init before process scheduling starts. This routine is optional.
*/
METHOD void init2 {
mmu_t _mmu;
} DEFAULT mmu_null_init2;
/**
* @brief Return if the page has been marked by MMU hardware to have been
* modified
*
* @param _pg physical page to test
*
* @retval boolean TRUE if page has been modified
*/
METHOD boolean_t is_modified {
mmu_t _mmu;
vm_page_t _pg;
};
/**
* @brief Return whether the specified virtual address is a candidate to be
* prefaulted in. This routine is optional.
*
* @param _pmap physical map
* @param _va virtual address to test
*
* @retval boolean TRUE if the address is a candidate.
*/
METHOD boolean_t is_prefaultable {
mmu_t _mmu;
pmap_t _pmap;
vm_offset_t _va;
} DEFAULT mmu_null_is_prefaultable;
/**
* @brief Return a count of referenced bits for a page, clearing those bits.
* Not all referenced bits need to be cleared, but it is necessary that 0
* only be returned when there are none set.
*
* @param _pg physical page
*
* @retval int count of referenced bits
*/
METHOD boolean_t ts_referenced {
mmu_t _mmu;
vm_page_t _pg;
};
/**
* @brief Map the requested physical address range into kernel virtual
* address space. The value in _virt is taken as a hint. The virtual
* address of the range is returned, or NULL if the mapping could not
* be created. The range can be direct-mapped if that is supported.
*
* @param *_virt Hint for start virtual address, and also return
* value
* @param _start physical address range start
* @param _end physical address range end
* @param _prot protection of range (currently ignored)
*
* @retval NULL could not map the area
* @retval addr, *_virt mapping start virtual address
*/
METHOD vm_offset_t map {
mmu_t _mmu;
vm_offset_t *_virt;
vm_paddr_t _start;
vm_paddr_t _end;
int _prot;
};
/**
* @brief Used to create a contiguous set of read-only mappings for a
* given object to try and eliminate a cascade of on-demand faults as
* the object is accessed sequentially. This routine is optional.
*
* @param _pmap physical map
* @param _addr mapping start virtual address
* @param _object device-backed V.M. object to be mapped
* @param _pindex page-index within object of mapping start
* @param _size size in bytes of mapping
*/
METHOD void object_init_pt {
mmu_t _mmu;
pmap_t _pmap;
vm_offset_t _addr;
vm_object_t _object;
vm_pindex_t _pindex;
vm_size_t _size;
} DEFAULT mmu_null_object_init_pt;
/**
* @brief Used to determine if the specified page has a mapping for the
* given physical map, by scanning the list of reverse-mappings from the
* page. The list is scanned to a maximum of 16 entries.
*
* @param _pmap physical map
* @param _pg physical page
*
* @retval bool TRUE if the physical map was found in the first 16
* reverse-map list entries off the physical page.
*/
METHOD boolean_t page_exists_quick {
mmu_t _mmu;
pmap_t _pmap;
vm_page_t _pg;
};
/**
* @brief Initialise the machine-dependent section of the physical page
* data structure. This routine is optional.
*
* @param _pg physical page
*/
METHOD void page_init {
mmu_t _mmu;
vm_page_t _pg;
} DEFAULT mmu_null_page_init;
/**
* @brief Lower the protection to the given value for all mappings of the
* given physical page.
*
* @param _pg physical page
* @param _prot updated page protection
*/
METHOD void page_protect {
mmu_t _mmu;
vm_page_t _pg;
vm_prot_t _prot;
};
/**
* @brief Initialise a physical map data structure
*
* @param _pmap physical map
*/
METHOD void pinit {
mmu_t _mmu;
pmap_t _pmap;
};
/**
* @brief Initialise the physical map for process 0, the initial process
* in the system.
* XXX default to pinit ?
*
* @param _pmap physical map
*/
METHOD void pinit0 {
mmu_t _mmu;
pmap_t _pmap;
};
/**
* @brief Set the protection for physical pages in the given virtual address
* range to the given value.
*
* @param _pmap physical map
* @param _start virtual range start
* @param _end virtual range end
* @param _prot new page protection
*/
METHOD void protect {
mmu_t _mmu;
pmap_t _pmap;
vm_offset_t _start;
vm_offset_t _end;
vm_prot_t _prot;
};
/**
* @brief Create a mapping in kernel virtual address space for the given array
* of wired physical pages.
*
* @param _start mapping virtual address start
* @param *_m array of physical page pointers
* @param _count array elements
*/
METHOD void qenter {
mmu_t _mmu;
vm_offset_t _start;
vm_page_t *_pg;
int _count;
};
/**
* @brief Remove the temporary mappings created by qenter.
*
* @param _start mapping virtual address start
* @param _count number of pages in mapping
*/
METHOD void qremove {
mmu_t _mmu;
vm_offset_t _start;
int _count;
};
/**
* @brief Release per-pmap resources, e.g. mutexes, allocated memory etc. There
* should be no existing mappings for the physical map at this point
*
* @param _pmap physical map
*/
METHOD void release {
mmu_t _mmu;
pmap_t _pmap;
};
/**
* @brief Remove all mappings in the given physical map for the start/end
* virtual address range. The range will be page-aligned.
*
* @param _pmap physical map
* @param _start mapping virtual address start
* @param _end mapping virtual address end
*/
METHOD void remove {
mmu_t _mmu;
pmap_t _pmap;
vm_offset_t _start;
vm_offset_t _end;
};
/**
* @brief Traverse the reverse-map list off the given physical page and
* remove all mappings. Clear the PG_WRITEABLE attribute from the page.
*
* @param _pg physical page
*/
METHOD void remove_all {
mmu_t _mmu;
vm_page_t _pg;
};
/**
* @brief Remove all mappings in the given start/end virtual address range
* for the given physical map. Similar to the remove method, but it is used
* when tearing down all mappings in an address space. This method is
* optional, since pmap_remove will be called for each valid vm_map in
* the address space later.
*
* @param _pmap physical map
* @param _start mapping virtual address start
* @param _end mapping virtual address end
*/
METHOD void remove_pages {
mmu_t _mmu;
pmap_t _pmap;
vm_offset_t _start;
vm_offset_t _end;
} DEFAULT mmu_null_remove_pages;
/**
* @brief Zero a physical page. It is not assumed that the page is mapped,
* so a temporary (or direct) mapping may need to be used.
*
* @param _pg physical page
*/
METHOD void zero_page {
mmu_t _mmu;
vm_page_t _pg;
};
/**
* @brief Zero a portion of a physical page, starting at a given offset and
* for a given size (multiples of 512 bytes for 4k pages).
*
* @param _pg physical page
* @param _off byte offset from start of page
* @param _size size of area to zero
*/
METHOD void zero_page_area {
mmu_t _mmu;
vm_page_t _pg;
int _off;
int _size;
};
/**
* @brief Called from the idle loop to zero pages. XXX I think locking
* constraints might be different here compared to zero_page.
*
* @param _pg physical page
*/
METHOD void zero_page_idle {
mmu_t _mmu;
vm_page_t _pg;
};
/**
* @brief Extract mincore(2) information from a mapping. This routine is
* optional and is an optimisation: the mincore code will call is_modified
* and ts_referenced if no result is returned.
*
* @param _pmap physical map
* @param _addr page virtual address
*
* @retval 0 no result
* @retval non-zero mincore(2) flag values
*/
METHOD int mincore {
mmu_t _mmu;
pmap_t _pmap;
vm_offset_t _addr;
} DEFAULT mmu_null_mincore;
/**
* @brief Perform any operations required to allow a physical map to be used
* before its address space is accessed.
*
* @param _td thread associated with physical map
*/
METHOD void activate {
mmu_t _mmu;
struct thread *_td;
};
/**
* @brief Perform any operations required to deactivate a physical map,
* for instance as it is context-switched out.
*
* @param _td thread associated with physical map
*/
METHOD void deactivate {
mmu_t _mmu;
struct thread *_td;
} DEFAULT mmu_null_deactivate;
/**
* @brief Return a hint for the best virtual address to map a tentative
* virtual address range in a given VM object. The default is to just
* return the given tentative start address.
*
* @param _obj VM backing object
* @param _addr initial guess at virtual address
* @param _size size of virtual address range
*/
METHOD vm_offset_t addr_hint {
mmu_t _mmu;
vm_object_t _obj;
vm_offset_t _addr;
vm_size_t _size;
} DEFAULT mmu_null_addr_hint;
/**
* INTERNAL INTERFACES
*/
/**
* @brief Bootstrap the VM system. At the completion of this routine, the
* kernel will be running in its own address space with full control over
* paging.
*
* @param _start start of reserved memory (obsolete ???)
* @param _end end of reserved memory (obsolete ???)
* XXX I think the intent of these was to allow
* the memory used by kernel text+data+bss and
* loader variables/load-time kld's to be carved out
* of available physical mem.
*
*/
METHOD void bootstrap {
mmu_t _mmu;
vm_offset_t _start;
vm_offset_t _end;
};
/**
* @brief Create a kernel mapping for a given physical address range.
* Called by bus code on behalf of device drivers. The mapping does not
* have to be a virtual address: it can be a direct-mapped physical address
* if that is supported by the MMU.
*
* @param _pa start physical address
* @param _size size in bytes of mapping
*
* @retval addr address of mapping.
*/
METHOD void * mapdev {
mmu_t _mmu;
vm_offset_t _pa;
vm_size_t _size;
};
/**
* @brief Remove the mapping created by mapdev. Called when a driver
* is unloaded.
*
* @param _va Mapping address returned from mapdev
* @param _size size in bytes of mapping
*/
METHOD void unmapdev {
mmu_t _mmu;
vm_offset_t _va;
vm_size_t _size;
};
/**
* @brief Reverse-map a kernel virtual address
*
* @param _va kernel virtual address to reverse-map
*
* @retval pa physical address corresponding to mapping
*/
METHOD vm_offset_t kextract {
mmu_t _mmu;
vm_offset_t _va;
};
/**
* @brief Map a wired page into kernel virtual address space
*
* @param _va mapping virtual address
* @param _pa mapping physical address
*/
METHOD void kenter {
mmu_t _mmu;
vm_offset_t _va;
vm_offset_t _pa;
};
/**
* @brief Determine if the given physical address range has been direct-mapped.
*
* @param _pa physical address start
* @param _size physical address range size
*
* @retval bool TRUE if the range is direct-mapped.
*/
METHOD boolean_t dev_direct_mapped {
mmu_t _mmu;
vm_offset_t _pa;
vm_size_t _size;
};
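
At build time the kobj interface compiler turns mmu_if.m into
mmu_if.h/mmu_if.c; each METHOD becomes a typed inline dispatcher along
these lines (illustrative of the generated shape, not the literal
output):

	static __inline void MMU_CHANGE_WIRING(mmu_t _mmu, pmap_t _pmap,
	    vm_offset_t _va, boolean_t _wired)
	{
		kobjop_t _m;

		KOBJOPLOOKUP(((kobj_t)_mmu)->ops, mmu_change_wiring);
		((mmu_change_wiring_t *)_m)(_mmu, _pmap, _va, _wired);
	}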

powerpc/powerpc/pmap_dispatch.c (new file)

@@ -0,0 +1,380 @@
/*-
* Copyright (c) 2005 Peter Grehan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* Dispatch MI pmap calls to the appropriate MMU implementation
* through a previously registered kernel object.
*
* Before pmap_bootstrap() can be called, a CPU module must have
* called pmap_mmu_install(). This may be called multiple times:
* the highest priority call will be installed as the default
* MMU handler when pmap_bootstrap() is called.
*
* It is required that kobj_machdep_init() be called before
* pmap_bootstrap() to allow the kobj subsystem to initialise. This
* in turn requires that mutex_init() has been called.
*/
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <machine/mmuvar.h>
#include "mmu_if.h"
static mmu_def_t *mmu_def_impl;
static mmu_t mmu_obj;
static struct mmu_kobj mmu_kernel_obj;
static struct kobj_ops mmu_kernel_kops;
/*
* pmap globals
*/
struct pmap kernel_pmap_store;
struct msgbuf *msgbufp;
vm_offset_t msgbuf_phys;
vm_offset_t avail_start;
vm_offset_t avail_end;
vm_offset_t kernel_vm_end;
vm_offset_t phys_avail[PHYS_AVAIL_SZ];
vm_offset_t virtual_avail;
vm_offset_t virtual_end;
int pmap_pagedaemon_waken;
int pmap_bootstrapped;
void
pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
{
MMU_CHANGE_WIRING(mmu_obj, pmap, va, wired);
}
void
pmap_clear_modify(vm_page_t m)
{
MMU_CLEAR_MODIFY(mmu_obj, m);
}
void
pmap_clear_reference(vm_page_t m)
{
MMU_CLEAR_REFERENCE(mmu_obj, m);
}
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
vm_size_t len, vm_offset_t src_addr)
{
MMU_COPY(mmu_obj, dst_pmap, src_pmap, dst_addr, len, src_addr);
}
void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{
MMU_COPY_PAGE(mmu_obj, src, dst);
}
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t p, vm_prot_t prot,
boolean_t wired)
{
MMU_ENTER(mmu_obj, pmap, va, p, prot, wired);
}
vm_page_t
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
vm_page_t mpte)
{
return (MMU_ENTER_QUICK(mmu_obj, pmap, va, m, prot, mpte));
}
vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{
return (MMU_EXTRACT(mmu_obj, pmap, va));
}
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
return (MMU_EXTRACT_AND_HOLD(mmu_obj, pmap, va, prot));
}
void
pmap_growkernel(vm_offset_t va)
{
MMU_GROWKERNEL(mmu_obj, va);
}
void
pmap_init(void)
{
MMU_INIT(mmu_obj);
}
void
pmap_init2(void)
{
MMU_INIT2(mmu_obj);
}
boolean_t
pmap_is_modified(vm_page_t m)
{
return (MMU_IS_MODIFIED(mmu_obj, m));
}
boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t va)
{
return (MMU_IS_PREFAULTABLE(mmu_obj, pmap, va));
}
boolean_t
pmap_ts_referenced(vm_page_t m)
{
return (MMU_TS_REFERENCED(mmu_obj, m));
}
vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{
return (MMU_MAP(mmu_obj, virt, start, end, prot));
}
void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
vm_pindex_t pindex, vm_size_t size)
{
MMU_OBJECT_INIT_PT(mmu_obj, pmap, addr, object, pindex, size);
}
boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
return (MMU_PAGE_EXISTS_QUICK(mmu_obj, pmap, m));
}
void
pmap_page_init(vm_page_t m)
{
MMU_PAGE_INIT(mmu_obj, m);
}
void
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{
MMU_PAGE_PROTECT(mmu_obj, m, prot);
}
void
pmap_pinit(pmap_t pmap)
{
MMU_PINIT(mmu_obj, pmap);
}
void
pmap_pinit0(pmap_t pmap)
{
MMU_PINIT0(mmu_obj, pmap);
}
void
pmap_protect(pmap_t pmap, vm_offset_t start, vm_offset_t end, vm_prot_t prot)
{
MMU_PROTECT(mmu_obj, pmap, start, end, prot);
}
void
pmap_qenter(vm_offset_t start, vm_page_t *m, int count)
{
MMU_QENTER(mmu_obj, start, m, count);
}
void
pmap_qremove(vm_offset_t start, int count)
{
MMU_QREMOVE(mmu_obj, start, count);
}
void
pmap_release(pmap_t pmap)
{
MMU_RELEASE(mmu_obj, pmap);
}
void
pmap_remove(pmap_t pmap, vm_offset_t start, vm_offset_t end)
{
MMU_REMOVE(mmu_obj, pmap, start, end);
}
void
pmap_remove_all(vm_page_t m)
{
MMU_REMOVE_ALL(mmu_obj, m);
}
void
pmap_remove_pages(pmap_t pmap, vm_offset_t start, vm_offset_t end)
{
MMU_REMOVE_PAGES(mmu_obj, pmap, start, end);
}
void
pmap_zero_page(vm_page_t m)
{
MMU_ZERO_PAGE(mmu_obj, m);
}
void
pmap_zero_page_area(vm_page_t m, int off, int size)
{
MMU_ZERO_PAGE_AREA(mmu_obj, m, off, size);
}
void
pmap_zero_page_idle(vm_page_t m)
{
MMU_ZERO_PAGE_IDLE(mmu_obj, m);
}
int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
{
return (MMU_MINCORE(mmu_obj, pmap, addr));
}
void
pmap_activate(struct thread *td)
{
MMU_ACTIVATE(mmu_obj, td);
}
void
pmap_deactivate(struct thread *td)
{
MMU_DEACTIVATE(mmu_obj, td);
}
vm_offset_t
pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
{
return (MMU_ADDR_HINT(mmu_obj, obj, addr, size));
}
/*
* Routines used in machine-dependent code
*/
void
pmap_bootstrap(vm_offset_t start, vm_offset_t end)
{
mmu_obj = &mmu_kernel_obj;
/*
* Take care of compiling the selected class, and
* then statically initialise the MMU object
*/
kobj_class_compile_static(mmu_def_impl, &mmu_kernel_kops);
kobj_init((kobj_t)mmu_obj, mmu_def_impl);
MMU_BOOTSTRAP(mmu_obj, start, end);
}
void *
pmap_mapdev(vm_offset_t pa, vm_size_t size)
{
return (MMU_MAPDEV(mmu_obj, pa, size));
}
void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{
MMU_UNMAPDEV(mmu_obj, va, size);
}
vm_offset_t
pmap_kextract(vm_offset_t va)
{
return (MMU_KEXTRACT(mmu_obj, va));
}
void
pmap_kenter(vm_offset_t va, vm_offset_t pa)
{
MMU_KENTER(mmu_obj, va, pa);
}
boolean_t
pmap_dev_direct_mapped(vm_offset_t pa, vm_size_t size)
{
return (MMU_DEV_DIRECT_MAPPED(mmu_obj, pa, size));
}
/*
* MMU install routines. The highest priority wins; equal priority also
* overrides, allowing the implementation set last to win.
*/
SET_DECLARE(mmu_set, mmu_def_t);
boolean_t
pmap_mmu_install(char *name, int prio)
{
mmu_def_t **mmupp, *mmup;
static int curr_prio = 0;
/*
* Try to locate the MMU kobj corresponding to the name
*/
SET_FOREACH(mmupp, mmu_set) {
mmup = *mmupp;
if (mmup->name &&
!strcmp(mmup->name, name) &&
prio >= curr_prio) {
curr_prio = prio;
mmu_def_impl = mmup;
return (TRUE);
}
}
return (FALSE);
}
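
Because the install test is prio >= curr_prio, a later call at equal
priority replaces an earlier one, which is what makes the
tunable-override testing mentioned in the commit message possible. A
sketch (the machdep.mmu_override tunable name is hypothetical):

	char buf[32];

	pmap_mmu_install(MMU_TYPE_OEA, 0);	/* detected default */
	if (getenv_string("machdep.mmu_override", buf, sizeof(buf)))
		pmap_mmu_install(buf, 0);	/* equal prio: last set wins */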