/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Manages physical address maps.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
 */

#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/cpuset.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/cpu.h>
#include <machine/platform.h>
#include <machine/bat.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/pte.h>
#include <machine/smp.h>
#include <machine/sr.h>
#include <machine/mmuvar.h>
#include <machine/trap_aim.h>

#include "mmu_if.h"

#define MOEA_DEBUG

#define TODO    panic("%s: not implemented", __func__);

#define VSID_MAKE(sr, hash)     ((sr) | (((hash) & 0xfffff) << 4))
#define VSID_TO_SR(vsid)        ((vsid) & 0xf)
#define VSID_TO_HASH(vsid)      (((vsid) >> 4) & 0xfffff)
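
/*
 * Illustrative note on the VSID layout: the low nibble of a VSID produced by
 * VSID_MAKE() carries the segment-register number and the remaining 20 bits
 * carry the allocator hash, so the two macros above invert it exactly.
 * For example:
 *
 *      VSID_MAKE(0x3, 0x12345)   == 0x123453
 *      VSID_TO_SR(0x123453)      == 0x3
 *      VSID_TO_HASH(0x123453)    == 0x12345
 */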

struct ofw_map {
        vm_offset_t     om_va;
        vm_size_t       om_len;
        vm_offset_t     om_pa;
        u_int           om_mode;
};

/*
 * Map of physical memory regions.
 */
static struct   mem_region *regions;
static struct   mem_region *pregions;
static u_int    phys_avail_count;
static int      regions_sz, pregions_sz;
static struct   ofw_map *translations;

/*
 * Lock for the pteg and pvo tables.
 */
struct mtx      moea_table_mutex;
struct mtx      moea_vsid_mutex;

/* tlbie instruction synchronization */
static struct mtx tlbie_mtx;

/*
 * PTEG data.
 */
static struct   pteg *moea_pteg_table;
u_int           moea_pteg_count;
u_int           moea_pteg_mask;

/*
 * PVO data.
 */
struct  pvo_head *moea_pvo_table;               /* pvo entries by pteg index */
struct  pvo_head moea_pvo_kunmanaged =
    LIST_HEAD_INITIALIZER(moea_pvo_kunmanaged); /* list of unmanaged pages */

static struct rwlock_padalign pvh_global_lock;

uma_zone_t      moea_upvo_zone; /* zone for pvo entries for unmanaged pages */
uma_zone_t      moea_mpvo_zone; /* zone for pvo entries for managed pages */

#define BPVO_POOL_SIZE  32768
static struct   pvo_entry *moea_bpvo_pool;
static int      moea_bpvo_pool_index = 0;

#define VSID_NBPW       (sizeof(u_int32_t) * 8)
static u_int    moea_vsid_bitmap[NPMAPS / VSID_NBPW];

static boolean_t moea_initialized = FALSE;

/*
 * Statistics.
 */
u_int   moea_pte_valid = 0;
u_int   moea_pte_overflow = 0;
u_int   moea_pte_replacements = 0;
u_int   moea_pvo_entries = 0;
u_int   moea_pvo_enter_calls = 0;
u_int   moea_pvo_remove_calls = 0;
u_int   moea_pte_spills = 0;
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_valid, CTLFLAG_RD, &moea_pte_valid,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_overflow, CTLFLAG_RD,
    &moea_pte_overflow, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_replacements, CTLFLAG_RD,
    &moea_pte_replacements, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_entries, CTLFLAG_RD, &moea_pvo_entries,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_enter_calls, CTLFLAG_RD,
    &moea_pvo_enter_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_remove_calls, CTLFLAG_RD,
    &moea_pvo_remove_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_spills, CTLFLAG_RD,
    &moea_pte_spills, 0, "");

/*
 * Allocate physical memory for use in moea_bootstrap.
 */
static vm_offset_t      moea_bootstrap_alloc(vm_size_t, u_int);

/*
 * PTE calls.
 */
static int              moea_pte_insert(u_int, struct pte *);

/*
 * PVO calls.
 */
static int      moea_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
                    vm_offset_t, vm_offset_t, u_int, int);
static void     moea_pvo_remove(struct pvo_entry *, int);
static struct   pvo_entry *moea_pvo_find_va(pmap_t, vm_offset_t, int *);
static struct   pte *moea_pvo_to_pte(const struct pvo_entry *, int);

/*
 * Utility routines.
 */
static void             moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
                            vm_prot_t, boolean_t);
static void             moea_syncicache(vm_offset_t, vm_size_t);
static boolean_t        moea_query_bit(vm_page_t, int);
static u_int            moea_clear_bit(vm_page_t, int);
static void             moea_kremove(mmu_t, vm_offset_t);
int             moea_pte_spill(vm_offset_t);

/*
 * Kernel MMU interface
 */
void moea_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
void moea_clear_modify(mmu_t, vm_page_t);
void moea_clear_reference(mmu_t, vm_page_t);
void moea_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
void moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
    vm_prot_t);
void moea_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
vm_paddr_t moea_extract(mmu_t, pmap_t, vm_offset_t);
vm_page_t moea_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
void moea_init(mmu_t);
boolean_t moea_is_modified(mmu_t, vm_page_t);
boolean_t moea_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
boolean_t moea_is_referenced(mmu_t, vm_page_t);
int moea_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
boolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t);
int moea_page_wired_mappings(mmu_t, vm_page_t);
void moea_pinit(mmu_t, pmap_t);
void moea_pinit0(mmu_t, pmap_t);
void moea_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
void moea_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
void moea_qremove(mmu_t, vm_offset_t, int);
void moea_release(mmu_t, pmap_t);
void moea_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea_remove_all(mmu_t, vm_page_t);
void moea_remove_write(mmu_t, vm_page_t);
void moea_zero_page(mmu_t, vm_page_t);
void moea_zero_page_area(mmu_t, vm_page_t, int, int);
void moea_zero_page_idle(mmu_t, vm_page_t);
void moea_activate(mmu_t, struct thread *);
void moea_deactivate(mmu_t, struct thread *);
void moea_cpu_bootstrap(mmu_t, int);
void moea_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
void *moea_mapdev(mmu_t, vm_paddr_t, vm_size_t);
void *moea_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t);
void moea_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_paddr_t moea_kextract(mmu_t, vm_offset_t);
void moea_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t);
void moea_kenter(mmu_t, vm_offset_t, vm_paddr_t);
void moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma);
boolean_t moea_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
static void moea_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);

static mmu_method_t moea_methods[] = {
        MMUMETHOD(mmu_change_wiring,    moea_change_wiring),
        MMUMETHOD(mmu_clear_modify,     moea_clear_modify),
        MMUMETHOD(mmu_clear_reference,  moea_clear_reference),
        MMUMETHOD(mmu_copy_page,        moea_copy_page),
        MMUMETHOD(mmu_enter,            moea_enter),
        MMUMETHOD(mmu_enter_object,     moea_enter_object),
        MMUMETHOD(mmu_enter_quick,      moea_enter_quick),
        MMUMETHOD(mmu_extract,          moea_extract),
        MMUMETHOD(mmu_extract_and_hold, moea_extract_and_hold),
        MMUMETHOD(mmu_init,             moea_init),
        MMUMETHOD(mmu_is_modified,      moea_is_modified),
        MMUMETHOD(mmu_is_prefaultable,  moea_is_prefaultable),
        MMUMETHOD(mmu_is_referenced,    moea_is_referenced),
        MMUMETHOD(mmu_ts_referenced,    moea_ts_referenced),
        MMUMETHOD(mmu_map,              moea_map),
        MMUMETHOD(mmu_page_exists_quick,moea_page_exists_quick),
        MMUMETHOD(mmu_page_wired_mappings,moea_page_wired_mappings),
        MMUMETHOD(mmu_pinit,            moea_pinit),
        MMUMETHOD(mmu_pinit0,           moea_pinit0),
        MMUMETHOD(mmu_protect,          moea_protect),
        MMUMETHOD(mmu_qenter,           moea_qenter),
        MMUMETHOD(mmu_qremove,          moea_qremove),
        MMUMETHOD(mmu_release,          moea_release),
        MMUMETHOD(mmu_remove,           moea_remove),
        MMUMETHOD(mmu_remove_all,       moea_remove_all),
        MMUMETHOD(mmu_remove_write,     moea_remove_write),
        MMUMETHOD(mmu_sync_icache,      moea_sync_icache),
        MMUMETHOD(mmu_zero_page,        moea_zero_page),
        MMUMETHOD(mmu_zero_page_area,   moea_zero_page_area),
        MMUMETHOD(mmu_zero_page_idle,   moea_zero_page_idle),
        MMUMETHOD(mmu_activate,         moea_activate),
        MMUMETHOD(mmu_deactivate,       moea_deactivate),
        MMUMETHOD(mmu_page_set_memattr, moea_page_set_memattr),

        /* Internal interfaces */
        MMUMETHOD(mmu_bootstrap,        moea_bootstrap),
        MMUMETHOD(mmu_cpu_bootstrap,    moea_cpu_bootstrap),
        MMUMETHOD(mmu_mapdev_attr,      moea_mapdev_attr),
        MMUMETHOD(mmu_mapdev,           moea_mapdev),
        MMUMETHOD(mmu_unmapdev,         moea_unmapdev),
        MMUMETHOD(mmu_kextract,         moea_kextract),
        MMUMETHOD(mmu_kenter,           moea_kenter),
        MMUMETHOD(mmu_kenter_attr,      moea_kenter_attr),
        MMUMETHOD(mmu_dev_direct_mapped,moea_dev_direct_mapped),

        { 0, 0 }
};

MMU_DEF(oea_mmu, MMU_TYPE_OEA, moea_methods, 0);
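
/*
 * Rough sketch of how the table above is consumed (assumed from the
 * mmu_if/kobj convention used by the powerpc pmap rather than spelled out
 * here): MMU_DEF() registers this method table under MMU_TYPE_OEA, and the
 * machine-independent pmap_*() entry points dispatch through it, so a
 * pmap_enter() call, for example, lands in moea_enter() once this
 * implementation has been installed at boot.
 */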

static __inline uint32_t
moea_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
{
        uint32_t pte_lo;
        int i;

        if (ma != VM_MEMATTR_DEFAULT) {
                switch (ma) {
                case VM_MEMATTR_UNCACHEABLE:
                        return (PTE_I | PTE_G);
                case VM_MEMATTR_WRITE_COMBINING:
                case VM_MEMATTR_WRITE_BACK:
                case VM_MEMATTR_PREFETCHABLE:
                        return (PTE_I);
                case VM_MEMATTR_WRITE_THROUGH:
                        return (PTE_W | PTE_M);
                }
        }

        /*
         * Assume the page is cache inhibited and access is guarded unless
         * it's in our available memory array.
         */
        pte_lo = PTE_I | PTE_G;
        for (i = 0; i < pregions_sz; i++) {
                if ((pa >= pregions[i].mr_start) &&
                    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
                        pte_lo = PTE_M;
                        break;
                }
        }

        return pte_lo;
}
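
/*
 * Reading aid (summarizing the standard PowerPC WIMG storage attributes, not
 * anything specific to this file): PTE_W selects write-through, PTE_I
 * inhibits caching, PTE_M requires memory coherence, and PTE_G marks the
 * page guarded against speculative access.  The fallback above therefore
 * maps unknown physical addresses uncached and guarded (safe for device
 * space), while addresses inside a known RAM region get plain coherent,
 * cacheable mappings (PTE_M only).
 */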

static void
tlbie(vm_offset_t va)
{

        mtx_lock_spin(&tlbie_mtx);
        __asm __volatile("ptesync");
        __asm __volatile("tlbie %0" :: "r"(va));
        __asm __volatile("eieio; tlbsync; ptesync");
        mtx_unlock_spin(&tlbie_mtx);
}

static void
tlbia(void)
{
        vm_offset_t va;

        for (va = 0; va < 0x00040000; va += 0x00001000) {
                __asm __volatile("tlbie %0" :: "r"(va));
                powerpc_sync();
        }
        __asm __volatile("tlbsync");
        powerpc_sync();
}

static __inline int
va_to_sr(u_int *sr, vm_offset_t va)
{
        return (sr[(uintptr_t)va >> ADDR_SR_SHFT]);
}

static __inline u_int
va_to_pteg(u_int sr, vm_offset_t addr)
{
        u_int hash;

        hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >>
            ADDR_PIDX_SHFT);
        return (hash & moea_pteg_mask);
}
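
/*
 * Sketch of the hashed page table lookup this feeds (per the classic 32-bit
 * PowerPC segmented MMU; the secondary-hash detail is the architectural
 * convention rather than something shown in this function): the VSID bits of
 * the segment register are XORed with the page index of the effective
 * address, and the low bits of that hash select a primary PTE group:
 *
 *      ptegidx   = va_to_pteg(sr, va);
 *      secondary = ptegidx ^ moea_pteg_mask;    (searched with PTE_HID set)
 *
 * Each group holds eight PTEs, so a lookup scans at most sixteen slots.
 */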

static __inline struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

        return (&m->md.mdpg_pvoh);
}

static __inline void
moea_attr_clear(vm_page_t m, int ptebit)
{

        rw_assert(&pvh_global_lock, RA_WLOCKED);
        m->md.mdpg_attrs &= ~ptebit;
}

static __inline int
moea_attr_fetch(vm_page_t m)
{

        return (m->md.mdpg_attrs);
}

static __inline void
moea_attr_save(vm_page_t m, int ptebit)
{

        rw_assert(&pvh_global_lock, RA_WLOCKED);
        m->md.mdpg_attrs |= ptebit;
}

static __inline int
moea_pte_compare(const struct pte *pt, const struct pte *pvo_pt)
{
        if (pt->pte_hi == pvo_pt->pte_hi)
                return (1);

        return (0);
}

static __inline int
moea_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which)
{
        return (pt->pte_hi & ~PTE_VALID) ==
            (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
            ((va >> ADDR_API_SHFT) & PTE_API) | which);
}

static __inline void
moea_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo)
{

        mtx_assert(&moea_table_mutex, MA_OWNED);

        /*
         * Construct a PTE.  Default to IMB initially.  Valid bit only gets
         * set when the real pte is set in memory.
         *
         * Note: Don't set the valid bit for correct operation of tlb update.
         */
        pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
            (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API);
        pt->pte_lo = pte_lo;
}

static __inline void
moea_pte_synch(struct pte *pt, struct pte *pvo_pt)
{

        mtx_assert(&moea_table_mutex, MA_OWNED);
        pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG);
}

static __inline void
moea_pte_clear(struct pte *pt, vm_offset_t va, int ptebit)
{

        mtx_assert(&moea_table_mutex, MA_OWNED);

        /*
         * As shown in Section 7.6.3.2.3
         */
        pt->pte_lo &= ~ptebit;
        tlbie(va);
}

static __inline void
moea_pte_set(struct pte *pt, struct pte *pvo_pt)
{

        mtx_assert(&moea_table_mutex, MA_OWNED);
        pvo_pt->pte_hi |= PTE_VALID;

        /*
         * Update the PTE as defined in section 7.6.3.1.
         * Note that the REF/CHG bits are from pvo_pt and thus should have
         * been saved so this routine can restore them (if desired).
         */
        pt->pte_lo = pvo_pt->pte_lo;
        powerpc_sync();
        pt->pte_hi = pvo_pt->pte_hi;
        powerpc_sync();
        moea_pte_valid++;
}
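
/*
 * Why the two powerpc_sync() calls above matter (a restatement of the
 * ordering rule the code already follows): the hardware table-search engine
 * may examine a PTE at any time, so the low word is installed first and a
 * sync issued before the high word (which carries PTE_VALID) becomes
 * visible; a second sync orders the store against later accesses.
 * moea_pte_unset() below runs the sequence in reverse: clear PTE_VALID,
 * invalidate the TLB entry with tlbie(), and only then harvest the REF/CHG
 * bits.
 */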

static __inline void
moea_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

        mtx_assert(&moea_table_mutex, MA_OWNED);
        pvo_pt->pte_hi &= ~PTE_VALID;

        /*
         * Force the reg & chg bits back into the PTEs.
         */
        powerpc_sync();

        /*
         * Invalidate the pte.
         */
        pt->pte_hi &= ~PTE_VALID;

        tlbie(va);

        /*
         * Save the reg & chg bits.
         */
        moea_pte_synch(pt, pvo_pt);
        moea_pte_valid--;
}

static __inline void
moea_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

        /*
         * Invalidate the PTE
         */
        moea_pte_unset(pt, pvo_pt, va);
        moea_pte_set(pt, pvo_pt);
}

/*
 * Quick sort callout for comparing memory regions.
 */
static int      om_cmp(const void *a, const void *b);

static int
om_cmp(const void *a, const void *b)
{
        const struct    ofw_map *mapa;
        const struct    ofw_map *mapb;

        mapa = a;
        mapb = b;
        if (mapa->om_pa < mapb->om_pa)
                return (-1);
        else if (mapa->om_pa > mapb->om_pa)
                return (1);
        else
                return (0);
}

void
moea_cpu_bootstrap(mmu_t mmup, int ap)
{
        u_int sdr;
        int i;

        if (ap) {
                powerpc_sync();
                __asm __volatile("mtdbatu 0,%0" :: "r"(battable[0].batu));
                __asm __volatile("mtdbatl 0,%0" :: "r"(battable[0].batl));
                isync();
                __asm __volatile("mtibatu 0,%0" :: "r"(battable[0].batu));
                __asm __volatile("mtibatl 0,%0" :: "r"(battable[0].batl));
                isync();
        }

        __asm __volatile("mtdbatu 1,%0" :: "r"(battable[8].batu));
        __asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl));
        isync();

        __asm __volatile("mtibatu 1,%0" :: "r"(0));
        __asm __volatile("mtdbatu 2,%0" :: "r"(0));
        __asm __volatile("mtibatu 2,%0" :: "r"(0));
        __asm __volatile("mtdbatu 3,%0" :: "r"(0));
        __asm __volatile("mtibatu 3,%0" :: "r"(0));
        isync();

        for (i = 0; i < 16; i++)
                mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
        powerpc_sync();

        sdr = (u_int)moea_pteg_table | (moea_pteg_mask >> 10);
        __asm __volatile("mtsdr1 %0" :: "r"(sdr));
        isync();

        tlbia();
}
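
/*
 * Note on the SDR1 value computed above (a paraphrase of the usual 32-bit
 * OEA encoding; the exact bit layout lives in the architecture books, not in
 * this file): the upper bits hold the physical base of the PTEG table and
 * the low bits hold HTABMASK, which says how many upper hash bits take part
 * in indexing.  Because each PTEG is 64 bytes and moea_bootstrap() sizes and
 * aligns the table to a power of two, both fields can share one word; for
 * example, with 0x2000 PTEGs (a 512 KB table), moea_pteg_mask >> 10 yields
 * an HTABMASK of 7.
 */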

void
moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
        ihandle_t mmui;
        phandle_t chosen, mmu;
        int sz;
        int i, j;
        vm_size_t size, physsz, hwphyssz;
        vm_offset_t pa, va, off;
        void *dpcpu;
        register_t msr;

        /*
         * Set up BAT0 to map the lowest 256 MB area
         */
        battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
        battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);

        /*
         * Map PCI memory space.
         */
        battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
        battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);

        battable[0x9].batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
        battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);

        battable[0xa].batl = BATL(0xa0000000, BAT_I|BAT_G, BAT_PP_RW);
        battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs);

        battable[0xb].batl = BATL(0xb0000000, BAT_I|BAT_G, BAT_PP_RW);
        battable[0xb].batu = BATU(0xb0000000, BAT_BL_256M, BAT_Vs);

        /*
         * Map obio devices.
         */
        battable[0xf].batl = BATL(0xf0000000, BAT_I|BAT_G, BAT_PP_RW);
        battable[0xf].batu = BATU(0xf0000000, BAT_BL_256M, BAT_Vs);

        /*
         * Use an IBAT and a DBAT to map the bottom segment of memory
         * where we are. Turn off instruction relocation temporarily
         * to prevent faults while reprogramming the IBAT.
         */
        msr = mfmsr();
        mtmsr(msr & ~PSL_IR);
        __asm (".balign 32; \n"
               "mtibatu 0,%0; mtibatl 0,%1; isync; \n"
               "mtdbatu 0,%0; mtdbatl 0,%1; isync"
            :: "r"(battable[0].batu), "r"(battable[0].batl));
        mtmsr(msr);

        /* map pci space */
        __asm __volatile("mtdbatu 1,%0" :: "r"(battable[8].batu));
        __asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl));
        isync();

        /* set global direct map flag */
        hw_direct_map = 1;

        mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
        CTR0(KTR_PMAP, "moea_bootstrap: physical memory");

        for (i = 0; i < pregions_sz; i++) {
                vm_offset_t pa;
                vm_offset_t end;

                CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)",
                    pregions[i].mr_start,
                    pregions[i].mr_start + pregions[i].mr_size,
                    pregions[i].mr_size);
                /*
                 * Install entries into the BAT table to allow all
                 * of physmem to be covered by on-demand BAT entries.
                 * The loop will sometimes set the same battable element
                 * twice, but that's fine since they won't be used for
                 * a while yet.
                 */
                pa = pregions[i].mr_start & 0xf0000000;
                end = pregions[i].mr_start + pregions[i].mr_size;
                do {
                        u_int n = pa >> ADDR_SR_SHFT;

                        battable[n].batl = BATL(pa, BAT_M, BAT_PP_RW);
                        battable[n].batu = BATU(pa, BAT_BL_256M, BAT_Vs);
                        pa += SEGMENT_LENGTH;
                } while (pa < end);
        }

        if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
                panic("moea_bootstrap: phys_avail too small");

        phys_avail_count = 0;
        physsz = 0;
        hwphyssz = 0;
        TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
        for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
                CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
                    regions[i].mr_start + regions[i].mr_size,
                    regions[i].mr_size);
                if (hwphyssz != 0 &&
                    (physsz + regions[i].mr_size) >= hwphyssz) {
                        if (physsz < hwphyssz) {
                                phys_avail[j] = regions[i].mr_start;
                                phys_avail[j + 1] = regions[i].mr_start +
                                    hwphyssz - physsz;
                                physsz = hwphyssz;
                                phys_avail_count++;
                        }
                        break;
                }
                phys_avail[j] = regions[i].mr_start;
                phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
                phys_avail_count++;
                physsz += regions[i].mr_size;
        }

        /* Check for overlap with the kernel and exception vectors */
        for (j = 0; j < 2*phys_avail_count; j+=2) {
                if (phys_avail[j] < EXC_LAST)
                        phys_avail[j] += EXC_LAST;

                if (kernelstart >= phys_avail[j] &&
                    kernelstart < phys_avail[j+1]) {
                        if (kernelend < phys_avail[j+1]) {
                                phys_avail[2*phys_avail_count] =
                                    (kernelend & ~PAGE_MASK) + PAGE_SIZE;
                                phys_avail[2*phys_avail_count + 1] =
                                    phys_avail[j+1];
                                phys_avail_count++;
                        }

                        phys_avail[j+1] = kernelstart & ~PAGE_MASK;
                }

                if (kernelend >= phys_avail[j] &&
                    kernelend < phys_avail[j+1]) {
                        if (kernelstart > phys_avail[j]) {
                                phys_avail[2*phys_avail_count] = phys_avail[j];
                                phys_avail[2*phys_avail_count + 1] =
                                    kernelstart & ~PAGE_MASK;
                                phys_avail_count++;
                        }

                        phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE;
                }
        }

        physmem = btoc(physsz);

        /*
         * Allocate PTEG table.
         */
#ifdef PTEGCOUNT
        moea_pteg_count = PTEGCOUNT;
#else
        moea_pteg_count = 0x1000;

        while (moea_pteg_count < physmem)
                moea_pteg_count <<= 1;

        moea_pteg_count >>= 1;
#endif /* PTEGCOUNT */

        size = moea_pteg_count * sizeof(struct pteg);
        CTR2(KTR_PMAP, "moea_bootstrap: %d PTEGs, %d bytes", moea_pteg_count,
            size);
        moea_pteg_table = (struct pteg *)moea_bootstrap_alloc(size, size);
        CTR1(KTR_PMAP, "moea_bootstrap: PTEG table at %p", moea_pteg_table);
        bzero((void *)moea_pteg_table, moea_pteg_count * sizeof(struct pteg));
        moea_pteg_mask = moea_pteg_count - 1;

        /*
         * Allocate pv/overflow lists.
         */
        size = sizeof(struct pvo_head) * moea_pteg_count;
        moea_pvo_table = (struct pvo_head *)moea_bootstrap_alloc(size,
            PAGE_SIZE);
        CTR1(KTR_PMAP, "moea_bootstrap: PVO table at %p", moea_pvo_table);
        for (i = 0; i < moea_pteg_count; i++)
                LIST_INIT(&moea_pvo_table[i]);

        /*
         * Initialize the lock that synchronizes access to the pteg and pvo
         * tables.
         */
        mtx_init(&moea_table_mutex, "pmap table", NULL, MTX_DEF |
            MTX_RECURSE);
        mtx_init(&moea_vsid_mutex, "VSID table", NULL, MTX_DEF);

        mtx_init(&tlbie_mtx, "tlbie", NULL, MTX_SPIN);

        /*
         * Initialise the unmanaged pvo pool.
         */
        moea_bpvo_pool = (struct pvo_entry *)moea_bootstrap_alloc(
                BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
        moea_bpvo_pool_index = 0;

        /*
         * Make sure kernel vsid is allocated as well as VSID 0.
         */
        moea_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW]
                |= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
        moea_vsid_bitmap[0] |= 1;

        /*
         * Initialize the kernel pmap (which is statically allocated).
         */
        PMAP_LOCK_INIT(kernel_pmap);
        for (i = 0; i < 16; i++)
                kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
        CPU_FILL(&kernel_pmap->pm_active);
        RB_INIT(&kernel_pmap->pmap_pvo);

        /*
         * Initialize the global pv list lock.
         */
        rw_init(&pvh_global_lock, "pmap pv global");

        /*
         * Set up the Open Firmware mappings
         */
        chosen = OF_finddevice("/chosen");
        if (chosen != -1 && OF_getprop(chosen, "mmu", &mmui, 4) != -1 &&
            (mmu = OF_instance_to_package(mmui)) != -1 &&
            (sz = OF_getproplen(mmu, "translations")) != -1) {
                translations = NULL;
                for (i = 0; phys_avail[i] != 0; i += 2) {
                        if (phys_avail[i + 1] >= sz) {
                                translations = (struct ofw_map *)phys_avail[i];
                                break;
                        }
                }
                if (translations == NULL)
                        panic("moea_bootstrap: no space to copy translations");
                bzero(translations, sz);
                if (OF_getprop(mmu, "translations", translations, sz) == -1)
                        panic("moea_bootstrap: can't get ofw translations");
                CTR0(KTR_PMAP, "moea_bootstrap: translations");
                sz /= sizeof(*translations);
                qsort(translations, sz, sizeof (*translations), om_cmp);
                for (i = 0; i < sz; i++) {
                        CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
                            translations[i].om_pa, translations[i].om_va,
                            translations[i].om_len);

                        /*
                         * If the mapping is 1:1, let the RAM and device
                         * on-demand BAT tables take care of the translation.
                         */
                        if (translations[i].om_va == translations[i].om_pa)
                                continue;

                        /* Enter the pages */
                        for (off = 0; off < translations[i].om_len;
                            off += PAGE_SIZE)
                                moea_kenter(mmup, translations[i].om_va + off,
                                    translations[i].om_pa + off);
                }
        }

        /*
         * Calculate the last available physical address.
         */
        for (i = 0; phys_avail[i + 2] != 0; i += 2)
                ;
        Maxmem = powerpc_btop(phys_avail[i + 1]);

        moea_cpu_bootstrap(mmup,0);

        pmap_bootstrapped++;

        /*
         * Set the start and end of kva.
         */
        virtual_avail = VM_MIN_KERNEL_ADDRESS;
        virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;

        /*
         * Allocate a kernel stack with a guard page for thread0 and map it
         * into the kernel page map.
         */
        pa = moea_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
        va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
        virtual_avail = va + KSTACK_PAGES * PAGE_SIZE;
        CTR2(KTR_PMAP, "moea_bootstrap: kstack0 at %#x (%#x)", pa, va);
        thread0.td_kstack = va;
        thread0.td_kstack_pages = KSTACK_PAGES;
        for (i = 0; i < KSTACK_PAGES; i++) {
                moea_kenter(mmup, va, pa);
                pa += PAGE_SIZE;
                va += PAGE_SIZE;
        }

        /*
         * Allocate virtual address space for the message buffer.
         */
        pa = msgbuf_phys = moea_bootstrap_alloc(msgbufsize, PAGE_SIZE);
        msgbufp = (struct msgbuf *)virtual_avail;
        va = virtual_avail;
        virtual_avail += round_page(msgbufsize);
        while (va < virtual_avail) {
                moea_kenter(mmup, va, pa);
                pa += PAGE_SIZE;
                va += PAGE_SIZE;
        }

        /*
         * Allocate virtual address space for the dynamic percpu area.
         */
        pa = moea_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
        dpcpu = (void *)virtual_avail;
        va = virtual_avail;
        virtual_avail += DPCPU_SIZE;
        while (va < virtual_avail) {
                moea_kenter(mmup, va, pa);
                pa += PAGE_SIZE;
                va += PAGE_SIZE;
        }
        dpcpu_init(dpcpu, 0);
}

/*
 * Activate a user pmap.  The pmap must be activated before its address
 * space can be accessed in any way.
 */
void
moea_activate(mmu_t mmu, struct thread *td)
{
        pmap_t  pm, pmr;

        /*
         * Load all the data we need up front to encourage the compiler to
         * not issue any loads while we have interrupts disabled below.
         */
        pm = &td->td_proc->p_vmspace->vm_pmap;
        pmr = pm->pmap_phys;

        CPU_SET(PCPU_GET(cpuid), &pm->pm_active);
        PCPU_SET(curpmap, pmr);
}

void
moea_deactivate(mmu_t mmu, struct thread *td)
{
        pmap_t  pm;

        pm = &td->td_proc->p_vmspace->vm_pmap;
        CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
        PCPU_SET(curpmap, NULL);
}

void
moea_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired)
{
        struct  pvo_entry *pvo;

        PMAP_LOCK(pm);
        pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);

        if (pvo != NULL) {
                if (wired) {
                        if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
                                pm->pm_stats.wired_count++;
                        pvo->pvo_vaddr |= PVO_WIRED;
                } else {
                        if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
                                pm->pm_stats.wired_count--;
                        pvo->pvo_vaddr &= ~PVO_WIRED;
                }
        }
        PMAP_UNLOCK(pm);
}

void
moea_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
{
        vm_offset_t     dst;
        vm_offset_t     src;

        dst = VM_PAGE_TO_PHYS(mdst);
        src = VM_PAGE_TO_PHYS(msrc);

        bcopy((void *)src, (void *)dst, PAGE_SIZE);
}

/*
 * Zero a page of physical memory by temporarily mapping it into the tlb.
 */
void
moea_zero_page(mmu_t mmu, vm_page_t m)
{
        vm_offset_t pa = VM_PAGE_TO_PHYS(m);
        void *va = (void *)pa;

        bzero(va, PAGE_SIZE);
}

void
moea_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
{
        vm_offset_t pa = VM_PAGE_TO_PHYS(m);
        void *va = (void *)(pa + off);

        bzero(va, size);
}

void
moea_zero_page_idle(mmu_t mmu, vm_page_t m)
{
        vm_offset_t pa = VM_PAGE_TO_PHYS(m);
        void *va = (void *)pa;

        bzero(va, PAGE_SIZE);
}
|
|
|
|
|
2001-06-10 02:39:37 +00:00
|
|
|
/*
|
2002-02-14 01:39:11 +00:00
|
|
|
* Map the given physical page at the specified virtual address in the
|
|
|
|
* target pmap with the protection requested. If specified the page
|
|
|
|
* will be wired down.
|
2001-06-10 02:39:37 +00:00
|
|
|
*/
|
|
|
|
void
|
2005-11-08 06:49:45 +00:00
|
|
|
moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
|
2002-02-14 01:39:11 +00:00
|
|
|
boolean_t wired)
|
2001-06-10 02:39:37 +00:00
|
|
|
{
|
2006-06-05 20:35:27 +00:00
|
|
|
|
2012-07-06 02:18:49 +00:00
|
|
|
rw_wlock(&pvh_global_lock);
|
2006-06-05 20:35:27 +00:00
|
|
|
PMAP_LOCK(pmap);
|
2006-06-06 02:02:10 +00:00
|
|
|
moea_enter_locked(pmap, va, m, prot, wired);
|
2012-07-06 02:18:49 +00:00
|
|
|
rw_wunlock(&pvh_global_lock);
|
2006-06-05 20:35:27 +00:00
|
|
|
PMAP_UNLOCK(pmap);
|
|
|
|
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 *
 * The page queues and pmap must be locked.
 */
static void
moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    boolean_t wired)
{
	struct		pvo_head *pvo_head;
	uma_zone_t	zone;
	vm_page_t	pg;
	u_int		pte_lo, pvo_flags;
	int		error;

	if (!moea_initialized) {
		pvo_head = &moea_pvo_kunmanaged;
		zone = moea_upvo_zone;
		pvo_flags = 0;
		pg = NULL;
	} else {
		pvo_head = vm_page_to_pvoh(m);
		pg = m;
		zone = moea_mpvo_zone;
		pvo_flags = PVO_MANAGED;
	}
	if (pmap_bootstrapped)
		rw_assert(&pvh_global_lock, RA_WLOCKED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
	    VM_OBJECT_LOCKED(m->object),
	    ("moea_enter_locked: page %p is not busy", m));

	/* XXX change the pvo head for fake pages */
	if ((m->oflags & VPO_UNMANAGED) != 0) {
		pvo_flags &= ~PVO_MANAGED;
		pvo_head = &moea_pvo_kunmanaged;
		zone = moea_upvo_zone;
	}

	pte_lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));

	if (prot & VM_PROT_WRITE) {
		pte_lo |= PTE_BW;
		if (pmap_bootstrapped &&
		    (m->oflags & VPO_UNMANAGED) == 0)
			vm_page_aflag_set(m, PGA_WRITEABLE);
	} else
		pte_lo |= PTE_BR;

	if (prot & VM_PROT_EXECUTE)
		pvo_flags |= PVO_EXECUTABLE;

	if (wired)
		pvo_flags |= PVO_WIRED;

	error = moea_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
	    pte_lo, pvo_flags);

	/*
	 * Flush the real page from the instruction cache.  This has to be
	 * done for all user mappings to prevent information leakage via the
	 * instruction cache.  moea_pvo_enter() returns ENOENT for the first
	 * mapping for a page.
	 */
	if (pmap != kernel_pmap && error == ENOENT &&
	    (pte_lo & (PTE_I | PTE_G)) == 0)
		moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
}
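
/*
 * For reference, the PTE_PP protection encodings used above carry the usual
 * 32-bit PowerPC (OEA) meanings: PTE_BR grants read-only access and PTE_BW
 * grants read/write access.  moea_enter_locked() therefore reduces a VM
 * protection to one of two cases:
 *
 *	prot & VM_PROT_WRITE	-> pte_lo |= PTE_BW	(read/write)
 *	otherwise		-> pte_lo |= PTE_BR	(read-only)
 *
 * Execute permission is not expressible in the PTE itself and is tracked
 * only via PVO_EXECUTABLE plus the icache synchronization done above.
 */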

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start.  This page is
 * mapped at the given virtual address start.  Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object.  The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end.  Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
void
moea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m;
	vm_pindex_t diff, psize;

	psize = atop(end - start);
	m = m_start;
	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pm);
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		moea_enter_locked(pm, start + ptoa(diff), m, prot &
		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
		m = TAILQ_NEXT(m, listq);
	}
	rw_wunlock(&pvh_global_lock);
	PMAP_UNLOCK(pm);
}
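
/*
 * A small worked example of the offset arithmetic above (illustrative
 * values only): with start = 0x10000000, psize = 3 and resident pages at
 * pindex offsets 0 and 2 from m_start, the loop maps the first page at
 * 0x10000000 and the second at 0x10000000 + 2 * PAGE_SIZE.  The missing
 * middle page simply does not appear on the object's resident list, so it
 * is skipped, and the walk stops once diff reaches psize.
 */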

void
moea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
    vm_prot_t prot)
{

	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pm);
	moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
	    FALSE);
	rw_wunlock(&pvh_global_lock);
	PMAP_UNLOCK(pm);
}

vm_paddr_t
moea_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
{
	struct	pvo_entry *pvo;
	vm_paddr_t pa;

	PMAP_LOCK(pm);
	pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
	if (pvo == NULL)
		pa = 0;
	else
		pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
	PMAP_UNLOCK(pm);
	return (pa);
}
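
/*
 * Note that moea_extract() returns 0 both when no mapping exists and when
 * the address genuinely maps physical page 0; a caller that needs to tell
 * those cases apart would have to use moea_extract_and_hold() below, which
 * reports the result as a vm_page pointer instead.
 */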

/*
 * Atomically extract and hold the physical page with the given
 * pmap and virtual address pair if that mapping permits the given
 * protection.
 */
vm_page_t
moea_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	struct	pvo_entry *pvo;
	vm_page_t m;
	vm_paddr_t pa;

	m = NULL;
	pa = 0;
	PMAP_LOCK(pmap);
retry:
	pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL);
	if (pvo != NULL && (pvo->pvo_pte.pte.pte_hi & PTE_VALID) &&
	    ((pvo->pvo_pte.pte.pte_lo & PTE_PP) == PTE_RW ||
	    (prot & VM_PROT_WRITE) == 0)) {
		if (vm_page_pa_tryrelock(pmap, pvo->pvo_pte.pte.pte_lo & PTE_RPGN, &pa))
			goto retry;
		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte.pte_lo & PTE_RPGN);
		vm_page_hold(m);
	}
	PA_UNLOCK_COND(pa);
	PMAP_UNLOCK(pmap);
	return (m);
}
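
/*
 * The retry label above implements the usual vm_page_pa_tryrelock() dance:
 * if the page lock for the candidate physical address cannot be taken
 * without dropping and reacquiring the pmap lock, the lookup is repeated,
 * since the mapping may have changed while the pmap lock was released.
 */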

void
moea_init(mmu_t mmu)
{

	moea_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	moea_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	moea_initialized = TRUE;
}
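
/*
 * Both pvo zones are created with UMA_ZONE_VM and UMA_ZONE_NOFREE,
 * presumably because pvo entries must be allocatable from within the VM
 * itself and, once linked into the overflow and vm_page lists, must never
 * have their backing memory released or relocated.
 */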

boolean_t
moea_is_referenced(mmu_t mmu, vm_page_t m)
{
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea_is_referenced: page %p is not managed", m));
	rw_wlock(&pvh_global_lock);
	rv = moea_query_bit(m, PTE_REF);
	rw_wunlock(&pvh_global_lock);
	return (rv);
}

boolean_t
moea_is_modified(mmu_t mmu, vm_page_t m)
{
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea_is_modified: page %p is not managed", m));

	/*
	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
	 * is clear, no PTEs can have PTE_CHG set.
	 */
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((m->oflags & VPO_BUSY) == 0 &&
	    (m->aflags & PGA_WRITEABLE) == 0)
		return (FALSE);
	rw_wlock(&pvh_global_lock);
	rv = moea_query_bit(m, PTE_CHG);
	rw_wunlock(&pvh_global_lock);
	return (rv);
}

boolean_t
moea_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	struct pvo_entry *pvo;
	boolean_t rv;

	PMAP_LOCK(pmap);
	pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL);
	rv = pvo == NULL || (pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0;
	PMAP_UNLOCK(pmap);
	return (rv);
}

void
moea_clear_reference(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea_clear_reference: page %p is not managed", m));
	rw_wlock(&pvh_global_lock);
	moea_clear_bit(m, PTE_REF);
	rw_wunlock(&pvh_global_lock);
}

void
moea_clear_modify(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea_clear_modify: page %p is not managed", m));
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	KASSERT((m->oflags & VPO_BUSY) == 0,
	    ("moea_clear_modify: page %p is busy", m));

	/*
	 * If the page is not PGA_WRITEABLE, then no PTEs can have PTE_CHG
	 * set.  If the object containing the page is locked and the page is
	 * not VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
	 */
	if ((m->aflags & PGA_WRITEABLE) == 0)
		return;
	rw_wlock(&pvh_global_lock);
	moea_clear_bit(m, PTE_CHG);
	rw_wunlock(&pvh_global_lock);
}

/*
 * Clear the write and modified bits in each of the given page's mappings.
 */
void
moea_remove_write(mmu_t mmu, vm_page_t m)
{
	struct	pvo_entry *pvo;
	struct	pte *pt;
	pmap_t	pmap;
	u_int	lo;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea_remove_write: page %p is not managed", m));

	/*
	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
	 * another thread while the object is locked.  Thus, if PGA_WRITEABLE
	 * is clear, no page table entries need updating.
	 */
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((m->oflags & VPO_BUSY) == 0 &&
	    (m->aflags & PGA_WRITEABLE) == 0)
		return;
	rw_wlock(&pvh_global_lock);
	lo = moea_attr_fetch(m);
	powerpc_sync();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		if ((pvo->pvo_pte.pte.pte_lo & PTE_PP) != PTE_BR) {
			pt = moea_pvo_to_pte(pvo, -1);
			pvo->pvo_pte.pte.pte_lo &= ~PTE_PP;
			pvo->pvo_pte.pte.pte_lo |= PTE_BR;
			if (pt != NULL) {
				moea_pte_synch(pt, &pvo->pvo_pte.pte);
				lo |= pvo->pvo_pte.pte.pte_lo;
				pvo->pvo_pte.pte.pte_lo &= ~PTE_CHG;
				moea_pte_change(pt, &pvo->pvo_pte.pte,
				    pvo->pvo_vaddr);
				mtx_unlock(&moea_table_mutex);
			}
		}
		PMAP_UNLOCK(pmap);
	}
	if ((lo & PTE_CHG) != 0) {
		moea_attr_clear(m, PTE_CHG);
		vm_page_dirty(m);
	}
	vm_page_aflag_clear(m, PGA_WRITEABLE);
	rw_wunlock(&pvh_global_lock);
}

/*
 * moea_ts_referenced:
 *
 *	Return a count of reference bits for a page, clearing those bits.
 *	It is not necessary for every reference bit to be cleared, but it
 *	is necessary that 0 only be returned when there are truly no
 *	reference bits set.
 *
 *	XXX: The exact number of bits to check and clear is a matter that
 *	should be tested and standardized at some point in the future for
 *	optimal aging of shared pages.
 */
int
moea_ts_referenced(mmu_t mmu, vm_page_t m)
{
	int count;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea_ts_referenced: page %p is not managed", m));
	rw_wlock(&pvh_global_lock);
	count = moea_clear_bit(m, PTE_REF);
	rw_wunlock(&pvh_global_lock);
	return (count);
}

/*
 * Modify the WIMG settings of all mappings for a page.
 */
void
moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
{
	struct	pvo_entry *pvo;
	struct	pvo_head *pvo_head;
	struct	pte *pt;
	pmap_t	pmap;
	u_int	lo;

	if ((m->oflags & VPO_UNMANAGED) != 0) {
		m->md.mdpg_cache_attrs = ma;
		return;
	}

	rw_wlock(&pvh_global_lock);
	pvo_head = vm_page_to_pvoh(m);
	lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), ma);

	LIST_FOREACH(pvo, pvo_head, pvo_vlink) {
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		pt = moea_pvo_to_pte(pvo, -1);
		pvo->pvo_pte.pte.pte_lo &= ~PTE_WIMG;
		pvo->pvo_pte.pte.pte_lo |= lo;
		if (pt != NULL) {
			moea_pte_change(pt, &pvo->pvo_pte.pte,
			    pvo->pvo_vaddr);
			if (pvo->pvo_pmap == kernel_pmap)
				isync();
		}
		mtx_unlock(&moea_table_mutex);
		PMAP_UNLOCK(pmap);
	}
	m->md.mdpg_cache_attrs = ma;
	rw_wunlock(&pvh_global_lock);
}
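
/*
 * The WIMG field rewritten above carries the standard PowerPC storage
 * attributes: W (write-through), I (caching inhibited), M (memory
 * coherence required) and G (guarded).  moea_calc_wimg() chooses these
 * from the vm_memattr_t, so changing a page's memory attribute here
 * amounts to rewriting those four bits in every mapping of the page.
 */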

/*
 * Map a wired page into kernel virtual address space.
 */
void
moea_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
{

	moea_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
}

void
moea_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
{
	u_int		pte_lo;
	int		error;

#if 0
	if (va < VM_MIN_KERNEL_ADDRESS)
		panic("moea_kenter: attempt to enter non-kernel address %#x",
		    va);
#endif

	pte_lo = moea_calc_wimg(pa, ma);

	PMAP_LOCK(kernel_pmap);
	error = moea_pvo_enter(kernel_pmap, moea_upvo_zone,
	    &moea_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);

	if (error != 0 && error != ENOENT)
		panic("moea_kenter: failed to enter va %#x pa %#x: %d", va,
		    pa, error);

	PMAP_UNLOCK(kernel_pmap);
}

/*
 * Extract the physical page address associated with the given kernel virtual
 * address.
 */
vm_paddr_t
moea_kextract(mmu_t mmu, vm_offset_t va)
{
	struct	pvo_entry *pvo;
	vm_paddr_t pa;

	/*
	 * Allow direct mappings on 32-bit OEA
	 */
	if (va < VM_MIN_KERNEL_ADDRESS) {
		return (va);
	}

	PMAP_LOCK(kernel_pmap);
	pvo = moea_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
	KASSERT(pvo != NULL, ("moea_kextract: no addr found"));
	pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
	PMAP_UNLOCK(kernel_pmap);
	return (pa);
}

/*
 * Remove a wired page from kernel virtual address space.
 */
void
moea_kremove(mmu_t mmu, vm_offset_t va)
{

	moea_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
}

/*
 * Map a range of physical addresses into kernel virtual address space.
 *
 * The value passed in *virt is a suggested virtual address for the mapping.
 * Architectures which can support a direct-mapped physical to virtual region
 * can return the appropriate address within that region, leaving '*virt'
 * unchanged.  We cannot and therefore do not; *virt is updated with the
 * first usable address after the mapped region.
 */
vm_offset_t
moea_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
    vm_paddr_t pa_end, int prot)
{
	vm_offset_t	sva, va;

	sva = *virt;
	va = sva;
	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
		moea_kenter(mmu, va, pa_start);
	*virt = va;
	return (sva);
}
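
/*
 * Typical use of moea_map() is during machine-independent startup to wire a
 * physical range into the kernel virtual space being handed out.  A minimal
 * sketch, with hypothetical values not taken from this file:
 *
 *	vm_offset_t va = virtual_avail;
 *
 *	moea_map(mmu, &va, msgbuf_pa, msgbuf_pa + MSGBUF_SIZE,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 *	virtual_avail = va;	(*virt was advanced past the mapping)
 *
 * Because this pmap offers no general direct map to hand back, *virt is
 * always advanced rather than left unchanged.
 */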

/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
moea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
{
	int loops;
	struct pvo_entry *pvo;
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea_page_exists_quick: page %p is not managed", m));
	loops = 0;
	rv = FALSE;
	rw_wlock(&pvh_global_lock);
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		if (pvo->pvo_pmap == pmap) {
			rv = TRUE;
			break;
		}
		if (++loops >= 16)
			break;
	}
	rw_wunlock(&pvh_global_lock);
	return (rv);
}

/*
 * Return the number of managed mappings to the given physical page
 * that are wired.
 */
int
moea_page_wired_mappings(mmu_t mmu, vm_page_t m)
{
	struct pvo_entry *pvo;
	int count;

	count = 0;
	if ((m->oflags & VPO_UNMANAGED) != 0)
		return (count);
	rw_wlock(&pvh_global_lock);
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
		if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
			count++;
	rw_wunlock(&pvh_global_lock);
	return (count);
}

static u_int	moea_vsidcontext;

void
moea_pinit(mmu_t mmu, pmap_t pmap)
{
	int	i, mask;
	u_int	entropy;

	KASSERT((int)pmap < VM_MIN_KERNEL_ADDRESS, ("moea_pinit: virt pmap"));
	PMAP_LOCK_INIT(pmap);
	RB_INIT(&pmap->pmap_pvo);

	entropy = 0;
	__asm __volatile("mftb %0" : "=r"(entropy));

	if ((pmap->pmap_phys = (pmap_t)moea_kextract(mmu, (vm_offset_t)pmap))
	    == NULL) {
		pmap->pmap_phys = pmap;
	}

	mtx_lock(&moea_vsid_mutex);
	/*
	 * Allocate some segment registers for this pmap.
	 */
	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
		u_int	hash, n;

		/*
		 * Create a new value by multiplying by a prime and adding in
		 * entropy from the timebase register.  This is to make the
		 * VSID more random so that the PT hash function collides
		 * less often.  (Note that the prime causes gcc to do shifts
		 * instead of a multiply.)
		 */
		moea_vsidcontext = (moea_vsidcontext * 0x1105) + entropy;
		hash = moea_vsidcontext & (NPMAPS - 1);
		if (hash == 0)		/* 0 is special, avoid it */
			continue;
		n = hash >> 5;
		mask = 1 << (hash & (VSID_NBPW - 1));
		hash = (moea_vsidcontext & 0xfffff);
		if (moea_vsid_bitmap[n] & mask) {	/* collision? */
			/* anything free in this bucket? */
			if (moea_vsid_bitmap[n] == 0xffffffff) {
				entropy = (moea_vsidcontext >> 20);
				continue;
			}
			i = ffs(~moea_vsid_bitmap[n]) - 1;
			mask = 1 << i;
			hash &= 0xfffff & ~(VSID_NBPW - 1);
			hash |= i;
		}
		KASSERT(!(moea_vsid_bitmap[n] & mask),
		    ("Allocating in-use VSID group %#x\n", hash));
		moea_vsid_bitmap[n] |= mask;
		for (i = 0; i < 16; i++)
			pmap->pm_sr[i] = VSID_MAKE(i, hash);
		mtx_unlock(&moea_vsid_mutex);
		return;
	}

	mtx_unlock(&moea_vsid_mutex);
	panic("moea_pinit: out of segments");
}
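
/*
 * A worked example of the VSID bookkeeping above, assuming VSID_NBPW is 32
 * (one bitmap word per 32 VSID groups): if the hash derived from
 * moea_vsidcontext is 0x1234, then n = 0x1234 >> 5 = 0x91 selects the
 * bitmap word and mask = 1 << (0x1234 & 31) = 1 << 20 selects the bit
 * within it.  On a collision the first clear bit in that word is taken
 * instead and the low bits of the hash are replaced accordingly.  The
 * chosen 20-bit value then seeds VSID_MAKE(i, hash) for all 16 segment
 * registers of the pmap.
 */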

/*
 * Initialize the pmap associated with process 0.
 */
void
moea_pinit0(mmu_t mmu, pmap_t pm)
{

	moea_pinit(mmu, pm);
	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}

/*
 * Set the physical protection on the specified range of this map as requested.
 */
void
moea_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
    vm_prot_t prot)
{
	struct	pvo_entry *pvo, *tpvo, key;
	struct	pte *pt;

	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
	    ("moea_protect: non current pmap"));

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		moea_remove(mmu, pm, sva, eva);
		return;
	}

	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pm);
	key.pvo_vaddr = sva;
	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
		if ((prot & VM_PROT_EXECUTE) == 0)
			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;

		/*
		 * Grab the PTE pointer before we diddle with the cached PTE
		 * copy.
		 */
		pt = moea_pvo_to_pte(pvo, -1);
		/*
		 * Change the protection of the page.
		 */
		pvo->pvo_pte.pte.pte_lo &= ~PTE_PP;
		pvo->pvo_pte.pte.pte_lo |= PTE_BR;

		/*
		 * If the PVO is in the page table, update that pte as well.
		 */
		if (pt != NULL) {
			moea_pte_change(pt, &pvo->pvo_pte.pte, pvo->pvo_vaddr);
			mtx_unlock(&moea_table_mutex);
		}
	}
	rw_wunlock(&pvh_global_lock);
	PMAP_UNLOCK(pm);
}
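
/*
 * As in the other BSD pmaps, pmap_protect() is only ever required to reduce
 * permissions, which is why the loop above simply downgrades every mapping
 * remaining in the range to read-only (PTE_BR); permission upgrades are
 * expected to happen later by taking a fault and re-entering the mapping
 * through moea_enter().
 */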

/*
 * Map a list of wired pages into kernel virtual address space.  This is
 * intended for temporary mappings which do not need page modification or
 * references recorded.  Existing mappings in the region are overwritten.
 */
void
moea_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		moea_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
		va += PAGE_SIZE;
		m++;
	}
}

/*
 * Remove page mappings from kernel virtual address space.  Intended for
 * temporary mappings entered by moea_qenter.
 */
void
moea_qremove(mmu_t mmu, vm_offset_t sva, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		moea_kremove(mmu, va);
		va += PAGE_SIZE;
	}
}

void
moea_release(mmu_t mmu, pmap_t pmap)
{
	int idx, mask;

	/*
	 * Free the VSID allocated to this pmap's segment registers.
	 */
	if (pmap->pm_sr[0] == 0)
		panic("moea_release");

	mtx_lock(&moea_vsid_mutex);
	idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1);
	mask = 1 << (idx % VSID_NBPW);
	idx /= VSID_NBPW;
	moea_vsid_bitmap[idx] &= ~mask;
	mtx_unlock(&moea_vsid_mutex);
	PMAP_LOCK_DESTROY(pmap);
}

/*
 * Remove the given range of addresses from the specified map.
 */
void
moea_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
	struct	pvo_entry *pvo, *tpvo, key;

	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pm);
	key.pvo_vaddr = sva;
	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
		moea_pvo_remove(pvo, -1);
	}
	PMAP_UNLOCK(pm);
	rw_wunlock(&pvh_global_lock);
}

/*
 * Remove physical page from all pmaps in which it resides. moea_pvo_remove()
 * will reflect changes in pte's back to the vm_page.
 */
void
moea_remove_all(mmu_t mmu, vm_page_t m)
{
	struct	pvo_head *pvo_head;
	struct	pvo_entry *pvo, *next_pvo;
	pmap_t	pmap;

	rw_wlock(&pvh_global_lock);
	pvo_head = vm_page_to_pvoh(m);
	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
		next_pvo = LIST_NEXT(pvo, pvo_vlink);

		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		moea_pvo_remove(pvo, -1);
		PMAP_UNLOCK(pmap);
	}
	if ((m->aflags & PGA_WRITEABLE) && moea_query_bit(m, PTE_CHG)) {
		moea_attr_clear(m, PTE_CHG);
		vm_page_dirty(m);
	}
	vm_page_aflag_clear(m, PGA_WRITEABLE);
	rw_wunlock(&pvh_global_lock);
}

/*
 * Allocate a physical page of memory directly from the phys_avail map.
 * Can only be called from moea_bootstrap before avail start and end are
 * calculated.
 */
static vm_offset_t
moea_bootstrap_alloc(vm_size_t size, u_int align)
{
	vm_offset_t	s, e;
	int		i, j;

	size = round_page(size);
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		if (align != 0)
			s = (phys_avail[i] + align - 1) & ~(align - 1);
		else
			s = phys_avail[i];
		e = s + size;

		if (s < phys_avail[i] || e > phys_avail[i + 1])
			continue;

		if (s == phys_avail[i]) {
			phys_avail[i] += size;
		} else if (e == phys_avail[i + 1]) {
			phys_avail[i + 1] -= size;
		} else {
			for (j = phys_avail_count * 2; j > i; j -= 2) {
				phys_avail[j] = phys_avail[j - 2];
				phys_avail[j + 1] = phys_avail[j - 1];
			}

			phys_avail[i + 3] = phys_avail[i + 1];
			phys_avail[i + 1] = s;
			phys_avail[i + 2] = e;
			phys_avail_count++;
		}

		return (s);
	}
	panic("moea_bootstrap_alloc: could not allocate memory");
}
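
/*
 * Example of the phys_avail surgery above (illustrative numbers): carving a
 * 0x2000-byte chunk aligned to 0x4000 out of the region [0x3800, 0x10000)
 * yields s = 0x4000 and e = 0x6000.  Neither end coincides with a region
 * boundary, so the later entries are shifted up and the region is split
 * into [0x3800, 0x4000) and [0x6000, 0x10000), bumping phys_avail_count.
 * When the aligned start equals the region start, the region is simply
 * trimmed from the front instead.
 */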

static void
moea_syncicache(vm_offset_t pa, vm_size_t len)
{
	__syncicache((void *)pa, len);
}

static int
moea_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
    vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags)
{
	struct	pvo_entry *pvo;
	u_int	sr;
	int	first;
	u_int	ptegidx;
	int	i;
	int	bootstrap;

	moea_pvo_enter_calls++;
	first = 0;
	bootstrap = 0;

	/*
	 * Compute the PTE Group index.
	 */
	va &= ~ADDR_POFF;
	sr = va_to_sr(pm->pm_sr, va);
	ptegidx = va_to_pteg(sr, va);

	/*
	 * Remove any existing mapping for this page.  Reuse the pvo entry if
	 * there is a mapping.
	 */
	mtx_lock(&moea_table_mutex);
	LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if ((pvo->pvo_pte.pte.pte_lo & PTE_RPGN) == pa &&
			    (pvo->pvo_pte.pte.pte_lo & PTE_PP) ==
			    (pte_lo & PTE_PP)) {
				mtx_unlock(&moea_table_mutex);
				return (0);
			}
			moea_pvo_remove(pvo, -1);
			break;
		}
	}

	/*
	 * If we aren't overwriting a mapping, try to allocate.
	 */
	if (moea_initialized) {
		pvo = uma_zalloc(zone, M_NOWAIT);
	} else {
		if (moea_bpvo_pool_index >= BPVO_POOL_SIZE) {
			panic("moea_enter: bpvo pool exhausted, %d, %d, %d",
			    moea_bpvo_pool_index, BPVO_POOL_SIZE,
			    BPVO_POOL_SIZE * sizeof(struct pvo_entry));
		}
		pvo = &moea_bpvo_pool[moea_bpvo_pool_index];
		moea_bpvo_pool_index++;
		bootstrap = 1;
	}

	if (pvo == NULL) {
		mtx_unlock(&moea_table_mutex);
		return (ENOMEM);
	}

	moea_pvo_entries++;
	pvo->pvo_vaddr = va;
	pvo->pvo_pmap = pm;
	LIST_INSERT_HEAD(&moea_pvo_table[ptegidx], pvo, pvo_olink);
	pvo->pvo_vaddr &= ~ADDR_POFF;
	if (flags & VM_PROT_EXECUTE)
		pvo->pvo_vaddr |= PVO_EXECUTABLE;
	if (flags & PVO_WIRED)
		pvo->pvo_vaddr |= PVO_WIRED;
	if (pvo_head != &moea_pvo_kunmanaged)
		pvo->pvo_vaddr |= PVO_MANAGED;
	if (bootstrap)
		pvo->pvo_vaddr |= PVO_BOOTSTRAP;

	moea_pte_create(&pvo->pvo_pte.pte, sr, va, pa | pte_lo);

	/*
	 * Add to pmap list
	 */
	RB_INSERT(pvo_tree, &pm->pmap_pvo, pvo);

	/*
	 * Remember if the list was empty and therefore will be the first
	 * item.
	 */
	if (LIST_FIRST(pvo_head) == NULL)
		first = 1;
	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
|
2005-02-25 02:42:15 +00:00
|
|
|
|
	if (pvo->pvo_vaddr & PVO_WIRED)
		pm->pm_stats.wired_count++;
	pm->pm_stats.resident_count++;

	/*
	 * We hope this succeeds but it isn't required.
	 */
	i = moea_pte_insert(ptegidx, &pvo->pvo_pte.pte);
	if (i >= 0) {
		PVO_PTEGIDX_SET(pvo, i);
	} else {
		panic("moea_pvo_enter: overflow");
		moea_pte_overflow++;
	}
	mtx_unlock(&moea_table_mutex);
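
	/*
	 * Note: ENOENT below is not a failure.  It tells the caller that
	 * this was the first mapping entered for the page (callers are
	 * assumed to use this, for example, to decide whether the
	 * instruction cache needs to be synchronized for executable
	 * mappings).
	 */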
	return (first ? ENOENT : 0);
}
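
/*
 * Tear down a PVO: invalidate its hardware PTE if one is resident, update
 * the pmap statistics, save the referenced/changed bits for managed pages,
 * unlink the PVO from the PV and pmap lists, and free it unless it came
 * from the bootstrap pool.
 */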
static void
moea_pvo_remove(struct pvo_entry *pvo, int pteidx)
{
	struct pte *pt;

	/*
	 * If there is an active pte entry, we need to deactivate it (and
	 * save the ref & chg bits).
	 */
	pt = moea_pvo_to_pte(pvo, pteidx);
	if (pt != NULL) {
		moea_pte_unset(pt, &pvo->pvo_pte.pte, pvo->pvo_vaddr);
		mtx_unlock(&moea_table_mutex);
		PVO_PTEGIDX_CLR(pvo);
	} else {
		moea_pte_overflow--;
	}

	/*
	 * Update our statistics.
	 */
	pvo->pvo_pmap->pm_stats.resident_count--;
	if (pvo->pvo_vaddr & PVO_WIRED)
		pvo->pvo_pmap->pm_stats.wired_count--;

	/*
	 * Save the REF/CHG bits into their cache if the page is managed.
	 */
	if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED) {
		struct vm_page *pg;

		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte.pte_lo & PTE_RPGN);
		if (pg != NULL) {
			moea_attr_save(pg, pvo->pvo_pte.pte.pte_lo &
			    (PTE_REF | PTE_CHG));
		}
	}

	/*
	 * Remove this PVO from the PV and pmap lists.
	 */
	LIST_REMOVE(pvo, pvo_vlink);
	RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);

	/*
	 * Remove this from the overflow list and return it to the pool
	 * if we aren't going to reuse it.
	 */
	LIST_REMOVE(pvo, pvo_olink);
	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
		uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? moea_mpvo_zone :
		    moea_upvo_zone, pvo);
	moea_pvo_entries--;
	moea_pvo_remove_calls++;
}

static __inline int
moea_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
{
	int	pteidx;

	/*
	 * We can find the actual pte entry without searching by grabbing
	 * the PTEG index from 3 unused bits in pte_lo[11:9] and by
	 * noticing the HID bit.
	 */
	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
	if (pvo->pvo_pte.pte.pte_hi & PTE_HID)
		pteidx ^= moea_pteg_mask * 8;

	return (pteidx);
}
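
/*
 * Look up the PVO for a (pmap, virtual address) pair by hashing to the
 * PTEG bucket and walking its overflow list.  If pteidx_p is non-NULL,
 * the corresponding PTE index is returned through it.
 */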
static struct pvo_entry *
moea_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p)
{
	struct pvo_entry *pvo;
	int	ptegidx;
	u_int	sr;

	va &= ~ADDR_POFF;
	sr = va_to_sr(pm->pm_sr, va);
	ptegidx = va_to_pteg(sr, va);

	mtx_lock(&moea_table_mutex);
	LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if (pteidx_p)
				*pteidx_p = moea_pvo_pte_index(pvo, ptegidx);
			break;
		}
	}
	mtx_unlock(&moea_table_mutex);

	return (pvo);
}
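
/*
 * Translate a PVO into a pointer to its hardware PTE in the PTEG table.
 * On success the PTE pointer is returned with moea_table_mutex held; if
 * the PVO has no resident PTE, NULL is returned and the mutex is released.
 */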
static struct pte *
moea_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
{
	struct pte *pt;

	/*
	 * If we haven't been supplied the ptegidx, calculate it.
	 */
	if (pteidx == -1) {
		int	ptegidx;
		u_int	sr;

		sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr);
		ptegidx = va_to_pteg(sr, pvo->pvo_vaddr);
		pteidx = moea_pvo_pte_index(pvo, ptegidx);
	}

	pt = &moea_pteg_table[pteidx >> 3].pt[pteidx & 7];
	mtx_lock(&moea_table_mutex);

	if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
		panic("moea_pvo_to_pte: pvo %p has valid pte in pvo but no "
		    "valid pte index", pvo);
	}

	if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
		panic("moea_pvo_to_pte: pvo %p has valid pte index in pvo "
		    "but no valid pte", pvo);
	}

	if ((pt->pte_hi ^ (pvo->pvo_pte.pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
		if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0) {
			panic("moea_pvo_to_pte: pvo %p has valid pte in "
			    "moea_pteg_table %p but invalid in pvo", pvo, pt);
		}

		if (((pt->pte_lo ^ pvo->pvo_pte.pte.pte_lo) & ~(PTE_CHG|PTE_REF))
		    != 0) {
			panic("moea_pvo_to_pte: pvo %p pte does not match "
			    "pte %p in moea_pteg_table", pvo, pt);
		}

		mtx_assert(&moea_table_mutex, MA_OWNED);
		return (pt);
	}

	if (pvo->pvo_pte.pte.pte_hi & PTE_VALID) {
		panic("moea_pvo_to_pte: pvo %p has invalid pte %p in "
		    "moea_pteg_table but valid in pvo", pvo, pt);
	}

	mtx_unlock(&moea_table_mutex);
	return (NULL);
}

/*
 * XXX: THIS STUFF SHOULD BE IN pte.c?
 */
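/*
 * Handle a PTEG overflow ("spill") for the faulting address: install the
 * overflowed mapping's PTE in its primary PTEG, evicting a pseudo-randomly
 * chosen victim slot if no free slot exists.  Returns 1 if a PTE was
 * installed and 0 if no PVO exists for the address.
 */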
int
moea_pte_spill(vm_offset_t addr)
{
	struct pvo_entry *source_pvo, *victim_pvo;
	struct pvo_entry *pvo;
	int	ptegidx, i, j;
	u_int	sr;
	struct pteg *pteg;
	struct pte *pt;

	moea_pte_spills++;

	sr = mfsrin(addr);
	ptegidx = va_to_pteg(sr, addr);

	/*
	 * Have to substitute some entry.  Use the primary hash for this.
	 * Use low bits of timebase as random generator.
	 */
	pteg = &moea_pteg_table[ptegidx];
	mtx_lock(&moea_table_mutex);
	__asm __volatile("mftb %0" : "=r"(i));
	i &= 7;
	pt = &pteg->pt[i];

	source_pvo = NULL;
	victim_pvo = NULL;
	LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
		/*
		 * We need to find a pvo entry for this address.
		 */
		if (source_pvo == NULL &&
		    moea_pte_match(&pvo->pvo_pte.pte, sr, addr,
		    pvo->pvo_pte.pte.pte_hi & PTE_HID)) {
			/*
			 * We found an entry to be spilled into the pteg.
			 * The PTE is now valid, so we know it's active.
			 */
			j = moea_pte_insert(ptegidx, &pvo->pvo_pte.pte);

			if (j >= 0) {
				PVO_PTEGIDX_SET(pvo, j);
				moea_pte_overflow--;
				mtx_unlock(&moea_table_mutex);
				return (1);
			}

			source_pvo = pvo;

			if (victim_pvo != NULL)
				break;
		}

		/*
		 * We also need the pvo entry of the victim we are replacing
		 * so save the R & C bits of the PTE.
		 */
		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
		    moea_pte_compare(pt, &pvo->pvo_pte.pte)) {
			victim_pvo = pvo;
			if (source_pvo != NULL)
				break;
		}
	}

	if (source_pvo == NULL) {
		mtx_unlock(&moea_table_mutex);
		return (0);
	}

	if (victim_pvo == NULL) {
		if ((pt->pte_hi & PTE_HID) == 0)
			panic("moea_pte_spill: victim p-pte (%p) has no pvo "
			    "entry", pt);

		/*
		 * If this is a secondary PTE, we need to search its primary
		 * pvo bucket for the matching PVO.
		 */
		LIST_FOREACH(pvo, &moea_pvo_table[ptegidx ^ moea_pteg_mask],
		    pvo_olink) {
			/*
			 * We also need the pvo entry of the victim we are
			 * replacing so save the R & C bits of the PTE.
			 */
			if (moea_pte_compare(pt, &pvo->pvo_pte.pte)) {
				victim_pvo = pvo;
				break;
			}
		}

		if (victim_pvo == NULL)
			panic("moea_pte_spill: victim s-pte (%p) has no pvo "
			    "entry", pt);
	}

	/*
	 * We are invalidating the TLB entry for the EA we are replacing even
	 * though it's valid.  If we don't, we lose any ref/chg bit changes
	 * contained in the TLB entry.
	 */
	source_pvo->pvo_pte.pte.pte_hi &= ~PTE_HID;

	moea_pte_unset(pt, &victim_pvo->pvo_pte.pte, victim_pvo->pvo_vaddr);
	moea_pte_set(pt, &source_pvo->pvo_pte.pte);

	PVO_PTEGIDX_CLR(victim_pvo);
	PVO_PTEGIDX_SET(source_pvo, i);
	moea_pte_replacements++;

	mtx_unlock(&moea_table_mutex);
	return (1);
}
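
/*
 * Insert a PTE into the first free slot of its primary PTEG, falling back
 * to the secondary PTEG (with PTE_HID set) if the primary group is full.
 * Returns the slot index used, or panics if both groups are full.
 */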
static int
moea_pte_insert(u_int ptegidx, struct pte *pvo_pt)
{
	struct pte *pt;
	int	i;

	mtx_assert(&moea_table_mutex, MA_OWNED);

	/*
	 * First try primary hash.
	 */
	for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi &= ~PTE_HID;
			moea_pte_set(pt, pvo_pt);
			return (i);
		}
	}

	/*
	 * Now try secondary hash.
	 */
	ptegidx ^= moea_pteg_mask;

	for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi |= PTE_HID;
			moea_pte_set(pt, pvo_pt);
			return (i);
		}
	}

	panic("moea_pte_insert: overflow");
	return (-1);
}

static boolean_t
moea_query_bit(vm_page_t m, int ptebit)
{
	struct pvo_entry *pvo;
	struct pte *pt;

	rw_assert(&pvh_global_lock, RA_WLOCKED);
	if (moea_attr_fetch(m) & ptebit)
		return (TRUE);

	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {

		/*
		 * See if we saved the bit off.  If so, cache it and return
		 * success.
		 */
		if (pvo->pvo_pte.pte.pte_lo & ptebit) {
			moea_attr_save(m, ptebit);
			return (TRUE);
		}
	}

	/*
	 * No luck, now go through the hard part of looking at the PTEs
	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
	 * the PTEs.
	 */
	powerpc_sync();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {

		/*
		 * See if this pvo has a valid PTE.  If so, fetch the
		 * REF/CHG bits from the valid PTE.  If the appropriate
		 * ptebit is set, cache it and return success.
		 */
		pt = moea_pvo_to_pte(pvo, -1);
		if (pt != NULL) {
			moea_pte_synch(pt, &pvo->pvo_pte.pte);
			mtx_unlock(&moea_table_mutex);
			if (pvo->pvo_pte.pte.pte_lo & ptebit) {
				moea_attr_save(m, ptebit);
				return (TRUE);
			}
		}
	}

	return (FALSE);
}

static u_int
moea_clear_bit(vm_page_t m, int ptebit)
{
	u_int	count;
	struct pvo_entry *pvo;
	struct pte *pt;

	rw_assert(&pvh_global_lock, RA_WLOCKED);

	/*
	 * Clear the cached value.
	 */
	moea_attr_clear(m, ptebit);

	/*
	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
	 * we can reset the right ones).  Note that since the pvo entries and
	 * list heads are accessed via BAT0 and are never placed in the page
	 * table, we don't have to worry about further accesses setting the
	 * REF/CHG bits.
	 */
	powerpc_sync();

	/*
	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
	 * valid pte clear the ptebit from the valid pte.
	 */
	count = 0;
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		pt = moea_pvo_to_pte(pvo, -1);
		if (pt != NULL) {
			moea_pte_synch(pt, &pvo->pvo_pte.pte);
			if (pvo->pvo_pte.pte.pte_lo & ptebit) {
				count++;
				moea_pte_clear(pt, PVO_VADDR(pvo), ptebit);
			}
			mtx_unlock(&moea_table_mutex);
		}
		pvo->pvo_pte.pte.pte_lo &= ~ptebit;
	}

	return (count);
}

/*
 * Return 0 if the physical range is encompassed by battable[idx],
 * otherwise return an errno describing why it is not.
 */
static int
moea_bat_mapped(int idx, vm_offset_t pa, vm_size_t size)
{
	u_int	prot;
	u_int32_t start;
	u_int32_t end;
	u_int32_t bat_ble;

	/*
	 * Return immediately if not a valid mapping
	 */
	if (!(battable[idx].batu & BAT_Vs))
		return (EINVAL);

	/*
	 * The BAT entry must be cache-inhibited, guarded, and r/w
	 * so it can function as an i/o page
	 */
	prot = battable[idx].batl & (BAT_I|BAT_G|BAT_PP_RW);
	if (prot != (BAT_I|BAT_G|BAT_PP_RW))
		return (EPERM);

	/*
	 * The address should be within the BAT range.  Assume that the
	 * start address in the BAT has the correct alignment (thus
	 * not requiring masking)
	 */
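	/*
	 * The block-length field in the upper BAT register is a mask of
	 * 128 KB blocks.  Shifting it (together with the two low valid
	 * bits) left by 15 and filling in the low-order bits below
	 * effectively yields the address of the last byte covered by
	 * this BAT entry.
	 */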
	start = battable[idx].batl & BAT_PBS;
	bat_ble = (battable[idx].batu & ~(BAT_EBS)) | 0x03;
	end = start | (bat_ble << 15) | 0x7fff;

	if ((pa < start) || ((pa + size) > end))
		return (ERANGE);

	return (0);
}

boolean_t
moea_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
{
	int i;

	/*
	 * This currently does not work for entries that
	 * overlap 256M BAT segments.
	 */

	for (i = 0; i < 16; i++)
		if (moea_bat_mapped(i, pa, size) == 0)
			return (0);

	return (EFAULT);
}

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space.  Return a pointer to where it is mapped.  This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
moea_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
{

	return (moea_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
}

void *
moea_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma)
{
	vm_offset_t va, tmpva, ppa, offset;
	int i;

	ppa = trunc_page(pa);
	offset = pa & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);

	/*
	 * If the physical address lies within a valid BAT table entry,
	 * return the 1:1 mapping.  This currently doesn't work
	 * for regions that overlap 256M BAT segments.
	 */
	for (i = 0; i < 16; i++) {
		if (moea_bat_mapped(i, pa, size) == 0)
			return ((void *) pa);
	}

	va = kmem_alloc_nofault(kernel_map, size);
	if (!va)
		panic("moea_mapdev: Couldn't alloc kernel virtual memory");

	for (tmpva = va; size > 0;) {
		moea_kenter_attr(mmu, tmpva, ppa, ma);
		tlbie(tmpva);
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		ppa += PAGE_SIZE;
	}

	return ((void *)(va + offset));
}

void
moea_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	/*
	 * If this is outside kernel virtual space, then it's a
	 * battable entry and doesn't require unmapping
	 */
	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= virtual_end)) {
		base = trunc_page(va);
		offset = va & PAGE_MASK;
		size = roundup(offset + size, PAGE_SIZE);
		kmem_free(kernel_map, base, size);
	}
}
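
/*
 * Synchronize the instruction cache for a range of a pmap's address
 * space, one page at a time, using each page's physical address as found
 * through its PVO; pages with no PVO (i.e. no mapping) are skipped.
 */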
static void
moea_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	struct pvo_entry *pvo;
	vm_offset_t lim;
	vm_paddr_t pa;
	vm_size_t len;

	PMAP_LOCK(pm);
	while (sz > 0) {
		lim = round_page(va);
		len = MIN(lim - va, sz);
		pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
		if (pvo != NULL) {
			pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) |
			    (va & ADDR_POFF);
			moea_syncicache(pa, len);
		}
		va += len;
		sz -= len;
	}
	PMAP_UNLOCK(pm);
}