freebsd-skq/sys/powerpc/aim/moea64_if.m
Nathan Whitehorn (827cc9b981)

New pmap implementation for 64-bit PowerPC processors. The main focus of
this change is to improve concurrency:
- Drop global state stored in the shadow overflow page table (and all other
  global state)
- Remove all global locks
- Use per-PTE lock bits to allow parallel page insertion
- Reconstruct state when requested for evicted PTEs instead of buffering
  it during overflow

This drops total wall time for make buildworld on a 32-thread POWER8 system
by a factor of two and system time by a factor of three, providing per-core
performance 20% better than similarly clocked Core i7 Xeons. Performance on
smaller SMP systems, where PMAP lock contention was not as much of an issue,
is nearly unchanged.
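
The per-PTE lock bits mentioned above work roughly like the sketch below: a
software-reserved bit in the PTE is taken with an atomic compare-and-set, so
threads inserting different pages never contend on a shared lock. This is a
minimal illustration only; PTE_SW_LOCKED and pte_trylock() are hypothetical
names, not the kernel's actual definitions.

#include <sys/types.h>
#include <machine/atomic.h>

#define	PTE_SW_LOCKED	0x8	/* hypothetical software lock bit */

/* Try to take the per-PTE lock; returns nonzero on success. */
static int
pte_trylock(volatile uint64_t *pte_hi)
{
	uint64_t old;

	old = *pte_hi;
	if (old & PTE_SW_LOCKED)
		return (0);	/* another CPU holds this PTE */
	return (atomic_cmpset_64(pte_hi, old, old | PTE_SW_LOCKED));
}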

Tested on:	POWER8, POWER5+, G5 UP, G5 SMP (64-bit and 32-bit kernels)
Merged from:	user/nwhitehorn/ppc64-pmap-rework
Looked over by:	jhibbits, andreast
MFC after:	3 months
Relnotes:	yes
Sponsored by:	FreeBSD Foundation
2015-02-24 21:37:20 +00:00

#-
# Copyright (c) 2010,2015 Nathan Whitehorn
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $FreeBSD$
#

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <machine/mmuvar.h>

/**
* MOEA64 kobj methods for 64-bit Book-S page table
* manipulation routines used, for example, by hypervisors.
*/

INTERFACE moea64;

CODE {
	static moea64_pte_replace_t moea64_pte_replace_default;

	static int64_t moea64_pte_replace_default(mmu_t mmu,
	    struct pvo_entry *pvo, int flags)
	{
		int64_t refchg;

		refchg = MOEA64_PTE_UNSET(mmu, pvo);
		MOEA64_PTE_INSERT(mmu, pvo);

		return (refchg);
	}
}

/**
 * Return the ref/changed bits of the PTE referenced by _pvo if _pvo is
 * currently in the page table; return -1 if it is not currently present.
 */
METHOD int64_t pte_synch {
	mmu_t		_mmu;
	struct pvo_entry *_pvo;
};

/**
 * Clear the bits set in _ptebit (a mask) from the low word of the PTE
 * referenced by _pvo. Return the previous values of the ref/changed bits,
 * or -1 if _pvo is not currently in the page table.
 */
METHOD int64_t pte_clear {
	mmu_t		_mmu;
	struct pvo_entry *_pvo;
	uint64_t	_ptebit;
};
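
/*
 * Illustrative example (a hypothetical sketch, not part of the interface):
 * when the VM layer cleans a page, a pmap could sample and clear the
 * changed bit in one call. LPTE_CHG comes from the PTE definitions; the
 * surrounding variables here are assumed.
 *
 *	refchg = MOEA64_PTE_CLEAR(mmu, pvo, LPTE_CHG);
 *	if (refchg != -1 && (refchg & LPTE_CHG) != 0)
 *		vm_page_dirty(m);
 */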

/**
 * Invalidate the PTE referenced by _pvo, returning its ref/changed bits.
 * Return -1 if the PTE is not currently present in the page table.
 */
METHOD int64_t pte_unset {
	mmu_t		_mmu;
	struct pvo_entry *_pvo;
};

/**
 * Update the referenced PTE to correspond to the contents of _pvo. This has
 * the same ref/changed semantics as pte_unset() (and should clear the R/C
 * bits). It may change the PVO's location in the page table or return with
 * it unmapped if PVO_WIRED is not set. By default, this does unset()
 * followed by insert().
 *
 * _flags is a bitmask describing what level of page invalidation should
 * occur:
 *   0 means no invalidation is required
 *   MOEA64_PTE_PROT_UPDATE signifies that the page protection bits are
 *   changing
 *   MOEA64_PTE_INVALIDATE requires an invalidation of the same strength as
 *   pte_unset() followed by pte_insert()
 */
METHOD int64_t pte_replace {
	mmu_t		_mmu;
	struct pvo_entry *_pvo;
	int		_flags;
} DEFAULT moea64_pte_replace_default;
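
/*
 * Illustrative example (hypothetical, not part of the interface): a
 * protection downgrade can update the mapping and collect the ref/changed
 * state in one call. The pvo_pte.prot field shown here is an assumption
 * about the PVO layout, not a guarantee of this header.
 *
 *	pvo->pvo_pte.prot &= ~VM_PROT_WRITE;
 *	refchg = MOEA64_PTE_REPLACE(mmu, pvo, MOEA64_PTE_PROT_UPDATE);
 *	if (refchg != -1 && (refchg & LPTE_CHG) != 0)
 *		vm_page_dirty(m);
 */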

/**
 * Insert a PTE corresponding to _pvo into the page table, returning any
 * errors encountered and (optionally) setting the PVO slot value to some
 * representation of where the entry was placed.
 *
 * Must not replace PTEs marked LPTE_WIRED. If an existing valid PTE is
 * spilled, must synchronize ref/changed bits as in pte_unset().
 */
METHOD int pte_insert {
	mmu_t		_mmu;
	struct pvo_entry *_pvo;
};
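
/*
 * Illustrative sketch of a consumer (assumptions, not part of this file):
 * callers use the kobj-generated macros from moea64_if.h. A hypothetical
 * helper that samples the referenced bit of a mapping might look like the
 * following; a negative return from MOEA64_PTE_SYNCH() means the PVO is
 * not in the page table.
 *
 *	static int
 *	moea64_query_ref(mmu_t mmu, struct pvo_entry *pvo)
 *	{
 *		int64_t refchg;
 *
 *		refchg = MOEA64_PTE_SYNCH(mmu, pvo);
 *		if (refchg < 0)
 *			return (0);
 *		return ((refchg & LPTE_REF) != 0);
 *	}
 */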