Add support to the virtual memory system for configuring
machine-dependent memory attributes:

Rename vm_cache_mode_t to vm_memattr_t.  The new name reflects the
fact that there are machine-dependent memory attributes that have
nothing to do with controlling the cache's behavior.

Introduce vm_object_set_memattr() for setting the default memory
attributes that will be given to an object's pages.
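
For instance, a device driver might request write-combining pages for a
newly created object before any pages are allocated to it.  A minimal
sketch, assuming an amd64 or i386 kernel (where
VM_MEMATTR_WRITE_COMBINING is defined) and a hypothetical object
pointer "obj":

	VM_OBJECT_LOCK(obj);
	/* Fails with KERN_FAILURE if the object already has pages. */
	if (vm_object_set_memattr(obj, VM_MEMATTR_WRITE_COMBINING) !=
	    KERN_SUCCESS)
		printf("vm_object_set_memattr failed\n");
	VM_OBJECT_UNLOCK(obj);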

Introduce and use pmap_page_{get,set}_memattr() for getting and
setting a page's machine-dependent memory attributes.  Add full
support for these functions on amd64 and i386 and stubs for them on
the other architectures.  The function pmap_page_set_memattr() is also
responsible for any other machine-dependent aspects of changing a
page's memory attributes, such as flushing the cache or updating the
direct map.  The uses include kmem_alloc_contig(), vm_page_alloc(),
and the device pager:

  kmem_alloc_contig() can now be used to allocate kernel memory with
  non-default memory attributes on amd64 and i386; see the sketch after
  this list.

  vm_page_alloc() and the device pager will set the memory attributes
  for the real or fictitious page according to the object's default
  memory attributes.
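
For example, a minimal sketch of an uncacheable kernel allocation using
the new interface (hypothetical caller; the prototype appears in the
vm_extern.h hunk below):

	vm_offset_t va;

	/* One wired, physically contiguous page, mapped uncacheable. */
	va = kmem_alloc_contig(kernel_map, PAGE_SIZE, M_WAITOK, 0,
	    ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_UNCACHEABLE);
	if (va == 0)
		printf("kmem_alloc_contig failed\n");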

Update the various pmap functions on amd64 and i386 that map pages to
incorporate each page's memory attributes in the mapping.
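
The recurring pattern, visible in the pmap hunks below, is to fold the
PAT bits derived from the page's memory attribute into each new page
table entry; condensed from the amd64 pmap_enter() change:

	newpte = (pt_entry_t)(pa | pmap_cache_bits(m->md.pat_mode, 0) |
	    PG_V);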

Notes: (1) Inherent to this design are safety features that prevent
the specification of inconsistent memory attributes by different
mappings on amd64 and i386.  In addition, the device pager provides a
warning when a device driver creates a fictitious page with memory
attributes that are inconsistent with the real page that the
fictitious page is an alias for. (2) Storing the machine-dependent
memory attributes for amd64 and i386 as a dedicated "int" in "struct
md_page" represents a compromise between space efficiency and the ease
of MFCing these changes to RELENG_7.

In collaboration with: jhb

Approved by:	re (kib)
Commit:	3153e878dd (parent ac60940338)
Author:	Alan Cox
Date:	2009-07-12 23:31:20 +00:00

30 changed files with 277 additions and 108 deletions


@ -614,6 +614,7 @@ pmap_page_init(vm_page_t m)
{
TAILQ_INIT(&m->md.pv_list);
m->md.pat_mode = PAT_WRITE_BACK;
}
/*
@ -1120,7 +1121,8 @@ pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
endpte = pte + count;
while (pte < endpte) {
oldpte |= *pte;
pte_store(pte, VM_PAGE_TO_PHYS(*ma) | PG_G | PG_RW | PG_V);
pte_store(pte, VM_PAGE_TO_PHYS(*ma) | PG_G |
pmap_cache_bits((*ma)->md.pat_mode, 0) | PG_RW | PG_V);
pte++;
ma++;
}
@ -3025,7 +3027,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
/*
* Now validate mapping with desired protection/wiring.
*/
newpte = (pt_entry_t)(pa | PG_V);
newpte = (pt_entry_t)(pa | pmap_cache_bits(m->md.pat_mode, 0) | PG_V);
if ((prot & VM_PROT_WRITE) != 0) {
newpte |= PG_RW;
vm_page_flag_set(m, PG_WRITEABLE);
@ -3110,7 +3112,8 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
" in pmap %p", va, pmap);
return (FALSE);
}
newpde = VM_PAGE_TO_PHYS(m) | PG_PS | PG_V;
newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 1) |
PG_PS | PG_V;
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
newpde |= PG_MANAGED;
@ -3292,7 +3295,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
*/
pmap->pm_stats.resident_count++;
pa = VM_PAGE_TO_PHYS(m);
pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
if ((prot & VM_PROT_EXECUTE) == 0)
pa |= pg_nx;
@ -3333,6 +3336,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
pd_entry_t *pde;
vm_paddr_t pa, ptepa;
vm_page_t p, pdpg;
int pat_mode;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
KASSERT(object->type == OBJT_DEVICE,
@ -3343,6 +3347,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
p = vm_page_lookup(object, pindex);
KASSERT(p->valid == VM_PAGE_BITS_ALL,
("pmap_object_init_pt: invalid page %p", p));
pat_mode = p->md.pat_mode;
/*
* Abort the mapping if the first page is not physically
@ -3354,21 +3359,28 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
/*
* Skip the first page. Abort the mapping if the rest of
* the pages are not physically contiguous.
* the pages are not physically contiguous or have differing
* memory attributes.
*/
p = TAILQ_NEXT(p, listq);
for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
pa += PAGE_SIZE) {
KASSERT(p->valid == VM_PAGE_BITS_ALL,
("pmap_object_init_pt: invalid page %p", p));
if (pa != VM_PAGE_TO_PHYS(p))
if (pa != VM_PAGE_TO_PHYS(p) ||
pat_mode != p->md.pat_mode)
return;
p = TAILQ_NEXT(p, listq);
}
/* Map using 2MB pages. */
/*
* Map using 2MB pages. Since "ptepa" is 2M aligned and
* "size" is a multiple of 2M, adding the PAT setting to "pa"
* will not affect the termination of this loop.
*/
PMAP_LOCK(pmap);
for (pa = ptepa; pa < ptepa + size; pa += NBPDR) {
for (pa = ptepa | pmap_cache_bits(pat_mode, 1); pa < ptepa +
size; pa += NBPDR) {
pdpg = pmap_allocpde(pmap, addr, M_NOWAIT);
if (pdpg == NULL) {
/*
@ -4372,6 +4384,23 @@ pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe, vm_offset_t va)
return (TRUE);
}
/*
* Sets the memory attribute for the specified page.
*/
void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{
m->md.pat_mode = ma;
/*
* Update the direct mapping and flush the cache.
*/
if (pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
m->md.pat_mode))
panic("memory attribute change on the direct map failed");
}
/*
* Changes the specified virtual address range's memory type to that given by
* the parameter "mode". The specified virtual address range must be


@ -233,6 +233,7 @@ struct pv_chunk;
struct md_page {
TAILQ_HEAD(,pv_entry) pv_list;
int pat_mode;
};
/*
@ -299,6 +300,7 @@ extern vm_paddr_t dump_avail[];
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
#define pmap_page_get_memattr(m) ((vm_memattr_t)(m)->md.pat_mode)
#define pmap_unmapbios(va, sz) pmap_unmapdev((va), (sz))
void pmap_bootstrap(vm_paddr_t *);
@ -312,6 +314,7 @@ void *pmap_mapbios(vm_paddr_t, vm_size_t);
void *pmap_mapdev(vm_paddr_t, vm_size_t);
void *pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
boolean_t pmap_page_is_mapped(vm_page_t m);
void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
void pmap_unmapdev(vm_offset_t, vm_size_t);
void pmap_invalidate_page(pmap_t, vm_offset_t);
void pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);


@ -32,14 +32,14 @@
#include <machine/specialreg.h>
/* Cache control options. */
#define VM_CACHE_UNCACHEABLE ((vm_cache_mode_t)PAT_UNCACHEABLE)
#define VM_CACHE_WRITE_COMBINING ((vm_cache_mode_t)PAT_WRITE_COMBINING)
#define VM_CACHE_WRITE_THROUGH ((vm_cache_mode_t)PAT_WRITE_THROUGH)
#define VM_CACHE_WRITE_PROTECTED ((vm_cache_mode_t)PAT_WRITE_PROTECTED)
#define VM_CACHE_WRITE_BACK ((vm_cache_mode_t)PAT_WRITE_BACK)
#define VM_CACHE_UNCACHED ((vm_cache_mode_t)PAT_UNCACHED)
/* Memory attributes. */
#define VM_MEMATTR_UNCACHEABLE ((vm_memattr_t)PAT_UNCACHEABLE)
#define VM_MEMATTR_WRITE_COMBINING ((vm_memattr_t)PAT_WRITE_COMBINING)
#define VM_MEMATTR_WRITE_THROUGH ((vm_memattr_t)PAT_WRITE_THROUGH)
#define VM_MEMATTR_WRITE_PROTECTED ((vm_memattr_t)PAT_WRITE_PROTECTED)
#define VM_MEMATTR_WRITE_BACK ((vm_memattr_t)PAT_WRITE_BACK)
#define VM_MEMATTR_UNCACHED ((vm_memattr_t)PAT_UNCACHED)
#define VM_CACHE_DEFAULT VM_CACHE_WRITE_BACK
#define VM_MEMATTR_DEFAULT VM_MEMATTR_WRITE_BACK
#endif /* !_MACHINE_VM_H_ */


@ -75,7 +75,10 @@
#endif
#define pmap_page_get_memattr(m) VM_MEMATTR_DEFAULT
#define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list))
#define pmap_page_set_memattr(m, ma) (void)0
/*
* Pmap stuff
*/


@ -29,7 +29,7 @@
#ifndef _MACHINE_VM_H_
#define _MACHINE_VM_H_
/* Cache control is not (yet) implemented. */
#define VM_CACHE_DEFAULT 0
/* Memory attribute configuration is not (yet) implemented. */
#define VM_MEMATTR_DEFAULT 0
#endif /* !_MACHINE_VM_H_ */


@ -559,6 +559,7 @@ pmap_page_init(vm_page_t m)
{
TAILQ_INIT(&m->md.pv_list);
m->md.pat_mode = PAT_WRITE_BACK;
}
#ifdef PAE
@ -569,7 +570,7 @@ pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
/* Inform UMA that this allocator uses kernel_map/object. */
*flags = UMA_SLAB_KERNEL;
return ((void *)kmem_alloc_contig(kernel_map, bytes, wait, 0x0ULL,
0xffffffffULL, 1, 0, VM_CACHE_DEFAULT));
0xffffffffULL, 1, 0, VM_MEMATTR_DEFAULT));
}
#endif
@ -1210,7 +1211,8 @@ pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
endpte = pte + count;
while (pte < endpte) {
oldpte |= *pte;
pte_store(pte, VM_PAGE_TO_PHYS(*ma) | pgeflag | PG_RW | PG_V);
pte_store(pte, VM_PAGE_TO_PHYS(*ma) | pgeflag |
pmap_cache_bits((*ma)->md.pat_mode, 0) | PG_RW | PG_V);
pte++;
ma++;
}
@ -3132,7 +3134,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
/*
* Now validate mapping with desired protection/wiring.
*/
newpte = (pt_entry_t)(pa | PG_V);
newpte = (pt_entry_t)(pa | pmap_cache_bits(m->md.pat_mode, 0) | PG_V);
if ((prot & VM_PROT_WRITE) != 0) {
newpte |= PG_RW;
vm_page_flag_set(m, PG_WRITEABLE);
@ -3214,7 +3216,8 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
" in pmap %p", va, pmap);
return (FALSE);
}
newpde = VM_PAGE_TO_PHYS(m) | PG_PS | PG_V;
newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 1) |
PG_PS | PG_V;
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
newpde |= PG_MANAGED;
@ -3399,7 +3402,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
*/
pmap->pm_stats.resident_count++;
pa = VM_PAGE_TO_PHYS(m);
pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
#ifdef PAE
if ((prot & VM_PROT_EXECUTE) == 0)
pa |= pg_nx;
@ -3442,6 +3445,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
pd_entry_t *pde;
vm_paddr_t pa, ptepa;
vm_page_t p;
int pat_mode;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
KASSERT(object->type == OBJT_DEVICE,
@ -3453,6 +3457,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
p = vm_page_lookup(object, pindex);
KASSERT(p->valid == VM_PAGE_BITS_ALL,
("pmap_object_init_pt: invalid page %p", p));
pat_mode = p->md.pat_mode;
/*
* Abort the mapping if the first page is not physically
@ -3464,21 +3469,28 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
/*
* Skip the first page. Abort the mapping if the rest of
* the pages are not physically contiguous.
* the pages are not physically contiguous or have differing
* memory attributes.
*/
p = TAILQ_NEXT(p, listq);
for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
pa += PAGE_SIZE) {
KASSERT(p->valid == VM_PAGE_BITS_ALL,
("pmap_object_init_pt: invalid page %p", p));
if (pa != VM_PAGE_TO_PHYS(p))
if (pa != VM_PAGE_TO_PHYS(p) ||
pat_mode != p->md.pat_mode)
return;
p = TAILQ_NEXT(p, listq);
}
/* Map using 2/4MB pages. */
/*
* Map using 2/4MB pages. Since "ptepa" is 2/4M aligned and
* "size" is a multiple of 2/4M, adding the PAT setting to
* "pa" will not affect the termination of this loop.
*/
PMAP_LOCK(pmap);
for (pa = ptepa; pa < ptepa + size; pa += NBPDR) {
for (pa = ptepa | pmap_cache_bits(pat_mode, 1); pa < ptepa +
size; pa += NBPDR) {
pde = pmap_pde(pmap, addr);
if (*pde == 0) {
pde_store(pde, pa | PG_PS | PG_M | PG_A |
@ -3696,7 +3708,8 @@ pmap_zero_page(vm_page_t m)
if (*sysmaps->CMAP2)
panic("pmap_zero_page: CMAP2 busy");
sched_pin();
*sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M;
*sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
pmap_cache_bits(m->md.pat_mode, 0);
invlcaddr(sysmaps->CADDR2);
pagezero(sysmaps->CADDR2);
*sysmaps->CMAP2 = 0;
@ -3718,9 +3731,10 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
mtx_lock(&sysmaps->lock);
if (*sysmaps->CMAP2)
panic("pmap_zero_page: CMAP2 busy");
panic("pmap_zero_page_area: CMAP2 busy");
sched_pin();
*sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M;
*sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
pmap_cache_bits(m->md.pat_mode, 0);
invlcaddr(sysmaps->CADDR2);
if (off == 0 && size == PAGE_SIZE)
pagezero(sysmaps->CADDR2);
@ -3742,9 +3756,10 @@ pmap_zero_page_idle(vm_page_t m)
{
if (*CMAP3)
panic("pmap_zero_page: CMAP3 busy");
panic("pmap_zero_page_idle: CMAP3 busy");
sched_pin();
*CMAP3 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M;
*CMAP3 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
pmap_cache_bits(m->md.pat_mode, 0);
invlcaddr(CADDR3);
pagezero(CADDR3);
*CMAP3 = 0;
@ -3771,8 +3786,10 @@ pmap_copy_page(vm_page_t src, vm_page_t dst)
sched_pin();
invlpg((u_int)sysmaps->CADDR1);
invlpg((u_int)sysmaps->CADDR2);
*sysmaps->CMAP1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A;
*sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M;
*sysmaps->CMAP1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A |
pmap_cache_bits(src->md.pat_mode, 0);
*sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M |
pmap_cache_bits(dst->md.pat_mode, 0);
bcopy(sysmaps->CADDR1, sysmaps->CADDR2, PAGE_SIZE);
*sysmaps->CMAP1 = 0;
*sysmaps->CMAP2 = 0;
@ -4437,6 +4454,22 @@ pmap_unmapdev(vm_offset_t va, vm_size_t size)
kmem_free(kernel_map, base, size);
}
/*
* Sets the memory attribute for the specified page.
*/
void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{
m->md.pat_mode = ma;
/*
* Flush CPU caches to make sure any data isn't cached that shouldn't
* be, etc.
*/
pmap_invalidate_cache();
}
int
pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
{


@ -389,6 +389,7 @@ struct pv_chunk;
struct md_page {
TAILQ_HEAD(,pv_entry) pv_list;
int pat_mode;
};
struct pmap {
@ -458,6 +459,7 @@ extern char *ptvmmap; /* poor name! */
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
#define pmap_page_get_memattr(m) ((vm_memattr_t)(m)->md.pat_mode)
#define pmap_unmapbios(va, sz) pmap_unmapdev((va), (sz))
void pmap_bootstrap(vm_paddr_t);
@ -470,6 +472,7 @@ void *pmap_mapbios(vm_paddr_t, vm_size_t);
void *pmap_mapdev(vm_paddr_t, vm_size_t);
void *pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
boolean_t pmap_page_is_mapped(vm_page_t m);
void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
void pmap_unmapdev(vm_offset_t, vm_size_t);
pt_entry_t *pmap_pte(pmap_t, vm_offset_t) __pure2;
void pmap_set_pg(void);


@ -32,14 +32,14 @@
#include <machine/specialreg.h>
/* Cache control options. */
#define VM_CACHE_UNCACHEABLE ((vm_cache_mode_t)PAT_UNCACHEABLE)
#define VM_CACHE_WRITE_COMBINING ((vm_cache_mode_t)PAT_WRITE_COMBINING)
#define VM_CACHE_WRITE_THROUGH ((vm_cache_mode_t)PAT_WRITE_THROUGH)
#define VM_CACHE_WRITE_PROTECTED ((vm_cache_mode_t)PAT_WRITE_PROTECTED)
#define VM_CACHE_WRITE_BACK ((vm_cache_mode_t)PAT_WRITE_BACK)
#define VM_CACHE_UNCACHED ((vm_cache_mode_t)PAT_UNCACHED)
/* Memory attributes. */
#define VM_MEMATTR_UNCACHEABLE ((vm_memattr_t)PAT_UNCACHEABLE)
#define VM_MEMATTR_WRITE_COMBINING ((vm_memattr_t)PAT_WRITE_COMBINING)
#define VM_MEMATTR_WRITE_THROUGH ((vm_memattr_t)PAT_WRITE_THROUGH)
#define VM_MEMATTR_WRITE_PROTECTED ((vm_memattr_t)PAT_WRITE_PROTECTED)
#define VM_MEMATTR_WRITE_BACK ((vm_memattr_t)PAT_WRITE_BACK)
#define VM_MEMATTR_UNCACHED ((vm_memattr_t)PAT_UNCACHED)
#define VM_CACHE_DEFAULT VM_CACHE_WRITE_BACK
#define VM_MEMATTR_DEFAULT VM_MEMATTR_WRITE_BACK
#endif /* !_MACHINE_VM_H_ */


@ -605,6 +605,7 @@ pmap_page_init(vm_page_t m)
{
TAILQ_INIT(&m->md.pv_list);
m->md.pat_mode = PAT_WRITE_BACK;
}
#if defined(PAE) && !defined(XEN)
@ -615,7 +616,7 @@ pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
/* Inform UMA that this allocator uses kernel_map/object. */
*flags = UMA_SLAB_KERNEL;
return ((void *)kmem_alloc_contig(kernel_map, bytes, wait, 0x0ULL,
0xffffffffULL, 1, 0, VM_CACHE_DEFAULT));
0xffffffffULL, 1, 0, VM_MEMATTR_DEFAULT));
}
#endif


@ -118,7 +118,9 @@ extern vm_offset_t virtual_end;
extern uint64_t pmap_vhpt_base[];
extern int pmap_vhpt_log2size;
#define pmap_page_get_memattr(m) VM_MEMATTR_DEFAULT
#define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list))
#define pmap_page_set_memattr(m, ma) (void)0
#define pmap_mapbios(pa, sz) pmap_mapdev(pa, sz)
#define pmap_unmapbios(va, sz) pmap_unmapdev(va, sz)


@ -32,13 +32,13 @@
#include <machine/atomic.h>
#include <machine/pte.h>
/* Cache control options. */
#define VM_CACHE_WRITE_BACK ((vm_cache_mode_t)PTE_MA_WB)
#define VM_CACHE_UNCACHEABLE ((vm_cache_mode_t)PTE_MA_UC)
#define VM_CACHE_UNCACHEABLE_EXPORTED ((vm_cache_mode_t)PTE_MA_UCE)
#define VM_CACHE_WRITE_COMBINING ((vm_cache_mode_t)PTE_MA_WC)
#define VM_CACHE_NATPAGE ((vm_cache_mode_t)PTE_MA_NATPAGE)
/* Memory attributes. */
#define VM_MEMATTR_WRITE_BACK ((vm_memattr_t)PTE_MA_WB)
#define VM_MEMATTR_UNCACHEABLE ((vm_memattr_t)PTE_MA_UC)
#define VM_MEMATTR_UNCACHEABLE_EXPORTED ((vm_memattr_t)PTE_MA_UCE)
#define VM_MEMATTR_WRITE_COMBINING ((vm_memattr_t)PTE_MA_WC)
#define VM_MEMATTR_NATPAGE ((vm_memattr_t)PTE_MA_NATPAGE)
#define VM_CACHE_DEFAULT VM_CACHE_WRITE_BACK
#define VM_MEMATTR_DEFAULT VM_MEMATTR_WRITE_BACK
#endif /* !_MACHINE_VM_H_ */


@ -356,7 +356,7 @@ mbuf_jumbo_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
/* Inform UMA that this allocator uses kernel_map/object. */
*flags = UMA_SLAB_KERNEL;
return ((void *)kmem_alloc_contig(kernel_map, bytes, wait,
(vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0, VM_CACHE_DEFAULT));
(vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT));
}
/*


@ -155,7 +155,9 @@ extern vm_paddr_t mips_wired_tlb_physmem_start;
extern vm_paddr_t mips_wired_tlb_physmem_end;
extern u_int need_wired_tlb_page_pool;
#define pmap_page_get_memattr(m) VM_MEMATTR_DEFAULT
#define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list))
#define pmap_page_set_memattr(m, ma) (void)0
void pmap_bootstrap(void);
void *pmap_mapdev(vm_offset_t, vm_size_t);


@ -31,10 +31,10 @@
#include <machine/pte.h>
/* Cache control options. */
#define VM_CACHE_UNCACHED ((vm_cache_mode_t)PTE_UNCACHED)
#define VM_CACHE_CACHEABLE_NONCOHERENT ((vm_cache_mode_t)PTE_CACHE)
/* Memory attributes. */
#define VM_MEMATTR_UNCACHED ((vm_memattr_t)PTE_UNCACHED)
#define VM_MEMATTR_CACHEABLE_NONCOHERENT ((vm_memattr_t)PTE_CACHE)
#define VM_CACHE_DEFAULT VM_CACHE_CACHEABLE_NONCOHERENT
#define VM_MEMATTR_DEFAULT VM_MEMATTR_CACHEABLE_NONCOHERENT
#endif /* !_MACHINE_VM_H_ */


@ -113,7 +113,9 @@ struct md_page {
struct pvo_head mdpg_pvoh;
};
#define pmap_page_get_memattr(m) VM_MEMATTR_DEFAULT
#define pmap_page_is_mapped(m) (!LIST_EMPTY(&(m)->md.mdpg_pvoh))
#define pmap_page_set_memattr(m, ma) (void)0
#else
@ -143,7 +145,9 @@ struct md_page {
TAILQ_HEAD(, pv_entry) pv_list;
};
#define pmap_page_get_memattr(m) VM_MEMATTR_DEFAULT
#define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list))
#define pmap_page_set_memattr(m, ma) (void)0
#endif /* AIM */


@ -31,10 +31,12 @@
#include <machine/pte.h>
/* Cache control options. */
#define VM_CACHE_INHIBIT ((vm_cache_mode_t)PTE_I)
#define VM_CACHE_WRITE_THROUGH ((vm_cache_mode_t)PTE_W)
/* Memory attributes. */
#define VM_MEMATTR_CACHING_INHIBIT ((vm_memattr_t)PTE_I)
#define VM_MEMATTR_GUARD ((vm_memattr_t)PTE_G)
#define VM_MEMATTR_MEMORY_COHERENCE ((vm_memattr_t)PTE_M)
#define VM_MEMATTR_WRITE_THROUGH ((vm_memattr_t)PTE_W)
#define VM_CACHE_DEFAULT 0
#define VM_MEMATTR_DEFAULT 0
#endif /* !_MACHINE_VM_H_ */


@ -77,6 +77,9 @@ struct pmap {
#define PMAP_TRYLOCK(pmap) mtx_trylock(&(pmap)->pm_mtx)
#define PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx)
#define pmap_page_get_memattr(m) VM_MEMATTR_DEFAULT
#define pmap_page_set_memattr(m, ma) (void)0
void pmap_bootstrap(void);
vm_paddr_t pmap_kextract(vm_offset_t va);
void pmap_kenter(vm_offset_t va, vm_page_t m);


@ -29,7 +29,7 @@
#ifndef _MACHINE_VM_H_
#define _MACHINE_VM_H_
/* Cache control is not (yet) implemented. */
#define VM_CACHE_DEFAULT 0
/* Memory attribute configuration is not (yet) implemented. */
#define VM_MEMATTR_DEFAULT 0
#endif /* !_MACHINE_VM_H_ */


@ -106,7 +106,9 @@ typedef struct pv_entry {
TAILQ_ENTRY(pv_entry) pv_plist;
} *pv_entry_t;
#define pmap_page_get_memattr(m) VM_MEMATTR_DEFAULT
#define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list))
#define pmap_page_set_memattr(m, ma) (void)0
void pmap_bootstrap(vm_offset_t ekva);
vm_paddr_t pmap_kextract(vm_offset_t va);


@ -29,7 +29,7 @@
#ifndef _MACHINE_VM_H_
#define _MACHINE_VM_H_
/* Cache control is not (yet) implemented. */
#define VM_CACHE_DEFAULT 0
/* Memory attribute configuration is not (yet) implemented. */
#define VM_MEMATTR_DEFAULT 0
#endif /* !_MACHINE_VM_H_ */


@ -70,9 +70,9 @@ static struct mtx dev_pager_mtx;
static uma_zone_t fakepg_zone;
static vm_page_t dev_pager_getfake(vm_paddr_t);
static vm_page_t dev_pager_getfake(vm_paddr_t, vm_memattr_t);
static void dev_pager_putfake(vm_page_t);
static void dev_pager_updatefake(vm_page_t, vm_paddr_t);
static void dev_pager_updatefake(vm_page_t, vm_paddr_t, vm_memattr_t);
struct pagerops devicepagerops = {
.pgo_init = dev_pager_init,
@ -210,7 +210,8 @@ dev_pager_getpages(object, m, count, reqpage)
{
vm_pindex_t offset;
vm_paddr_t paddr;
vm_page_t page;
vm_page_t m_paddr, page;
vm_memattr_t memattr;
struct cdev *dev;
int i, ret;
int prot;
@ -222,6 +223,7 @@ dev_pager_getpages(object, m, count, reqpage)
dev = object->handle;
page = m[reqpage];
offset = page->pindex;
memattr = object->memattr;
VM_OBJECT_UNLOCK(object);
csw = dev_refthread(dev);
if (csw == NULL)
@ -235,14 +237,20 @@ dev_pager_getpages(object, m, count, reqpage)
KASSERT(ret == 0, ("dev_pager_getpage: map function returns error"));
td->td_fpop = fpop;
dev_relthread(dev);
/* If "paddr" is a real page, perform a sanity check on "memattr". */
if ((m_paddr = vm_phys_paddr_to_vm_page(paddr)) != NULL &&
pmap_page_get_memattr(m_paddr) != memattr) {
memattr = pmap_page_get_memattr(m_paddr);
printf(
"WARNING: A device driver has set \"memattr\" inconsistently.\n");
}
if ((page->flags & PG_FICTITIOUS) != 0) {
/*
* If the passed in reqpage page is a fake page, update it with
* the new physical address.
*/
VM_OBJECT_LOCK(object);
dev_pager_updatefake(page, paddr);
dev_pager_updatefake(page, paddr, memattr);
if (count > 1) {
vm_page_lock_queues();
for (i = 0; i < count; i++) {
@ -256,7 +264,7 @@ dev_pager_getpages(object, m, count, reqpage)
* Replace the passed in reqpage page with our own fake page and
* free up all of the original pages.
*/
page = dev_pager_getfake(paddr);
page = dev_pager_getfake(paddr, memattr);
VM_OBJECT_LOCK(object);
TAILQ_INSERT_TAIL(&object->un_pager.devp.devp_pglist, page, pageq);
vm_page_lock_queues();
@ -296,47 +304,56 @@ dev_pager_haspage(object, pindex, before, after)
}
/*
* Instantiate a fictitious page. Unlike physical memory pages, only
* the machine-independent fields must be initialized.
* Create a fictitious page with the specified physical address and memory
* attribute.
*/
static vm_page_t
dev_pager_getfake(paddr)
vm_paddr_t paddr;
dev_pager_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
{
vm_page_t m;
m = uma_zalloc(fakepg_zone, M_WAITOK);
m->flags = PG_FICTITIOUS;
m->oflags = VPO_BUSY;
/* Fictitious pages don't use "act_count". */
m->dirty = 0;
m->busy = 0;
m->queue = PQ_NONE;
m->object = NULL;
m->wire_count = 1;
m->hold_count = 0;
m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
m->phys_addr = paddr;
/* Fictitious pages don't use "segind". */
m->flags = PG_FICTITIOUS;
/* Fictitious pages don't use "order" or "pool". */
pmap_page_init(m);
m->oflags = VPO_BUSY;
m->wire_count = 1;
if (memattr != VM_MEMATTR_DEFAULT)
pmap_page_set_memattr(m, memattr);
return (m);
}
/*
* Release a fictitious page.
*/
static void
dev_pager_putfake(m)
vm_page_t m;
dev_pager_putfake(vm_page_t m)
{
if (!(m->flags & PG_FICTITIOUS))
panic("dev_pager_putfake: bad page");
/* Restore the default memory attribute to "phys_addr". */
if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
uma_zfree(fakepg_zone, m);
}
/*
* Update the given fictitious page to the specified physical address and
* memory attribute.
*/
static void
dev_pager_updatefake(m, paddr)
vm_page_t m;
vm_paddr_t paddr;
dev_pager_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
{
if (!(m->flags & PG_FICTITIOUS))
panic("dev_pager_updatefake: bad page");
/* Restore the default memory attribute before changing "phys_addr". */
if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
m->phys_addr = paddr;
if (memattr != VM_MEMATTR_DEFAULT)
pmap_page_set_memattr(m, memattr);
}


@ -79,10 +79,16 @@ struct pmap_statistics {
};
typedef struct pmap_statistics *pmap_statistics_t;
/*
* Each machine dependent implementation is expected to provide:
*
* vm_memattr_t pmap_page_get_memattr(vm_page_t);
* boolean_t pmap_page_is_mapped(vm_page_t);
* void pmap_page_set_memattr(vm_page_t, vm_memattr_t);
*/
#include <machine/pmap.h>
#ifdef _KERNEL
struct proc;
struct thread;
/*


@ -64,10 +64,10 @@
#include <machine/vm.h>
/*
* The exact set of cache control codes is machine dependent. However, every
* machine is required to define VM_CACHE_DEFAULT.
* The exact set of memory attributes is machine dependent. However, every
* machine is required to define VM_MEMATTR_DEFAULT.
*/
typedef char vm_cache_mode_t; /* cache control codes */
typedef char vm_memattr_t; /* memory attribute codes */
typedef char vm_inherit_t; /* inheritance codes */


@ -194,7 +194,8 @@ vm_page_release_contig(vm_page_t m, vm_pindex_t count)
* before they are mapped.
*/
static vm_offset_t
contigmapping(vm_map_t map, vm_size_t size, vm_page_t m, int flags)
contigmapping(vm_map_t map, vm_size_t size, vm_page_t m, vm_memattr_t memattr,
int flags)
{
vm_object_t object = kernel_object;
vm_offset_t addr, tmp_addr;
@ -210,6 +211,8 @@ contigmapping(vm_map_t map, vm_size_t size, vm_page_t m, int flags)
vm_map_unlock(map);
VM_OBJECT_LOCK(object);
for (tmp_addr = addr; tmp_addr < addr + size; tmp_addr += PAGE_SIZE) {
if (memattr != VM_MEMATTR_DEFAULT)
pmap_page_set_memattr(m, memattr);
vm_page_insert(m, object,
OFF_TO_IDX(tmp_addr - VM_MIN_KERNEL_ADDRESS));
if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
@ -236,7 +239,7 @@ contigmalloc(
void *ret;
ret = (void *)kmem_alloc_contig(kernel_map, size, flags, low, high,
alignment, boundary, VM_CACHE_DEFAULT);
alignment, boundary, VM_MEMATTR_DEFAULT);
if (ret != NULL)
malloc_type_allocated(type, round_page(size));
return (ret);
@ -245,7 +248,7 @@ contigmalloc(
vm_offset_t
kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
vm_paddr_t high, unsigned long alignment, unsigned long boundary,
vm_cache_mode_t mode)
vm_memattr_t memattr)
{
vm_offset_t ret;
vm_page_t pages;
@ -256,8 +259,7 @@ kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
npgs = size >> PAGE_SHIFT;
tries = 0;
retry:
pages = vm_phys_alloc_contig(npgs, low, high, alignment, boundary,
mode);
pages = vm_phys_alloc_contig(npgs, low, high, alignment, boundary);
if (pages == NULL) {
if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
vm_page_lock_queues();
@ -282,7 +284,7 @@ kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
}
ret = 0;
} else {
ret = contigmapping(map, size, pages, flags);
ret = contigmapping(map, size, pages, memattr, flags);
if (ret == 0)
vm_page_release_contig(pages, npgs);
}


@ -43,7 +43,7 @@ int kernacc(void *, int, int);
vm_offset_t kmem_alloc(vm_map_t, vm_size_t);
vm_offset_t kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags,
vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
unsigned long boundary, vm_cache_mode_t mode);
unsigned long boundary, vm_memattr_t memattr);
vm_offset_t kmem_alloc_nofault(vm_map_t, vm_size_t);
vm_offset_t kmem_alloc_wait(vm_map_t, vm_size_t);
void kmem_free(vm_map_t, vm_offset_t, vm_size_t);


@ -222,6 +222,7 @@ _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
object->size = size;
object->generation = 1;
object->ref_count = 1;
object->memattr = VM_MEMATTR_DEFAULT;
object->flags = 0;
object->uip = NULL;
object->charge = 0;
@ -290,6 +291,36 @@ vm_object_clear_flag(vm_object_t object, u_short bits)
object->flags &= ~bits;
}
/*
* Sets the default memory attribute for the specified object. Pages
* that are allocated to this object are by default assigned this memory
* attribute.
*
* Presently, this function must be called before any pages are allocated
* to the object. In the future, this requirement may be relaxed for
* "default" and "swap" objects.
*/
int
vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr)
{
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
switch (object->type) {
case OBJT_DEFAULT:
case OBJT_DEVICE:
case OBJT_PHYS:
case OBJT_SWAP:
case OBJT_VNODE:
if (!TAILQ_EMPTY(&object->memq))
return (KERN_FAILURE);
break;
case OBJT_DEAD:
return (KERN_INVALID_ARGUMENT);
}
object->memattr = memattr;
return (KERN_SUCCESS);
}
void
vm_object_pip_add(vm_object_t object, short i)
{


@ -92,6 +92,7 @@ struct vm_object {
int generation; /* generation ID */
int ref_count; /* How many refs?? */
int shadow_count; /* how many objects that this is a shadow for */
vm_memattr_t memattr; /* default memory attribute for pages */
objtype_t type; /* type of pager */
u_short flags; /* see below */
u_short pg_color; /* (c) color of first page in obj */
@ -213,6 +214,7 @@ void vm_object_page_remove (vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t);
boolean_t vm_object_populate(vm_object_t, vm_pindex_t, vm_pindex_t);
void vm_object_reference (vm_object_t);
void vm_object_reference_locked(vm_object_t);
int vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr);
void vm_object_shadow (vm_object_t *, vm_ooffset_t *, vm_size_t);
void vm_object_split(vm_map_entry_t);
void vm_object_sync(vm_object_t, vm_ooffset_t, vm_size_t, boolean_t,


@ -1109,12 +1109,15 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
*/
KASSERT(m != NULL, ("vm_page_alloc: missing page"));
KASSERT(m->queue == PQ_NONE, ("vm_page_alloc: page %p has unexpected queue %d",
m, m->queue));
KASSERT(m->queue == PQ_NONE,
("vm_page_alloc: page %p has unexpected queue %d", m, m->queue));
KASSERT(m->wire_count == 0, ("vm_page_alloc: page %p is wired", m));
KASSERT(m->hold_count == 0, ("vm_page_alloc: page %p is held", m));
KASSERT(m->busy == 0, ("vm_page_alloc: page %p is busy", m));
KASSERT(m->dirty == 0, ("vm_page_alloc: page %p is dirty", m));
KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
("vm_page_alloc: page %p has unexpected memattr %d", m,
pmap_page_get_memattr(m)));
if ((m->flags & PG_CACHED) != 0) {
KASSERT(m->valid != 0,
("vm_page_alloc: cached page %p is invalid", m));
@ -1157,9 +1160,11 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
m->act_count = 0;
mtx_unlock(&vm_page_queue_free_mtx);
if ((req & VM_ALLOC_NOOBJ) == 0)
if (object != NULL) {
if (object->memattr != VM_MEMATTR_DEFAULT)
pmap_page_set_memattr(m, object->memattr);
vm_page_insert(m, object, pindex);
else
} else
m->pindex = pindex;
/*
@ -1415,6 +1420,16 @@ vm_page_free_toq(vm_page_t m)
m->flags &= ~PG_ZERO;
vm_page_enqueue(PQ_HOLD, m);
} else {
/*
* Restore the default memory attribute to the page.
*/
if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
/*
* Insert the page into the physical memory allocator's
* cache/free page queues.
*/
mtx_lock(&vm_page_queue_free_mtx);
m->flags |= PG_FREE;
cnt.v_free_count++;
@ -1663,6 +1678,12 @@ vm_page_cache(vm_page_t m)
object->resident_page_count--;
object->generation++;
/*
* Restore the default memory attribute to the page.
*/
if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
/*
* Insert the page into the object's collection of cached pages
* and the physical memory allocator's cache/free page queues.


@ -588,7 +588,7 @@ vm_phys_zero_pages_idle(void)
*/
vm_page_t
vm_phys_alloc_contig(unsigned long npages, vm_paddr_t low, vm_paddr_t high,
unsigned long alignment, unsigned long boundary, vm_cache_mode_t mode)
unsigned long alignment, unsigned long boundary)
{
struct vm_freelist *fl;
struct vm_phys_seg *seg;
@ -698,6 +698,9 @@ vm_phys_alloc_contig(unsigned long npages, vm_paddr_t low, vm_paddr_t high,
("vm_phys_alloc_contig: page %p is busy", m));
KASSERT(m->dirty == 0,
("vm_phys_alloc_contig: page %p is dirty", m));
KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
("vm_phys_alloc_contig: page %p has unexpected memattr %d",
m, pmap_page_get_memattr(m)));
if ((m->flags & PG_CACHED) != 0) {
m->valid = 0;
m_object = m->object;


@ -43,7 +43,7 @@
void vm_phys_add_page(vm_paddr_t pa);
vm_page_t vm_phys_alloc_contig(unsigned long npages,
vm_paddr_t low, vm_paddr_t high,
unsigned long alignment, unsigned long boundary, vm_cache_mode_t mode);
unsigned long alignment, unsigned long boundary);
vm_page_t vm_phys_alloc_pages(int pool, int order);
vm_paddr_t vm_phys_bootstrap_alloc(vm_size_t size, unsigned long alignment);
void vm_phys_free_pages(vm_page_t m, int order);