MI changes:

 - provide an interface (macros) to the page coloring part of the VM system;
   this allows different coloring algorithms to be tried without touching
   every file [1] (see the macro sketch after this list)
 - make the page queue tuning values readable: sysctl vm.stats.pagequeue
 - autotune the page coloring values based upon the detected cache size
   instead of options in the kernel config (disabling page coloring via a
   kernel option is still possible)
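   (For illustration only: the macro layer mentioned in the first item lives
   in vm_page.h; the following is a condensed sketch, simplified from the
   full diff further down, not a verbatim copy.)

	/* Real queue index of a page vs. the "well known" queue it is on. */
	#define VM_PAGE_GETQUEUE(m)        ((m)->queue)
	#define VM_PAGE_GETKNOWNQUEUE1(m)  ((m)->queue - (m)->pc)  /* colored: PQ_FREE, PQ_CACHE */
	#define VM_PAGE_GETKNOWNQUEUE2(m)  VM_PAGE_GETQUEUE(m)     /* uncolored: PQ_ACTIVE, ... */
	#define VM_PAGE_INQUEUE1(m, q)     (VM_PAGE_GETKNOWNQUEUE1(m) == (q))
	#define VM_PAGE_INQUEUE2(m, q)     (VM_PAGE_GETKNOWNQUEUE2(m) == (q))
	#define VM_PAGE_SETQUEUE1(m, q)    (VM_PAGE_GETQUEUE(m) = (q) + (m)->pc)
	#define VM_PAGE_SETQUEUE2(m, q)    (VM_PAGE_GETQUEUE(m) = (q))

	/* Callers stop open-coding "m->queue - m->pc", e.g.: */
	if (VM_PAGE_INQUEUE1(m, PQ_CACHE))
		vm_page_deactivate(m);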

MD changes:
 - detection of the cache size: only IA32 and AMD64 (untested) contain cache
   size detection code; every other arch just comes with a dummy function
   (this results in the same default values as were used before the
   autotuning of the page coloring); see the setPQL2() sketch after this list
 - print some more info on Intel CPUs (like we do on AMD and Transmeta CPUs)
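   (Sketch, for illustration: each arch provides a single hook,
   setPQL2(int *const size, int *const ways), which reports the L2 cache size
   in KB and its associativity; the dummy versions leave both values at 0 and
   the MI code falls back to the old defaults. The helper name
   pq_colors_sketch() below is made up; the real logic is vm_coloring_init()
   in vm_pageq.c in the diff further down.)

	void setPQL2(int *const size, int *const ways);	/* MD hook, dummy on most arches */

	static int
	pq_colors_sketch(void)
	{
		int size = 0, ways = 0;

		setPQL2(&size, &ways);
		if (size <= 0 || ways <= 0)
			return (32);	/* no detection: keep the old default */
		/*
		 * colors = cache size / page size / associativity, e.g. a
		 * 2048 KB, 8-way L2 with 4 KB pages gives 2048 / 4 / 8 = 64
		 * colors (which then selects the tuning primes 13 and 7).
		 */
		return (size / (PAGE_SIZE / 1024) / ways);
	}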

Note to AMD owners (IA32 and AMD64): please run "sysctl vm.stats.pagequeue"
and report whether the cache* values are zero (which would indicate a bug in
the cache detection code) or not.
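On a box where the detection works the output should look roughly like the
following (OID names taken from the sysctl declarations in vm_pageq.c in the
diff below; the values are machine dependent and given here only as a
hypothetical example for a 2 MB, 8-way L2):

	# sysctl vm.stats.pagequeue
	vm.stats.pagequeue.page_colors: 64
	vm.stats.pagequeue.cachesize: 2048
	vm.stats.pagequeue.cachenways: 8
	vm.stats.pagequeue.prime1: 13
	vm.stats.pagequeue.prime2: 7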

Based upon work by:	Chad David <davidc@acns.ab.ca> [1]
Reviewed by:		alc, arch (in 2004)
Discussed with:		alc, Chad David, arch (in 2004)
Alexander Leidinger 2005-12-31 14:39:20 +00:00
parent a54b458055
commit ef39c05baa
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=153940
19 changed files with 757 additions and 154 deletions

View File

@ -171,6 +171,13 @@ struct cpuinit api_cpuinit[] = {
};
int napi_cpuinit = (sizeof(api_cpuinit) / sizeof(api_cpuinit[0]));
void setPQL2(int *const size, int *const ways);
void
setPQL2(int *const size, int *const ways)
{
return;
}
void
platform_not_configured(int cputype)

View File

@ -69,6 +69,8 @@ void panicifcpuunsupported(void);
static void print_AMD_info(void);
static void print_AMD_assoc(int i);
void setPQL2(int *const size, int *const ways);
static void setPQL2_AMD(int *const size, int *const ways);
int cpu_class;
char machine[] = "amd64";
@ -93,6 +95,9 @@ static struct {
{ "Sledgehammer", CPUCLASS_K8 }, /* CPU_SLEDGEHAMMER */
};
extern int pq_l2size;
extern int pq_l2nways;
void
printcpuinfo(void)
{
@ -526,3 +531,30 @@ print_AMD_info(void)
print_AMD_l2_assoc((regs[2] >> 12) & 0x0f);
}
}
static void
setPQL2_AMD(int *const size, int *const ways)
{
if (cpu_exthigh >= 0x80000006) {
u_int regs[4];
do_cpuid(0x80000006, regs);
*size = regs[2] >> 16;
*ways = (regs[2] >> 12) & 0x0f;
switch (*ways) {
case 0: /* disabled/not present */
case 15: /* fully associative */
default: *ways = 1; break; /* reserved configuration */
case 4: *ways = 4; break;
case 6: *ways = 8; break;
case 8: *ways = 16; break;
}
}
}
void
setPQL2(int *const size, int *const ways)
{
if (strcmp(cpu_vendor, "AuthenticAMD") == 0)
setPQL2_AMD(size, ways);
}

View File

@ -298,6 +298,15 @@ static const char * const wtnames[] = {
"**unknown 15**",
};
void setPQL2(int *const size, int *const ways);
void
setPQL2(int *const size, int *const ways)
{
return;
}
extern int ctrl;
void
identify_arm_cpu(void)

View File

@ -111,14 +111,8 @@ options DFLDSIZ=(1024UL*1024*1024)
options BLKDEV_IOSIZE=8192
# Options for the VM subsystem
# L2 cache size (in KB) can be specified in PQ_CACHESIZE
options PQ_CACHESIZE=512 # color for 512k cache
# Deprecated options supported for backwards compatibility
#options PQ_NOOPT # No coloring
#options PQ_LARGECACHE # color for 512k cache
#options PQ_HUGECACHE # color for 1024k cache
#options PQ_MEDIUMCACHE # color for 256k cache
#options PQ_NORMALCACHE # color for 64k cache
# This allows you to actually store this configuration file into
# the kernel binary itself, where it may be later read by saying:
@ -409,7 +403,7 @@ options REGRESSION
#
# RESTARTABLE_PANICS allows one to continue from a panic as if it were
# a call to the debugger via the Debugger() function instead. It is only
# a call to the debugger instead. It is only
# useful if a kernel debugger is present. To restart from a panic, reset
# the panicstr variable to NULL and continue execution. This option is
# for development use only and should NOT be used in production systems

View File

@ -517,11 +517,6 @@ NO_SWAPPING opt_vm.h
MALLOC_MAKE_FAILURES opt_vm.h
MALLOC_PROFILE opt_vm.h
PQ_NOOPT opt_vmpage.h
PQ_NORMALCACHE opt_vmpage.h
PQ_MEDIUMCACHE opt_vmpage.h
PQ_LARGECACHE opt_vmpage.h
PQ_HUGECACHE opt_vmpage.h
PQ_CACHESIZE opt_vmpage.h
# The MemGuard replacement allocator used for tamper-after-free detection
DEBUG_MEMGUARD opt_vm.h

View File

@ -74,7 +74,13 @@ void enable_K6_2_wt_alloc(void);
void panicifcpuunsupported(void);
static void identifycyrix(void);
void setPQL2(int *const size, int *const ways);
static void setPQL2_AMD(int *const size, int *const ways);
static void setPQL2_INTEL(int *const size, int *const ways);
static void get_INTEL_TLB(u_int data, int *const size, int *const ways);
static void print_AMD_info(void);
static void print_INTEL_info(void);
static void print_INTEL_TLB(u_int data);
static void print_AMD_assoc(int i);
static void print_transmeta_info(void);
@ -873,6 +879,8 @@ printcpuinfo(void)
if (strcmp(cpu_vendor, "AuthenticAMD") == 0)
print_AMD_info();
else if (strcmp(cpu_vendor, "GenuineIntel") == 0)
print_INTEL_info();
else if (strcmp(cpu_vendor, "GenuineTMx86") == 0 ||
strcmp(cpu_vendor, "TransmetaCPU") == 0)
print_transmeta_info();
@ -1217,6 +1225,464 @@ print_AMD_info(void)
}
}
static void
print_INTEL_info(void)
{
u_int regs[4];
u_int rounds, regnum;
do_cpuid(0x2, regs);
rounds = (regs[0] & 0xff) - 1;
for (regnum = 0; regnum <= 3; ++regnum) {
if ((regs[regnum] & (1<<31)) == 0) {
if (regnum != 0)
print_INTEL_TLB(regs[regnum] & 0xff);
print_INTEL_TLB((regs[regnum] >> 8) & 0xff);
print_INTEL_TLB((regs[regnum] >> 16) & 0xff);
print_INTEL_TLB((regs[regnum] >> 24) & 0xff);
}
}
while (rounds > 0) {
do_cpuid(0x2, regs);
for (regnum = 0; regnum <= 3; ++regnum) {
if ((regs[regnum] & (1<<31)) == 0) {
if (regnum != 0)
print_INTEL_TLB(regs[regnum] & 0xff);
print_INTEL_TLB((regs[regnum] >> 8) & 0xff);
print_INTEL_TLB((regs[regnum] >> 16) & 0xff);
print_INTEL_TLB((regs[regnum] >> 24) & 0xff);
}
}
--rounds;
}
if (cpu_exthigh >= 0x80000006) {
do_cpuid(0x80000006, regs);
printf("\nL2 cache: %u kbytes, %u-way associative, %u bytes/line",
regs[2] & 0xffff, (regs[2] >> 16) & 0xff, regs[2] >> 24);
}
printf("\n");
}
static void
print_INTEL_TLB(u_int data)
{
switch (data) {
case 0x0:
case 0x40:
default:
break;
case 0x1:
printf("\nInstruction TLB: 4-KBPages, 4-way set associative, 32 entries");
break;
case 0x2:
printf("\nInstruction TLB: 4-MB Pages, fully associative, 2 entries");
break;
case 0x3:
printf("\nData TLB: 4-KB Pages, 4-way set associative, 64 entries");
break;
case 0x4:
printf("\nData TLB: 4-MB Pages, 4-way set associative, 8 entries");
break;
case 0x6:
printf("\n1st-level instruction cache: 8-KB, 4-way set associative, 32-byte line size");
break;
case 0x8:
printf("\n1st-level instruction cache: 16-KB, 4-way set associative, 32-byte line size");
break;
case 0xa:
printf("\n1st-level data cache: 8-KB, 2-way set associative, 32-byte line size");
break;
case 0xc:
printf("\n1st-level data cache: 16-KB, 4-way set associative, 32-byte line size");
break;
case 0x22:
printf("\n3rd-level cache: 512 KB, 4-way set associative, sectored cache, 64-byte line size");
break;
case 0x23:
printf("\n3rd-level cache: 1-MB, 8-way set associative, sectored cache, 64-byte line size");
break;
case 0x25:
printf("\n3rd-level cache: 2-MB, 8-way set associative, sectored cache, 64-byte line size");
break;
case 0x29:
printf("\n3rd-level cache: 4-MB, 8-way set associative, sectored cache, 64-byte line size");
break;
case 0x2c:
printf("\n1st-level data cache: 32-KB, 8-way set associative, 64-byte line size");
break;
case 0x30:
printf("\n1st-level instruction cache: 32-KB, 8-way set associative, 64-byte line size");
break;
case 0x39:
printf("\n2nd-level cache: 128-KB, 4-way set associative, sectored cache, 64-byte line size");
break;
case 0x3b:
printf("\n2nd-level cache: 128-KB, 2-way set associative, sectored cache, 64-byte line size");
break;
case 0x3c:
printf("\n2nd-level cache: 256-KB, 4-way set associative, sectored cache, 64-byte line size");
break;
case 0x41:
printf("\n2nd-level cache: 128-KB, 4-way set associative, 32-byte line size");
break;
case 0x42:
printf("\n2nd-level cache: 256-KB, 4-way set associative, 32-byte line size");
break;
case 0x43:
printf("\n2nd-level cache: 512-KB, 4-way set associative, 32 byte line size");
break;
case 0x44:
printf("\n2nd-level cache: 1-MB, 4-way set associative, 32 byte line size");
break;
case 0x45:
printf("\n2nd-level cache: 2-MB, 4-way set associative, 32 byte line size");
break;
case 0x50:
printf("\nInstruction TLB: 4-KB, 2-MB or 4-MB pages, fully associative, 64 entries");
break;
case 0x51:
printf("\nInstruction TLB: 4-KB, 2-MB or 4-MB pages, fully associative, 128 entries");
break;
case 0x52:
printf("\nInstruction TLB: 4-KB, 2-MB or 4-MB pages, fully associative, 256 entries");
break;
case 0x5b:
printf("\nData TLB: 4-KB or 4-MB pages, fully associative, 64 entries");
break;
case 0x5c:
printf("\nData TLB: 4-KB or 4-MB pages, fully associative, 128 entries");
break;
case 0x5d:
printf("\nData TLB: 4-KB or 4-MB pages, fully associative, 256 entries");
break;
case 0x60:
printf("\n1st-level data cache: 16-KB, 8-way set associative, sectored cache, 64-byte line size");
break;
case 0x66:
printf("\n1st-level data cache: 8-KB, 4-way set associative, sectored cache, 64-byte line size");
break;
case 0x67:
printf("\n1st-level data cache: 16-KB, 4-way set associative, sectored cache, 64-byte line size");
break;
case 0x68:
printf("\n1st-level data cache: 32-KB, 4 way set associative, sectored cache, 64-byte line size");
break;
case 0x70:
printf("\nTrace cache: 12K-uops, 8-way set associative");
break;
case 0x71:
printf("\nTrace cache: 16K-uops, 8-way set associative");
break;
case 0x72:
printf("\nTrace cache: 32K-uops, 8-way set associative");
break;
case 0x79:
printf("\n2nd-level cache: 128-KB, 8-way set associative, sectored cache, 64-byte line size");
break;
case 0x7a:
printf("\n2nd-level cache: 256-KB, 8-way set associative, sectored cache, 64-byte line size");
break;
case 0x7b:
printf("\n2nd-level cache: 512-KB, 8-way set associative, sectored cache, 64-byte line size");
break;
case 0x7c:
printf("\n2nd-level cache: 1-MB, 8-way set associative, sectored cache, 64-byte line size");
break;
case 0x82:
printf("\n2nd-level cache: 256-KB, 8-way set associative, 32 byte line size");
break;
case 0x83:
printf("\n2nd-level cache: 512-KB, 8-way set associative, 32 byte line size");
break;
case 0x84:
printf("\n2nd-level cache: 1-MB, 8-way set associative, 32 byte line size");
break;
case 0x85:
printf("\n2nd-level cache: 2-MB, 8-way set associative, 32 byte line size");
break;
case 0x86:
printf("\n2nd-level cache: 512-KB, 4-way set associative, 64 byte line size");
break;
case 0x87:
printf("\n2nd-level cache: 1-MB, 8-way set associative, 64 byte line size");
break;
case 0xb0:
printf("\nInstruction TLB: 4-KB Pages, 4-way set associative, 128 entries");
break;
case 0xb3:
printf("\nData TLB: 4-KB Pages, 4-way set associative, 128 entries");
break;
}
}
static void
setPQL2_AMD(int *const size, int *const ways)
{
if (cpu_exthigh >= 0x80000006) {
u_int regs[4];
do_cpuid(0x80000006, regs);
*size = regs[2] >> 16;
*ways = (regs[2] >> 12) & 0x0f;
if (*ways == 15) /* fully associative */
*ways = 1;
}
}
static void
setPQL2_INTEL(int *const size, int *const ways)
{
u_int rounds, regnum;
u_int regs[4];
do_cpuid(0x2, regs);
rounds = (regs[0] & 0xff) - 1;
for (regnum = 0; regnum <= 3; ++regnum) {
if ((regs[regnum] & (1<<31)) == 0) {
if (regnum != 0)
get_INTEL_TLB(regs[regnum] & 0xff,
size, ways);
get_INTEL_TLB((regs[regnum] >> 8) & 0xff,
size, ways);
get_INTEL_TLB((regs[regnum] >> 16) & 0xff,
size, ways);
get_INTEL_TLB((regs[regnum] >> 24) & 0xff,
size, ways);
}
}
while (rounds > 0) {
do_cpuid(0x2, regs);
for (regnum = 0; regnum <= 3; ++regnum) {
if ((regs[regnum] & (1<<31)) == 0) {
if (regnum != 0)
get_INTEL_TLB(regs[regnum] & 0xff,
size, ways);
get_INTEL_TLB((regs[regnum] >> 8) & 0xff,
size, ways);
get_INTEL_TLB((regs[regnum] >> 16) & 0xff,
size, ways);
get_INTEL_TLB((regs[regnum] >> 24) & 0xff,
size, ways);
}
}
--rounds;
}
if (cpu_exthigh >= 0x80000006) {
do_cpuid(0x80000006, regs);
if (*size < (regs[2] & 0xffff)) {
*size = regs[2] & 0xffff;
*ways = (regs[2] >> 16) & 0xff;
}
}
}
static void
get_INTEL_TLB(u_int data, int *const size, int *const ways)
{
switch (data) {
default:
break;
case 0x22:
/* 3rd-level cache: 512 KB, 4-way set associative,
* sectored cache, 64-byte line size */
if (*size < 512) {
*size = 512;
*ways = 4;
}
break;
case 0x23:
/* 3rd-level cache: 1-MB, 8-way set associative,
* sectored cache, 64-byte line size */
if (*size < 1024) {
*size = 1024;
*ways = 8;
}
break;
case 0x25:
/* 3rd-level cache: 2-MB, 8-way set associative,
* sectored cache, 64-byte line size */
if (*size < 2048) {
*size = 2048;
*ways = 8;
}
break;
case 0x29:
/* 3rd-level cache: 4-MB, 8-way set associative,
* sectored cache, 64-byte line size */
if (*size < 4096) {
*size = 4096;
*ways = 8;
}
break;
case 0x39:
/* 2nd-level cache: 128-KB, 4-way set associative,
* sectored cache, 64-byte line size */
if (*size < 128) {
*size = 128;
*ways = 4;
}
break;
case 0x3b:
/* 2nd-level cache: 128-KB, 2-way set associative,
* sectored cache, 64-byte line size */
if (*size < 128) {
*size = 128;
*ways = 2;
}
break;
case 0x3c:
/* 2nd-level cache: 256-KB, 4-way set associative,
* sectored cache, 64-byte line size */
if (*size < 256) {
*size = 256;
*ways = 4;
}
break;
case 0x41:
/* 2nd-level cache: 128-KB, 4-way set associative,
* 32-byte line size */
if (*size < 128) {
*size = 128;
*ways = 4;
}
break;
case 0x42:
/* 2nd-level cache: 256-KB, 4-way set associative,
* 32-byte line size */
if (*size < 256) {
*size = 256;
*ways = 4;
}
break;
case 0x43:
/* 2nd-level cache: 512-KB, 4-way set associative,
* 32 byte line size */
if (*size < 512) {
*size = 512;
*ways = 4;
}
break;
case 0x44:
/* 2nd-level cache: 1-MB, 4-way set associative,
* 32 byte line size */
if (*size < 1024) {
*size = 1024;
*ways = 4;
}
break;
case 0x45:
/* 2nd-level cache: 2-MB, 4-way set associative,
* 32 byte line size */
if (*size < 2048) {
*size = 2048;
*ways = 4;
}
break;
case 0x79:
/* 2nd-level cache: 128-KB, 8-way set associative,
* sectored cache, 64-byte line size */
if (*size < 128) {
*size = 128;
*ways = 8;
}
break;
case 0x7a:
/* 2nd-level cache: 256-KB, 8-way set associative,
* sectored cache, 64-byte line size */
if (*size < 256) {
*size = 256;
*ways = 8;
}
break;
case 0x7b:
/* 2nd-level cache: 512-KB, 8-way set associative,
* sectored cache, 64-byte line size */
if (*size < 512) {
*size = 512;
*ways = 8;
}
break;
case 0x7c:
/* 2nd-level cache: 1-MB, 8-way set associative,
* sectored cache, 64-byte line size */
if (*size < 1024) {
*size = 1024;
*ways = 8;
}
break;
case 0x82:
/* 2nd-level cache: 256-KB, 8-way set associative,
* 32 byte line size */
if (*size < 256) {
*size = 256;
*ways = 8;
}
break;
case 0x83:
/* 2nd-level cache: 512-KB, 8-way set associative,
* 32 byte line size */
if (*size < 512) {
*size = 512;
*ways = 8;
}
break;
case 0x84:
/* 2nd-level cache: 1-MB, 8-way set associative,
* 32 byte line size */
if (*size < 1024) {
*size = 1024;
*ways = 8;
}
break;
case 0x85:
/* 2nd-level cache: 2-MB, 8-way set associative,
* 32 byte line size */
if (*size < 2048) {
*size = 2048;
*ways = 8;
}
break;
case 0x86:
/* 2nd-level cache: 512-KB, 4-way set associative,
* 64 byte line size */
if (*size < 512) {
*size = 512;
*ways = 4;
}
break;
case 0x87:
/* 2nd-level cache: 1-MB, 8-way set associative,
* 64 byte line size */
if (*size < 1024) {
*size = 1024;
*ways = 8;
}
break;
}
}
void
setPQL2(int *const size, int *const ways)
{
if (strcmp(cpu_vendor, "AuthenticAMD") == 0)
setPQL2_AMD(size, ways);
else if (strcmp(cpu_vendor, "GenuineIntel") == 0)
setPQL2_INTEL(size, ways);
}
static void
print_transmeta_info()
{

View File

@ -158,6 +158,14 @@ struct kva_md_info kmi;
#define Mhz 1000000L
#define Ghz (1000L*Mhz)
void setPQL2(int *const size, int *const ways);
void
setPQL2(int *const size, int *const ways)
{
return;
}
static void
identifycpu(void)
{

View File

@ -2863,9 +2863,9 @@ allocbuf(struct buf *bp, int size)
* page daemon?
*/
if ((curproc != pageproc) &&
((m->queue - m->pc) == PQ_CACHE) &&
(VM_PAGE_INQUEUE1(m, PQ_CACHE)) &&
((cnt.v_free_count + cnt.v_cache_count) <
(cnt.v_free_min + cnt.v_cache_min))) {
(cnt.v_free_min + cnt.v_cache_min))) {
pagedaemon_wakeup();
}
vm_page_wire(m);

View File

@ -170,6 +170,14 @@ struct bat battable[16];
struct kva_md_info kmi;
void setPQL2(int *const size, int *const ways);
void
setPQL2(int *const size, int *const ways)
{
return;
}
static void
powerpc_ofw_shutdown(void *junk, int howto)
{

View File

@ -170,6 +170,14 @@ struct bat battable[16];
struct kva_md_info kmi;
void setPQL2(int *const size, int *const ways);
void
setPQL2(int *const size, int *const ways)
{
return;
}
static void
powerpc_ofw_shutdown(void *junk, int howto)
{

View File

@ -27,6 +27,14 @@ SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD,
int cpu_impl;
void setPQL2(int *const size, int *const ways);
void
setPQL2(int *const size, int *const ways)
{
return;
}
void
cpu_identify(u_long vers, u_int freq, u_int id)
{

View File

@ -141,7 +141,7 @@ vm_contig_launder(int queue)
if ((m->flags & PG_MARKER) != 0)
continue;
KASSERT(m->queue == queue,
KASSERT(VM_PAGE_INQUEUE2(m, queue),
("vm_contig_launder: page %p's queue is not %d", m, queue));
error = vm_contig_launder_page(m);
if (error == 0)
@ -255,7 +255,7 @@ contigmalloc1(
for (i = start; i < (start + size / PAGE_SIZE); i++) {
vm_page_t m = &pga[i];
if ((m->queue - m->pc) == PQ_CACHE) {
if (VM_PAGE_INQUEUE1(m, PQ_CACHE)) {
if (m->hold_count != 0) {
start++;
goto again0;
@ -456,16 +456,15 @@ vm_page_alloc_contig(vm_pindex_t npages, vm_paddr_t low, vm_paddr_t high,
pqtype = m->queue - m->pc;
if (pass != 0 && pqtype != PQ_FREE &&
pqtype != PQ_CACHE) {
switch (m->queue) {
case PQ_ACTIVE:
case PQ_INACTIVE:
if (m->queue == PQ_ACTIVE ||
m->queue == PQ_INACTIVE) {
if (vm_contig_launder_page(m) != 0)
goto cleanup_freed;
pqtype = m->queue - m->pc;
if (pqtype == PQ_FREE ||
pqtype == PQ_CACHE)
break;
default:
} else {
cleanup_freed:
vm_page_release_contigl(&pga[i + 1],
start + npages - 1 - i);

View File

@ -410,7 +410,8 @@ RetryFault:;
vm_pageq_remove_nowakeup(fs.m);
if ((queue - fs.m->pc) == PQ_CACHE && vm_page_count_severe()) {
if ((queue - fs.m->pc) == PQ_CACHE \
&& vm_page_count_severe()) {
vm_page_activate(fs.m);
vm_page_unlock_queues();
unlock_and_deallocate(&fs);
@ -1006,7 +1007,7 @@ vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
(m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
vm_page_lock_queues();
if ((m->queue - m->pc) == PQ_CACHE)
if (VM_PAGE_INQUEUE1(m, PQ_CACHE))
vm_page_deactivate(m);
mpte = pmap_enter_quick(pmap, addr, m,
entry->protection, mpte);

View File

@ -215,12 +215,11 @@ _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
object->flags = 0;
if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
object->flags = OBJ_ONEMAPPING;
if (size > (PQ_L2_SIZE / 3 + PQ_PRIME1))
incr = PQ_L2_SIZE / 3 + PQ_PRIME1;
else
incr = PQ_MAXLENGTH;
if (size <= incr)
incr = size;
object->pg_color = next_index;
next_index = (object->pg_color + incr) & PQ_L2_MASK;
next_index = (object->pg_color + incr) & PQ_COLORMASK;
object->handle = NULL;
object->backing_object = NULL;
object->backing_object_offset = (vm_ooffset_t) 0;
@ -1228,15 +1227,13 @@ vm_object_shadow(
source->generation++;
if (length < source->size)
length = source->size;
if (length > PQ_L2_SIZE / 3 + PQ_PRIME1 ||
source->generation > 1)
length = PQ_L2_SIZE / 3 + PQ_PRIME1;
if (length > PQ_MAXLENGTH || source->generation > 1)
length = PQ_MAXLENGTH;
result->pg_color = (source->pg_color +
length * source->generation) & PQ_L2_MASK;
length * source->generation) & PQ_COLORMASK;
result->flags |= source->flags & OBJ_NEEDGIANT;
VM_OBJECT_UNLOCK(source);
next_index = (result->pg_color + PQ_L2_SIZE / 3 + PQ_PRIME1) &
PQ_L2_MASK;
next_index = (result->pg_color + PQ_MAXLENGTH) & PQ_COLORMASK;
}
@ -2127,7 +2124,7 @@ DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
if (rcount) {
padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m);
padiff >>= PAGE_SHIFT;
padiff &= PQ_L2_MASK;
padiff &= PQ_COLORMASK;
if (padiff == 0) {
pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE;
++rcount;

View File

@ -382,7 +382,7 @@ vm_page_unhold(vm_page_t mem)
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
--mem->hold_count;
KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
if (mem->hold_count == 0 && mem->queue == PQ_HOLD)
if (mem->hold_count == 0 && VM_PAGE_INQUEUE2(mem, PQ_HOLD))
vm_page_free_toq(mem);
}
@ -457,9 +457,9 @@ vm_page_sleep_if_busy(vm_page_t m, int also_m_busy, const char *msg)
void
vm_page_dirty(vm_page_t m)
{
KASSERT(m->queue - m->pc != PQ_CACHE,
KASSERT(VM_PAGE_GETKNOWNQUEUE1(m) != PQ_CACHE,
("vm_page_dirty: page in cache!"));
KASSERT(m->queue - m->pc != PQ_FREE,
KASSERT(VM_PAGE_GETKNOWNQUEUE1(m) != PQ_FREE,
("vm_page_dirty: page is free!"));
m->dirty = VM_PAGE_BITS_ALL;
}
@ -700,7 +700,7 @@ vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
vm_page_remove(m);
vm_page_insert(m, new_object, new_pindex);
if (m->queue - m->pc == PQ_CACHE)
if (VM_PAGE_INQUEUE1(m, PQ_CACHE))
vm_page_deactivate(m);
vm_page_dirty(m);
}
@ -777,9 +777,9 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
KASSERT(object != NULL,
("vm_page_alloc: NULL object."));
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
color = (pindex + object->pg_color) & PQ_L2_MASK;
color = (pindex + object->pg_color) & PQ_COLORMASK;
} else
color = pindex & PQ_L2_MASK;
color = pindex & PQ_COLORMASK;
/*
* The pager is allowed to eat deeper into the free page list.
@ -946,8 +946,8 @@ vm_page_activate(vm_page_t m)
{
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if (m->queue != PQ_ACTIVE) {
if ((m->queue - m->pc) == PQ_CACHE)
if (VM_PAGE_GETKNOWNQUEUE2(m) != PQ_ACTIVE) {
if (VM_PAGE_INQUEUE1(m, PQ_CACHE))
cnt.v_reactivated++;
vm_pageq_remove(m);
if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
@ -1016,12 +1016,12 @@ vm_page_free_toq(vm_page_t m)
("vm_page_free_toq: freeing mapped page %p", m));
cnt.v_tfree++;
if (m->busy || ((m->queue - m->pc) == PQ_FREE)) {
if (m->busy || VM_PAGE_INQUEUE1(m, PQ_FREE)) {
printf(
"vm_page_free: pindex(%lu), busy(%d), PG_BUSY(%d), hold(%d)\n",
(u_long)m->pindex, m->busy, (m->flags & PG_BUSY) ? 1 : 0,
m->hold_count);
if ((m->queue - m->pc) == PQ_FREE)
if (VM_PAGE_INQUEUE1(m, PQ_FREE))
panic("vm_page_free: freeing free page");
else
panic("vm_page_free: freeing busy page");
@ -1064,10 +1064,10 @@ vm_page_free_toq(vm_page_t m)
if (m->hold_count != 0) {
m->flags &= ~PG_ZERO;
m->queue = PQ_HOLD;
VM_PAGE_SETQUEUE2(m, PQ_HOLD);
} else
m->queue = PQ_FREE + m->pc;
pq = &vm_page_queues[m->queue];
VM_PAGE_SETQUEUE1(m, PQ_FREE);
pq = &vm_page_queues[VM_PAGE_GETQUEUE(m)];
mtx_lock_spin(&vm_page_queue_free_mtx);
pq->lcnt++;
++(*pq->cnt);
@ -1220,10 +1220,10 @@ _vm_page_deactivate(vm_page_t m, int athead)
/*
* Ignore if already inactive.
*/
if (m->queue == PQ_INACTIVE)
if (VM_PAGE_INQUEUE2(m, PQ_INACTIVE))
return;
if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
if ((m->queue - m->pc) == PQ_CACHE)
if (VM_PAGE_INQUEUE1(m, PQ_CACHE))
cnt.v_reactivated++;
vm_page_flag_clear(m, PG_WINATCFLS);
vm_pageq_remove(m);
@ -1231,7 +1231,7 @@ _vm_page_deactivate(vm_page_t m, int athead)
TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
else
TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
m->queue = PQ_INACTIVE;
VM_PAGE_SETQUEUE2(m, PQ_INACTIVE);
vm_page_queues[PQ_INACTIVE].lcnt++;
cnt.v_inactive_count++;
}
@ -1307,7 +1307,7 @@ vm_page_cache(vm_page_t m)
printf("vm_page_cache: attempting to cache busy page\n");
return;
}
if ((m->queue - m->pc) == PQ_CACHE)
if (VM_PAGE_INQUEUE1(m, PQ_CACHE))
return;
/*
@ -1359,8 +1359,8 @@ vm_page_dontneed(vm_page_t m)
* occassionally leave the page alone
*/
if ((dnw & 0x01F0) == 0 ||
m->queue == PQ_INACTIVE ||
m->queue - m->pc == PQ_CACHE
VM_PAGE_INQUEUE2(m, PQ_INACTIVE) ||
VM_PAGE_INQUEUE1(m, PQ_CACHE)
) {
if (m->act_count >= ACT_INIT)
--m->act_count;
@ -1734,13 +1734,13 @@ DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
{
int i;
db_printf("PQ_FREE:");
for (i = 0; i < PQ_L2_SIZE; i++) {
for (i = 0; i < PQ_NUMCOLORS; i++) {
db_printf(" %d", vm_page_queues[PQ_FREE + i].lcnt);
}
db_printf("\n");
db_printf("PQ_CACHE:");
for (i = 0; i < PQ_L2_SIZE; i++) {
for (i = 0; i < PQ_NUMCOLORS; i++) {
db_printf(" %d", vm_page_queues[PQ_CACHE + i].lcnt);
}
db_printf("\n");

View File

@ -146,70 +146,39 @@ CTASSERT(sizeof(u_long) >= 8);
#endif
#endif
#if !defined(KLD_MODULE)
/*
* Page coloring parameters
*/
/* PQ_CACHE and PQ_FREE each represent PQ_NUMCOLORS consecutive queues. */
#define PQ_NONE 0
#define PQ_FREE 1
#define PQ_INACTIVE (page_queue_coloring.inactive)
#define PQ_ACTIVE (page_queue_coloring.active)
#define PQ_CACHE (page_queue_coloring.cache)
#define PQ_HOLD (page_queue_coloring.hold)
#define PQ_COUNT (page_queue_coloring.count)
#define PQ_MAXCOLORS 1024
#define PQ_MAXCOUNT (4 + 2 * PQ_MAXCOLORS)
#define PQ_NUMCOLORS (page_queue_coloring.numcolors)
#define PQ_PRIME1 (page_queue_coloring.prime1)
#define PQ_PRIME2 (page_queue_coloring.prime2)
#define PQ_COLORMASK (page_queue_coloring.colormask)
#define PQ_MAXLENGTH (page_queue_coloring.maxlength)
/* Backward compatibility for existing PQ_*CACHE config options. */
#if !defined(PQ_CACHESIZE)
#if defined(PQ_HUGECACHE)
#define PQ_CACHESIZE 1024
#elif defined(PQ_LARGECACHE)
#define PQ_CACHESIZE 512
#elif defined(PQ_MEDIUMCACHE)
#define PQ_CACHESIZE 256
#elif defined(PQ_NORMALCACHE)
#define PQ_CACHESIZE 64
#elif defined(PQ_NOOPT)
#define PQ_CACHESIZE 0
#else
#define PQ_CACHESIZE 128
#endif
#endif /* !defined(PQ_CACHESIZE) */
/* Returns the real queue a page is on. */
#define VM_PAGE_GETQUEUE(m) ((m)->queue)
#if PQ_CACHESIZE >= 1024
#define PQ_PRIME1 31 /* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME2 23 /* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_L2_SIZE 256 /* A number of colors opt for 1M cache */
/* Returns the well known queue a page is on. */
#define VM_PAGE_GETKNOWNQUEUE1(m) ((m)->queue - (m)->pc)
#define VM_PAGE_GETKNOWNQUEUE2(m) VM_PAGE_GETQUEUE(m)
#elif PQ_CACHESIZE >= 512
#define PQ_PRIME1 31 /* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME2 23 /* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_L2_SIZE 128 /* A number of colors opt for 512K cache */
/* Given the real queue number and a page color, return the well known queue. */
#define VM_PAGE_RESOLVEQUEUE(m, q) ((q) - (m)->pc)
#elif PQ_CACHESIZE >= 256
#define PQ_PRIME1 13 /* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME2 7 /* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_L2_SIZE 64 /* A number of colors opt for 256K cache */
/* Returns true if the page is in the named well known queue. */
#define VM_PAGE_INQUEUE1(m, q) (VM_PAGE_GETKNOWNQUEUE1(m) == (q))
#define VM_PAGE_INQUEUE2(m, q) (VM_PAGE_GETKNOWNQUEUE2(m) == (q))
#elif PQ_CACHESIZE >= 128
#define PQ_PRIME1 9 /* Produces a good PQ_L2_SIZE/3 + PQ_PRIME1 */
#define PQ_PRIME2 5 /* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_L2_SIZE 32 /* A number of colors opt for 128k cache */
#elif PQ_CACHESIZE >= 64
#define PQ_PRIME1 5 /* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME2 3 /* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_L2_SIZE 16 /* A reasonable number of colors (opt for 64K cache) */
#else
#define PQ_PRIME1 1 /* Disable page coloring. */
#define PQ_PRIME2 1
#define PQ_L2_SIZE 1
#endif
#define PQ_L2_MASK (PQ_L2_SIZE - 1)
/* PQ_CACHE and PQ_FREE represent PQ_L2_SIZE consecutive queues. */
#define PQ_NONE 0
#define PQ_FREE 1
#define PQ_INACTIVE (1 + 1*PQ_L2_SIZE)
#define PQ_ACTIVE (2 + 1*PQ_L2_SIZE)
#define PQ_CACHE (3 + 1*PQ_L2_SIZE)
#define PQ_HOLD (3 + 2*PQ_L2_SIZE)
#define PQ_COUNT (4 + 2*PQ_L2_SIZE)
/* Sets the queue a page is on. */
#define VM_PAGE_SETQUEUE1(m, q) (VM_PAGE_GETQUEUE(m) = (q) + (m)->pc)
#define VM_PAGE_SETQUEUE2(m, q) (VM_PAGE_GETQUEUE(m) = (q))
struct vpgqueues {
struct pglist pl;
@ -217,10 +186,22 @@ struct vpgqueues {
int lcnt;
};
extern struct vpgqueues vm_page_queues[PQ_COUNT];
extern struct mtx vm_page_queue_free_mtx;
struct pq_coloring {
int numcolors;
int colormask;
int prime1;
int prime2;
int inactive;
int active;
int cache;
int hold;
int count;
int maxlength;
};
#endif /* !defined(KLD_MODULE) */
extern struct vpgqueues vm_page_queues[PQ_MAXCOUNT];
extern struct mtx vm_page_queue_free_mtx;
extern struct pq_coloring page_queue_coloring;
/*
* These are the flags defined for vm_page.

View File

@ -741,7 +741,7 @@ vm_pageout_scan(int pass)
cnt.v_pdpages++;
if (m->queue != PQ_INACTIVE) {
if (VM_PAGE_GETQUEUE(m) != PQ_INACTIVE) {
goto rescan0;
}
@ -957,7 +957,7 @@ vm_pageout_scan(int pass)
* reused for another vnode. The object might
* have been reused for another vnode.
*/
if (m->queue != PQ_INACTIVE ||
if (VM_PAGE_GETQUEUE(m) != PQ_INACTIVE ||
m->object != object ||
object->handle != vp) {
if (object->flags & OBJ_MIGHTBEDIRTY)
@ -1039,7 +1039,7 @@ vm_pageout_scan(int pass)
while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
KASSERT(m->queue == PQ_ACTIVE,
KASSERT(VM_PAGE_INQUEUE2(m, PQ_ACTIVE),
("vm_pageout_scan: page %p isn't active", m));
next = TAILQ_NEXT(m, pageq);
@ -1132,7 +1132,7 @@ vm_pageout_scan(int pass)
cache_cur = cache_last_free;
cache_first_failure = -1;
while (cnt.v_free_count < cnt.v_free_reserved && (cache_cur =
(cache_cur + PQ_PRIME2) & PQ_L2_MASK) != cache_first_failure) {
(cache_cur + PQ_PRIME2) & PQ_COLORMASK) != cache_first_failure) {
TAILQ_FOREACH(m, &vm_page_queues[PQ_CACHE + cache_cur].pl,
pageq) {
KASSERT(m->dirty == 0,
@ -1316,7 +1316,7 @@ vm_pageout_page_stats()
while ((m != NULL) && (pcount-- > 0)) {
int actcount;
KASSERT(m->queue == PQ_ACTIVE,
KASSERT(VM_PAGE_INQUEUE2(m, PQ_ACTIVE),
("vm_pageout_page_stats: page %p isn't active", m));
next = TAILQ_NEXT(m, pageq);
@ -1407,7 +1407,7 @@ vm_pageout()
cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
cnt.v_interrupt_free_min;
cnt.v_free_reserved = vm_pageout_page_count +
cnt.v_pageout_free_min + (cnt.v_page_count / 768) + PQ_L2_SIZE;
cnt.v_pageout_free_min + (cnt.v_page_count / 768) + PQ_NUMCOLORS;
cnt.v_free_severe = cnt.v_free_min / 2;
cnt.v_free_min += cnt.v_free_reserved;
cnt.v_free_severe += cnt.v_free_reserved;

View File

@ -30,9 +30,11 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/linker_set.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
@ -46,18 +48,99 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
struct vpgqueues vm_page_queues[PQ_COUNT];
static void vm_coloring_init(void);
void setPQL2(int *const size, int *const ways);
struct vpgqueues vm_page_queues[PQ_MAXCOUNT];
struct pq_coloring page_queue_coloring;
static int pq_cachesize = 0; /* size of the cache in KB */
static int pq_cachenways = 0; /* associativity of the cache */
SYSCTL_DECL(_vm_stats);
SYSCTL_NODE(_vm_stats, OID_AUTO, pagequeue, CTLFLAG_RW, 0, "VM meter stats");
SYSCTL_INT(_vm_stats_pagequeue, OID_AUTO, page_colors, CTLFLAG_RD,
&(PQ_NUMCOLORS), 0, "Number of colors in the page queue");
SYSCTL_INT(_vm_stats_pagequeue, OID_AUTO, cachesize, CTLFLAG_RD,
&pq_cachesize, 0, "Size of the processor cache in KB");
SYSCTL_INT(_vm_stats_pagequeue, OID_AUTO, cachenways, CTLFLAG_RD,
&pq_cachenways, 0, "Associativity of the processor cache");
SYSCTL_INT(_vm_stats_pagequeue, OID_AUTO, prime1, CTLFLAG_RD,
&(PQ_PRIME1), 0, "Cache tuning value");
SYSCTL_INT(_vm_stats_pagequeue, OID_AUTO, prime2, CTLFLAG_RD,
&(PQ_PRIME2), 0, "Cache tuning value");
static void
vm_coloring_init(void)
{
#ifdef PQ_NOOPT
PQ_NUMCOLORS = PQ_PRIME1 = PQ_PRIME2 = 1;
#else
setPQL2(&pq_cachesize, &pq_cachenways);
if (pq_cachesize > 0)
PQ_NUMCOLORS = pq_cachesize / (PAGE_SIZE/1024) / \
pq_cachenways;
else
PQ_NUMCOLORS = 32;
if (PQ_MAXCOLORS < PQ_NUMCOLORS) {
printf("VM-PQ color limit (PQ_MAXCOLORS=%u) exceeded (%u), see vm_page.h", PQ_MAXCOLORS, PQ_NUMCOLORS);
PQ_NUMCOLORS = PQ_MAXCOLORS;
}
if (PQ_NUMCOLORS >= 128) {
PQ_PRIME1 = 31;
PQ_PRIME2 = 23;
} else if (PQ_NUMCOLORS >= 64) {
PQ_PRIME1 = 13;
PQ_PRIME2 = 7;
} else if (PQ_NUMCOLORS >= 32) {
PQ_PRIME1 = 9;
PQ_PRIME2 = 5;
} else if (PQ_NUMCOLORS >= 16) {
PQ_PRIME1 = 5;
PQ_PRIME2 = 3;
} else
PQ_NUMCOLORS = PQ_PRIME1 = PQ_PRIME2 = 1;
#endif
/*
* PQ_CACHE represents
* PQ_NUMCOLORS consecutive queues.
*/
PQ_COLORMASK = PQ_NUMCOLORS - 1;
PQ_INACTIVE = 1 + PQ_NUMCOLORS;
PQ_ACTIVE = 2 + PQ_NUMCOLORS;
PQ_CACHE = 3 + PQ_NUMCOLORS;
PQ_HOLD = 3 + 2 * PQ_NUMCOLORS;
PQ_COUNT = 4 + 2 * PQ_NUMCOLORS;
PQ_MAXLENGTH = PQ_NUMCOLORS / 3 + PQ_PRIME1;
#if 0
/* XXX: is it possible to allocate vm_page_queues[PQ_COUNT] here? */
#error XXX: vm_page_queues = malloc(PQ_COUNT * sizeof(struct vpgqueues));
#endif
if (bootverbose)
if (PQ_NUMCOLORS > 1)
printf("Using %d colors for the VM-PQ tuning (%d, %d)\n",
PQ_NUMCOLORS, pq_cachesize, pq_cachenways);
}
void
vm_pageq_init(void)
vm_pageq_init(void)
{
int i;
for (i = 0; i < PQ_L2_SIZE; i++) {
vm_coloring_init();
for (i = 0; i < PQ_NUMCOLORS; ++i) {
vm_page_queues[PQ_FREE+i].cnt = &cnt.v_free_count;
}
for (i = 0; i < PQ_L2_SIZE; i++) {
vm_page_queues[PQ_CACHE+i].cnt = &cnt.v_cache_count;
for (i = 0; i < PQ_NUMCOLORS; ++i) {
vm_page_queues[PQ_CACHE + i].cnt = &cnt.v_cache_count;
}
vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count;
vm_page_queues[PQ_ACTIVE].cnt = &cnt.v_active_count;
@ -71,7 +154,7 @@ vm_pageq_init(void)
void
vm_pageq_requeue(vm_page_t m)
{
int queue = m->queue;
int queue = VM_PAGE_GETQUEUE(m);
struct vpgqueues *vpq;
if (queue != PQ_NONE) {
@ -90,7 +173,7 @@ vm_pageq_enqueue(int queue, vm_page_t m)
struct vpgqueues *vpq;
vpq = &vm_page_queues[queue];
m->queue = queue;
VM_PAGE_SETQUEUE2(m, queue);
TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
++*vpq->cnt;
++vpq->lcnt;
@ -142,7 +225,7 @@ vm_pageq_add_new_page(vm_paddr_t pa)
m = PHYS_TO_VM_PAGE(pa);
m->phys_addr = pa;
m->flags = 0;
m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
m->pc = (pa >> PAGE_SHIFT) & PQ_COLORMASK;
pmap_page_init(m);
vm_pageq_enqueue(m->pc + PQ_FREE, m);
return (m);
@ -159,11 +242,11 @@ vm_pageq_add_new_page(vm_paddr_t pa)
void
vm_pageq_remove_nowakeup(vm_page_t m)
{
int queue = m->queue;
int queue = VM_PAGE_GETQUEUE(m);
struct vpgqueues *pq;
if (queue != PQ_NONE) {
pq = &vm_page_queues[queue];
m->queue = PQ_NONE;
VM_PAGE_SETQUEUE2(m, PQ_NONE);
TAILQ_REMOVE(&pq->pl, m, pageq);
(*pq->cnt)--;
pq->lcnt--;
@ -181,11 +264,11 @@ vm_pageq_remove_nowakeup(vm_page_t m)
void
vm_pageq_remove(vm_page_t m)
{
int queue = m->queue;
int queue = VM_PAGE_GETQUEUE(m);
struct vpgqueues *pq;
if (queue != PQ_NONE) {
m->queue = PQ_NONE;
VM_PAGE_SETQUEUE2(m, PQ_NONE);
pq = &vm_page_queues[queue];
TAILQ_REMOVE(&pq->pl, m, pageq);
(*pq->cnt)--;
@ -197,7 +280,7 @@ vm_pageq_remove(vm_page_t m)
}
}
#if PQ_L2_SIZE > 1
#ifndef PQ_NOOPT
/*
* vm_pageq_find:
@ -230,37 +313,44 @@ _vm_pageq_find(int basequeue, int index)
* same place. Even though this is not totally optimal, we've already
* blown it by missing the cache case so we do not care.
*/
for (i = PQ_L2_SIZE / 2; i > 0; --i) {
if ((m = TAILQ_FIRST(&pq[(index + i) & PQ_L2_MASK].pl)) != NULL)
for (i = PQ_NUMCOLORS / 2; i > 0; --i) {
if ((m = TAILQ_FIRST(&pq[(index + i) & PQ_COLORMASK].pl)) \
!= NULL)
break;
if ((m = TAILQ_FIRST(&pq[(index - i) & PQ_L2_MASK].pl)) != NULL)
if ((m = TAILQ_FIRST(&pq[(index - i) & PQ_COLORMASK].pl)) \
!= NULL)
break;
}
return (m);
}
#endif /* PQ_L2_SIZE > 1 */
#endif /* PQ_NOOPT */
vm_page_t
vm_pageq_find(int basequeue, int index, boolean_t prefer_zero)
{
vm_page_t m;
#if PQ_L2_SIZE > 1
if (prefer_zero) {
m = TAILQ_LAST(&vm_page_queues[basequeue+index].pl, pglist);
} else {
m = TAILQ_FIRST(&vm_page_queues[basequeue+index].pl);
}
if (m == NULL) {
m = _vm_pageq_find(basequeue, index);
#ifndef PQ_NOOPT
if (PQ_NUMCOLORS > 1) {
if (prefer_zero) {
m = TAILQ_LAST(&vm_page_queues[basequeue+index].pl, \
pglist);
} else {
m = TAILQ_FIRST(&vm_page_queues[basequeue+index].pl);
}
if (m == NULL) {
m = _vm_pageq_find(basequeue, index);
}
} else {
#endif
if (prefer_zero) {
m = TAILQ_LAST(&vm_page_queues[basequeue].pl, pglist);
} else {
m = TAILQ_FIRST(&vm_page_queues[basequeue].pl);
}
#ifndef PQ_NOOPT
}
#else
if (prefer_zero) {
m = TAILQ_LAST(&vm_page_queues[basequeue].pl, pglist);
} else {
m = TAILQ_FIRST(&vm_page_queues[basequeue].pl);
}
#endif
return (m);
}

View File

@ -120,7 +120,7 @@ vm_page_zero_idle(void)
if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
zero_state = 1;
}
free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
free_rover = (free_rover + PQ_PRIME2) & PQ_COLORMASK;
mtx_unlock_spin(&vm_page_queue_free_mtx);
return (1);
}